// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/ptrace.c
 *
 * By Ross Biro 1/23/92
 * edited by Linus Torvalds
 * ARM modifications Copyright (C) 2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */
#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/nospec.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/seccomp.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>
#include <linux/elf.h>
#include <linux/rseq.h>

#include <asm/compat.h>
#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
#include <asm/fpsimd.h>
#include <asm/mte.h>
#include <asm/pointer_auth.h>
#include <asm/stacktrace.h>
#include <asm/syscall.h>
#include <asm/traps.h>
#include <asm/system_misc.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
#define GPR_OFFSET_NAME(r) \
	{.name = "x" #r, .offset = offsetof(struct pt_regs, regs[r])}

static const struct pt_regs_offset regoffset_table[] = {
	GPR_OFFSET_NAME(0),
	GPR_OFFSET_NAME(1),
	/* ... one GPR_OFFSET_NAME() entry per register, up to ... */
	GPR_OFFSET_NAME(30),
	{.name = "lr", .offset = offsetof(struct pt_regs, regs[30])},
	REG_OFFSET_NAME(sp),
	REG_OFFSET_NAME(pc),
	REG_OFFSET_NAME(pstate),
	REG_OFFSET_END,
};
/**
 * regs_query_register_offset() - query register offset from its name
 * @name:	the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;

	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}
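/*
 * Usage sketch (illustrative, not part of the original file): kprobes-style
 * code resolves a register by name and then reads it via the offset. This
 * assumes a valid pt_regs pointer; regs_get_register() is the generic
 * helper from <asm/ptrace.h>.
 *
 *	int offset = regs_query_register_offset("x2");
 *	if (offset >= 0)
 *		val = regs_get_register(regs, offset);
 */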
/**
 * regs_within_kernel_stack() - check the address in the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @addr:	address which is checked.
 *
 * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
 * If @addr is within the kernel stack, it returns true. If not, returns false.
 */
static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
	return ((addr & ~(THREAD_SIZE - 1)) ==
		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
		on_irq_stack(addr, sizeof(unsigned long));
}
/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
 * is specified by @regs. If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);

	addr += n;
	if (regs_within_kernel_stack(regs, (unsigned long)addr))
		return READ_ONCE_NOCHECK(*addr);
	else
		return 0;
}
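/*
 * Usage sketch (illustrative): a probe handler fetching the second word on
 * the kernel stack at probe time. A 0 return can mean either a zero entry
 * or an out-of-stack access, so callers must treat it as best-effort.
 *
 *	unsigned long arg = regs_get_kernel_stack_nth(regs, 2);
 */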
/*
 * TODO: does not yet catch signals sent when the child dies.
 * in exit.c or in signal.c.
 */

/*
 * Called by kernel/ptrace.c when detaching.
 */
void ptrace_disable(struct task_struct *child)
{
	/*
	 * This would be better off in core code, but PTRACE_DETACH has
	 * grown its fair share of arch-specific warts and changing it
	 * is likely to cause regressions on obscure architectures.
	 */
	user_disable_single_step(child);
}
#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
 * Handle hitting a HW-breakpoint.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
	const char *desc = "Hardware breakpoint trap (ptrace)";

#ifdef CONFIG_COMPAT
	if (is_compat_task()) {
		int si_errno = 0;
		int i;

		for (i = 0; i < ARM_MAX_BRP; ++i) {
			if (current->thread.debug.hbp_break[i] == bp) {
				si_errno = (i << 1) + 1;
				break;
			}
		}

		for (i = 0; i < ARM_MAX_WRP; ++i) {
			if (current->thread.debug.hbp_watch[i] == bp) {
				si_errno = -((i << 1) + 1);
				break;
			}
		}
		arm64_force_sig_ptrace_errno_trap(si_errno, bkpt->trigger,
						  desc);
		return;
	}
#endif
	arm64_force_sig_fault(SIGTRAP, TRAP_HWBKPT, bkpt->trigger, desc);
}
/*
 * Unregister breakpoints from this task and reset the pointers in
 * the thread_struct.
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < ARM_MAX_BRP; i++) {
		if (t->debug.hbp_break[i]) {
			unregister_hw_breakpoint(t->debug.hbp_break[i]);
			t->debug.hbp_break[i] = NULL;
		}
	}

	for (i = 0; i < ARM_MAX_WRP; i++) {
		if (t->debug.hbp_watch[i]) {
			unregister_hw_breakpoint(t->debug.hbp_watch[i]);
			t->debug.hbp_watch[i] = NULL;
		}
	}
}

void ptrace_hw_copy_thread(struct task_struct *tsk)
{
	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
}
static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
					       struct task_struct *tsk,
					       unsigned long idx)
{
	struct perf_event *bp = ERR_PTR(-EINVAL);

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx >= ARM_MAX_BRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_BRP);
		bp = tsk->thread.debug.hbp_break[idx];
		break;
	case NT_ARM_HW_WATCH:
		if (idx >= ARM_MAX_WRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_WRP);
		bp = tsk->thread.debug.hbp_watch[idx];
		break;
	}

out:
	return bp;
}
static int ptrace_hbp_set_event(unsigned int note_type,
				struct task_struct *tsk,
				unsigned long idx,
				struct perf_event *bp)
{
	int err = -EINVAL;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx >= ARM_MAX_BRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_BRP);
		tsk->thread.debug.hbp_break[idx] = bp;
		err = 0;
		break;
	case NT_ARM_HW_WATCH:
		if (idx >= ARM_MAX_WRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_WRP);
		tsk->thread.debug.hbp_watch[idx] = bp;
		err = 0;
		break;
	}

out:
	return err;
}
static struct perf_event *ptrace_hbp_create(unsigned int note_type,
					    struct task_struct *tsk,
					    unsigned long idx)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	int err, type;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		type = HW_BREAKPOINT_X;
		break;
	case NT_ARM_HW_WATCH:
		type = HW_BREAKPOINT_RW;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	ptrace_breakpoint_init(&attr);

	/*
	 * Initialise fields to sane defaults
	 * (i.e. values that will pass validation).
	 */
	attr.bp_addr	= 0;
	attr.bp_len	= HW_BREAKPOINT_LEN_4;
	attr.bp_type	= type;
	attr.disabled	= 1;

	bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
	if (IS_ERR(bp))
		return bp;

	err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
	if (err)
		return ERR_PTR(err);

	return bp;
}
static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
				     struct arch_hw_breakpoint_ctrl ctrl,
				     struct perf_event_attr *attr)
{
	int err, len, type, offset, disabled = !ctrl.enabled;

	attr->disabled = disabled;
	if (disabled)
		return 0;

	err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
	if (err)
		return err;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if ((type & HW_BREAKPOINT_X) != type)
			return -EINVAL;
		break;
	case NT_ARM_HW_WATCH:
		if ((type & HW_BREAKPOINT_RW) != type)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	attr->bp_len	= len;
	attr->bp_type	= type;
	attr->bp_addr	+= offset;

	return 0;
}
static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info)
{
	u8 num;
	u32 reg = 0;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		num = hw_breakpoint_slots(TYPE_INST);
		break;
	case NT_ARM_HW_WATCH:
		num = hw_breakpoint_slots(TYPE_DATA);
		break;
	default:
		return -EINVAL;
	}

	reg |= debug_monitors_arch();
	reg <<= 8;
	reg |= num;

	*info = reg;
	return 0;
}
static int ptrace_hbp_get_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 *ctrl)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
	return 0;
}

static int ptrace_hbp_get_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 *addr)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*addr = bp ? counter_arch_bp(bp)->address : 0;
	return 0;
}
static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
							struct task_struct *tsk,
							unsigned long idx)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (!bp)
		bp = ptrace_hbp_create(note_type, tsk, idx);

	return bp;
}
static int ptrace_hbp_set_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 uctrl)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint_ctrl ctrl;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	decode_ctrl_reg(uctrl, &ctrl);
	err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
	if (err)
		return err;

	return modify_user_hw_breakpoint(bp, &attr);
}

static int ptrace_hbp_set_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 addr)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	attr.bp_addr = addr;
	err = modify_user_hw_breakpoint(bp, &attr);
	return err;
}
#define PTRACE_HBP_ADDR_SZ	sizeof(u64)
#define PTRACE_HBP_CTRL_SZ	sizeof(u32)
#define PTRACE_HBP_PAD_SZ	sizeof(u32)

static int hw_break_get(struct task_struct *target,
			const struct user_regset *regset,
			struct membuf to)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0;
	u32 info, ctrl;
	u64 addr;

	/* Resource info */
	ret = ptrace_hbp_get_resource_info(note_type, &info);
	if (ret)
		return ret;

	membuf_write(&to, &info, sizeof(info));
	membuf_zero(&to, sizeof(u32));
	/* (address, ctrl) registers */
	while (to.left) {
		ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
		if (ret)
			return ret;
		ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
		if (ret)
			return ret;
		membuf_store(&to, addr);
		membuf_store(&to, ctrl);
		membuf_zero(&to, sizeof(u32));
		idx++;
	}

	return 0;
}
static int hw_break_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 ctrl;
	u64 addr;

	/* Resource info and pad */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);

	/* (address, ctrl) registers */
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		if (count < PTRACE_HBP_ADDR_SZ)
			return -EINVAL;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
					 offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		if (!count)
			break;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
					 offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					  offset, offset + PTRACE_HBP_PAD_SZ);
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
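/*
 * Tracer-side sketch (illustrative; assumes a stopped tracee and the
 * userspace <sys/ptrace.h>/<elf.h> headers): installing one watchpoint by
 * writing the first (address, ctrl) pair of user_hwdebug_state. watch_addr
 * and ctrl_word are placeholders for a real address and an encoded
 * len/type/enable control word.
 *
 *	struct user_hwdebug_state st = { 0 };
 *	struct iovec iov = { .iov_base = &st };
 *	st.dbg_regs[0].addr = watch_addr;
 *	st.dbg_regs[0].ctrl = ctrl_word;
 *	iov.iov_len = offsetof(struct user_hwdebug_state, dbg_regs) +
 *		      sizeof(st.dbg_regs[0]);
 *	ptrace(PTRACE_SETREGSET, pid, NT_ARM_HW_WATCH, &iov);
 */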
static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs;

	return membuf_write(&to, uregs, sizeof(*uregs));
}

static int gpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_pt_regs newregs = task_pt_regs(target)->user_regs;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
	if (ret)
		return ret;

	if (!valid_user_regs(&newregs, target))
		return -EINVAL;

	task_pt_regs(target)->user_regs = newregs;
	return 0;
}
static int fpr_active(struct task_struct *target, const struct user_regset *regset)
{
	if (!system_supports_fpsimd())
		return 0;
	return regset->n;
}

/*
 * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
 */
static int __fpr_get(struct task_struct *target,
		     const struct user_regset *regset,
		     struct membuf to)
{
	struct user_fpsimd_state *uregs;

	sve_sync_to_fpsimd(target);

	uregs = &target->thread.uw.fpsimd_state;

	return membuf_write(&to, uregs, sizeof(*uregs));
}

static int fpr_get(struct task_struct *target, const struct user_regset *regset,
		   struct membuf to)
{
	if (!system_supports_fpsimd())
		return -EINVAL;

	if (target == current)
		fpsimd_preserve_current_state();

	return __fpr_get(target, regset, to);
}
static int __fpr_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf,
		     unsigned int start_pos)
{
	int ret;
	struct user_fpsimd_state newstate;

	/*
	 * Ensure target->thread.uw.fpsimd_state is up to date, so that a
	 * short copyin can't resurrect stale data.
	 */
	sve_sync_to_fpsimd(target);

	newstate = target->thread.uw.fpsimd_state;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate,
				 start_pos, start_pos + sizeof(newstate));
	if (ret)
		return ret;

	target->thread.uw.fpsimd_state = newstate;

	return ret;
}

static int fpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;

	if (!system_supports_fpsimd())
		return -EINVAL;

	ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0);
	if (ret)
		return ret;

	sve_sync_from_fpsimd_zeropad(target);
	fpsimd_flush_task_state(target);

	return ret;
}
static int tls_get(struct task_struct *target, const struct user_regset *regset,
		   struct membuf to)
{
	int ret;

	if (target == current)
		tls_preserve_current_state();

	ret = membuf_store(&to, target->thread.uw.tp_value);
	if (system_supports_tpidr2())
		ret = membuf_store(&to, target->thread.tpidr2_el0);
	else
		ret = membuf_zero(&to, sizeof(u64));

	return ret;
}

static int tls_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	unsigned long tls[2];

	tls[0] = target->thread.uw.tp_value;
	if (system_supports_tpidr2())
		tls[1] = target->thread.tpidr2_el0;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, tls, 0, count);
	if (ret)
		return ret;

	target->thread.uw.tp_value = tls[0];
	if (system_supports_tpidr2())
		target->thread.tpidr2_el0 = tls[1];

	return ret;
}
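/*
 * Tracer-side sketch (illustrative): reading TPIDR_EL0 (and TPIDR2_EL0
 * where implemented) of a stopped tracee through the NT_ARM_TLS regset.
 *
 *	uint64_t tls[2] = { 0, 0 };
 *	struct iovec iov = { .iov_base = tls, .iov_len = sizeof(tls) };
 *	ptrace(PTRACE_GETREGSET, pid, NT_ARM_TLS, &iov);
 */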
static int fpmr_get(struct task_struct *target, const struct user_regset *regset,
		    struct membuf to)
{
	if (!system_supports_fpmr())
		return -EINVAL;

	if (target == current)
		fpsimd_preserve_current_state();

	return membuf_store(&to, target->thread.uw.fpmr);
}

static int fpmr_set(struct task_struct *target, const struct user_regset *regset,
		    unsigned int pos, unsigned int count,
		    const void *kbuf, const void __user *ubuf)
{
	int ret;
	unsigned long fpmr;

	if (!system_supports_fpmr())
		return -EINVAL;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpmr, 0, count);
	if (ret)
		return ret;

	target->thread.uw.fpmr = fpmr;

	fpsimd_flush_task_state(target);

	return 0;
}
static int system_call_get(struct task_struct *target,
			   const struct user_regset *regset,
			   struct membuf to)
{
	return membuf_store(&to, task_pt_regs(target)->syscallno);
}

static int system_call_set(struct task_struct *target,
			   const struct user_regset *regset,
			   unsigned int pos, unsigned int count,
			   const void *kbuf, const void __user *ubuf)
{
	int syscallno = task_pt_regs(target)->syscallno;
	int ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
	if (ret)
		return ret;

	task_pt_regs(target)->syscallno = syscallno;
	return ret;
}
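/*
 * Tracer-side sketch (illustrative): rewriting the pending syscall number
 * at a syscall-entry stop; writing -1 makes the kernel skip the syscall.
 *
 *	int nr = -1;
 *	struct iovec iov = { .iov_base = &nr, .iov_len = sizeof(nr) };
 *	ptrace(PTRACE_SETREGSET, pid, NT_ARM_SYSTEM_CALL, &iov);
 */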
#ifdef CONFIG_ARM64_SVE

static void sve_init_header_from_task(struct user_sve_header *header,
				      struct task_struct *target,
				      enum vec_type type)
{
	unsigned int vq;
	bool active;
	enum vec_type task_type;

	memset(header, 0, sizeof(*header));

	/* Check if the requested registers are active for the task */
	if (thread_sm_enabled(&target->thread))
		task_type = ARM64_VEC_SME;
	else
		task_type = ARM64_VEC_SVE;
	active = (task_type == type);

	switch (type) {
	case ARM64_VEC_SVE:
		if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT))
			header->flags |= SVE_PT_VL_INHERIT;
		break;
	case ARM64_VEC_SME:
		if (test_tsk_thread_flag(target, TIF_SME_VL_INHERIT))
			header->flags |= SVE_PT_VL_INHERIT;
		break;
	default:
		WARN_ON_ONCE(1);
		return;
	}

	if (active) {
		if (target->thread.fp_type == FP_STATE_FPSIMD) {
			header->flags |= SVE_PT_REGS_FPSIMD;
		} else {
			header->flags |= SVE_PT_REGS_SVE;
		}
	}

	header->vl = task_get_vl(target, type);
	vq = sve_vq_from_vl(header->vl);

	header->max_vl = vec_max_vl(type);
	header->size = SVE_PT_SIZE(vq, header->flags);
	header->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl),
				       SVE_PT_REGS_SVE);
}

static unsigned int sve_size_from_header(struct user_sve_header const *header)
{
	return ALIGN(header->size, SVE_VQ_BYTES);
}
static int sve_get_common(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf to,
			  enum vec_type type)
{
	struct user_sve_header header;
	unsigned int vq;
	unsigned long start, end;

	/* Header */
	sve_init_header_from_task(&header, target, type);
	vq = sve_vq_from_vl(header.vl);

	membuf_write(&to, &header, sizeof(header));

	if (target == current)
		fpsimd_preserve_current_state();

	BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
	BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));

	switch ((header.flags & SVE_PT_REGS_MASK)) {
	case SVE_PT_REGS_FPSIMD:
		return __fpr_get(target, regset, to);

	case SVE_PT_REGS_SVE:
		start = SVE_PT_SVE_OFFSET;
		end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
		membuf_write(&to, target->thread.sve_state, end - start);

		start = end;
		end = SVE_PT_SVE_FPSR_OFFSET(vq);
		membuf_zero(&to, end - start);

		/*
		 * Copy fpsr, and fpcr which must follow contiguously in
		 * struct fpsimd_state:
		 */
		start = end;
		end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
		membuf_write(&to, &target->thread.uw.fpsimd_state.fpsr,
			     end - start);

		start = end;
		end = sve_size_from_header(&header);
		return membuf_zero(&to, end - start);

	default:
		return 0;
	}
}
static int sve_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	if (!system_supports_sve())
		return -EINVAL;

	return sve_get_common(target, regset, to, ARM64_VEC_SVE);
}
static int sve_set_common(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf,
			  enum vec_type type)
{
	int ret;
	struct user_sve_header header;
	unsigned int vq;
	unsigned long start, end;

	/* Header */
	if (count < sizeof(header))
		return -EINVAL;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header,
				 0, sizeof(header));
	if (ret)
		goto out;

	/*
	 * Apart from SVE_PT_REGS_MASK, all SVE_PT_* flags are consumed by
	 * vec_set_vector_length(), which will also validate them for us:
	 */
	ret = vec_set_vector_length(target, type, header.vl,
		((unsigned long)header.flags & ~SVE_PT_REGS_MASK) << 16);
	if (ret)
		goto out;

	/* Actual VL set may be less than the user asked for: */
	vq = sve_vq_from_vl(task_get_vl(target, type));

	/* Enter/exit streaming mode */
	if (system_supports_sme()) {
		u64 old_svcr = target->thread.svcr;

		switch (type) {
		case ARM64_VEC_SVE:
			target->thread.svcr &= ~SVCR_SM_MASK;
			break;
		case ARM64_VEC_SME:
			target->thread.svcr |= SVCR_SM_MASK;

			/*
			 * Disable traps and ensure there is SME storage but
			 * preserve any currently set values in ZA/ZT.
			 */
			sme_alloc(target, false);
			set_tsk_thread_flag(target, TIF_SME);
			break;
		default:
			WARN_ON_ONCE(1);
			ret = -EINVAL;
			goto out;
		}

		/*
		 * If we switched then invalidate any existing SVE
		 * state and ensure there's storage.
		 */
		if (target->thread.svcr != old_svcr)
			sve_alloc(target, true);
	}

	/* Registers: FPSIMD-only case */

	BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
	if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD) {
		ret = __fpr_set(target, regset, pos, count, kbuf, ubuf,
				SVE_PT_FPSIMD_OFFSET);
		clear_tsk_thread_flag(target, TIF_SVE);
		target->thread.fp_type = FP_STATE_FPSIMD;
		goto out;
	}

	/*
	 * Otherwise: no registers or full SVE case. For backwards
	 * compatibility reasons we treat empty flags as SVE registers.
	 */

	/*
	 * If setting a different VL from the requested VL and there is
	 * register data, the data layout will be wrong: don't even
	 * try to set the registers in this case.
	 */
	if (count && vq != sve_vq_from_vl(header.vl)) {
		ret = -EIO;
		goto out;
	}

	sve_alloc(target, true);
	if (!target->thread.sve_state) {
		ret = -ENOMEM;
		clear_tsk_thread_flag(target, TIF_SVE);
		target->thread.fp_type = FP_STATE_FPSIMD;
		goto out;
	}

	/*
	 * Ensure target->thread.sve_state is up to date with target's
	 * FPSIMD regs, so that a short copyin leaves trailing
	 * registers unmodified. Only enable SVE if we are
	 * configuring normal SVE, a system with streaming SVE may not
	 * have normal SVE.
	 */
	fpsimd_sync_to_sve(target);
	if (type == ARM64_VEC_SVE)
		set_tsk_thread_flag(target, TIF_SVE);
	target->thread.fp_type = FP_STATE_SVE;

	BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
	start = SVE_PT_SVE_OFFSET;
	end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 target->thread.sve_state,
				 start, end);
	if (ret)
		goto out;

	start = end;
	end = SVE_PT_SVE_FPSR_OFFSET(vq);
	user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, start, end);

	/*
	 * Copy fpsr, and fpcr which must follow contiguously in
	 * struct fpsimd_state:
	 */
	start = end;
	end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.uw.fpsimd_state.fpsr,
				 start, end);

out:
	fpsimd_flush_task_state(target);
	return ret;
}
static int sve_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	if (!system_supports_sve())
		return -EINVAL;

	return sve_set_common(target, regset, pos, count, kbuf, ubuf,
			      ARM64_VEC_SVE);
}

#endif /* CONFIG_ARM64_SVE */
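/*
 * Tracer-side sketch (illustrative): the NT_ARM_SVE regset is variably
 * sized, so it is read in two steps: fetch the header first, then re-read
 * with the size the header reports so the buffer matches the tracee's
 * vector length.
 *
 *	struct user_sve_header hdr;
 *	struct iovec iov = { .iov_base = &hdr, .iov_len = sizeof(hdr) };
 *	ptrace(PTRACE_GETREGSET, pid, NT_ARM_SVE, &iov);
 *	iov.iov_base = malloc(hdr.size);
 *	iov.iov_len = hdr.size;
 *	ptrace(PTRACE_GETREGSET, pid, NT_ARM_SVE, &iov);
 */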
#ifdef CONFIG_ARM64_SME

static int ssve_get(struct task_struct *target,
		    const struct user_regset *regset,
		    struct membuf to)
{
	if (!system_supports_sme())
		return -EINVAL;

	return sve_get_common(target, regset, to, ARM64_VEC_SME);
}

static int ssve_set(struct task_struct *target,
		    const struct user_regset *regset,
		    unsigned int pos, unsigned int count,
		    const void *kbuf, const void __user *ubuf)
{
	if (!system_supports_sme())
		return -EINVAL;

	return sve_set_common(target, regset, pos, count, kbuf, ubuf,
			      ARM64_VEC_SME);
}
static int za_get(struct task_struct *target,
		  const struct user_regset *regset,
		  struct membuf to)
{
	struct user_za_header header;
	unsigned int vq;
	unsigned long start, end;

	if (!system_supports_sme())
		return -EINVAL;

	/* Header */
	memset(&header, 0, sizeof(header));

	if (test_tsk_thread_flag(target, TIF_SME_VL_INHERIT))
		header.flags |= ZA_PT_VL_INHERIT;

	header.vl = task_get_sme_vl(target);
	vq = sve_vq_from_vl(header.vl);
	header.max_vl = sme_max_vl();
	header.max_size = ZA_PT_SIZE(vq);

	/* If ZA is not active there is only the header */
	if (thread_za_enabled(&target->thread))
		header.size = ZA_PT_SIZE(vq);
	else
		header.size = ZA_PT_ZA_OFFSET;

	membuf_write(&to, &header, sizeof(header));

	BUILD_BUG_ON(ZA_PT_ZA_OFFSET != sizeof(header));
	end = ZA_PT_ZA_OFFSET;

	if (target == current)
		fpsimd_preserve_current_state();

	/* Any register data to include? */
	if (thread_za_enabled(&target->thread)) {
		start = end;
		end = ZA_PT_SIZE(vq);
		membuf_write(&to, target->thread.sme_state, end - start);
	}

	/* Zero any trailing padding */
	start = end;
	end = ALIGN(header.size, SVE_VQ_BYTES);
	return membuf_zero(&to, end - start);
}
static int za_set(struct task_struct *target,
		  const struct user_regset *regset,
		  unsigned int pos, unsigned int count,
		  const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_za_header header;
	unsigned int vq;
	unsigned long start, end;

	if (!system_supports_sme())
		return -EINVAL;

	/* Header */
	if (count < sizeof(header))
		return -EINVAL;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header,
				 0, sizeof(header));
	if (ret)
		goto out;

	/*
	 * All current ZA_PT_* flags are consumed by
	 * vec_set_vector_length(), which will also validate them for
	 * us:
	 */
	ret = vec_set_vector_length(target, ARM64_VEC_SME, header.vl,
		((unsigned long)header.flags) << 16);
	if (ret)
		goto out;

	/* Actual VL set may be less than the user asked for: */
	vq = sve_vq_from_vl(task_get_sme_vl(target));

	/* Ensure there is some SVE storage for streaming mode */
	if (!target->thread.sve_state) {
		sve_alloc(target, false);
		if (!target->thread.sve_state) {
			ret = -ENOMEM;
			goto out;
		}
	}

	/*
	 * Only flush the storage if PSTATE.ZA was not already set,
	 * otherwise preserve any existing data.
	 */
	sme_alloc(target, !thread_za_enabled(&target->thread));
	if (!target->thread.sme_state)
		return -ENOMEM;

	/* If there is no data then disable ZA */
	if (!count) {
		target->thread.svcr &= ~SVCR_ZA_MASK;
		goto out;
	}

	/*
	 * If setting a different VL from the requested VL and there is
	 * register data, the data layout will be wrong: don't even
	 * try to set the registers in this case.
	 */
	if (vq != sve_vq_from_vl(header.vl)) {
		ret = -EIO;
		goto out;
	}

	BUILD_BUG_ON(ZA_PT_ZA_OFFSET != sizeof(header));
	start = ZA_PT_ZA_OFFSET;
	end = ZA_PT_SIZE(vq);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 target->thread.sme_state,
				 start, end);
	if (ret)
		goto out;

	/* Mark ZA as active and let userspace use it */
	set_tsk_thread_flag(target, TIF_SME);
	target->thread.svcr |= SVCR_ZA_MASK;

out:
	fpsimd_flush_task_state(target);
	return ret;
}
static int zt_get(struct task_struct *target,
		  const struct user_regset *regset,
		  struct membuf to)
{
	if (!system_supports_sme2())
		return -EINVAL;

	/*
	 * If PSTATE.ZA is not set then ZT will be zeroed when it is
	 * enabled so report the current register value as zero.
	 */
	if (thread_za_enabled(&target->thread))
		membuf_write(&to, thread_zt_state(&target->thread),
			     ZT_SIG_REG_BYTES);
	else
		membuf_zero(&to, ZT_SIG_REG_BYTES);

	return 0;
}

static int zt_set(struct task_struct *target,
		  const struct user_regset *regset,
		  unsigned int pos, unsigned int count,
		  const void *kbuf, const void __user *ubuf)
{
	int ret;

	if (!system_supports_sme2())
		return -EINVAL;

	/* Ensure SVE storage in case this is first use of SME */
	sve_alloc(target, false);
	if (!target->thread.sve_state)
		return -ENOMEM;

	if (!thread_za_enabled(&target->thread)) {
		sme_alloc(target, true);
		if (!target->thread.sme_state)
			return -ENOMEM;
	}

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 thread_zt_state(&target->thread),
				 0, ZT_SIG_REG_BYTES);
	if (ret == 0) {
		target->thread.svcr |= SVCR_ZA_MASK;
		set_tsk_thread_flag(target, TIF_SME);
	}

	fpsimd_flush_task_state(target);

	return ret;
}

#endif /* CONFIG_ARM64_SME */
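/*
 * Tracer-side sketch (illustrative): probing whether ZA is live by reading
 * just the NT_ARM_ZA header; a header.size larger than the header itself
 * means register data follows.
 *
 *	struct user_za_header hdr;
 *	struct iovec iov = { .iov_base = &hdr, .iov_len = sizeof(hdr) };
 *	ptrace(PTRACE_GETREGSET, pid, NT_ARM_ZA, &iov);
 *	bool za_live = hdr.size > sizeof(hdr);
 */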
#ifdef CONFIG_ARM64_PTR_AUTH
static int pac_mask_get(struct task_struct *target,
			const struct user_regset *regset,
			struct membuf to)
{
	/*
	 * The PAC bits can differ across data and instruction pointers
	 * depending on TCR_EL1.TBID*, which we may make use of in future, so
	 * we expose separate masks.
	 */
	unsigned long mask = ptrauth_user_pac_mask();
	struct user_pac_mask uregs = {
		.data_mask = mask,
		.insn_mask = mask,
	};

	if (!system_supports_address_auth())
		return -EINVAL;

	return membuf_write(&to, &uregs, sizeof(uregs));
}

static int pac_enabled_keys_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	long enabled_keys = ptrauth_get_enabled_keys(target);

	if (IS_ERR_VALUE(enabled_keys))
		return enabled_keys;

	return membuf_write(&to, &enabled_keys, sizeof(enabled_keys));
}
static int pac_enabled_keys_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	int ret;
	long enabled_keys = ptrauth_get_enabled_keys(target);

	if (IS_ERR_VALUE(enabled_keys))
		return enabled_keys;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &enabled_keys, 0,
				 sizeof(long));
	if (ret)
		return ret;

	return ptrauth_set_enabled_keys(target, PR_PAC_ENABLED_KEYS_MASK,
					enabled_keys);
}
#ifdef CONFIG_CHECKPOINT_RESTORE
static __uint128_t pac_key_to_user(const struct ptrauth_key *key)
{
	return (__uint128_t)key->hi << 64 | key->lo;
}

static struct ptrauth_key pac_key_from_user(__uint128_t ukey)
{
	struct ptrauth_key key = {
		.lo = (unsigned long)ukey,
		.hi = (unsigned long)(ukey >> 64),
	};

	return key;
}

static void pac_address_keys_to_user(struct user_pac_address_keys *ukeys,
				     const struct ptrauth_keys_user *keys)
{
	ukeys->apiakey = pac_key_to_user(&keys->apia);
	ukeys->apibkey = pac_key_to_user(&keys->apib);
	ukeys->apdakey = pac_key_to_user(&keys->apda);
	ukeys->apdbkey = pac_key_to_user(&keys->apdb);
}

static void pac_address_keys_from_user(struct ptrauth_keys_user *keys,
				       const struct user_pac_address_keys *ukeys)
{
	keys->apia = pac_key_from_user(ukeys->apiakey);
	keys->apib = pac_key_from_user(ukeys->apibkey);
	keys->apda = pac_key_from_user(ukeys->apdakey);
	keys->apdb = pac_key_from_user(ukeys->apdbkey);
}
static int pac_address_keys_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_address_keys user_keys;

	if (!system_supports_address_auth())
		return -EINVAL;

	pac_address_keys_to_user(&user_keys, keys);

	return membuf_write(&to, &user_keys, sizeof(user_keys));
}

static int pac_address_keys_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_address_keys user_keys;
	int ret;

	if (!system_supports_address_auth())
		return -EINVAL;

	pac_address_keys_to_user(&user_keys, keys);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &user_keys, 0, -1);
	if (ret)
		return ret;
	pac_address_keys_from_user(keys, &user_keys);

	return 0;
}
static void pac_generic_keys_to_user(struct user_pac_generic_keys *ukeys,
				     const struct ptrauth_keys_user *keys)
{
	ukeys->apgakey = pac_key_to_user(&keys->apga);
}

static void pac_generic_keys_from_user(struct ptrauth_keys_user *keys,
				       const struct user_pac_generic_keys *ukeys)
{
	keys->apga = pac_key_from_user(ukeys->apgakey);
}

static int pac_generic_keys_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_generic_keys user_keys;

	if (!system_supports_generic_auth())
		return -EINVAL;

	pac_generic_keys_to_user(&user_keys, keys);

	return membuf_write(&to, &user_keys, sizeof(user_keys));
}

static int pac_generic_keys_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_generic_keys user_keys;
	int ret;

	if (!system_supports_generic_auth())
		return -EINVAL;

	pac_generic_keys_to_user(&user_keys, keys);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &user_keys, 0, -1);
	if (ret)
		return ret;
	pac_generic_keys_from_user(keys, &user_keys);

	return 0;
}
#endif /* CONFIG_CHECKPOINT_RESTORE */
#endif /* CONFIG_ARM64_PTR_AUTH */
#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
static int tagged_addr_ctrl_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	long ctrl = get_tagged_addr_ctrl(target);

	if (IS_ERR_VALUE(ctrl))
		return ctrl;

	return membuf_write(&to, &ctrl, sizeof(ctrl));
}

static int tagged_addr_ctrl_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	int ret;
	long ctrl;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, 0, -1);
	if (ret)
		return ret;

	return set_tagged_addr_ctrl(target, ctrl);
}
#endif
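/*
 * Tracer-side sketch (illustrative): this regset mirrors the tracee's
 * prctl(PR_SET_TAGGED_ADDR_CTRL) setting, so a debugger can flip the
 * tagged-address ABI on the tracee's behalf.
 *
 *	long ctrl = PR_TAGGED_ADDR_ENABLE;
 *	struct iovec iov = { .iov_base = &ctrl, .iov_len = sizeof(ctrl) };
 *	ptrace(PTRACE_SETREGSET, pid, NT_ARM_TAGGED_ADDR_CTRL, &iov);
 */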
enum aarch64_regset {
	REGSET_GPR,
	REGSET_FPR,
	REGSET_TLS,
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	REGSET_HW_BREAK,
	REGSET_HW_WATCH,
#endif
	REGSET_SYSTEM_CALL,
	REGSET_FPMR,
#ifdef CONFIG_ARM64_SVE
	REGSET_SVE,
#endif
#ifdef CONFIG_ARM64_SME
	REGSET_SSVE,
	REGSET_ZA,
	REGSET_ZT,
#endif
#ifdef CONFIG_ARM64_PTR_AUTH
	REGSET_PAC_MASK,
	REGSET_PAC_ENABLED_KEYS,
#ifdef CONFIG_CHECKPOINT_RESTORE
	REGSET_PACA_KEYS,
	REGSET_PACG_KEYS,
#endif
#endif
#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
	REGSET_TAGGED_ADDR_CTRL,
#endif
};
static const struct user_regset aarch64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_pt_regs) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.regset_get = gpr_get,
		.set = gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fpsimd_state) / sizeof(u32),
		/*
		 * We pretend we have 32-bit registers because the fpsr and
		 * fpcr are 32-bits wide.
		 */
		.size = sizeof(u32),
		.align = sizeof(u32),
		.active = fpr_active,
		.regset_get = fpr_get,
		.set = fpr_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 2,
		.size = sizeof(void *),
		.align = sizeof(void *),
		.regset_get = tls_get,
		.set = tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_ARM_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.regset_get = system_call_get,
		.set = system_call_set,
	},
	[REGSET_FPMR] = {
		.core_note_type = NT_ARM_FPMR,
		.n = 1,
		.size = sizeof(u64),
		.align = sizeof(u64),
		.regset_get = fpmr_get,
		.set = fpmr_set,
	},
#ifdef CONFIG_ARM64_SVE
	[REGSET_SVE] = { /* Scalable Vector Extension */
		.core_note_type = NT_ARM_SVE,
		.n = DIV_ROUND_UP(SVE_PT_SIZE(ARCH_SVE_VQ_MAX,
					      SVE_PT_REGS_SVE),
				  SVE_VQ_BYTES),
		.size = SVE_VQ_BYTES,
		.align = SVE_VQ_BYTES,
		.regset_get = sve_get,
		.set = sve_set,
	},
#endif
#ifdef CONFIG_ARM64_SME
	[REGSET_SSVE] = { /* Streaming mode SVE */
		.core_note_type = NT_ARM_SSVE,
		.n = DIV_ROUND_UP(SVE_PT_SIZE(SME_VQ_MAX, SVE_PT_REGS_SVE),
				  SVE_VQ_BYTES),
		.size = SVE_VQ_BYTES,
		.align = SVE_VQ_BYTES,
		.regset_get = ssve_get,
		.set = ssve_set,
	},
	[REGSET_ZA] = { /* SME ZA */
		.core_note_type = NT_ARM_ZA,
		/*
		 * ZA is a single register but it's variably sized and
		 * the ptrace core requires that the size of any data
		 * be an exact multiple of the configured register
		 * size so report as though we had SVE_VQ_BYTES
		 * registers. These values aren't exposed to
		 * userspace.
		 */
		.n = DIV_ROUND_UP(ZA_PT_SIZE(SME_VQ_MAX), SVE_VQ_BYTES),
		.size = SVE_VQ_BYTES,
		.align = SVE_VQ_BYTES,
		.regset_get = za_get,
		.set = za_set,
	},
	[REGSET_ZT] = { /* SME ZT */
		.core_note_type = NT_ARM_ZT,
		.n = 1,
		.size = ZT_SIG_REG_BYTES,
		.align = sizeof(u64),
		.regset_get = zt_get,
		.set = zt_set,
	},
#endif
#ifdef CONFIG_ARM64_PTR_AUTH
	[REGSET_PAC_MASK] = {
		.core_note_type = NT_ARM_PAC_MASK,
		.n = sizeof(struct user_pac_mask) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.regset_get = pac_mask_get,
		/* this cannot be set dynamically */
	},
	[REGSET_PAC_ENABLED_KEYS] = {
		.core_note_type = NT_ARM_PAC_ENABLED_KEYS,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.regset_get = pac_enabled_keys_get,
		.set = pac_enabled_keys_set,
	},
#ifdef CONFIG_CHECKPOINT_RESTORE
	[REGSET_PACA_KEYS] = {
		.core_note_type = NT_ARM_PACA_KEYS,
		.n = sizeof(struct user_pac_address_keys) / sizeof(__uint128_t),
		.size = sizeof(__uint128_t),
		.align = sizeof(__uint128_t),
		.regset_get = pac_address_keys_get,
		.set = pac_address_keys_set,
	},
	[REGSET_PACG_KEYS] = {
		.core_note_type = NT_ARM_PACG_KEYS,
		.n = sizeof(struct user_pac_generic_keys) / sizeof(__uint128_t),
		.size = sizeof(__uint128_t),
		.align = sizeof(__uint128_t),
		.regset_get = pac_generic_keys_get,
		.set = pac_generic_keys_set,
	},
#endif
#endif
#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
	[REGSET_TAGGED_ADDR_CTRL] = {
		.core_note_type = NT_ARM_TAGGED_ADDR_CTRL,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.regset_get = tagged_addr_ctrl_get,
		.set = tagged_addr_ctrl_set,
	},
#endif
};
static const struct user_regset_view user_aarch64_view = {
	.name = "aarch64", .e_machine = EM_AARCH64,
	.regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets)
};
enum compat_regset {
	REGSET_COMPAT_GPR,
	REGSET_COMPAT_VFP,
};

static inline compat_ulong_t compat_get_user_reg(struct task_struct *task, int idx)
{
	struct pt_regs *regs = task_pt_regs(task);

	switch (idx) {
	case 15:
		return regs->pc;
	case 16:
		return pstate_to_compat_psr(regs->pstate);
	case 17:
		return regs->orig_x0;
	default:
		return regs->regs[idx];
	}
}
static int compat_gpr_get(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf to)
{
	int i = 0;

	while (to.left)
		membuf_store(&to, compat_get_user_reg(target, i++));
	return 0;
}
static int compat_gpr_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct pt_regs newregs;
	int ret = 0;
	unsigned int i, start, num_regs;

	/* Calculate the number of AArch32 registers contained in count */
	num_regs = count / regset->size;

	/* Convert pos into a register number */
	start = pos / regset->size;

	if (start + num_regs > regset->n)
		return -EIO;

	newregs = *task_pt_regs(target);

	for (i = 0; i < num_regs; ++i) {
		unsigned int idx = start + i;
		compat_ulong_t reg;

		if (kbuf) {
			memcpy(&reg, kbuf, sizeof(reg));
			kbuf += sizeof(reg);
		} else {
			ret = copy_from_user(&reg, ubuf, sizeof(reg));
			if (ret) {
				ret = -EFAULT;
				break;
			}

			ubuf += sizeof(reg);
		}

		switch (idx) {
		case 15:
			newregs.pc = reg;
			break;
		case 16:
			reg = compat_psr_to_pstate(reg);
			newregs.pstate = reg;
			break;
		case 17:
			newregs.orig_x0 = reg;
			break;
		default:
			newregs.regs[idx] = reg;
		}
	}

	if (valid_user_regs(&newregs.user_regs, target))
		*task_pt_regs(target) = newregs;
	else
		ret = -EINVAL;

	return ret;
}
static int compat_vfp_get(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf to)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;

	if (!system_supports_fpsimd())
		return -EINVAL;

	uregs = &target->thread.uw.fpsimd_state;

	if (target == current)
		fpsimd_preserve_current_state();

	/*
	 * The VFP registers are packed into the fpsimd_state, so they all sit
	 * nicely together for us. We just need to create the fpscr separately.
	 */
	membuf_write(&to, uregs, VFP_STATE_SIZE - sizeof(compat_ulong_t));
	fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) |
		(uregs->fpcr & VFP_FPSCR_CTRL_MASK);
	return membuf_store(&to, fpscr);
}

static int compat_vfp_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret, vregs_end_pos;

	if (!system_supports_fpsimd())
		return -EINVAL;

	uregs = &target->thread.uw.fpsimd_state;

	vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
				 vregs_end_pos);

	if (count && !ret) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpscr,
					 vregs_end_pos, VFP_STATE_SIZE);
		if (!ret) {
			uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK;
			uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
		}
	}

	fpsimd_flush_task_state(target);
	return ret;
}
static int compat_tls_get(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf to)
{
	return membuf_store(&to, (compat_ulong_t)target->thread.uw.tp_value);
}

static int compat_tls_set(struct task_struct *target,
			  const struct user_regset *regset, unsigned int pos,
			  unsigned int count, const void *kbuf,
			  const void __user *ubuf)
{
	int ret;
	compat_ulong_t tls = target->thread.uw.tp_value;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
	if (ret)
		return ret;

	target->thread.uw.tp_value = tls;
	return ret;
}
static const struct user_regset aarch32_regsets[] = {
	[REGSET_COMPAT_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.regset_get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_COMPAT_VFP] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.active = fpr_active,
		.regset_get = compat_vfp_get,
		.set = compat_vfp_set
	},
};

static const struct user_regset_view user_aarch32_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
};
static const struct user_regset aarch32_ptrace_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.regset_get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.regset_get = compat_vfp_get,
		.set = compat_vfp_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 1,
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.regset_get = compat_tls_get,
		.set = compat_tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_ARM_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.regset_get = system_call_get,
		.set = system_call_set,
	},
};

static const struct user_regset_view user_aarch32_ptrace_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_ptrace_regsets, .n = ARRAY_SIZE(aarch32_ptrace_regsets)
};
#ifdef CONFIG_COMPAT
static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
				   compat_ulong_t __user *ret)
{
	compat_ulong_t tmp;

	if (off & 3)
		return -EIO;

	if (off == COMPAT_PT_TEXT_ADDR)
		tmp = tsk->mm->start_code;
	else if (off == COMPAT_PT_DATA_ADDR)
		tmp = tsk->mm->start_data;
	else if (off == COMPAT_PT_TEXT_END_ADDR)
		tmp = tsk->mm->end_code;
	else if (off < sizeof(compat_elf_gregset_t))
		tmp = compat_get_user_reg(tsk, off >> 2);
	else if (off >= COMPAT_USER_SZ)
		return -EIO;
	else
		tmp = 0;

	return put_user(tmp, ret);
}
static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
				    compat_ulong_t val)
{
	struct pt_regs newregs = *task_pt_regs(tsk);
	unsigned int idx = off / 4;

	if (off & 3 || off >= COMPAT_USER_SZ)
		return -EIO;

	if (off >= sizeof(compat_elf_gregset_t))
		return 0;

	switch (idx) {
	case 15:
		newregs.pc = val;
		break;
	case 16:
		newregs.pstate = compat_psr_to_pstate(val);
		break;
	case 17:
		newregs.orig_x0 = val;
		break;
	default:
		newregs.regs[idx] = val;
	}

	if (!valid_user_regs(&newregs.user_regs, tsk))
		return -EINVAL;

	*task_pt_regs(tsk) = newregs;
	return 0;
}
#ifdef CONFIG_HAVE_HW_BREAKPOINT

/*
 * Convert a virtual register number into an index for a thread_info
 * breakpoint array. Breakpoints are identified using positive numbers
 * whilst watchpoints are negative. The registers are laid out as pairs
 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
 * Register 0 is reserved for describing resource information.
 */
static int compat_ptrace_hbp_num_to_idx(compat_long_t num)
{
	return (abs(num) - 1) >> 1;
}
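/*
 * Worked example (illustrative): under this mapping, num 1 names the
 * address and num 2 the control word of breakpoint 0 (odd num => address,
 * see the num & 1 tests below), num 3 starts breakpoint 1, and num -1/-2
 * name watchpoint 0; num 0 is reserved for the resource info word.
 */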
static int compat_ptrace_hbp_get_resource_info(u32 *kdata)
{
	u8 num_brps, num_wrps, debug_arch, wp_len;
	u32 reg = 0;

	num_brps	= hw_breakpoint_slots(TYPE_INST);
	num_wrps	= hw_breakpoint_slots(TYPE_DATA);

	debug_arch	= debug_monitors_arch();
	wp_len		= 8;
	reg		|= debug_arch;
	reg		<<= 8;
	reg		|= wp_len;
	reg		<<= 8;
	reg		|= num_wrps;
	reg		<<= 8;
	reg		|= num_brps;

	*kdata = reg;
	return 0;
}
static int compat_ptrace_hbp_get(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr = 0;
	u32 ctrl = 0;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
		*kdata = (u32)addr;
	} else {
		err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl);
		*kdata = ctrl;
	}

	return err;
}

static int compat_ptrace_hbp_set(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr;
	u32 ctrl;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		addr = *kdata;
		err = ptrace_hbp_set_addr(note_type, tsk, idx, addr);
	} else {
		ctrl = *kdata;
		err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl);
	}

	return err;
}
static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata;

	/* Watchpoint */
	if (num < 0) {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
	/* Resource info */
	} else if (num == 0) {
		ret = compat_ptrace_hbp_get_resource_info(&kdata);
	/* Breakpoint */
	} else {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
	}

	if (!ret)
		ret = put_user(kdata, data);

	return ret;
}

static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata = 0;

	if (num == 0)
		return 0;

	ret = get_user(kdata, data);
	if (ret)
		return ret;

	if (num < 0)
		ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
	else
		ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);

	return ret;
}
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = compat_ptrace_read_user(child, addr, datap);
		break;

	case PTRACE_POKEUSR:
		ret = compat_ptrace_write_user(child, addr, data);
		break;

	case COMPAT_PTRACE_GETREGS:
		ret = copy_regset_to_user(child,
					  &user_aarch32_view,
					  REGSET_COMPAT_GPR,
					  0, sizeof(compat_elf_gregset_t),
					  datap);
		break;

	case COMPAT_PTRACE_SETREGS:
		ret = copy_regset_from_user(child,
					    &user_aarch32_view,
					    REGSET_COMPAT_GPR,
					    0, sizeof(compat_elf_gregset_t),
					    datap);
		break;

	case COMPAT_PTRACE_GET_THREAD_AREA:
		ret = put_user((compat_ulong_t)child->thread.uw.tp_value,
			       (compat_ulong_t __user *)datap);
		break;

	case COMPAT_PTRACE_SET_SYSCALL:
		task_pt_regs(child)->syscallno = data;
		ret = 0;
		break;

	case COMPAT_PTRACE_GETVFPREGS:
		ret = copy_regset_to_user(child,
					  &user_aarch32_view,
					  REGSET_COMPAT_VFP,
					  0, VFP_STATE_SIZE,
					  datap);
		break;

	case COMPAT_PTRACE_SETVFPREGS:
		ret = copy_regset_from_user(child,
					    &user_aarch32_view,
					    REGSET_COMPAT_VFP,
					    0, VFP_STATE_SIZE,
					    datap);
		break;

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	case COMPAT_PTRACE_GETHBPREGS:
		ret = compat_ptrace_gethbpregs(child, addr, datap);
		break;

	case COMPAT_PTRACE_SETHBPREGS:
		ret = compat_ptrace_sethbpregs(child, addr, datap);
		break;
#endif

	default:
		ret = compat_ptrace_request(child, request, addr,
					    data);
		break;
	}

	return ret;
}
#endif /* CONFIG_COMPAT */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	/*
	 * Core dumping of 32-bit tasks or compat ptrace requests must use the
	 * user_aarch32_view compatible with arm32. Native ptrace requests on
	 * 32-bit children use an extended user_aarch32_ptrace_view to allow
	 * access to the TLS register.
	 */
	if (is_compat_task())
		return &user_aarch32_view;
	else if (is_compat_thread(task_thread_info(task)))
		return &user_aarch32_ptrace_view;

	return &user_aarch64_view;
}
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	switch (request) {
	case PTRACE_PEEKMTETAGS:
	case PTRACE_POKEMTETAGS:
		return mte_ptrace_copy_tags(child, request, addr, data);
	}

	return ptrace_request(child, request, addr, data);
}
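/*
 * Tracer-side sketch (illustrative): PTRACE_PEEKMTETAGS reads one MTE
 * allocation tag per 16-byte granule from the tracee's address space;
 * addr selects the start of the range and the iovec length is updated to
 * the number of tags actually transferred.
 *
 *	uint8_t tags[16];
 *	struct iovec iov = { .iov_base = tags, .iov_len = sizeof(tags) };
 *	ptrace(PTRACE_PEEKMTETAGS, pid, addr, &iov);
 */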
enum ptrace_syscall_dir {
	PTRACE_SYSCALL_ENTER = 0,
	PTRACE_SYSCALL_EXIT,
};

static void report_syscall(struct pt_regs *regs, enum ptrace_syscall_dir dir)
{
	int regno;
	unsigned long saved_reg;

	/*
	 * We have some ABI weirdness here in the way that we handle syscall
	 * exit stops because we indicate whether or not the stop has been
	 * signalled from syscall entry or syscall exit by clobbering a general
	 * purpose register (ip/r12 for AArch32, x7 for AArch64) in the tracee
	 * and restoring its old value after the stop. This means that:
	 *
	 * - Any writes by the tracer to this register during the stop are
	 *   ignored/discarded.
	 *
	 * - The actual value of the register is not available during the stop,
	 *   so the tracer cannot save it and restore it later.
	 *
	 * - Syscall stops behave differently to seccomp and pseudo-step traps
	 *   (the latter do not nobble any registers).
	 */
	regno = (is_compat_task() ? 12 : 7);
	saved_reg = regs->regs[regno];
	regs->regs[regno] = dir;

	if (dir == PTRACE_SYSCALL_ENTER) {
		if (ptrace_report_syscall_entry(regs))
			forget_syscall(regs);
		regs->regs[regno] = saved_reg;
	} else if (!test_thread_flag(TIF_SINGLESTEP)) {
		ptrace_report_syscall_exit(regs, 0);
		regs->regs[regno] = saved_reg;
	} else {
		regs->regs[regno] = saved_reg;

		/*
		 * Signal a pseudo-step exception since we are stepping but
		 * tracer modifications to the registers may have rewound the
		 * state machine.
		 */
		ptrace_report_syscall_exit(regs, 1);
	}
}
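/*
 * Tracer-visible effect (illustrative sketch): at a PTRACE_SYSCALL stop
 * the clobbered register tells the two stops apart; on an AArch64 tracee:
 *
 *	ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
 *	if (regs.regs[7] == PTRACE_SYSCALL_ENTER)
 *		;	// syscall-entry stop: x7 reads as 0
 *	else
 *		;	// syscall-exit stop: x7 reads as 1
 */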
int syscall_trace_enter(struct pt_regs *regs)
{
	unsigned long flags = read_thread_flags();

	if (flags & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE)) {
		report_syscall(regs, PTRACE_SYSCALL_ENTER);
		if (flags & _TIF_SYSCALL_EMU)
			return NO_SYSCALL;
	}

	/* Do the secure computing after ptrace; failures should be fast. */
	if (secure_computing() == -1)
		return NO_SYSCALL;

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, regs->syscallno);

	audit_syscall_entry(regs->syscallno, regs->orig_x0, regs->regs[1],
			    regs->regs[2], regs->regs[3]);

	return regs->syscallno;
}
void syscall_trace_exit(struct pt_regs *regs)
{
	unsigned long flags = read_thread_flags();

	audit_syscall_exit(regs);

	if (flags & _TIF_SYSCALL_TRACEPOINT)
		trace_sys_exit(regs, syscall_get_return_value(current, regs));

	if (flags & (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP))
		report_syscall(regs, PTRACE_SYSCALL_EXIT);

	rseq_syscall(regs);
}
/*
 * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487D.a.
 * We permit userspace to set SSBS (AArch64 bit 12, AArch32 bit 23) which is
 * not described in ARM DDI 0487D.a.
 * We treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may
 * be allocated an EL0 meaning in future.
 * Userspace cannot use these until they have an architectural meaning.
 * Note that this follows the SPSR_ELx format, not the AArch32 PSR format.
 * We also reserve IL for the kernel; SS is handled dynamically.
 */
#define SPSR_EL1_AARCH64_RES0_BITS \
	(GENMASK_ULL(63, 32) | GENMASK_ULL(27, 26) | GENMASK_ULL(23, 22) | \
	 GENMASK_ULL(20, 13) | GENMASK_ULL(5, 5))
#define SPSR_EL1_AARCH32_RES0_BITS \
	(GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20))
static int valid_compat_regs(struct user_pt_regs *regs)
{
	regs->pstate &= ~SPSR_EL1_AARCH32_RES0_BITS;

	if (!system_supports_mixed_endian_el0()) {
		if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
			regs->pstate |= PSR_AA32_E_BIT;
		else
			regs->pstate &= ~PSR_AA32_E_BIT;
	}

	if (user_mode(regs) && (regs->pstate & PSR_MODE32_BIT) &&
	    (regs->pstate & PSR_AA32_A_BIT) == 0 &&
	    (regs->pstate & PSR_AA32_I_BIT) == 0 &&
	    (regs->pstate & PSR_AA32_F_BIT) == 0) {
		return 1;
	}

	/*
	 * Force PSR to a valid 32-bit EL0t, preserving the same bits as
	 * arch/arm.
	 */
	regs->pstate &= PSR_AA32_N_BIT | PSR_AA32_Z_BIT |
			PSR_AA32_C_BIT | PSR_AA32_V_BIT |
			PSR_AA32_Q_BIT | PSR_AA32_IT_MASK |
			PSR_AA32_GE_MASK | PSR_AA32_E_BIT |
			PSR_AA32_T_BIT;
	regs->pstate |= PSR_MODE32_BIT;

	return 0;
}
static int valid_native_regs(struct user_pt_regs *regs)
{
	regs->pstate &= ~SPSR_EL1_AARCH64_RES0_BITS;

	if (user_mode(regs) && !(regs->pstate & PSR_MODE32_BIT) &&
	    (regs->pstate & PSR_D_BIT) == 0 &&
	    (regs->pstate & PSR_A_BIT) == 0 &&
	    (regs->pstate & PSR_I_BIT) == 0 &&
	    (regs->pstate & PSR_F_BIT) == 0) {
		return 1;
	}

	/* Force PSR to a valid 64-bit EL0t */
	regs->pstate &= PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT;

	return 0;
}
/*
 * Are the current registers suitable for user mode? (used to maintain
 * security in signal handlers)
 */
int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task)
{
	/* https://lore.kernel.org/lkml/20191118131525.GA4180@willie-the-truck */
	user_regs_reset_single_step(regs, task);

	if (is_compat_thread(task_thread_info(task)))
		return valid_compat_regs(regs);
	else
		return valid_native_regs(regs);
}