// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/ptrace.c
 *
 * By Ross Biro 1/23/92
 * edited by Linus Torvalds
 * ARM modifications Copyright (C) 2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/nospec.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/seccomp.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>
#include <linux/elf.h>

#include <asm/compat.h>
#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
#include <asm/fpsimd.h>
#include <asm/mte.h>
#include <asm/pointer_auth.h>
#include <asm/stacktrace.h>
#include <asm/syscall.h>
#include <asm/traps.h>
#include <asm/system_misc.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
#define GPR_OFFSET_NAME(r) \
	{.name = "x" #r, .offset = offsetof(struct pt_regs, regs[r])}

static const struct pt_regs_offset regoffset_table[] = {
	/* GPR_OFFSET_NAME(0) ... GPR_OFFSET_NAME(29), sp and pc entries elided */
	{.name = "lr", .offset = offsetof(struct pt_regs, regs[30])},
	REG_OFFSET_NAME(pstate),
	REG_OFFSET_END,
};

/**
 * regs_query_register_offset() - query register offset from its name
 * @name:	the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;

	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}

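/*
 * Illustrative sketch (not part of this file; the helper name is
 * hypothetical): callers such as probe argument-fetch code pair
 * regs_query_register_offset() with regs_get_register() to read a saved
 * register by name.
 */
static inline u64 example_regs_get_named(struct pt_regs *regs,
					 const char *name)
{
	int offset = regs_query_register_offset(name);

	return offset < 0 ? 0 : regs_get_register(regs, offset);
}
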
/**
 * regs_within_kernel_stack() - check the address in the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @addr:	address which is checked.
 *
 * regs_within_kernel_stack() checks whether @addr is within the kernel stack
 * page(s). If @addr is within the kernel stack, it returns true; if not, false.
 */
static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
	return ((addr & ~(THREAD_SIZE - 1)) ==
		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
		on_irq_stack(addr, sizeof(unsigned long));
}

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * regs_get_kernel_stack_nth() returns the @n th entry of the kernel stack
 * specified by @regs. If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);

	addr += n;
	if (regs_within_kernel_stack(regs, (unsigned long)addr))
		return *addr;
	return 0;
}

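/*
 * Illustrative sketch (not part of this file; the helper name is
 * hypothetical): probe code combines the two helpers above to fetch
 * arguments that spilled to the stack; slot 0 is the word at the saved
 * stack pointer.
 */
static inline unsigned long example_first_stack_slot(struct pt_regs *regs)
{
	return regs_get_kernel_stack_nth(regs, 0);
}
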
/*
 * TODO: does not yet catch signals sent when the child dies,
 * in exit.c or in signal.c.
 */

/*
 * Called by kernel/ptrace.c when detaching.
 */
void ptrace_disable(struct task_struct *child)
{
	/*
	 * This would be better off in core code, but PTRACE_DETACH has
	 * grown its fair share of arch-specific warts and changing it
	 * is likely to cause regressions on obscure architectures.
	 */
	user_disable_single_step(child);
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
 * Handle hitting a HW-breakpoint.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
	const char *desc = "Hardware breakpoint trap (ptrace)";

	if (is_compat_task()) {
		int si_errno = 0;
		int i;

		for (i = 0; i < ARM_MAX_BRP; ++i) {
			if (current->thread.debug.hbp_break[i] == bp) {
				si_errno = (i << 1) + 1;
				break;
			}
		}

		for (i = 0; i < ARM_MAX_WRP; ++i) {
			if (current->thread.debug.hbp_watch[i] == bp) {
				si_errno = -((i << 1) + 1);
				break;
			}
		}
		arm64_force_sig_ptrace_errno_trap(si_errno, bkpt->trigger,
						  desc);
		return;
	}

	arm64_force_sig_fault(SIGTRAP, TRAP_HWBKPT, bkpt->trigger, desc);
}

/*
 * Unregister breakpoints from this task and reset the pointers in
 * the thread_struct.
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < ARM_MAX_BRP; i++) {
		if (t->debug.hbp_break[i]) {
			unregister_hw_breakpoint(t->debug.hbp_break[i]);
			t->debug.hbp_break[i] = NULL;
		}
	}

	for (i = 0; i < ARM_MAX_WRP; i++) {
		if (t->debug.hbp_watch[i]) {
			unregister_hw_breakpoint(t->debug.hbp_watch[i]);
			t->debug.hbp_watch[i] = NULL;
		}
	}
}

void ptrace_hw_copy_thread(struct task_struct *tsk)
{
	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
}

static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
					       struct task_struct *tsk,
					       unsigned long idx)
{
	struct perf_event *bp = ERR_PTR(-EINVAL);

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx >= ARM_MAX_BRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_BRP);
		bp = tsk->thread.debug.hbp_break[idx];
		break;
	case NT_ARM_HW_WATCH:
		if (idx >= ARM_MAX_WRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_WRP);
		bp = tsk->thread.debug.hbp_watch[idx];
		break;
	}

out:
	return bp;
}

static int ptrace_hbp_set_event(unsigned int note_type,
				struct task_struct *tsk,
				unsigned long idx,
				struct perf_event *bp)
{
	int err = -EINVAL;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx >= ARM_MAX_BRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_BRP);
		tsk->thread.debug.hbp_break[idx] = bp;
		err = 0;
		break;
	case NT_ARM_HW_WATCH:
		if (idx >= ARM_MAX_WRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_WRP);
		tsk->thread.debug.hbp_watch[idx] = bp;
		err = 0;
		break;
	}

out:
	return err;
}

static struct perf_event *ptrace_hbp_create(unsigned int note_type,
					    struct task_struct *tsk,
					    unsigned long idx)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	int err, type;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		type = HW_BREAKPOINT_X;
		break;
	case NT_ARM_HW_WATCH:
		type = HW_BREAKPOINT_RW;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	ptrace_breakpoint_init(&attr);

	/*
	 * Initialise fields to sane defaults
	 * (i.e. values that will pass validation).
	 */
	attr.bp_addr	= 0;
	attr.bp_len	= HW_BREAKPOINT_LEN_4;
	attr.bp_type	= type;
	attr.disabled	= 1;

	bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
	if (IS_ERR(bp))
		return bp;

	err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
	if (err)
		return ERR_PTR(err);

	return bp;
}

static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
				     struct arch_hw_breakpoint_ctrl ctrl,
				     struct perf_event_attr *attr)
{
	int err, len, type, offset, disabled = !ctrl.enabled;

	attr->disabled = disabled;
	if (disabled)
		return 0;

	err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
	if (err)
		return err;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if ((type & HW_BREAKPOINT_X) != type)
			return -EINVAL;
		break;
	case NT_ARM_HW_WATCH:
		if ((type & HW_BREAKPOINT_RW) != type)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	attr->bp_len	= len;
	attr->bp_type	= type;
	attr->bp_addr	+= offset;

	return 0;
}

static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info)
{
	u8 num;
	u32 reg = 0;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		num = hw_breakpoint_slots(TYPE_INST);
		break;
	case NT_ARM_HW_WATCH:
		num = hw_breakpoint_slots(TYPE_DATA);
		break;
	default:
		return -EINVAL;
	}

	reg |= debug_monitors_arch();
	reg <<= 8;
	reg |= num;

	*info = reg;
	return 0;
}

static int ptrace_hbp_get_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 *ctrl)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
	return 0;
}

static int ptrace_hbp_get_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 *addr)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*addr = bp ? counter_arch_bp(bp)->address : 0;
	return 0;
}

static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
							struct task_struct *tsk,
							unsigned long idx)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (!bp)
		bp = ptrace_hbp_create(note_type, tsk, idx);

	return bp;
}

static int ptrace_hbp_set_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 uctrl)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint_ctrl ctrl;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	decode_ctrl_reg(uctrl, &ctrl);
	err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
	if (err)
		return err;

	return modify_user_hw_breakpoint(bp, &attr);
}

static int ptrace_hbp_set_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 addr)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	attr.bp_addr = addr;
	err = modify_user_hw_breakpoint(bp, &attr);
	return err;
}

#define PTRACE_HBP_ADDR_SZ	sizeof(u64)
#define PTRACE_HBP_CTRL_SZ	sizeof(u32)
#define PTRACE_HBP_PAD_SZ	sizeof(u32)

static int hw_break_get(struct task_struct *target,
			const struct user_regset *regset,
			struct membuf to)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0;
	u32 info, ctrl;
	u64 addr;

	/* Resource info */
	ret = ptrace_hbp_get_resource_info(note_type, &info);
	if (ret)
		return ret;

	membuf_write(&to, &info, sizeof(info));
	membuf_zero(&to, sizeof(u32));
	/* (address, ctrl) registers */
	while (to.left) {
		ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
		if (ret)
			return ret;
		ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
		if (ret)
			return ret;
		membuf_store(&to, addr);
		membuf_store(&to, ctrl);
		membuf_zero(&to, sizeof(u32));
		idx++;
	}

	return 0;
}

static int hw_break_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 ctrl;
	u64 addr;

	/* Resource info and pad */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
	if (ret)
		return ret;

	/* (address, ctrl) registers */
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		if (count < PTRACE_HBP_ADDR_SZ)
			return -EINVAL;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
					 offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		if (!count)
			break;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
					 offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						offset,
						offset + PTRACE_HBP_PAD_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}

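/*
 * Illustrative userspace-side sketch (assumed, not part of this file;
 * the function name is hypothetical): a tracer reads the NT_ARM_HW_BREAK
 * regset served by hw_break_get() above with PTRACE_GETREGSET. Kept under
 * #if 0 so the kernel build never sees it.
 */
#if 0
#include <elf.h>
#include <sys/ptrace.h>
#include <sys/uio.h>
#include <asm/ptrace.h>

static long example_read_hw_break(pid_t pid, struct user_hwdebug_state *st)
{
	struct iovec iov = { .iov_base = st, .iov_len = sizeof(*st) };

	/* st->dbg_info then carries the resource word built above. */
	return ptrace(PTRACE_GETREGSET, pid, NT_ARM_HW_BREAK, &iov);
}
#endif
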
#endif	/* CONFIG_HAVE_HW_BREAKPOINT */

static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs;

	return membuf_write(&to, uregs, sizeof(*uregs));
}

static int gpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_pt_regs newregs = task_pt_regs(target)->user_regs;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
	if (ret)
		return ret;

	if (!valid_user_regs(&newregs, target))
		return -EINVAL;

	task_pt_regs(target)->user_regs = newregs;
	return 0;
}

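/*
 * Illustrative userspace-side sketch (assumed, not part of this file;
 * the function name is hypothetical): the NT_PRSTATUS regset served by
 * gpr_get()/gpr_set() above is accessed with PTRACE_GETREGSET and an
 * iovec.
 */
#if 0
#include <elf.h>
#include <sys/ptrace.h>
#include <sys/uio.h>
#include <asm/ptrace.h>

static long example_read_gprs(pid_t pid, struct user_pt_regs *uregs)
{
	struct iovec iov = { .iov_base = uregs, .iov_len = sizeof(*uregs) };

	return ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
}
#endif
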
static int fpr_active(struct task_struct *target, const struct user_regset *regset)
{
	if (!system_supports_fpsimd())
		return -ENODEV;
	return regset->n;
}

/*
 * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
 */

static int __fpr_get(struct task_struct *target,
		     const struct user_regset *regset,
		     struct membuf to)
{
	struct user_fpsimd_state *uregs;

	sve_sync_to_fpsimd(target);

	uregs = &target->thread.uw.fpsimd_state;

	return membuf_write(&to, uregs, sizeof(*uregs));
}

static int fpr_get(struct task_struct *target, const struct user_regset *regset,
		   struct membuf to)
{
	if (!system_supports_fpsimd())
		return -EINVAL;

	if (target == current)
		fpsimd_preserve_current_state();

	return __fpr_get(target, regset, to);
}

static int __fpr_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf,
		     unsigned int start_pos)
{
	int ret;
	struct user_fpsimd_state newstate;

	/*
	 * Ensure target->thread.uw.fpsimd_state is up to date, so that a
	 * short copyin can't resurrect stale data.
	 */
	sve_sync_to_fpsimd(target);

	newstate = target->thread.uw.fpsimd_state;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate,
				 start_pos, start_pos + sizeof(newstate));
	if (ret)
		return ret;

	target->thread.uw.fpsimd_state = newstate;

	return ret;
}

static int fpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;

	if (!system_supports_fpsimd())
		return -EINVAL;

	ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0);
	if (ret)
		return ret;

	sve_sync_from_fpsimd_zeropad(target);
	fpsimd_flush_task_state(target);

	return ret;
}

static int tls_get(struct task_struct *target, const struct user_regset *regset,
		   struct membuf to)
{
	int ret;

	if (target == current)
		tls_preserve_current_state();

	ret = membuf_store(&to, target->thread.uw.tp_value);
	if (system_supports_tpidr2())
		ret = membuf_store(&to, target->thread.tpidr2_el0);
	else
		ret = membuf_zero(&to, sizeof(u64));

	return ret;
}

static int tls_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	unsigned long tls[2];

	tls[0] = target->thread.uw.tp_value;
	if (system_supports_sme())
		tls[1] = target->thread.tpidr2_el0;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, tls, 0, count);
	if (ret)
		return ret;

	target->thread.uw.tp_value = tls[0];
	if (system_supports_sme())
		target->thread.tpidr2_el0 = tls[1];

	return ret;
}

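/*
 * Illustrative userspace-side sketch (assumed, not part of this file;
 * the function name is hypothetical): NT_ARM_TLS exposes one or two
 * u64s, matching tls_get()/tls_set() above. An 8-byte read returns only
 * TPIDR_EL0; a 16-byte read also returns TPIDR2_EL0 (zero when TPIDR2
 * is not supported).
 */
#if 0
#include <elf.h>
#include <sys/ptrace.h>
#include <sys/uio.h>

static long example_read_tls(pid_t pid, unsigned long tls[2])
{
	struct iovec iov = { .iov_base = tls, .iov_len = 2 * sizeof(tls[0]) };

	return ptrace(PTRACE_GETREGSET, pid, NT_ARM_TLS, &iov);
}
#endif
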
static int system_call_get(struct task_struct *target,
			   const struct user_regset *regset,
			   struct membuf to)
{
	return membuf_store(&to, task_pt_regs(target)->syscallno);
}

static int system_call_set(struct task_struct *target,
			   const struct user_regset *regset,
			   unsigned int pos, unsigned int count,
			   const void *kbuf, const void __user *ubuf)
{
	int syscallno = task_pt_regs(target)->syscallno;
	int ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
	if (ret)
		return ret;

	task_pt_regs(target)->syscallno = syscallno;
	return ret;
}

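/*
 * Illustrative userspace-side sketch (assumed, not part of this file;
 * the function name is hypothetical): rewriting the pending syscall
 * number through the NT_ARM_SYSTEM_CALL regset handled by
 * system_call_set() above, e.g. setting it to -1 to skip the syscall.
 */
#if 0
#include <elf.h>
#include <sys/ptrace.h>
#include <sys/uio.h>

static long example_set_syscall(pid_t pid, int nr)
{
	struct iovec iov = { .iov_base = &nr, .iov_len = sizeof(nr) };

	return ptrace(PTRACE_SETREGSET, pid, NT_ARM_SYSTEM_CALL, &iov);
}
#endif
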
#ifdef CONFIG_ARM64_SVE

static void sve_init_header_from_task(struct user_sve_header *header,
				      struct task_struct *target,
				      enum vec_type type)
{
	unsigned int vq;
	bool active;
	bool fpsimd_only;
	enum vec_type task_type;

	memset(header, 0, sizeof(*header));

	/* Check if the requested registers are active for the task */
	if (thread_sm_enabled(&target->thread))
		task_type = ARM64_VEC_SME;
	else
		task_type = ARM64_VEC_SVE;
	active = (task_type == type);

	switch (type) {
	case ARM64_VEC_SVE:
		if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT))
			header->flags |= SVE_PT_VL_INHERIT;
		fpsimd_only = !test_tsk_thread_flag(target, TIF_SVE);
		break;
	case ARM64_VEC_SME:
		if (test_tsk_thread_flag(target, TIF_SME_VL_INHERIT))
			header->flags |= SVE_PT_VL_INHERIT;
		fpsimd_only = false;
		break;
	default:
		WARN_ON_ONCE(1);
		return;
	}

	if (active) {
		if (fpsimd_only) {
			header->flags |= SVE_PT_REGS_FPSIMD;
		} else {
			header->flags |= SVE_PT_REGS_SVE;
		}
	}

	header->vl = task_get_vl(target, type);
	vq = sve_vq_from_vl(header->vl);

	header->max_vl = vec_max_vl(type);
	header->size = SVE_PT_SIZE(vq, header->flags);
	header->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl),
				       SVE_PT_REGS_SVE);
}

static unsigned int sve_size_from_header(struct user_sve_header const *header)
{
	return ALIGN(header->size, SVE_VQ_BYTES);
}

static int sve_get_common(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf to,
			  enum vec_type type)
{
	struct user_sve_header header;
	unsigned int vq;
	unsigned long start, end;

	/* Header */
	sve_init_header_from_task(&header, target, type);
	vq = sve_vq_from_vl(header.vl);

	membuf_write(&to, &header, sizeof(header));

	if (target == current)
		fpsimd_preserve_current_state();

	BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
	BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));

	switch ((header.flags & SVE_PT_REGS_MASK)) {
	case SVE_PT_REGS_FPSIMD:
		return __fpr_get(target, regset, to);

	case SVE_PT_REGS_SVE:
		start = SVE_PT_SVE_OFFSET;
		end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
		membuf_write(&to, target->thread.sve_state, end - start);

		start = end;
		end = SVE_PT_SVE_FPSR_OFFSET(vq);
		membuf_zero(&to, end - start);

		/*
		 * Copy fpsr, and fpcr which must follow contiguously in
		 * struct fpsimd_state:
		 */
		start = end;
		end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
		membuf_write(&to, &target->thread.uw.fpsimd_state.fpsr,
			     end - start);

		start = end;
		end = sve_size_from_header(&header);
		return membuf_zero(&to, end - start);

	default:
		return 0;
	}
}

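/*
 * Illustrative userspace-side sketch (assumed, not part of this file;
 * the function name is hypothetical): the variable-length layout
 * produced by sve_get_common() above is normally consumed in two steps;
 * read the fixed-size header to learn the payload size, then re-read
 * the whole regset.
 */
#if 0
#include <elf.h>
#include <sys/ptrace.h>
#include <sys/uio.h>
#include <asm/ptrace.h>

static long example_read_sve(pid_t pid, void *buf, size_t bufsz)
{
	struct user_sve_header hdr;
	struct iovec iov = { .iov_base = &hdr, .iov_len = sizeof(hdr) };

	if (ptrace(PTRACE_GETREGSET, pid, NT_ARM_SVE, &iov) != 0)
		return -1;
	if (hdr.size > bufsz)
		return -1;	/* caller's buffer is too small */
	iov.iov_base = buf;
	iov.iov_len = hdr.size;
	return ptrace(PTRACE_GETREGSET, pid, NT_ARM_SVE, &iov);
}
#endif
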
static int sve_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	if (!system_supports_sve())
		return -EINVAL;

	return sve_get_common(target, regset, to, ARM64_VEC_SVE);
}

static int sve_set_common(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf,
			  enum vec_type type)
{
	int ret;
	struct user_sve_header header;
	unsigned int vq;
	unsigned long start, end;

	/* Header */
	if (count < sizeof(header))
		return -EINVAL;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header,
				 0, sizeof(header));
	if (ret)
		goto out;

	/*
	 * Apart from SVE_PT_REGS_MASK, all SVE_PT_* flags are consumed by
	 * vec_set_vector_length(), which will also validate them for us:
	 */
	ret = vec_set_vector_length(target, type, header.vl,
		((unsigned long)header.flags & ~SVE_PT_REGS_MASK) << 16);
	if (ret)
		goto out;

	/* Actual VL set may be less than the user asked for: */
	vq = sve_vq_from_vl(task_get_vl(target, type));

	/* Enter/exit streaming mode */
	if (system_supports_sme()) {
		u64 old_svcr = target->thread.svcr;

		switch (type) {
		case ARM64_VEC_SVE:
			target->thread.svcr &= ~SVCR_SM_MASK;
			break;
		case ARM64_VEC_SME:
			target->thread.svcr |= SVCR_SM_MASK;

			/*
			 * Disable traps and ensure there is SME storage but
			 * preserve any currently set values in ZA/ZT.
			 */
			sme_alloc(target, false);
			set_tsk_thread_flag(target, TIF_SME);
			break;
		default:
			WARN_ON_ONCE(1);
			return -EINVAL;
		}

		/*
		 * If we switched then invalidate any existing SVE
		 * state and ensure there's storage.
		 */
		if (target->thread.svcr != old_svcr)
			sve_alloc(target, true);
	}

	/* Registers: FPSIMD-only case */

	BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
	if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD) {
		ret = __fpr_set(target, regset, pos, count, kbuf, ubuf,
				SVE_PT_FPSIMD_OFFSET);
		clear_tsk_thread_flag(target, TIF_SVE);
		if (type == ARM64_VEC_SME)
			fpsimd_force_sync_to_sve(target);
		goto out;
	}

	/*
	 * Otherwise: no registers or full SVE case. For backwards
	 * compatibility reasons we treat empty flags as SVE registers.
	 */

	/*
	 * If setting a different VL from the requested VL and there is
	 * register data, the data layout will be wrong: don't even
	 * try to set the registers in this case.
	 */
	if (count && vq != sve_vq_from_vl(header.vl)) {
		ret = -EIO;
		goto out;
	}

	sve_alloc(target, true);
	if (!target->thread.sve_state) {
		ret = -ENOMEM;
		clear_tsk_thread_flag(target, TIF_SVE);
		goto out;
	}

	/*
	 * Ensure target->thread.sve_state is up to date with target's
	 * FPSIMD regs, so that a short copyin leaves trailing
	 * registers unmodified. Only enable SVE if we are
	 * configuring normal SVE, a system with streaming SVE may not
	 * have normal SVE.
	 */
	fpsimd_sync_to_sve(target);
	if (type == ARM64_VEC_SVE)
		set_tsk_thread_flag(target, TIF_SVE);

	BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
	start = SVE_PT_SVE_OFFSET;
	end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 target->thread.sve_state,
				 start, end);
	if (ret)
		goto out;

	start = end;
	end = SVE_PT_SVE_FPSR_OFFSET(vq);
	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					start, end);
	if (ret)
		goto out;

	/*
	 * Copy fpsr, and fpcr which must follow contiguously in
	 * struct fpsimd_state:
	 */
	start = end;
	end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.uw.fpsimd_state.fpsr,
				 start, end);

out:
	fpsimd_flush_task_state(target);
	return ret;
}

static int sve_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	if (!system_supports_sve())
		return -EINVAL;

	return sve_set_common(target, regset, pos, count, kbuf, ubuf,
			      ARM64_VEC_SVE);
}

#endif /* CONFIG_ARM64_SVE */

#ifdef CONFIG_ARM64_SME

static int ssve_get(struct task_struct *target,
		    const struct user_regset *regset,
		    struct membuf to)
{
	if (!system_supports_sme())
		return -EINVAL;

	return sve_get_common(target, regset, to, ARM64_VEC_SME);
}

static int ssve_set(struct task_struct *target,
		    const struct user_regset *regset,
		    unsigned int pos, unsigned int count,
		    const void *kbuf, const void __user *ubuf)
{
	if (!system_supports_sme())
		return -EINVAL;

	return sve_set_common(target, regset, pos, count, kbuf, ubuf,
			      ARM64_VEC_SME);
}

static int za_get(struct task_struct *target,
		  const struct user_regset *regset,
		  struct membuf to)
{
	struct user_za_header header;
	unsigned int vq;
	unsigned long start, end;

	if (!system_supports_sme())
		return -EINVAL;

	/* Header */
	memset(&header, 0, sizeof(header));

	if (test_tsk_thread_flag(target, TIF_SME_VL_INHERIT))
		header.flags |= ZA_PT_VL_INHERIT;

	header.vl = task_get_sme_vl(target);
	vq = sve_vq_from_vl(header.vl);
	header.max_vl = sme_max_vl();
	header.max_size = ZA_PT_SIZE(vq);

	/* If ZA is not active there is only the header */
	if (thread_za_enabled(&target->thread))
		header.size = ZA_PT_SIZE(vq);
	else
		header.size = ZA_PT_ZA_OFFSET;

	membuf_write(&to, &header, sizeof(header));

	BUILD_BUG_ON(ZA_PT_ZA_OFFSET != sizeof(header));
	end = ZA_PT_ZA_OFFSET;

	if (target == current)
		fpsimd_preserve_current_state();

	/* Any register data to include? */
	if (thread_za_enabled(&target->thread)) {
		start = end;
		end = ZA_PT_SIZE(vq);
		membuf_write(&to, target->thread.za_state, end - start);
	}

	/* Zero any trailing padding */
	start = end;
	end = ALIGN(header.size, SVE_VQ_BYTES);
	return membuf_zero(&to, end - start);
}

static int za_set(struct task_struct *target,
		  const struct user_regset *regset,
		  unsigned int pos, unsigned int count,
		  const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_za_header header;
	unsigned int vq;
	unsigned long start, end;

	if (!system_supports_sme())
		return -EINVAL;

	/* Header */
	if (count < sizeof(header))
		return -EINVAL;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header,
				 0, sizeof(header));
	if (ret)
		goto out;

	/*
	 * All current ZA_PT_* flags are consumed by
	 * vec_set_vector_length(), which will also validate them for
	 * us:
	 */
	ret = vec_set_vector_length(target, ARM64_VEC_SME, header.vl,
		((unsigned long)header.flags) << 16);
	if (ret)
		goto out;

	/* Actual VL set may be less than the user asked for: */
	vq = sve_vq_from_vl(task_get_sme_vl(target));

	/* Ensure there is some SVE storage for streaming mode */
	if (!target->thread.sve_state) {
		sve_alloc(target, false);
		if (!target->thread.sve_state) {
			ret = -ENOMEM;
			goto out;
		}
	}

	/* Allocate/reinit ZA storage */
	sme_alloc(target, true);
	if (!target->thread.za_state) {
		ret = -ENOMEM;
		goto out;
	}

	/* If there is no data then disable ZA */
	if (!count) {
		target->thread.svcr &= ~SVCR_ZA_MASK;
		goto out;
	}

	/*
	 * If setting a different VL from the requested VL and there is
	 * register data, the data layout will be wrong: don't even
	 * try to set the registers in this case.
	 */
	if (vq != sve_vq_from_vl(header.vl)) {
		ret = -EIO;
		goto out;
	}

	BUILD_BUG_ON(ZA_PT_ZA_OFFSET != sizeof(header));
	start = ZA_PT_ZA_OFFSET;
	end = ZA_PT_SIZE(vq);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 target->thread.za_state,
				 start, end);
	if (ret)
		goto out;

	/* Mark ZA as active and let userspace use it */
	set_tsk_thread_flag(target, TIF_SME);
	target->thread.svcr |= SVCR_ZA_MASK;

out:
	fpsimd_flush_task_state(target);
	return ret;
}

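/*
 * Illustrative userspace-side sketch (assumed, not part of this file;
 * the function name is hypothetical): per the "no data" branch in
 * za_set() above, writing only the user_za_header with no ZA payload
 * disables ZA for the tracee. @vl must be a valid streaming vector
 * length.
 */
#if 0
#include <elf.h>
#include <sys/ptrace.h>
#include <sys/uio.h>
#include <asm/ptrace.h>

static long example_disable_za(pid_t pid, unsigned short vl)
{
	struct user_za_header hdr = { .vl = vl };
	struct iovec iov = { .iov_base = &hdr, .iov_len = sizeof(hdr) };

	return ptrace(PTRACE_SETREGSET, pid, NT_ARM_ZA, &iov);
}
#endif
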
#endif /* CONFIG_ARM64_SME */

#ifdef CONFIG_ARM64_PTR_AUTH

static int pac_mask_get(struct task_struct *target,
			const struct user_regset *regset,
			struct membuf to)
{
	/*
	 * The PAC bits can differ across data and instruction pointers
	 * depending on TCR_EL1.TBID*, which we may make use of in future, so
	 * we expose separate masks.
	 */
	unsigned long mask = ptrauth_user_pac_mask();
	struct user_pac_mask uregs = {
		.data_mask = mask,
		.insn_mask = mask,
	};

	if (!system_supports_address_auth())
		return -EINVAL;

	return membuf_write(&to, &uregs, sizeof(uregs));
}

static int pac_enabled_keys_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	long enabled_keys = ptrauth_get_enabled_keys(target);

	if (IS_ERR_VALUE(enabled_keys))
		return enabled_keys;

	return membuf_write(&to, &enabled_keys, sizeof(enabled_keys));
}

static int pac_enabled_keys_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	int ret;
	long enabled_keys = ptrauth_get_enabled_keys(target);

	if (IS_ERR_VALUE(enabled_keys))
		return enabled_keys;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &enabled_keys, 0,
				 sizeof(long));
	if (ret)
		return ret;

	return ptrauth_set_enabled_keys(target, PR_PAC_ENABLED_KEYS_MASK,
					enabled_keys);
}

#ifdef CONFIG_CHECKPOINT_RESTORE
static __uint128_t pac_key_to_user(const struct ptrauth_key *key)
{
	return (__uint128_t)key->hi << 64 | key->lo;
}

static struct ptrauth_key pac_key_from_user(__uint128_t ukey)
{
	struct ptrauth_key key = {
		.lo = (unsigned long)ukey,
		.hi = (unsigned long)(ukey >> 64),
	};

	return key;
}

static void pac_address_keys_to_user(struct user_pac_address_keys *ukeys,
				     const struct ptrauth_keys_user *keys)
{
	ukeys->apiakey = pac_key_to_user(&keys->apia);
	ukeys->apibkey = pac_key_to_user(&keys->apib);
	ukeys->apdakey = pac_key_to_user(&keys->apda);
	ukeys->apdbkey = pac_key_to_user(&keys->apdb);
}

static void pac_address_keys_from_user(struct ptrauth_keys_user *keys,
				       const struct user_pac_address_keys *ukeys)
{
	keys->apia = pac_key_from_user(ukeys->apiakey);
	keys->apib = pac_key_from_user(ukeys->apibkey);
	keys->apda = pac_key_from_user(ukeys->apdakey);
	keys->apdb = pac_key_from_user(ukeys->apdbkey);
}

static int pac_address_keys_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_address_keys user_keys;

	if (!system_supports_address_auth())
		return -EINVAL;

	pac_address_keys_to_user(&user_keys, keys);

	return membuf_write(&to, &user_keys, sizeof(user_keys));
}

static int pac_address_keys_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_address_keys user_keys;
	int ret;

	if (!system_supports_address_auth())
		return -EINVAL;

	pac_address_keys_to_user(&user_keys, keys);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &user_keys, 0, -1);
	if (ret)
		return ret;
	pac_address_keys_from_user(keys, &user_keys);

	return 0;
}

static void pac_generic_keys_to_user(struct user_pac_generic_keys *ukeys,
				     const struct ptrauth_keys_user *keys)
{
	ukeys->apgakey = pac_key_to_user(&keys->apga);
}

static void pac_generic_keys_from_user(struct ptrauth_keys_user *keys,
				       const struct user_pac_generic_keys *ukeys)
{
	keys->apga = pac_key_from_user(ukeys->apgakey);
}

static int pac_generic_keys_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_generic_keys user_keys;

	if (!system_supports_generic_auth())
		return -EINVAL;

	pac_generic_keys_to_user(&user_keys, keys);

	return membuf_write(&to, &user_keys, sizeof(user_keys));
}

static int pac_generic_keys_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_generic_keys user_keys;
	int ret;

	if (!system_supports_generic_auth())
		return -EINVAL;

	pac_generic_keys_to_user(&user_keys, keys);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &user_keys, 0, -1);
	if (ret)
		return ret;
	pac_generic_keys_from_user(keys, &user_keys);

	return 0;
}

#endif /* CONFIG_CHECKPOINT_RESTORE */
#endif /* CONFIG_ARM64_PTR_AUTH */

#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI

static int tagged_addr_ctrl_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	long ctrl = get_tagged_addr_ctrl(target);

	if (IS_ERR_VALUE(ctrl))
		return ctrl;

	return membuf_write(&to, &ctrl, sizeof(ctrl));
}

static int tagged_addr_ctrl_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	int ret;
	long ctrl;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, 0, -1);
	if (ret)
		return ret;

	return set_tagged_addr_ctrl(target, ctrl);
}

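/*
 * Illustrative userspace-side sketch (assumed, not part of this file;
 * the function name is hypothetical): a tracer can toggle the tagged
 * address ABI through this regset; the tracee itself would use
 * prctl(PR_SET_TAGGED_ADDR_CTRL, ...) instead.
 */
#if 0
#include <elf.h>
#include <sys/ptrace.h>
#include <sys/uio.h>

static long example_set_tagged_addr_ctrl(pid_t pid, long ctrl)
{
	struct iovec iov = { .iov_base = &ctrl, .iov_len = sizeof(ctrl) };

	return ptrace(PTRACE_SETREGSET, pid, NT_ARM_TAGGED_ADDR_CTRL, &iov);
}
#endif
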
enum aarch64_regset {
	REGSET_GPR,
	REGSET_FPR,
	REGSET_TLS,
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	REGSET_HW_BREAK,
	REGSET_HW_WATCH,
#endif
	REGSET_SYSTEM_CALL,
#ifdef CONFIG_ARM64_SVE
	REGSET_SVE,
#endif
#ifdef CONFIG_ARM64_SME
	REGSET_SSVE,
	REGSET_ZA,
#endif
#ifdef CONFIG_ARM64_PTR_AUTH
	REGSET_PAC_MASK,
	REGSET_PAC_ENABLED_KEYS,
#ifdef CONFIG_CHECKPOINT_RESTORE
	REGSET_PACA_KEYS,
	REGSET_PACG_KEYS,
#endif
#endif
#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
	REGSET_TAGGED_ADDR_CTRL,
#endif
};

static const struct user_regset aarch64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_pt_regs) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.regset_get = gpr_get,
		.set = gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fpsimd_state) / sizeof(u32),
		/*
		 * We pretend we have 32-bit registers because the fpsr and
		 * fpcr are 32 bits wide.
		 */
		.size = sizeof(u32),
		.align = sizeof(u32),
		.active = fpr_active,
		.regset_get = fpr_get,
		.set = fpr_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 2,
		.size = sizeof(void *),
		.align = sizeof(void *),
		.regset_get = tls_get,
		.set = tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_ARM_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.regset_get = system_call_get,
		.set = system_call_set,
	},
#ifdef CONFIG_ARM64_SVE
	[REGSET_SVE] = { /* Scalable Vector Extension */
		.core_note_type = NT_ARM_SVE,
		.n = DIV_ROUND_UP(SVE_PT_SIZE(ARCH_SVE_VQ_MAX,
					      SVE_PT_REGS_SVE),
				  SVE_VQ_BYTES),
		.size = SVE_VQ_BYTES,
		.align = SVE_VQ_BYTES,
		.regset_get = sve_get,
		.set = sve_set,
	},
#endif
#ifdef CONFIG_ARM64_SME
	[REGSET_SSVE] = { /* Streaming mode SVE */
		.core_note_type = NT_ARM_SSVE,
		.n = DIV_ROUND_UP(SVE_PT_SIZE(SME_VQ_MAX, SVE_PT_REGS_SVE),
				  SVE_VQ_BYTES),
		.size = SVE_VQ_BYTES,
		.align = SVE_VQ_BYTES,
		.regset_get = ssve_get,
		.set = ssve_set,
	},
	[REGSET_ZA] = { /* SME ZA */
		.core_note_type = NT_ARM_ZA,
		/*
		 * ZA is a single register but it's variably sized and
		 * the ptrace core requires that the size of any data
		 * be an exact multiple of the configured register
		 * size so report as though we had SVE_VQ_BYTES
		 * registers. These values aren't exposed to
		 * userspace.
		 */
		.n = DIV_ROUND_UP(ZA_PT_SIZE(SME_VQ_MAX), SVE_VQ_BYTES),
		.size = SVE_VQ_BYTES,
		.align = SVE_VQ_BYTES,
		.regset_get = za_get,
		.set = za_set,
	},
#endif
#ifdef CONFIG_ARM64_PTR_AUTH
	[REGSET_PAC_MASK] = {
		.core_note_type = NT_ARM_PAC_MASK,
		.n = sizeof(struct user_pac_mask) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.regset_get = pac_mask_get,
		/* this cannot be set dynamically */
	},
	[REGSET_PAC_ENABLED_KEYS] = {
		.core_note_type = NT_ARM_PAC_ENABLED_KEYS,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.regset_get = pac_enabled_keys_get,
		.set = pac_enabled_keys_set,
	},
#ifdef CONFIG_CHECKPOINT_RESTORE
	[REGSET_PACA_KEYS] = {
		.core_note_type = NT_ARM_PACA_KEYS,
		.n = sizeof(struct user_pac_address_keys) / sizeof(__uint128_t),
		.size = sizeof(__uint128_t),
		.align = sizeof(__uint128_t),
		.regset_get = pac_address_keys_get,
		.set = pac_address_keys_set,
	},
	[REGSET_PACG_KEYS] = {
		.core_note_type = NT_ARM_PACG_KEYS,
		.n = sizeof(struct user_pac_generic_keys) / sizeof(__uint128_t),
		.size = sizeof(__uint128_t),
		.align = sizeof(__uint128_t),
		.regset_get = pac_generic_keys_get,
		.set = pac_generic_keys_set,
	},
#endif
#endif
#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
	[REGSET_TAGGED_ADDR_CTRL] = {
		.core_note_type = NT_ARM_TAGGED_ADDR_CTRL,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.regset_get = tagged_addr_ctrl_get,
		.set = tagged_addr_ctrl_set,
	},
#endif
};

static const struct user_regset_view user_aarch64_view = {
	.name = "aarch64", .e_machine = EM_AARCH64,
	.regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets)
};

#ifdef CONFIG_COMPAT
enum compat_regset {
	REGSET_COMPAT_GPR,
	REGSET_COMPAT_VFP,
};

static inline compat_ulong_t compat_get_user_reg(struct task_struct *task, int idx)
{
	struct pt_regs *regs = task_pt_regs(task);

	switch (idx) {
	case 15:
		return regs->pc;
	case 16:
		return pstate_to_compat_psr(regs->pstate);
	case 17:
		return regs->orig_x0;
	default:
		return regs->regs[idx];
	}
}

static int compat_gpr_get(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf to)
{
	int i = 0;

	while (to.left)
		membuf_store(&to, compat_get_user_reg(target, i++));
	return 0;
}

static int compat_gpr_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct pt_regs newregs;
	int ret = 0;
	unsigned int i, start, num_regs;

	/* Calculate the number of AArch32 registers contained in count */
	num_regs = count / regset->size;

	/* Convert pos into a register number */
	start = pos / regset->size;

	if (start + num_regs > regset->n)
		return -EIO;

	newregs = *task_pt_regs(target);

	for (i = 0; i < num_regs; ++i) {
		unsigned int idx = start + i;
		compat_ulong_t reg;

		if (kbuf) {
			memcpy(&reg, kbuf, sizeof(reg));
			kbuf += sizeof(reg);
		} else {
			ret = copy_from_user(&reg, ubuf, sizeof(reg));
			if (ret) {
				ret = -EFAULT;
				break;
			}

			ubuf += sizeof(reg);
		}

		switch (idx) {
		case 15:
			newregs.pc = reg;
			break;
		case 16:
			reg = compat_psr_to_pstate(reg);
			newregs.pstate = reg;
			break;
		case 17:
			newregs.orig_x0 = reg;
			break;
		default:
			newregs.regs[idx] = reg;
		}
	}

	if (valid_user_regs(&newregs.user_regs, target))
		*task_pt_regs(target) = newregs;
	else
		ret = -EINVAL;

	return ret;
}

static int compat_vfp_get(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf to)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;

	if (!system_supports_fpsimd())
		return -EINVAL;

	uregs = &target->thread.uw.fpsimd_state;

	if (target == current)
		fpsimd_preserve_current_state();

	/*
	 * The VFP registers are packed into the fpsimd_state, so they all sit
	 * nicely together for us. We just need to create the fpscr separately.
	 */
	membuf_write(&to, uregs, VFP_STATE_SIZE - sizeof(compat_ulong_t));
	fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) |
		(uregs->fpcr & VFP_FPSCR_CTRL_MASK);
	return membuf_store(&to, fpscr);
}

static int compat_vfp_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret, vregs_end_pos;

	if (!system_supports_fpsimd())
		return -EINVAL;

	uregs = &target->thread.uw.fpsimd_state;

	vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
				 vregs_end_pos);

	if (count && !ret) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpscr,
					 vregs_end_pos, VFP_STATE_SIZE);
		if (!ret) {
			uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK;
			uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
		}
	}

	fpsimd_flush_task_state(target);
	return ret;
}

static int compat_tls_get(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf to)
{
	return membuf_store(&to, (compat_ulong_t)target->thread.uw.tp_value);
}

static int compat_tls_set(struct task_struct *target,
			  const struct user_regset *regset, unsigned int pos,
			  unsigned int count, const void *kbuf,
			  const void __user *ubuf)
{
	int ret;
	compat_ulong_t tls = target->thread.uw.tp_value;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
	if (ret)
		return ret;

	target->thread.uw.tp_value = tls;
	return ret;
}

static const struct user_regset aarch32_regsets[] = {
	[REGSET_COMPAT_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.regset_get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_COMPAT_VFP] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.active = fpr_active,
		.regset_get = compat_vfp_get,
		.set = compat_vfp_set
	},
};

static const struct user_regset_view user_aarch32_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
};

static const struct user_regset aarch32_ptrace_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.regset_get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.regset_get = compat_vfp_get,
		.set = compat_vfp_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 1,
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.regset_get = compat_tls_get,
		.set = compat_tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_ARM_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.regset_get = system_call_get,
		.set = system_call_set,
	},
};

static const struct user_regset_view user_aarch32_ptrace_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_ptrace_regsets, .n = ARRAY_SIZE(aarch32_ptrace_regsets)
};

static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
				   compat_ulong_t __user *ret)
{
	compat_ulong_t tmp;

	if (off & 3)
		return -EIO;

	if (off == COMPAT_PT_TEXT_ADDR)
		tmp = tsk->mm->start_code;
	else if (off == COMPAT_PT_DATA_ADDR)
		tmp = tsk->mm->start_data;
	else if (off == COMPAT_PT_TEXT_END_ADDR)
		tmp = tsk->mm->end_code;
	else if (off < sizeof(compat_elf_gregset_t))
		tmp = compat_get_user_reg(tsk, off >> 2);
	else if (off >= COMPAT_USER_SZ)
		return -EIO;
	else
		tmp = 0;

	return put_user(tmp, ret);
}

static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
				    compat_ulong_t val)
{
	struct pt_regs newregs = *task_pt_regs(tsk);
	unsigned int idx = off / 4;

	if (off & 3 || off >= COMPAT_USER_SZ)
		return -EIO;

	if (off >= sizeof(compat_elf_gregset_t))
		return 0;

	switch (idx) {
	case 15:
		newregs.pc = val;
		break;
	case 16:
		newregs.pstate = compat_psr_to_pstate(val);
		break;
	case 17:
		newregs.orig_x0 = val;
		break;
	default:
		newregs.regs[idx] = val;
	}

	if (!valid_user_regs(&newregs.user_regs, tsk))
		return -EINVAL;

	*task_pt_regs(tsk) = newregs;
	return 0;
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT

/*
 * Convert a virtual register number into an index for a thread_info
 * breakpoint array. Breakpoints are identified using positive numbers
 * whilst watchpoints are negative. The registers are laid out as pairs
 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
 * Register 0 is reserved for describing resource information.
 */
static int compat_ptrace_hbp_num_to_idx(compat_long_t num)
{
	return (abs(num) - 1) >> 1;
}

static int compat_ptrace_hbp_get_resource_info(u32 *kdata)
{
	u8 num_brps, num_wrps, debug_arch, wp_len;
	u32 reg = 0;

	num_brps	= hw_breakpoint_slots(TYPE_INST);
	num_wrps	= hw_breakpoint_slots(TYPE_DATA);

	debug_arch	= debug_monitors_arch();
	wp_len		= 8;
	reg		|= debug_arch;
	reg		<<= 8;
	reg		|= wp_len;
	reg		<<= 8;
	reg		|= num_wrps;
	reg		<<= 8;
	reg		|= num_brps;

	*kdata = reg;
	return 0;
}

static int compat_ptrace_hbp_get(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr = 0;
	u32 ctrl = 0;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
		*kdata = (u32)addr;
	} else {
		err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl);
		*kdata = ctrl;
	}

	return err;
}

static int compat_ptrace_hbp_set(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr;
	u32 ctrl;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		addr = *kdata;
		err = ptrace_hbp_set_addr(note_type, tsk, idx, addr);
	} else {
		ctrl = *kdata;
		err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl);
	}

	return err;
}

static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata;

	/* Watchpoint */
	if (num < 0) {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
	/* Resource info */
	} else if (num == 0) {
		ret = compat_ptrace_hbp_get_resource_info(&kdata);
	/* Breakpoint */
	} else {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
	}

	if (!ret)
		ret = put_user(kdata, data);

	return ret;
}

static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata = 0;

	if (num == 0)
		return 0;

	ret = get_user(kdata, data);
	if (ret)
		return ret;

	if (num < 0)
		ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
	else
		ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);

	return ret;
}

#endif	/* CONFIG_HAVE_HW_BREAKPOINT */

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = compat_ptrace_read_user(child, addr, datap);
		break;

	case PTRACE_POKEUSR:
		ret = compat_ptrace_write_user(child, addr, data);
		break;

	case COMPAT_PTRACE_GETREGS:
		ret = copy_regset_to_user(child,
					  &user_aarch32_view,
					  REGSET_COMPAT_GPR,
					  0, sizeof(compat_elf_gregset_t),
					  datap);
		break;

	case COMPAT_PTRACE_SETREGS:
		ret = copy_regset_from_user(child,
					    &user_aarch32_view,
					    REGSET_COMPAT_GPR,
					    0, sizeof(compat_elf_gregset_t),
					    datap);
		break;

	case COMPAT_PTRACE_GET_THREAD_AREA:
		ret = put_user((compat_ulong_t)child->thread.uw.tp_value,
			       (compat_ulong_t __user *)datap);
		break;

	case COMPAT_PTRACE_SET_SYSCALL:
		task_pt_regs(child)->syscallno = data;
		ret = 0;
		break;

	case COMPAT_PTRACE_GETVFPREGS:
		ret = copy_regset_to_user(child,
					  &user_aarch32_view,
					  REGSET_COMPAT_VFP,
					  0, VFP_STATE_SIZE,
					  datap);
		break;

	case COMPAT_PTRACE_SETVFPREGS:
		ret = copy_regset_from_user(child,
					    &user_aarch32_view,
					    REGSET_COMPAT_VFP,
					    0, VFP_STATE_SIZE,
					    datap);
		break;

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	case COMPAT_PTRACE_GETHBPREGS:
		ret = compat_ptrace_gethbpregs(child, addr, datap);
		break;

	case COMPAT_PTRACE_SETHBPREGS:
		ret = compat_ptrace_sethbpregs(child, addr, datap);
		break;
#endif

	default:
		ret = compat_ptrace_request(child, request, addr,
					    data);
		break;
	}

	return ret;
}

#endif /* CONFIG_COMPAT */

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	/*
	 * Core dumping of 32-bit tasks or compat ptrace requests must use the
	 * user_aarch32_view compatible with arm32. Native ptrace requests on
	 * 32-bit children use an extended user_aarch32_ptrace_view to allow
	 * access to the TLS register.
	 */
	if (is_compat_task())
		return &user_aarch32_view;
	else if (is_compat_thread(task_thread_info(task)))
		return &user_aarch32_ptrace_view;
#endif
	return &user_aarch64_view;
}

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	switch (request) {
	case PTRACE_PEEKMTETAGS:
	case PTRACE_POKEMTETAGS:
		return mte_ptrace_copy_tags(child, request, addr, data);
	}

	return ptrace_request(child, request, addr, data);
}

enum ptrace_syscall_dir {
	PTRACE_SYSCALL_ENTER = 0,
	PTRACE_SYSCALL_EXIT,
};

static void report_syscall(struct pt_regs *regs, enum ptrace_syscall_dir dir)
{
	int regno;
	unsigned long saved_reg;

	/*
	 * We have some ABI weirdness here in the way that we handle syscall
	 * exit stops because we indicate whether or not the stop has been
	 * signalled from syscall entry or syscall exit by clobbering a general
	 * purpose register (ip/r12 for AArch32, x7 for AArch64) in the tracee
	 * and restoring its old value after the stop. This means that:
	 *
	 * - Any writes by the tracer to this register during the stop are
	 *   ignored/discarded.
	 *
	 * - The actual value of the register is not available during the stop,
	 *   so the tracer cannot save it and restore it later.
	 *
	 * - Syscall stops behave differently to seccomp and pseudo-step traps
	 *   (the latter do not nobble any registers).
	 */
	regno = (is_compat_task() ? 12 : 7);
	saved_reg = regs->regs[regno];
	regs->regs[regno] = dir;

	if (dir == PTRACE_SYSCALL_ENTER) {
		if (ptrace_report_syscall_entry(regs))
			forget_syscall(regs);
		regs->regs[regno] = saved_reg;
	} else if (!test_thread_flag(TIF_SINGLESTEP)) {
		ptrace_report_syscall_exit(regs, 0);
		regs->regs[regno] = saved_reg;
	} else {
		regs->regs[regno] = saved_reg;

		/*
		 * Signal a pseudo-step exception since we are stepping but
		 * tracer modifications to the registers may have rewound the
		 * state machine.
		 */
		ptrace_report_syscall_exit(regs, 1);
	}
}

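/*
 * Illustrative userspace-side sketch (assumed, not part of this file;
 * the function name is hypothetical): after a PTRACE_SYSCALL stop, the
 * clobbered register described above (x7 for AArch64 tracees) reads 0
 * at syscall entry and 1 at syscall exit, letting the tracer tell the
 * two stops apart.
 */
#if 0
#include <elf.h>
#include <sys/ptrace.h>
#include <sys/uio.h>
#include <asm/ptrace.h>

static int example_stop_is_syscall_exit(pid_t pid)
{
	struct user_pt_regs uregs;
	struct iovec iov = { .iov_base = &uregs, .iov_len = sizeof(uregs) };

	if (ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov) != 0)
		return -1;
	return uregs.regs[7] == 1;
}
#endif
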
int syscall_trace_enter(struct pt_regs *regs)
{
	unsigned long flags = read_thread_flags();

	if (flags & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE)) {
		report_syscall(regs, PTRACE_SYSCALL_ENTER);
		if (flags & _TIF_SYSCALL_EMU)
			return NO_SYSCALL;
	}

	/* Do the secure computing after ptrace; failures should be fast. */
	if (secure_computing() == -1)
		return NO_SYSCALL;

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, regs->syscallno);

	audit_syscall_entry(regs->syscallno, regs->orig_x0, regs->regs[1],
			    regs->regs[2], regs->regs[3]);

	return regs->syscallno;
}

void syscall_trace_exit(struct pt_regs *regs)
{
	unsigned long flags = read_thread_flags();

	audit_syscall_exit(regs);

	if (flags & _TIF_SYSCALL_TRACEPOINT)
		trace_sys_exit(regs, syscall_get_return_value(current, regs));

	if (flags & (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP))
		report_syscall(regs, PTRACE_SYSCALL_EXIT);

	rseq_syscall(regs);
}

/*
 * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487D.a.
 * We permit userspace to set SSBS (AArch64 bit 12, AArch32 bit 23) which is
 * not described in ARM DDI 0487D.a.
 * We treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may
 * be allocated an EL0 meaning in future.
 * Userspace cannot use these until they have an architectural meaning.
 * Note that this follows the SPSR_ELx format, not the AArch32 PSR format.
 * We also reserve IL for the kernel; SS is handled dynamically.
 */
#define SPSR_EL1_AARCH64_RES0_BITS \
	(GENMASK_ULL(63, 32) | GENMASK_ULL(27, 26) | GENMASK_ULL(23, 22) | \
	 GENMASK_ULL(20, 13) | GENMASK_ULL(5, 5))
#define SPSR_EL1_AARCH32_RES0_BITS \
	(GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20))

static int valid_compat_regs(struct user_pt_regs *regs)
{
	regs->pstate &= ~SPSR_EL1_AARCH32_RES0_BITS;

	if (!system_supports_mixed_endian_el0()) {
		if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
			regs->pstate |= PSR_AA32_E_BIT;
		else
			regs->pstate &= ~PSR_AA32_E_BIT;
	}

	if (user_mode(regs) && (regs->pstate & PSR_MODE32_BIT) &&
	    (regs->pstate & PSR_AA32_A_BIT) == 0 &&
	    (regs->pstate & PSR_AA32_I_BIT) == 0 &&
	    (regs->pstate & PSR_AA32_F_BIT) == 0) {
		return 1;
	}

	/*
	 * Force PSR to a valid 32-bit EL0t, preserving the same bits as
	 * arch/arm.
	 */
	regs->pstate &= PSR_AA32_N_BIT | PSR_AA32_Z_BIT |
			PSR_AA32_C_BIT | PSR_AA32_V_BIT |
			PSR_AA32_Q_BIT | PSR_AA32_IT_MASK |
			PSR_AA32_GE_MASK | PSR_AA32_E_BIT |
			PSR_AA32_T_BIT;
	regs->pstate |= PSR_MODE32_BIT;

	return 0;
}

static int valid_native_regs(struct user_pt_regs *regs)
{
	regs->pstate &= ~SPSR_EL1_AARCH64_RES0_BITS;

	if (user_mode(regs) && !(regs->pstate & PSR_MODE32_BIT) &&
	    (regs->pstate & PSR_D_BIT) == 0 &&
	    (regs->pstate & PSR_A_BIT) == 0 &&
	    (regs->pstate & PSR_I_BIT) == 0 &&
	    (regs->pstate & PSR_F_BIT) == 0) {
		return 1;
	}

	/* Force PSR to a valid 64-bit EL0t */
	regs->pstate &= PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT;

	return 0;
}

/*
 * Are the current registers suitable for user mode? (used to maintain
 * security in signal handlers)
 */
int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task)
{
	/* https://lore.kernel.org/lkml/20191118131525.GA4180@willie-the-truck */
	user_regs_reset_single_step(regs, task);

	if (is_compat_thread(task_thread_info(task)))
		return valid_compat_regs(regs);
	else
		return valid_native_regs(regs);
}