/*
 * Based on arch/arm/kernel/ptrace.c
 *
 * edited by Linus Torvalds
 * ARM modifications Copyright (C) 2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/nospec.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/seccomp.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/elf.h>

#include <asm/compat.h>
#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
#include <asm/fpsimd.h>
#include <asm/pgtable.h>
#include <asm/stacktrace.h>
#include <asm/syscall.h>
#include <asm/traps.h>
#include <asm/system_misc.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
#define GPR_OFFSET_NAME(r) \
	{.name = "x" #r, .offset = offsetof(struct pt_regs, regs[r])}

static const struct pt_regs_offset regoffset_table[] = {
	GPR_OFFSET_NAME(0),
	GPR_OFFSET_NAME(1),
	GPR_OFFSET_NAME(2),
	GPR_OFFSET_NAME(3),
	GPR_OFFSET_NAME(4),
	GPR_OFFSET_NAME(5),
	GPR_OFFSET_NAME(6),
	GPR_OFFSET_NAME(7),
	GPR_OFFSET_NAME(8),
	GPR_OFFSET_NAME(9),
	GPR_OFFSET_NAME(10),
	GPR_OFFSET_NAME(11),
	GPR_OFFSET_NAME(12),
	GPR_OFFSET_NAME(13),
	GPR_OFFSET_NAME(14),
	GPR_OFFSET_NAME(15),
	GPR_OFFSET_NAME(16),
	GPR_OFFSET_NAME(17),
	GPR_OFFSET_NAME(18),
	GPR_OFFSET_NAME(19),
	GPR_OFFSET_NAME(20),
	GPR_OFFSET_NAME(21),
	GPR_OFFSET_NAME(22),
	GPR_OFFSET_NAME(23),
	GPR_OFFSET_NAME(24),
	GPR_OFFSET_NAME(25),
	GPR_OFFSET_NAME(26),
	GPR_OFFSET_NAME(27),
	GPR_OFFSET_NAME(28),
	GPR_OFFSET_NAME(29),
	GPR_OFFSET_NAME(30),
	{.name = "lr", .offset = offsetof(struct pt_regs, regs[30])},
	REG_OFFSET_NAME(sp),
	REG_OFFSET_NAME(pc),
	REG_OFFSET_NAME(pstate),
	REG_OFFSET_END,
};

/**
 * regs_query_register_offset() - query register offset from its name
 * @name:	the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;

	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}

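/*
 * Illustrative use (not part of the original file): callers such as the
 * kprobes fetch-arg code can resolve a register by name once, then read
 * it out of a pt_regs by offset, e.g.:
 *
 *	int off = regs_query_register_offset("x3");
 *	if (off >= 0)
 *		val = *(u64 *)((char *)regs + off);
 */
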
/**
 * regs_within_kernel_stack() - check the address in the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @addr:	address which is checked.
 *
 * regs_within_kernel_stack() checks whether @addr is within the kernel
 * stack page(s). If @addr is within the kernel stack, it returns true.
 * If not, returns false.
 */
static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
	return ((addr & ~(THREAD_SIZE - 1)) ==
		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
		on_irq_stack(addr, NULL);
}

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:	stack entry number.
 *
 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
 * is specified by @regs. If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);

	addr += n;
	if (regs_within_kernel_stack(regs, (unsigned long)addr))
		return *addr;
	else
		return 0;
}

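/*
 * Example (illustrative): regs_get_kernel_stack_nth(regs, 0) reads the
 * word at the kernel stack pointer itself, n = 1 reads the next word up,
 * and an index that falls off the stack safely yields 0.
 */
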
/*
 * TODO: does not yet catch signals sent when the child dies.
 * in exit.c or in signal.c.
 */

/*
 * Called by kernel/ptrace.c when detaching.
 */
void ptrace_disable(struct task_struct *child)
{
	/*
	 * This would be better off in core code, but PTRACE_DETACH has
	 * grown its fair share of arch-specific warts and changing it
	 * is likely to cause regressions on obscure architectures.
	 */
	user_disable_single_step(child);
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
 * Handle hitting a HW-breakpoint.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
	siginfo_t info;

	clear_siginfo(&info);
	info.si_signo	= SIGTRAP;
	info.si_errno	= 0;
	info.si_code	= TRAP_HWBKPT;
	info.si_addr	= (void __user *)(bkpt->trigger);

#ifdef CONFIG_COMPAT
	if (is_compat_task()) {
		int si_errno = 0;
		int i;

		for (i = 0; i < ARM_MAX_BRP; ++i) {
			if (current->thread.debug.hbp_break[i] == bp) {
				si_errno = (i << 1) + 1;
				break;
			}
		}

		for (i = 0; i < ARM_MAX_WRP; ++i) {
			if (current->thread.debug.hbp_watch[i] == bp) {
				si_errno = -((i << 1) + 1);
				break;
			}
		}
		force_sig_ptrace_errno_trap(si_errno, (void __user *)bkpt->trigger);
	}
#endif
	arm64_force_sig_info(&info, "Hardware breakpoint trap (ptrace)", current);
}

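/*
 * Slot encoding used above, worked out for clarity: breakpoint slot i
 * reports si_errno = (i << 1) + 1 (1, 3, 5, ...), while watchpoint slot i
 * reports the negated value (-1, -3, -5, ...), so a compat debugger can
 * recover both the slot type and the index from si_errno.
 */
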
/*
 * Unregister breakpoints from this task and reset the pointers in
 * the thread_struct.
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < ARM_MAX_BRP; i++) {
		if (t->debug.hbp_break[i]) {
			unregister_hw_breakpoint(t->debug.hbp_break[i]);
			t->debug.hbp_break[i] = NULL;
		}
	}

	for (i = 0; i < ARM_MAX_WRP; i++) {
		if (t->debug.hbp_watch[i]) {
			unregister_hw_breakpoint(t->debug.hbp_watch[i]);
			t->debug.hbp_watch[i] = NULL;
		}
	}
}

void ptrace_hw_copy_thread(struct task_struct *tsk)
{
	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
}

static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
					       struct task_struct *tsk,
					       unsigned long idx)
{
	struct perf_event *bp = ERR_PTR(-EINVAL);

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx >= ARM_MAX_BRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_BRP);
		bp = tsk->thread.debug.hbp_break[idx];
		break;
	case NT_ARM_HW_WATCH:
		if (idx >= ARM_MAX_WRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_WRP);
		bp = tsk->thread.debug.hbp_watch[idx];
		break;
	}

out:
	return bp;
}

static int ptrace_hbp_set_event(unsigned int note_type,
				struct task_struct *tsk,
				unsigned long idx,
				struct perf_event *bp)
{
	int err = -EINVAL;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx >= ARM_MAX_BRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_BRP);
		tsk->thread.debug.hbp_break[idx] = bp;
		err = 0;
		break;
	case NT_ARM_HW_WATCH:
		if (idx >= ARM_MAX_WRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_WRP);
		tsk->thread.debug.hbp_watch[idx] = bp;
		err = 0;
		break;
	}

out:
	return err;
}

static struct perf_event *ptrace_hbp_create(unsigned int note_type,
					    struct task_struct *tsk,
					    unsigned long idx)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	int err, type;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		type = HW_BREAKPOINT_X;
		break;
	case NT_ARM_HW_WATCH:
		type = HW_BREAKPOINT_RW;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	ptrace_breakpoint_init(&attr);

	/*
	 * Initialise fields to sane defaults
	 * (i.e. values that will pass validation).
	 */
	attr.bp_addr	= 0;
	attr.bp_len	= HW_BREAKPOINT_LEN_4;
	attr.bp_type	= type;
	attr.disabled	= 1;

	bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
	if (IS_ERR(bp))
		return bp;

	err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
	if (err)
		return ERR_PTR(err);

	return bp;
}

static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
				     struct arch_hw_breakpoint_ctrl ctrl,
				     struct perf_event_attr *attr)
{
	int err, len, type, offset, disabled = !ctrl.enabled;

	attr->disabled = disabled;
	if (disabled)
		return 0;

	err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
	if (err)
		return err;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if ((type & HW_BREAKPOINT_X) != type)
			return -EINVAL;
		break;
	case NT_ARM_HW_WATCH:
		if ((type & HW_BREAKPOINT_RW) != type)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	attr->bp_len	= len;
	attr->bp_type	= type;
	attr->bp_addr	+= offset;

	return 0;
}

static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info)
{
	u8 num;
	u32 reg = 0;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		num = hw_breakpoint_slots(TYPE_INST);
		break;
	case NT_ARM_HW_WATCH:
		num = hw_breakpoint_slots(TYPE_DATA);
		break;
	default:
		return -EINVAL;
	}

	reg |= debug_monitors_arch();
	reg <<= 8;
	reg |= num;

	*info = reg;
	return 0;
}

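/*
 * Resulting info layout (derived from the shifts above): bits [15:8]
 * carry the debug architecture version, bits [7:0] the number of
 * breakpoint or watchpoint slots.
 */
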
static int ptrace_hbp_get_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 *ctrl)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
	return 0;
}

static int ptrace_hbp_get_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 *addr)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*addr = bp ? counter_arch_bp(bp)->address : 0;
	return 0;
}

static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
							struct task_struct *tsk,
							unsigned long idx)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (!bp)
		bp = ptrace_hbp_create(note_type, tsk, idx);

	return bp;
}

static int ptrace_hbp_set_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 uctrl)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint_ctrl ctrl;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	decode_ctrl_reg(uctrl, &ctrl);
	err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
	if (err)
		return err;

	return modify_user_hw_breakpoint(bp, &attr);
}

static int ptrace_hbp_set_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 addr)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	attr.bp_addr = addr;
	err = modify_user_hw_breakpoint(bp, &attr);
	return err;
}

#define PTRACE_HBP_ADDR_SZ	sizeof(u64)
#define PTRACE_HBP_CTRL_SZ	sizeof(u32)
#define PTRACE_HBP_PAD_SZ	sizeof(u32)

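/*
 * These sizes mirror struct user_hwdebug_state as consumed below: one u32
 * resource-info word plus a u32 pad, followed by regset->n records of
 * { u64 addr; u32 ctrl; u32 pad; }.
 */
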
static int hw_break_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 info, ctrl;
	u64 addr;

	/* Resource info */
	ret = ptrace_hbp_get_resource_info(note_type, &info);
	if (ret)
		return ret;

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &info, 0,
				  sizeof(info));
	if (ret)
		return ret;

	/* Pad */
	offset = offsetof(struct user_hwdebug_state, pad);
	ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, offset,
				       offset + PTRACE_HBP_PAD_SZ);
	if (ret)
		return ret;

	/* (address, ctrl) registers */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
		if (ret)
			return ret;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &addr,
					  offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
		if (ret)
			return ret;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &ctrl,
					  offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		/* Pad */
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       offset,
					       offset + PTRACE_HBP_PAD_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}

static int hw_break_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 ctrl;
	u64 addr;

	/* Resource info and pad */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
	if (ret)
		return ret;

	/* (address, ctrl) registers */
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		if (count < PTRACE_HBP_ADDR_SZ)
			return -EINVAL;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
					 offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		if (!count)
			break;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
					 offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		/* Pad */
		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						offset,
						offset + PTRACE_HBP_PAD_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
}

static int gpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_pt_regs newregs = task_pt_regs(target)->user_regs;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
	if (ret)
		return ret;

	if (!valid_user_regs(&newregs, target))
		return -EINVAL;

	task_pt_regs(target)->user_regs = newregs;
	return 0;
}

static int fpr_active(struct task_struct *target, const struct user_regset *regset)
{
	if (!system_supports_fpsimd())
		return -ENODEV;
	return regset->n;
}

/*
 * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
 */
static int __fpr_get(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     void *kbuf, void __user *ubuf, unsigned int start_pos)
{
	struct user_fpsimd_state *uregs;

	sve_sync_to_fpsimd(target);

	uregs = &target->thread.uw.fpsimd_state;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs,
				   start_pos, start_pos + sizeof(*uregs));
}

static int fpr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	if (!system_supports_fpsimd())
		return -EINVAL;

	if (target == current)
		fpsimd_preserve_current_state();

	return __fpr_get(target, regset, pos, count, kbuf, ubuf, 0);
}

static int __fpr_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf,
		     unsigned int start_pos)
{
	int ret;
	struct user_fpsimd_state newstate;

	/*
	 * Ensure target->thread.uw.fpsimd_state is up to date, so that a
	 * short copyin can't resurrect stale data.
	 */
	sve_sync_to_fpsimd(target);

	newstate = target->thread.uw.fpsimd_state;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate,
				 start_pos, start_pos + sizeof(newstate));
	if (ret)
		return ret;

	target->thread.uw.fpsimd_state = newstate;

	return ret;
}

static int fpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;

	if (!system_supports_fpsimd())
		return -EINVAL;

	ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0);
	if (ret)
		return ret;

	sve_sync_from_fpsimd_zeropad(target);
	fpsimd_flush_task_state(target);

	return ret;
}

static int tls_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	unsigned long *tls = &target->thread.uw.tp_value;

	if (target == current)
		tls_preserve_current_state();

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, tls, 0, -1);
}

static int tls_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	unsigned long tls = target->thread.uw.tp_value;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
	if (ret)
		return ret;

	target->thread.uw.tp_value = tls;
	return ret;
}

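/*
 * Userspace view (illustrative, not part of this file): a tracer reads
 * this regset with PTRACE_GETREGSET and the NT_ARM_TLS note type, e.g.:
 *
 *	unsigned long tls;
 *	struct iovec iov = { .iov_base = &tls, .iov_len = sizeof(tls) };
 *	ptrace(PTRACE_GETREGSET, pid, NT_ARM_TLS, &iov);
 */
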
static int system_call_get(struct task_struct *target,
			   const struct user_regset *regset,
			   unsigned int pos, unsigned int count,
			   void *kbuf, void __user *ubuf)
{
	int syscallno = task_pt_regs(target)->syscallno;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &syscallno, 0, -1);
}

static int system_call_set(struct task_struct *target,
			   const struct user_regset *regset,
			   unsigned int pos, unsigned int count,
			   const void *kbuf, const void __user *ubuf)
{
	int syscallno = task_pt_regs(target)->syscallno;
	int ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
	if (ret)
		return ret;

	task_pt_regs(target)->syscallno = syscallno;
	return ret;
}

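/*
 * Illustrative tracer usage (not part of this file): rewriting the
 * syscall number at a syscall stop goes through NT_ARM_SYSTEM_CALL, e.g.:
 *
 *	int nr = -1;	/. hypothetical: -1 requests skipping the syscall ./
 *	struct iovec iov = { .iov_base = &nr, .iov_len = sizeof(nr) };
 *	ptrace(PTRACE_SETREGSET, pid, NT_ARM_SYSTEM_CALL, &iov);
 */
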
#ifdef CONFIG_ARM64_SVE

static void sve_init_header_from_task(struct user_sve_header *header,
				      struct task_struct *target)
{
	unsigned int vq;

	memset(header, 0, sizeof(*header));

	header->flags = test_tsk_thread_flag(target, TIF_SVE) ?
		SVE_PT_REGS_SVE : SVE_PT_REGS_FPSIMD;
	if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT))
		header->flags |= SVE_PT_VL_INHERIT;

	header->vl = target->thread.sve_vl;
	vq = sve_vq_from_vl(header->vl);

	header->max_vl = sve_max_vl;
	header->size = SVE_PT_SIZE(vq, header->flags);
	header->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl),
				       SVE_PT_REGS_SVE);
}

static unsigned int sve_size_from_header(struct user_sve_header const *header)
{
	return ALIGN(header->size, SVE_VQ_BYTES);
}

static unsigned int sve_get_size(struct task_struct *target,
				 const struct user_regset *regset)
{
	struct user_sve_header header;

	if (!system_supports_sve())
		return 0;

	sve_init_header_from_task(&header, target);
	return sve_size_from_header(&header);
}

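/*
 * Usage note (illustrative): since the payload size depends on the task's
 * vector length, a debugger typically reads the header first, sizes its
 * buffer from header.size (as reported by sve_get_size() via the regset's
 * get_size hook), and only then fetches the full regset.
 */
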
static int sve_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	int ret;
	struct user_sve_header header;
	unsigned int vq;
	unsigned long start, end;

	if (!system_supports_sve())
		return -EINVAL;

	/* Header */
	sve_init_header_from_task(&header, target);
	vq = sve_vq_from_vl(header.vl);

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &header,
				  0, sizeof(header));
	if (ret)
		return ret;

	if (target == current)
		fpsimd_preserve_current_state();

	/* Registers: FPSIMD-only case */

	BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
	if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD)
		return __fpr_get(target, regset, pos, count, kbuf, ubuf,
				 SVE_PT_FPSIMD_OFFSET);

	/* Otherwise: full SVE case */

	BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
	start = SVE_PT_SVE_OFFSET;
	end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  target->thread.sve_state,
				  start, end);
	if (ret)
		return ret;

	start = end;
	end = SVE_PT_SVE_FPSR_OFFSET(vq);
	ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
				       start, end);
	if (ret)
		return ret;

	/*
	 * Copy fpsr, and fpcr which must follow contiguously in
	 * struct fpsimd_state:
	 */
	start = end;
	end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &target->thread.uw.fpsimd_state.fpsr,
				  start, end);
	if (ret)
		return ret;

	start = end;
	end = sve_size_from_header(&header);
	return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					start, end);
}

static int sve_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_sve_header header;
	unsigned int vq;
	unsigned long start, end;

	if (!system_supports_sve())
		return -EINVAL;

	/* Header */
	if (count < sizeof(header))
		return -EINVAL;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header,
				 0, sizeof(header));
	if (ret)
		goto out;

	/*
	 * Apart from SVE_PT_REGS_MASK, all SVE_PT_* flags are consumed by
	 * sve_set_vector_length(), which will also validate them for us:
	 */
	ret = sve_set_vector_length(target, header.vl,
		((unsigned long)header.flags & ~SVE_PT_REGS_MASK) << 16);
	if (ret)
		goto out;

	/* Actual VL set may be less than the user asked for: */
	vq = sve_vq_from_vl(target->thread.sve_vl);

	/* Registers: FPSIMD-only case */

	BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
	if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD) {
		ret = __fpr_set(target, regset, pos, count, kbuf, ubuf,
				SVE_PT_FPSIMD_OFFSET);
		clear_tsk_thread_flag(target, TIF_SVE);
		goto out;
	}

	/* Otherwise: full SVE case */

	/*
	 * If setting a different VL from the requested VL and there is
	 * register data, the data layout will be wrong: don't even
	 * try to set the registers in this case.
	 */
	if (count && vq != sve_vq_from_vl(header.vl)) {
		ret = -EIO;
		goto out;
	}

	sve_alloc(target);

	/*
	 * Ensure target->thread.sve_state is up to date with target's
	 * FPSIMD regs, so that a short copyin leaves trailing registers
	 * unmodified.
	 */
	fpsimd_sync_to_sve(target);
	set_tsk_thread_flag(target, TIF_SVE);

	BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
	start = SVE_PT_SVE_OFFSET;
	end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 target->thread.sve_state,
				 start, end);
	if (ret)
		goto out;

	start = end;
	end = SVE_PT_SVE_FPSR_OFFSET(vq);
	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					start, end);
	if (ret)
		goto out;

	/*
	 * Copy fpsr, and fpcr which must follow contiguously in
	 * struct fpsimd_state:
	 */
	start = end;
	end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.uw.fpsimd_state.fpsr,
				 start, end);

out:
	fpsimd_flush_task_state(target);
	return ret;
}

#endif /* CONFIG_ARM64_SVE */

enum aarch64_regset {
	REGSET_GPR,
	REGSET_FPR,
	REGSET_TLS,
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	REGSET_HW_BREAK,
	REGSET_HW_WATCH,
#endif
	REGSET_SYSTEM_CALL,
#ifdef CONFIG_ARM64_SVE
	REGSET_SVE,
#endif
};

static const struct user_regset aarch64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_pt_regs) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.get = gpr_get,
		.set = gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fpsimd_state) / sizeof(u32),
		/*
		 * We pretend we have 32-bit registers because the fpsr and
		 * fpcr are 32-bits wide.
		 */
		.size = sizeof(u32),
		.align = sizeof(u32),
		.active = fpr_active,
		.get = fpr_get,
		.set = fpr_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 1,
		.size = sizeof(void *),
		.align = sizeof(void *),
		.get = tls_get,
		.set = tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_ARM_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.get = system_call_get,
		.set = system_call_set,
	},
#ifdef CONFIG_ARM64_SVE
	[REGSET_SVE] = { /* Scalable Vector Extension */
		.core_note_type = NT_ARM_SVE,
		.n = DIV_ROUND_UP(SVE_PT_SIZE(SVE_VQ_MAX, SVE_PT_REGS_SVE),
				  SVE_VQ_BYTES),
		.size = SVE_VQ_BYTES,
		.align = SVE_VQ_BYTES,
		.get = sve_get,
		.set = sve_set,
		.get_size = sve_get_size,
	},
#endif
};

static const struct user_regset_view user_aarch64_view = {
	.name = "aarch64", .e_machine = EM_AARCH64,
	.regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets)
};

#ifdef CONFIG_COMPAT
enum compat_regset {
	REGSET_COMPAT_GPR,
	REGSET_COMPAT_VFP,
};

static int compat_gpr_get(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  void *kbuf, void __user *ubuf)
{
	int ret = 0;
	unsigned int i, start, num_regs;

	/* Calculate the number of AArch32 registers contained in count */
	num_regs = count / regset->size;

	/* Convert pos into a register number */
	start = pos / regset->size;

	if (start + num_regs > regset->n)
		return -EIO;

	for (i = 0; i < num_regs; ++i) {
		unsigned int idx = start + i;
		compat_ulong_t reg;

		switch (idx) {
		case 15:
			reg = task_pt_regs(target)->pc;
			break;
		case 16:
			reg = task_pt_regs(target)->pstate;
			reg = pstate_to_compat_psr(reg);
			break;
		case 17:
			reg = task_pt_regs(target)->orig_x0;
			break;
		default:
			reg = task_pt_regs(target)->regs[idx];
		}

		if (kbuf) {
			memcpy(kbuf, &reg, sizeof(reg));
			kbuf += sizeof(reg);
		} else {
			ret = copy_to_user(ubuf, &reg, sizeof(reg));
			if (ret) {
				ret = -EFAULT;
				break;
			}

			ubuf += sizeof(reg);
		}
	}

	return ret;
}

static int compat_gpr_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct pt_regs newregs;
	int ret = 0;
	unsigned int i, start, num_regs;

	/* Calculate the number of AArch32 registers contained in count */
	num_regs = count / regset->size;

	/* Convert pos into a register number */
	start = pos / regset->size;

	if (start + num_regs > regset->n)
		return -EIO;

	newregs = *task_pt_regs(target);

	for (i = 0; i < num_regs; ++i) {
		unsigned int idx = start + i;
		compat_ulong_t reg;

		if (kbuf) {
			memcpy(&reg, kbuf, sizeof(reg));
			kbuf += sizeof(reg);
		} else {
			ret = copy_from_user(&reg, ubuf, sizeof(reg));
			if (ret) {
				ret = -EFAULT;
				break;
			}

			ubuf += sizeof(reg);
		}

		switch (idx) {
		case 15:
			newregs.pc = reg;
			break;
		case 16:
			reg = compat_psr_to_pstate(reg);
			newregs.pstate = reg;
			break;
		case 17:
			newregs.orig_x0 = reg;
			break;
		default:
			newregs.regs[idx] = reg;
		}
	}

	if (valid_user_regs(&newregs.user_regs, target))
		*task_pt_regs(target) = newregs;
	else
		ret = -EINVAL;

	return ret;
}

static int compat_vfp_get(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  void *kbuf, void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret, vregs_end_pos;

	if (!system_supports_fpsimd())
		return -EINVAL;

	uregs = &target->thread.uw.fpsimd_state;

	if (target == current)
		fpsimd_preserve_current_state();

	/*
	 * The VFP registers are packed into the fpsimd_state, so they all sit
	 * nicely together for us. We just need to create the fpscr separately.
	 */
	vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs,
				  0, vregs_end_pos);

	if (count && !ret) {
		fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) |
			(uregs->fpcr & VFP_FPSCR_CTRL_MASK);

		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &fpscr,
					  vregs_end_pos, VFP_STATE_SIZE);
	}

	return ret;
}

static int compat_vfp_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret, vregs_end_pos;

	if (!system_supports_fpsimd())
		return -EINVAL;

	uregs = &target->thread.uw.fpsimd_state;

	vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
				 vregs_end_pos);

	if (count && !ret) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpscr,
					 vregs_end_pos, VFP_STATE_SIZE);
		if (!ret) {
			uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK;
			uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
		}
	}

	fpsimd_flush_task_state(target);
	return ret;
}

static int compat_tls_get(struct task_struct *target,
			  const struct user_regset *regset, unsigned int pos,
			  unsigned int count, void *kbuf, void __user *ubuf)
{
	compat_ulong_t tls = (compat_ulong_t)target->thread.uw.tp_value;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
}

static int compat_tls_set(struct task_struct *target,
			  const struct user_regset *regset, unsigned int pos,
			  unsigned int count, const void *kbuf,
			  const void __user *ubuf)
{
	int ret;
	compat_ulong_t tls = target->thread.uw.tp_value;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
	if (ret)
		return ret;

	target->thread.uw.tp_value = tls;
	return ret;
}

static const struct user_regset aarch32_regsets[] = {
	[REGSET_COMPAT_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_COMPAT_VFP] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.active = fpr_active,
		.get = compat_vfp_get,
		.set = compat_vfp_set
	},
};

static const struct user_regset_view user_aarch32_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
};

static const struct user_regset aarch32_ptrace_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.get = compat_vfp_get,
		.set = compat_vfp_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 1,
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.get = compat_tls_get,
		.set = compat_tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_ARM_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.get = system_call_get,
		.set = system_call_set,
	},
};

static const struct user_regset_view user_aarch32_ptrace_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_ptrace_regsets, .n = ARRAY_SIZE(aarch32_ptrace_regsets)
};

static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
				   compat_ulong_t __user *ret)
{
	compat_ulong_t tmp;

	if (off & 3)
		return -EIO;

	if (off == COMPAT_PT_TEXT_ADDR)
		tmp = tsk->mm->start_code;
	else if (off == COMPAT_PT_DATA_ADDR)
		tmp = tsk->mm->start_data;
	else if (off == COMPAT_PT_TEXT_END_ADDR)
		tmp = tsk->mm->end_code;
	else if (off < sizeof(compat_elf_gregset_t))
		return copy_regset_to_user(tsk, &user_aarch32_view,
					   REGSET_COMPAT_GPR, off,
					   sizeof(compat_ulong_t), ret);
	else if (off >= COMPAT_USER_SZ)
		return -EIO;
	else
		tmp = 0;

	return put_user(tmp, ret);
}

static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
				    compat_ulong_t val)
{
	int ret;
	mm_segment_t old_fs = get_fs();

	if (off & 3 || off >= COMPAT_USER_SZ)
		return -EIO;

	if (off >= sizeof(compat_elf_gregset_t))
		return 0;

	set_fs(KERNEL_DS);
	ret = copy_regset_from_user(tsk, &user_aarch32_view,
				    REGSET_COMPAT_GPR, off,
				    sizeof(compat_ulong_t),
				    &val);
	set_fs(old_fs);

	return ret;
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT

/*
 * Convert a virtual register number into an index for a thread_info
 * breakpoint array. Breakpoints are identified using positive numbers
 * whilst watchpoints are negative. The registers are laid out as pairs
 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
 * Register 0 is reserved for describing resource information.
 */
static int compat_ptrace_hbp_num_to_idx(compat_long_t num)
{
	return (abs(num) - 1) >> 1;
}

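/*
 * Worked example (derived from the formula above): register numbers 1
 * and 2 (the first (address, control) pair) both map to idx 0, numbers
 * 3 and 4 map to idx 1, and so on; the sign only selects watchpoint
 * versus breakpoint.
 */
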
static int compat_ptrace_hbp_get_resource_info(u32 *kdata)
{
	u8 num_brps, num_wrps, debug_arch, wp_len;
	u32 reg = 0;

	num_brps	= hw_breakpoint_slots(TYPE_INST);
	num_wrps	= hw_breakpoint_slots(TYPE_DATA);

	debug_arch	= debug_monitors_arch();
	wp_len		= 8;
	reg		|= debug_arch;
	reg		<<= 8;
	reg		|= wp_len;
	reg		<<= 8;
	reg		|= num_wrps;
	reg		<<= 8;
	reg		|= num_brps;

	*kdata = reg;
	return 0;
}

static int compat_ptrace_hbp_get(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr = 0;
	u32 ctrl = 0;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
		*kdata = (u32)addr;
	} else {
		err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl);
		*kdata = ctrl;
	}

	return err;
}

static int compat_ptrace_hbp_set(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr;
	u32 ctrl;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		addr = *kdata;
		err = ptrace_hbp_set_addr(note_type, tsk, idx, addr);
	} else {
		ctrl = *kdata;
		err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl);
	}

	return err;
}

static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata;

	/* Watchpoint */
	if (num < 0) {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
	/* Resource info */
	} else if (num == 0) {
		ret = compat_ptrace_hbp_get_resource_info(&kdata);
	/* Breakpoint */
	} else {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
	}

	if (!ret)
		ret = put_user(kdata, data);

	return ret;
}

static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata = 0;

	if (num == 0)
		return 0;

	ret = get_user(kdata, data);
	if (ret)
		return ret;

	if (num < 0)
		ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
	else
		ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);

	return ret;
}
#endif	/* CONFIG_HAVE_HW_BREAKPOINT */

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;

	switch (request) {
		case PTRACE_PEEKUSR:
			ret = compat_ptrace_read_user(child, addr, datap);
			break;

		case PTRACE_POKEUSR:
			ret = compat_ptrace_write_user(child, addr, data);
			break;

		case COMPAT_PTRACE_GETREGS:
			ret = copy_regset_to_user(child,
						  &user_aarch32_view,
						  REGSET_COMPAT_GPR,
						  0, sizeof(compat_elf_gregset_t),
						  datap);
			break;

		case COMPAT_PTRACE_SETREGS:
			ret = copy_regset_from_user(child,
						    &user_aarch32_view,
						    REGSET_COMPAT_GPR,
						    0, sizeof(compat_elf_gregset_t),
						    datap);
			break;

		case COMPAT_PTRACE_GET_THREAD_AREA:
			ret = put_user((compat_ulong_t)child->thread.uw.tp_value,
				       (compat_ulong_t __user *)datap);
			break;

		case COMPAT_PTRACE_SET_SYSCALL:
			task_pt_regs(child)->syscallno = data;
			ret = 0;
			break;

		case COMPAT_PTRACE_GETVFPREGS:
			ret = copy_regset_to_user(child,
						  &user_aarch32_view,
						  REGSET_COMPAT_VFP,
						  0, VFP_STATE_SIZE,
						  datap);
			break;

		case COMPAT_PTRACE_SETVFPREGS:
			ret = copy_regset_from_user(child,
						    &user_aarch32_view,
						    REGSET_COMPAT_VFP,
						    0, VFP_STATE_SIZE,
						    datap);
			break;

#ifdef CONFIG_HAVE_HW_BREAKPOINT
		case COMPAT_PTRACE_GETHBPREGS:
			ret = compat_ptrace_gethbpregs(child, addr, datap);
			break;

		case COMPAT_PTRACE_SETHBPREGS:
			ret = compat_ptrace_sethbpregs(child, addr, datap);
			break;
#endif

		default:
			ret = compat_ptrace_request(child, request, addr,
						    data);
			break;
	}

	return ret;
}
#endif /* CONFIG_COMPAT */

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	/*
	 * Core dumping of 32-bit tasks or compat ptrace requests must use the
	 * user_aarch32_view compatible with arm32. Native ptrace requests on
	 * 32-bit children use an extended user_aarch32_ptrace_view to allow
	 * access to the TLS register.
	 */
	if (is_compat_task())
		return &user_aarch32_view;
	else if (is_compat_thread(task_thread_info(task)))
		return &user_aarch32_ptrace_view;
#endif
	return &user_aarch64_view;
}

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	return ptrace_request(child, request, addr, data);
}

enum ptrace_syscall_dir {
	PTRACE_SYSCALL_ENTER = 0,
	PTRACE_SYSCALL_EXIT,
};

static void tracehook_report_syscall(struct pt_regs *regs,
				     enum ptrace_syscall_dir dir)
{
	int regno;
	unsigned long saved_reg;

	/*
	 * A scratch register (ip(r12) on AArch32, x7 on AArch64) is
	 * used to denote syscall entry/exit:
	 */
	regno = (is_compat_task() ? 12 : 7);
	saved_reg = regs->regs[regno];
	regs->regs[regno] = dir;

	if (dir == PTRACE_SYSCALL_ENTER) {
		if (tracehook_report_syscall_entry(regs))
			forget_syscall(regs);
		regs->regs[regno] = saved_reg;
	} else if (!test_thread_flag(TIF_SINGLESTEP)) {
		tracehook_report_syscall_exit(regs, 0);
		regs->regs[regno] = saved_reg;
	} else {
		regs->regs[regno] = saved_reg;

		/*
		 * Signal a pseudo-step exception since we are stepping but
		 * tracer modifications to the registers may have rewound the
		 * state machine.
		 */
		tracehook_report_syscall_exit(regs, 1);
	}
}

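/*
 * Tracer-visible convention (illustrative): at a syscall-entry stop the
 * traced task's x7 (r12 for compat tasks) reads as PTRACE_SYSCALL_ENTER
 * (0), and at a syscall-exit stop as PTRACE_SYSCALL_EXIT (1); the saved
 * value is restored before the task resumes.
 */
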
int syscall_trace_enter(struct pt_regs *regs)
{
	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);

	/* Do the secure computing after ptrace; failures should be fast. */
	if (secure_computing(NULL) == -1)
		return -1;

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, regs->syscallno);

	audit_syscall_entry(regs->syscallno, regs->orig_x0, regs->regs[1],
			    regs->regs[2], regs->regs[3]);

	return regs->syscallno;
}

void syscall_trace_exit(struct pt_regs *regs)
{
	unsigned long flags = READ_ONCE(current_thread_info()->flags);

	audit_syscall_exit(regs);

	if (flags & _TIF_SYSCALL_TRACEPOINT)
		trace_sys_exit(regs, regs_return_value(regs));

	if (flags & (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);

	rseq_syscall(regs);
}

/*
 * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487D.a.
 * We permit userspace to set SSBS (AArch64 bit 12, AArch32 bit 23) which is
 * not described in ARM DDI 0487D.a.
 * We treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may
 * be allocated an EL0 meaning in future.
 * Userspace cannot use these until they have an architectural meaning.
 * Note that this follows the SPSR_ELx format, not the AArch32 PSR format.
 * We also reserve IL for the kernel; SS is handled dynamically.
 */
#define SPSR_EL1_AARCH64_RES0_BITS \
	(GENMASK_ULL(63, 32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \
	 GENMASK_ULL(20, 13) | GENMASK_ULL(11, 10) | GENMASK_ULL(5, 5))
#define SPSR_EL1_AARCH32_RES0_BITS \
	(GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20))

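/*
 * Example (derived from the masks above): AArch64 bit 21 (SS) and bit 12
 * (SSBS) are deliberately absent from SPSR_EL1_AARCH64_RES0_BITS, so a
 * debugger may write them, whereas a write to, say, bit 22 is silently
 * discarded by valid_native_regs() below.
 */
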
static int valid_compat_regs(struct user_pt_regs *regs)
{
	regs->pstate &= ~SPSR_EL1_AARCH32_RES0_BITS;

	if (!system_supports_mixed_endian_el0()) {
		if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
			regs->pstate |= PSR_AA32_E_BIT;
		else
			regs->pstate &= ~PSR_AA32_E_BIT;
	}

	if (user_mode(regs) && (regs->pstate & PSR_MODE32_BIT) &&
	    (regs->pstate & PSR_AA32_A_BIT) == 0 &&
	    (regs->pstate & PSR_AA32_I_BIT) == 0 &&
	    (regs->pstate & PSR_AA32_F_BIT) == 0) {
		return 1;
	}

	/*
	 * Force PSR to a valid 32-bit EL0t, preserving the same bits as
	 * arch/arm.
	 */
	regs->pstate &= PSR_AA32_N_BIT | PSR_AA32_Z_BIT |
			PSR_AA32_C_BIT | PSR_AA32_V_BIT |
			PSR_AA32_Q_BIT | PSR_AA32_IT_MASK |
			PSR_AA32_GE_MASK | PSR_AA32_E_BIT |
			PSR_AA32_T_BIT;
	regs->pstate |= PSR_MODE32_BIT;

	return 0;
}

static int valid_native_regs(struct user_pt_regs *regs)
{
	regs->pstate &= ~SPSR_EL1_AARCH64_RES0_BITS;

	if (user_mode(regs) && !(regs->pstate & PSR_MODE32_BIT) &&
	    (regs->pstate & PSR_D_BIT) == 0 &&
	    (regs->pstate & PSR_A_BIT) == 0 &&
	    (regs->pstate & PSR_I_BIT) == 0 &&
	    (regs->pstate & PSR_F_BIT) == 0) {
		return 1;
	}

	/* Force PSR to a valid 64-bit EL0t */
	regs->pstate &= PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT;

	return 0;
}

/*
 * Are the current registers suitable for user mode? (used to maintain
 * security in signal handlers)
 */
int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task)
{
	/* https://lore.kernel.org/lkml/20191118131525.GA4180@willie-the-truck */
	user_regs_reset_single_step(regs, task);

	if (is_compat_thread(task_thread_info(task)))
		return valid_compat_regs(regs);
	else
		return valid_native_regs(regs);
}