GNU Linux-libre 4.19.245-gnu1
[releases.git] / arch / arm64 / kernel / ptrace.c
1 /*
2  * Based on arch/arm/kernel/ptrace.c
3  *
4  * By Ross Biro 1/23/92
5  * edited by Linus Torvalds
6  * ARM modifications Copyright (C) 2000 Russell King
7  * Copyright (C) 2012 ARM Ltd.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License version 2 as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
20  */
21
22 #include <linux/audit.h>
23 #include <linux/compat.h>
24 #include <linux/kernel.h>
25 #include <linux/sched/signal.h>
26 #include <linux/sched/task_stack.h>
27 #include <linux/mm.h>
28 #include <linux/nospec.h>
29 #include <linux/smp.h>
30 #include <linux/ptrace.h>
31 #include <linux/user.h>
32 #include <linux/seccomp.h>
33 #include <linux/security.h>
34 #include <linux/init.h>
35 #include <linux/signal.h>
36 #include <linux/string.h>
37 #include <linux/uaccess.h>
38 #include <linux/perf_event.h>
39 #include <linux/hw_breakpoint.h>
40 #include <linux/regset.h>
41 #include <linux/tracehook.h>
42 #include <linux/elf.h>
43
44 #include <asm/compat.h>
45 #include <asm/cpufeature.h>
46 #include <asm/debug-monitors.h>
47 #include <asm/fpsimd.h>
48 #include <asm/pgtable.h>
49 #include <asm/stacktrace.h>
50 #include <asm/syscall.h>
51 #include <asm/traps.h>
52 #include <asm/system_misc.h>
53
54 #define CREATE_TRACE_POINTS
55 #include <trace/events/syscalls.h>
56
/*
 * Maps a register name to its byte offset within struct pt_regs; used by
 * regs_query_register_offset() below.
 */
struct pt_regs_offset {
	const char *name;
	int offset;
};

/* Entry for a field of struct pt_regs named after itself (sp, pc, pstate). */
#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
/* NULL-named sentinel terminating the table. */
#define REG_OFFSET_END {.name = NULL, .offset = 0}
/* Entry for general-purpose register "x<r>" stored in pt_regs::regs[r]. */
#define GPR_OFFSET_NAME(r) \
	{.name = "x" #r, .offset = offsetof(struct pt_regs, regs[r])}

static const struct pt_regs_offset regoffset_table[] = {
	GPR_OFFSET_NAME(0),
	GPR_OFFSET_NAME(1),
	GPR_OFFSET_NAME(2),
	GPR_OFFSET_NAME(3),
	GPR_OFFSET_NAME(4),
	GPR_OFFSET_NAME(5),
	GPR_OFFSET_NAME(6),
	GPR_OFFSET_NAME(7),
	GPR_OFFSET_NAME(8),
	GPR_OFFSET_NAME(9),
	GPR_OFFSET_NAME(10),
	GPR_OFFSET_NAME(11),
	GPR_OFFSET_NAME(12),
	GPR_OFFSET_NAME(13),
	GPR_OFFSET_NAME(14),
	GPR_OFFSET_NAME(15),
	GPR_OFFSET_NAME(16),
	GPR_OFFSET_NAME(17),
	GPR_OFFSET_NAME(18),
	GPR_OFFSET_NAME(19),
	GPR_OFFSET_NAME(20),
	GPR_OFFSET_NAME(21),
	GPR_OFFSET_NAME(22),
	GPR_OFFSET_NAME(23),
	GPR_OFFSET_NAME(24),
	GPR_OFFSET_NAME(25),
	GPR_OFFSET_NAME(26),
	GPR_OFFSET_NAME(27),
	GPR_OFFSET_NAME(28),
	GPR_OFFSET_NAME(29),
	GPR_OFFSET_NAME(30),
	/* "lr" is an alias for x30 (same pt_regs slot). */
	{.name = "lr", .offset = offsetof(struct pt_regs, regs[30])},
	REG_OFFSET_NAME(sp),
	REG_OFFSET_NAME(pc),
	REG_OFFSET_NAME(pstate),
	REG_OFFSET_END,
};
105
106 /**
107  * regs_query_register_offset() - query register offset from its name
108  * @name:       the name of a register
109  *
110  * regs_query_register_offset() returns the offset of a register in struct
111  * pt_regs from its name. If the name is invalid, this returns -EINVAL;
112  */
113 int regs_query_register_offset(const char *name)
114 {
115         const struct pt_regs_offset *roff;
116
117         for (roff = regoffset_table; roff->name != NULL; roff++)
118                 if (!strcmp(roff->name, name))
119                         return roff->offset;
120         return -EINVAL;
121 }
122
123 /**
124  * regs_within_kernel_stack() - check the address in the stack
125  * @regs:      pt_regs which contains kernel stack pointer.
126  * @addr:      address which is checked.
127  *
128  * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
129  * If @addr is within the kernel stack, it returns true. If not, returns false.
130  */
131 static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
132 {
133         return ((addr & ~(THREAD_SIZE - 1))  ==
134                 (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
135                 on_irq_stack(addr, NULL);
136 }
137
/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * Returns the @n th unsigned-long-sized entry above the kernel stack
 * pointer of @regs, or 0 if that slot is outside the kernel stack.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long *slot = (unsigned long *)kernel_stack_pointer(regs) + n;

	if (!regs_within_kernel_stack(regs, (unsigned long)slot))
		return 0;

	return *slot;
}
157
/*
 * TODO: signals sent while the child is dying (from exit.c or signal.c)
 * are not yet caught here.
 */
162
/*
 * Called by kernel/ptrace.c when detaching from a traced task.
 */
void ptrace_disable(struct task_struct *child)
{
	/*
	 * This would be better off in core code, but PTRACE_DETACH has
	 * grown its fair share of arch-specific warts and changing it
	 * is likely to cause regressions on obscure architectures.
	 */
	user_disable_single_step(child);
}
175
176 #ifdef CONFIG_HAVE_HW_BREAKPOINT
177 /*
178  * Handle hitting a HW-breakpoint.
179  */
180 static void ptrace_hbptriggered(struct perf_event *bp,
181                                 struct perf_sample_data *data,
182                                 struct pt_regs *regs)
183 {
184         struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
185         siginfo_t info;
186
187         clear_siginfo(&info);
188         info.si_signo   = SIGTRAP;
189         info.si_errno   = 0;
190         info.si_code    = TRAP_HWBKPT;
191         info.si_addr    = (void __user *)(bkpt->trigger);
192
193 #ifdef CONFIG_COMPAT
194         if (is_compat_task()) {
195                 int si_errno = 0;
196                 int i;
197
198                 for (i = 0; i < ARM_MAX_BRP; ++i) {
199                         if (current->thread.debug.hbp_break[i] == bp) {
200                                 si_errno = (i << 1) + 1;
201                                 break;
202                         }
203                 }
204
205                 for (i = 0; i < ARM_MAX_WRP; ++i) {
206                         if (current->thread.debug.hbp_watch[i] == bp) {
207                                 si_errno = -((i << 1) + 1);
208                                 break;
209                         }
210                 }
211                 force_sig_ptrace_errno_trap(si_errno, (void __user *)bkpt->trigger);
212         }
213 #endif
214         arm64_force_sig_info(&info, "Hardware breakpoint trap (ptrace)", current);
215 }
216
217 /*
218  * Unregister breakpoints from this task and reset the pointers in
219  * the thread_struct.
220  */
221 void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
222 {
223         int i;
224         struct thread_struct *t = &tsk->thread;
225
226         for (i = 0; i < ARM_MAX_BRP; i++) {
227                 if (t->debug.hbp_break[i]) {
228                         unregister_hw_breakpoint(t->debug.hbp_break[i]);
229                         t->debug.hbp_break[i] = NULL;
230                 }
231         }
232
233         for (i = 0; i < ARM_MAX_WRP; i++) {
234                 if (t->debug.hbp_watch[i]) {
235                         unregister_hw_breakpoint(t->debug.hbp_watch[i]);
236                         t->debug.hbp_watch[i] = NULL;
237                 }
238         }
239 }
240
241 void ptrace_hw_copy_thread(struct task_struct *tsk)
242 {
243         memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
244 }
245
246 static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
247                                                struct task_struct *tsk,
248                                                unsigned long idx)
249 {
250         struct perf_event *bp = ERR_PTR(-EINVAL);
251
252         switch (note_type) {
253         case NT_ARM_HW_BREAK:
254                 if (idx >= ARM_MAX_BRP)
255                         goto out;
256                 idx = array_index_nospec(idx, ARM_MAX_BRP);
257                 bp = tsk->thread.debug.hbp_break[idx];
258                 break;
259         case NT_ARM_HW_WATCH:
260                 if (idx >= ARM_MAX_WRP)
261                         goto out;
262                 idx = array_index_nospec(idx, ARM_MAX_WRP);
263                 bp = tsk->thread.debug.hbp_watch[idx];
264                 break;
265         }
266
267 out:
268         return bp;
269 }
270
271 static int ptrace_hbp_set_event(unsigned int note_type,
272                                 struct task_struct *tsk,
273                                 unsigned long idx,
274                                 struct perf_event *bp)
275 {
276         int err = -EINVAL;
277
278         switch (note_type) {
279         case NT_ARM_HW_BREAK:
280                 if (idx >= ARM_MAX_BRP)
281                         goto out;
282                 idx = array_index_nospec(idx, ARM_MAX_BRP);
283                 tsk->thread.debug.hbp_break[idx] = bp;
284                 err = 0;
285                 break;
286         case NT_ARM_HW_WATCH:
287                 if (idx >= ARM_MAX_WRP)
288                         goto out;
289                 idx = array_index_nospec(idx, ARM_MAX_WRP);
290                 tsk->thread.debug.hbp_watch[idx] = bp;
291                 err = 0;
292                 break;
293         }
294
295 out:
296         return err;
297 }
298
299 static struct perf_event *ptrace_hbp_create(unsigned int note_type,
300                                             struct task_struct *tsk,
301                                             unsigned long idx)
302 {
303         struct perf_event *bp;
304         struct perf_event_attr attr;
305         int err, type;
306
307         switch (note_type) {
308         case NT_ARM_HW_BREAK:
309                 type = HW_BREAKPOINT_X;
310                 break;
311         case NT_ARM_HW_WATCH:
312                 type = HW_BREAKPOINT_RW;
313                 break;
314         default:
315                 return ERR_PTR(-EINVAL);
316         }
317
318         ptrace_breakpoint_init(&attr);
319
320         /*
321          * Initialise fields to sane defaults
322          * (i.e. values that will pass validation).
323          */
324         attr.bp_addr    = 0;
325         attr.bp_len     = HW_BREAKPOINT_LEN_4;
326         attr.bp_type    = type;
327         attr.disabled   = 1;
328
329         bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
330         if (IS_ERR(bp))
331                 return bp;
332
333         err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
334         if (err)
335                 return ERR_PTR(err);
336
337         return bp;
338 }
339
340 static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
341                                      struct arch_hw_breakpoint_ctrl ctrl,
342                                      struct perf_event_attr *attr)
343 {
344         int err, len, type, offset, disabled = !ctrl.enabled;
345
346         attr->disabled = disabled;
347         if (disabled)
348                 return 0;
349
350         err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
351         if (err)
352                 return err;
353
354         switch (note_type) {
355         case NT_ARM_HW_BREAK:
356                 if ((type & HW_BREAKPOINT_X) != type)
357                         return -EINVAL;
358                 break;
359         case NT_ARM_HW_WATCH:
360                 if ((type & HW_BREAKPOINT_RW) != type)
361                         return -EINVAL;
362                 break;
363         default:
364                 return -EINVAL;
365         }
366
367         attr->bp_len    = len;
368         attr->bp_type   = type;
369         attr->bp_addr   += offset;
370
371         return 0;
372 }
373
374 static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info)
375 {
376         u8 num;
377         u32 reg = 0;
378
379         switch (note_type) {
380         case NT_ARM_HW_BREAK:
381                 num = hw_breakpoint_slots(TYPE_INST);
382                 break;
383         case NT_ARM_HW_WATCH:
384                 num = hw_breakpoint_slots(TYPE_DATA);
385                 break;
386         default:
387                 return -EINVAL;
388         }
389
390         reg |= debug_monitors_arch();
391         reg <<= 8;
392         reg |= num;
393
394         *info = reg;
395         return 0;
396 }
397
398 static int ptrace_hbp_get_ctrl(unsigned int note_type,
399                                struct task_struct *tsk,
400                                unsigned long idx,
401                                u32 *ctrl)
402 {
403         struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
404
405         if (IS_ERR(bp))
406                 return PTR_ERR(bp);
407
408         *ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
409         return 0;
410 }
411
412 static int ptrace_hbp_get_addr(unsigned int note_type,
413                                struct task_struct *tsk,
414                                unsigned long idx,
415                                u64 *addr)
416 {
417         struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
418
419         if (IS_ERR(bp))
420                 return PTR_ERR(bp);
421
422         *addr = bp ? counter_arch_bp(bp)->address : 0;
423         return 0;
424 }
425
/*
 * Return the event backing slot @idx, creating it on first use. The
 * result may be an ERR_PTR from either the lookup or the creation.
 */
static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
							struct task_struct *tsk,
							unsigned long idx)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	return bp ? bp : ptrace_hbp_create(note_type, tsk, idx);
}
437
438 static int ptrace_hbp_set_ctrl(unsigned int note_type,
439                                struct task_struct *tsk,
440                                unsigned long idx,
441                                u32 uctrl)
442 {
443         int err;
444         struct perf_event *bp;
445         struct perf_event_attr attr;
446         struct arch_hw_breakpoint_ctrl ctrl;
447
448         bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
449         if (IS_ERR(bp)) {
450                 err = PTR_ERR(bp);
451                 return err;
452         }
453
454         attr = bp->attr;
455         decode_ctrl_reg(uctrl, &ctrl);
456         err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
457         if (err)
458                 return err;
459
460         return modify_user_hw_breakpoint(bp, &attr);
461 }
462
463 static int ptrace_hbp_set_addr(unsigned int note_type,
464                                struct task_struct *tsk,
465                                unsigned long idx,
466                                u64 addr)
467 {
468         int err;
469         struct perf_event *bp;
470         struct perf_event_attr attr;
471
472         bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
473         if (IS_ERR(bp)) {
474                 err = PTR_ERR(bp);
475                 return err;
476         }
477
478         attr = bp->attr;
479         attr.bp_addr = addr;
480         err = modify_user_hw_breakpoint(bp, &attr);
481         return err;
482 }
483
/*
 * Sizes of the address, control and padding fields of one debug register
 * entry in struct user_hwdebug_state.
 */
#define PTRACE_HBP_ADDR_SZ	sizeof(u64)
#define PTRACE_HBP_CTRL_SZ	sizeof(u32)
#define PTRACE_HBP_PAD_SZ	sizeof(u32)

/*
 * Regset "get" handler for NT_ARM_HW_BREAK and NT_ARM_HW_WATCH.
 *
 * Copies a struct user_hwdebug_state out to the tracer: a resource-info
 * word, a padding word, then one (addr, ctrl, pad) entry per debug slot,
 * stopping at the regset limit or when @count is exhausted.
 */
static int hw_break_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 info, ctrl;
	u64 addr;

	/* Resource info */
	ret = ptrace_hbp_get_resource_info(note_type, &info);
	if (ret)
		return ret;

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &info, 0,
				  sizeof(info));
	if (ret)
		return ret;

	/* Pad (zero-filled on the way out) */
	offset = offsetof(struct user_hwdebug_state, pad);
	ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, offset,
				       offset + PTRACE_HBP_PAD_SZ);
	if (ret)
		return ret;

	/* (address, ctrl) registers */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
		if (ret)
			return ret;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &addr,
					  offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
		if (ret)
			return ret;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &ctrl,
					  offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		/* Per-entry padding word, zero-filled. */
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       offset,
					       offset + PTRACE_HBP_PAD_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}
548
/*
 * Regset "set" handler for NT_ARM_HW_BREAK and NT_ARM_HW_WATCH.
 *
 * Mirrors hw_break_get(): the resource-info and pad words are read-only
 * and skipped, then each (addr, ctrl, pad) entry supplied by the tracer is
 * programmed into the corresponding debug slot. A write may legitimately
 * end after an address but before its control word; a trailing partial
 * address is rejected with -EINVAL.
 */
static int hw_break_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 ctrl;
	u64 addr;

	/* Resource info and pad */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
	if (ret)
		return ret;

	/* (address, ctrl) registers */
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		/* Don't accept a truncated address at the tail. */
		if (count < PTRACE_HBP_ADDR_SZ)
			return -EINVAL;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
					 offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		/* The write may stop after the last address. */
		if (!count)
			break;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
					 offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		/* Skip the per-entry padding word. */
		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						offset,
						offset + PTRACE_HBP_PAD_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}
601 #endif  /* CONFIG_HAVE_HW_BREAKPOINT */
602
603 static int gpr_get(struct task_struct *target,
604                    const struct user_regset *regset,
605                    unsigned int pos, unsigned int count,
606                    void *kbuf, void __user *ubuf)
607 {
608         struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs;
609         return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
610 }
611
612 static int gpr_set(struct task_struct *target, const struct user_regset *regset,
613                    unsigned int pos, unsigned int count,
614                    const void *kbuf, const void __user *ubuf)
615 {
616         int ret;
617         struct user_pt_regs newregs = task_pt_regs(target)->user_regs;
618
619         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
620         if (ret)
621                 return ret;
622
623         if (!valid_user_regs(&newregs, target))
624                 return -EINVAL;
625
626         task_pt_regs(target)->user_regs = newregs;
627         return 0;
628 }
629
630 static int fpr_active(struct task_struct *target, const struct user_regset *regset)
631 {
632         if (!system_supports_fpsimd())
633                 return -ENODEV;
634         return regset->n;
635 }
636
637 /*
638  * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
639  */
640 static int __fpr_get(struct task_struct *target,
641                      const struct user_regset *regset,
642                      unsigned int pos, unsigned int count,
643                      void *kbuf, void __user *ubuf, unsigned int start_pos)
644 {
645         struct user_fpsimd_state *uregs;
646
647         sve_sync_to_fpsimd(target);
648
649         uregs = &target->thread.uw.fpsimd_state;
650
651         return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs,
652                                    start_pos, start_pos + sizeof(*uregs));
653 }
654
655 static int fpr_get(struct task_struct *target, const struct user_regset *regset,
656                    unsigned int pos, unsigned int count,
657                    void *kbuf, void __user *ubuf)
658 {
659         if (!system_supports_fpsimd())
660                 return -EINVAL;
661
662         if (target == current)
663                 fpsimd_preserve_current_state();
664
665         return __fpr_get(target, regset, pos, count, kbuf, ubuf, 0);
666 }
667
668 static int __fpr_set(struct task_struct *target,
669                      const struct user_regset *regset,
670                      unsigned int pos, unsigned int count,
671                      const void *kbuf, const void __user *ubuf,
672                      unsigned int start_pos)
673 {
674         int ret;
675         struct user_fpsimd_state newstate;
676
677         /*
678          * Ensure target->thread.uw.fpsimd_state is up to date, so that a
679          * short copyin can't resurrect stale data.
680          */
681         sve_sync_to_fpsimd(target);
682
683         newstate = target->thread.uw.fpsimd_state;
684
685         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate,
686                                  start_pos, start_pos + sizeof(newstate));
687         if (ret)
688                 return ret;
689
690         target->thread.uw.fpsimd_state = newstate;
691
692         return ret;
693 }
694
695 static int fpr_set(struct task_struct *target, const struct user_regset *regset,
696                    unsigned int pos, unsigned int count,
697                    const void *kbuf, const void __user *ubuf)
698 {
699         int ret;
700
701         if (!system_supports_fpsimd())
702                 return -EINVAL;
703
704         ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0);
705         if (ret)
706                 return ret;
707
708         sve_sync_from_fpsimd_zeropad(target);
709         fpsimd_flush_task_state(target);
710
711         return ret;
712 }
713
714 static int tls_get(struct task_struct *target, const struct user_regset *regset,
715                    unsigned int pos, unsigned int count,
716                    void *kbuf, void __user *ubuf)
717 {
718         unsigned long *tls = &target->thread.uw.tp_value;
719
720         if (target == current)
721                 tls_preserve_current_state();
722
723         return user_regset_copyout(&pos, &count, &kbuf, &ubuf, tls, 0, -1);
724 }
725
726 static int tls_set(struct task_struct *target, const struct user_regset *regset,
727                    unsigned int pos, unsigned int count,
728                    const void *kbuf, const void __user *ubuf)
729 {
730         int ret;
731         unsigned long tls = target->thread.uw.tp_value;
732
733         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
734         if (ret)
735                 return ret;
736
737         target->thread.uw.tp_value = tls;
738         return ret;
739 }
740
741 static int system_call_get(struct task_struct *target,
742                            const struct user_regset *regset,
743                            unsigned int pos, unsigned int count,
744                            void *kbuf, void __user *ubuf)
745 {
746         int syscallno = task_pt_regs(target)->syscallno;
747
748         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
749                                    &syscallno, 0, -1);
750 }
751
752 static int system_call_set(struct task_struct *target,
753                            const struct user_regset *regset,
754                            unsigned int pos, unsigned int count,
755                            const void *kbuf, const void __user *ubuf)
756 {
757         int syscallno = task_pt_regs(target)->syscallno;
758         int ret;
759
760         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
761         if (ret)
762                 return ret;
763
764         task_pt_regs(target)->syscallno = syscallno;
765         return ret;
766 }
767
768 #ifdef CONFIG_ARM64_SVE
769
/*
 * Fill in a user_sve_header describing @target's current SVE state:
 * whether the payload is in SVE or FPSIMD layout, the current and maximum
 * vector lengths, and the corresponding regset payload sizes.
 */
static void sve_init_header_from_task(struct user_sve_header *header,
				      struct task_struct *target)
{
	unsigned int vq;

	memset(header, 0, sizeof(*header));

	/* Payload layout depends on whether the task has live SVE state. */
	header->flags = test_tsk_thread_flag(target, TIF_SVE) ?
		SVE_PT_REGS_SVE : SVE_PT_REGS_FPSIMD;
	if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT))
		header->flags |= SVE_PT_VL_INHERIT;

	header->vl = target->thread.sve_vl;
	vq = sve_vq_from_vl(header->vl);

	header->max_vl = sve_max_vl;
	header->size = SVE_PT_SIZE(vq, header->flags);
	/* max_size assumes the largest (SVE) layout at the maximum VL. */
	header->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl),
				      SVE_PT_REGS_SVE);
}
790
/*
 * Total regset payload size described by @header, rounded up to a
 * multiple of SVE_VQ_BYTES.
 */
static unsigned int sve_size_from_header(struct user_sve_header const *header)
{
	return ALIGN(header->size, SVE_VQ_BYTES);
}
795
796 static unsigned int sve_get_size(struct task_struct *target,
797                                  const struct user_regset *regset)
798 {
799         struct user_sve_header header;
800
801         if (!system_supports_sve())
802                 return 0;
803
804         sve_init_header_from_task(&header, target);
805         return sve_size_from_header(&header);
806 }
807
/*
 * Regset "get" handler for NT_ARM_SVE.
 *
 * Copies out a user_sve_header followed by the register payload. When the
 * task has no live SVE state the payload is the plain FPSIMD state;
 * otherwise it is the full SVE layout: Z/P/FFR registers from
 * thread.sve_state, zero padding, fpsr/fpcr, then trailing zero padding up
 * to the aligned size.
 */
static int sve_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	int ret;
	struct user_sve_header header;
	unsigned int vq;
	unsigned long start, end;

	if (!system_supports_sve())
		return -EINVAL;

	/* Header */
	sve_init_header_from_task(&header, target);
	vq = sve_vq_from_vl(header.vl);

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &header,
				  0, sizeof(header));
	if (ret)
		return ret;

	/* Flush live hardware state before reading it back. */
	if (target == current)
		fpsimd_preserve_current_state();

	/* Registers: FPSIMD-only case */

	BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
	if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD)
		return __fpr_get(target, regset, pos, count, kbuf, ubuf,
				 SVE_PT_FPSIMD_OFFSET);

	/* Otherwise: full SVE case */

	BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
	/* Z/P/FFR registers, straight out of thread.sve_state. */
	start = SVE_PT_SVE_OFFSET;
	end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  target->thread.sve_state,
				  start, end);
	if (ret)
		return ret;

	/* Zero-fill the gap between FFR and fpsr. */
	start = end;
	end = SVE_PT_SVE_FPSR_OFFSET(vq);
	ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
				       start, end);
	if (ret)
		return ret;

	/*
	 * Copy fpsr, and fpcr which must follow contiguously in
	 * struct fpsimd_state:
	 */
	start = end;
	end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &target->thread.uw.fpsimd_state.fpsr,
				  start, end);
	if (ret)
		return ret;

	/* Trailing padding out to the aligned payload size. */
	start = end;
	end = sve_size_from_header(&header);
	return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					start, end);
}
875
/*
 * sve_set() - regset set() handler for NT_ARM_SVE.
 *
 * The payload starts with a struct user_sve_header describing the vector
 * length and flags, followed either by FPSIMD-format register data or by
 * full SVE-format data (Z/P regs, FFR, then fpsr/fpcr), depending on
 * SVE_PT_REGS_MASK in header.flags.
 *
 * Returns 0 on success, or a negative errno: -EINVAL if SVE is
 * unsupported or the header is truncated/invalid, -EIO if register data
 * is supplied for a vector length other than the one actually set.
 */
static int sve_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_sve_header header;
	unsigned int vq;
	unsigned long start, end;

	if (!system_supports_sve())
		return -EINVAL;

	/* Header */
	if (count < sizeof(header))
		return -EINVAL;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header,
				 0, sizeof(header));
	if (ret)
		goto out;

	/*
	 * Apart from PT_SVE_REGS_MASK, all PT_SVE_* flags are consumed by
	 * sve_set_vector_length(), which will also validate them for us:
	 */
	ret = sve_set_vector_length(target, header.vl,
		((unsigned long)header.flags & ~SVE_PT_REGS_MASK) << 16);
	if (ret)
		goto out;

	/* Actual VL set may be less than the user asked for: */
	vq = sve_vq_from_vl(target->thread.sve_vl);

	/* Registers: FPSIMD-only case */

	BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
	if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD) {
		/* FPSIMD-format payload: reuse the FPSIMD regset setter. */
		ret = __fpr_set(target, regset, pos, count, kbuf, ubuf,
				SVE_PT_FPSIMD_OFFSET);
		clear_tsk_thread_flag(target, TIF_SVE);
		goto out;
	}

	/* Otherwise: full SVE case */

	/*
	 * If setting a different VL from the requested VL and there is
	 * register data, the data layout will be wrong: don't even
	 * try to set the registers in this case.
	 */
	if (count && vq != sve_vq_from_vl(header.vl)) {
		ret = -EIO;
		goto out;
	}

	sve_alloc(target);

	/*
	 * Ensure target->thread.sve_state is up to date with target's
	 * FPSIMD regs, so that a short copyin leaves trailing registers
	 * unmodified.
	 */
	fpsimd_sync_to_sve(target);
	set_tsk_thread_flag(target, TIF_SVE);

	/* Z and P registers plus FFR, laid out per the SVE_PT_SVE_* map: */
	BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
	start = SVE_PT_SVE_OFFSET;
	end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 target->thread.sve_state,
				 start, end);
	if (ret)
		goto out;

	/* Skip the padding between FFR and fpsr without storing it: */
	start = end;
	end = SVE_PT_SVE_FPSR_OFFSET(vq);
	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					start, end);
	if (ret)
		goto out;

	/*
	 * Copy fpsr, and fpcr which must follow contiguously in
	 * struct fpsimd_state:
	 */
	start = end;
	end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.uw.fpsimd_state.fpsr,
				 start, end);

out:
	/* Discard any stale per-CPU state so the new contents get used. */
	fpsimd_flush_task_state(target);
	return ret;
}
971
972 #endif /* CONFIG_ARM64_SVE */
973
/*
 * Indices into aarch64_regsets[] below; the order here must match the
 * array's designated initializers.
 */
enum aarch64_regset {
	REGSET_GPR,
	REGSET_FPR,
	REGSET_TLS,
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	REGSET_HW_BREAK,
	REGSET_HW_WATCH,
#endif
	REGSET_SYSTEM_CALL,
#ifdef CONFIG_ARM64_SVE
	REGSET_SVE,
#endif
};
987
/*
 * Regset descriptions for native (AArch64) tasks, exposed through
 * PTRACE_GETREGSET/PTRACE_SETREGSET and ELF core dump notes.
 */
static const struct user_regset aarch64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_pt_regs) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.get = gpr_get,
		.set = gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fpsimd_state) / sizeof(u32),
		/*
		 * We pretend we have 32-bit registers because the fpsr and
		 * fpcr are 32-bits wide.
		 */
		.size = sizeof(u32),
		.align = sizeof(u32),
		.active = fpr_active,
		.get = fpr_get,
		.set = fpr_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 1,
		.size = sizeof(void *),
		.align = sizeof(void *),
		.get = tls_get,
		.set = tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_ARM_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.get = system_call_get,
		.set = system_call_set,
	},
#ifdef CONFIG_ARM64_SVE
	[REGSET_SVE] = { /* Scalable Vector Extension */
		.core_note_type = NT_ARM_SVE,
		.n = DIV_ROUND_UP(SVE_PT_SIZE(SVE_VQ_MAX, SVE_PT_REGS_SVE),
				  SVE_VQ_BYTES),
		.size = SVE_VQ_BYTES,
		.align = SVE_VQ_BYTES,
		.get = sve_get,
		.set = sve_set,
		.get_size = sve_get_size,
	},
#endif
};
1057
/* Regset view used for native 64-bit tasks. */
static const struct user_regset_view user_aarch64_view = {
	.name = "aarch64", .e_machine = EM_AARCH64,
	.regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets)
};
1062
1063 #ifdef CONFIG_COMPAT
/* Indices into aarch32_regsets[] (the arm32-compatible core dump view). */
enum compat_regset {
	REGSET_COMPAT_GPR,
	REGSET_COMPAT_VFP,
};
1068
/*
 * compat_gpr_get() - regset get() handler for the AArch32 GPRs.
 *
 * Each 32-bit register is synthesized from the 64-bit pt_regs: indices
 * 0-14 map to regs[idx], 15 is the PC, 16 is the CPSR (converted from
 * the native pstate layout) and 17 is ORIG_r0.
 *
 * Returns 0 on success, -EIO for an out-of-range request, or -EFAULT if
 * the user buffer cannot be written.
 */
static int compat_gpr_get(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  void *kbuf, void __user *ubuf)
{
	int ret = 0;
	unsigned int i, start, num_regs;

	/* Calculate the number of AArch32 registers contained in count */
	num_regs = count / regset->size;

	/* Convert pos into an register number */
	start = pos / regset->size;

	if (start + num_regs > regset->n)
		return -EIO;

	for (i = 0; i < num_regs; ++i) {
		unsigned int idx = start + i;
		compat_ulong_t reg;

		switch (idx) {
		case 15:
			reg = task_pt_regs(target)->pc;
			break;
		case 16:
			/* Present pstate in the AArch32 PSR format: */
			reg = task_pt_regs(target)->pstate;
			reg = pstate_to_compat_psr(reg);
			break;
		case 17:
			reg = task_pt_regs(target)->orig_x0;
			break;
		default:
			reg = task_pt_regs(target)->regs[idx];
		}

		if (kbuf) {
			memcpy(kbuf, &reg, sizeof(reg));
			kbuf += sizeof(reg);
		} else {
			ret = copy_to_user(ubuf, &reg, sizeof(reg));
			if (ret) {
				ret = -EFAULT;
				break;
			}

			ubuf += sizeof(reg);
		}
	}

	return ret;
}
1121
/*
 * compat_gpr_set() - regset set() handler for the AArch32 GPRs.
 *
 * Works on a local copy of pt_regs and only commits it back if the
 * resulting register state passes valid_user_regs(), so a bad write
 * cannot leave the tracee with an inconsistent pstate.
 *
 * Returns 0 on success, -EIO for an out-of-range request, -EFAULT on a
 * failed user copy, or -EINVAL if the new register state is rejected.
 */
static int compat_gpr_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct pt_regs newregs;
	int ret = 0;
	unsigned int i, start, num_regs;

	/* Calculate the number of AArch32 registers contained in count */
	num_regs = count / regset->size;

	/* Convert pos into an register number */
	start = pos / regset->size;

	if (start + num_regs > regset->n)
		return -EIO;

	newregs = *task_pt_regs(target);

	for (i = 0; i < num_regs; ++i) {
		unsigned int idx = start + i;
		compat_ulong_t reg;

		if (kbuf) {
			memcpy(&reg, kbuf, sizeof(reg));
			kbuf += sizeof(reg);
		} else {
			ret = copy_from_user(&reg, ubuf, sizeof(reg));
			if (ret) {
				ret = -EFAULT;
				break;
			}

			ubuf += sizeof(reg);
		}

		switch (idx) {
		case 15:
			newregs.pc = reg;
			break;
		case 16:
			/* Incoming value is in AArch32 PSR format: */
			reg = compat_psr_to_pstate(reg);
			newregs.pstate = reg;
			break;
		case 17:
			newregs.orig_x0 = reg;
			break;
		default:
			newregs.regs[idx] = reg;
		}

	}

	if (valid_user_regs(&newregs.user_regs, target))
		*task_pt_regs(target) = newregs;
	else
		ret = -EINVAL;

	return ret;
}
1183
/*
 * compat_vfp_get() - regset get() handler for the AArch32 VFP state.
 *
 * The VFP registers live directly in the native fpsimd_state, so they
 * are copied out as one contiguous span; the trailing 32-bit FPSCR word
 * is synthesized from the separate fpsr and fpcr fields.
 */
static int compat_vfp_get(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  void *kbuf, void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret, vregs_end_pos;

	if (!system_supports_fpsimd())
		return -EINVAL;

	uregs = &target->thread.uw.fpsimd_state;

	/* Make sure the in-memory copy is current before reading it: */
	if (target == current)
		fpsimd_preserve_current_state();

	/*
	 * The VFP registers are packed into the fpsimd_state, so they all sit
	 * nicely together for us. We just need to create the fpscr separately.
	 */
	vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs,
				  0, vregs_end_pos);

	if (count && !ret) {
		/* FPSCR = status bits from fpsr | control bits from fpcr: */
		fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) |
			(uregs->fpcr & VFP_FPSCR_CTRL_MASK);

		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &fpscr,
					  vregs_end_pos, VFP_STATE_SIZE);
	}

	return ret;
}
1219
/*
 * compat_vfp_set() - regset set() handler for the AArch32 VFP state.
 *
 * Mirror of compat_vfp_get(): the register block is copied straight into
 * fpsimd_state, and an incoming FPSCR word (if present) is split back
 * into the fpsr (status) and fpcr (control) fields.
 */
static int compat_vfp_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret, vregs_end_pos;

	if (!system_supports_fpsimd())
		return -EINVAL;

	uregs = &target->thread.uw.fpsimd_state;

	vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
				 vregs_end_pos);

	if (count && !ret) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpscr,
					 vregs_end_pos, VFP_STATE_SIZE);
		if (!ret) {
			uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK;
			uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
		}
	}

	/* Discard any stale per-CPU state so the new contents get used. */
	fpsimd_flush_task_state(target);
	return ret;
}
1250
1251 static int compat_tls_get(struct task_struct *target,
1252                           const struct user_regset *regset, unsigned int pos,
1253                           unsigned int count, void *kbuf, void __user *ubuf)
1254 {
1255         compat_ulong_t tls = (compat_ulong_t)target->thread.uw.tp_value;
1256         return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
1257 }
1258
1259 static int compat_tls_set(struct task_struct *target,
1260                           const struct user_regset *regset, unsigned int pos,
1261                           unsigned int count, const void *kbuf,
1262                           const void __user *ubuf)
1263 {
1264         int ret;
1265         compat_ulong_t tls = target->thread.uw.tp_value;
1266
1267         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
1268         if (ret)
1269                 return ret;
1270
1271         target->thread.uw.tp_value = tls;
1272         return ret;
1273 }
1274
/*
 * Regset descriptions compatible with arch/arm: used for 32-bit core
 * dumps and for compat ptrace requests made by 32-bit tracers.
 */
static const struct user_regset aarch32_regsets[] = {
	[REGSET_COMPAT_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_COMPAT_VFP] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.active = fpr_active,
		.get = compat_vfp_get,
		.set = compat_vfp_set
	},
};
1294
/* arm32-compatible view: core dumps of 32-bit tasks and compat ptrace. */
static const struct user_regset_view user_aarch32_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
};
1299
/*
 * Extended regset table for native (64-bit) ptrace requests on 32-bit
 * children: adds TLS, hardware debug and syscall regsets on top of the
 * plain arm32-compatible GPR/VFP view.
 */
static const struct user_regset aarch32_ptrace_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.get = compat_vfp_get,
		.set = compat_vfp_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 1,
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.get = compat_tls_get,
		.set = compat_tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_ARM_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.get = system_call_get,
		.set = system_call_set,
	},
};
1352
/* View used for native ptrace requests targeting 32-bit children. */
static const struct user_regset_view user_aarch32_ptrace_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_ptrace_regsets, .n = ARRAY_SIZE(aarch32_ptrace_regsets)
};
1357
/*
 * compat_ptrace_read_user() - handle PTRACE_PEEKUSR for a 32-bit tracee.
 *
 * Magic offsets report the tracee's mm layout; offsets inside the compat
 * GPR area read the corresponding register via the aarch32 regset view;
 * other in-range offsets read as zero.  @off must be word-aligned.
 */
static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
				   compat_ulong_t __user *ret)
{
	compat_ulong_t tmp;

	if (off & 3)
		return -EIO;

	if (off == COMPAT_PT_TEXT_ADDR)
		tmp = tsk->mm->start_code;
	else if (off == COMPAT_PT_DATA_ADDR)
		tmp = tsk->mm->start_data;
	else if (off == COMPAT_PT_TEXT_END_ADDR)
		tmp = tsk->mm->end_code;
	else if (off < sizeof(compat_elf_gregset_t))
		return copy_regset_to_user(tsk, &user_aarch32_view,
					   REGSET_COMPAT_GPR, off,
					   sizeof(compat_ulong_t), ret);
	else if (off >= COMPAT_USER_SZ)
		return -EIO;
	else
		tmp = 0;

	return put_user(tmp, ret);
}
1383
/*
 * compat_ptrace_write_user() - handle PTRACE_POKEUSR for a 32-bit tracee.
 *
 * Only writes that land in the compat GPR area take effect; other
 * in-range offsets are silently ignored (return 0).  @off must be
 * word-aligned and below COMPAT_USER_SZ.
 */
static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
				    compat_ulong_t val)
{
	int ret;
	mm_segment_t old_fs = get_fs();

	if (off & 3 || off >= COMPAT_USER_SZ)
		return -EIO;

	if (off >= sizeof(compat_elf_gregset_t))
		return 0;

	/*
	 * copy_regset_from_user() expects a user pointer, but &val is a
	 * kernel address here - widen the addressing limit for the call.
	 */
	set_fs(KERNEL_DS);
	ret = copy_regset_from_user(tsk, &user_aarch32_view,
				    REGSET_COMPAT_GPR, off,
				    sizeof(compat_ulong_t),
				    &val);
	set_fs(old_fs);

	return ret;
}
1405
1406 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1407
1408 /*
1409  * Convert a virtual register number into an index for a thread_info
1410  * breakpoint array. Breakpoints are identified using positive numbers
1411  * whilst watchpoints are negative. The registers are laid out as pairs
1412  * of (address, control), each pair mapping to a unique hw_breakpoint struct.
1413  * Register 0 is reserved for describing resource information.
1414  */
1415 static int compat_ptrace_hbp_num_to_idx(compat_long_t num)
1416 {
1417         return (abs(num) - 1) >> 1;
1418 }
1419
1420 static int compat_ptrace_hbp_get_resource_info(u32 *kdata)
1421 {
1422         u8 num_brps, num_wrps, debug_arch, wp_len;
1423         u32 reg = 0;
1424
1425         num_brps        = hw_breakpoint_slots(TYPE_INST);
1426         num_wrps        = hw_breakpoint_slots(TYPE_DATA);
1427
1428         debug_arch      = debug_monitors_arch();
1429         wp_len          = 8;
1430         reg             |= debug_arch;
1431         reg             <<= 8;
1432         reg             |= wp_len;
1433         reg             <<= 8;
1434         reg             |= num_wrps;
1435         reg             <<= 8;
1436         reg             |= num_brps;
1437
1438         *kdata = reg;
1439         return 0;
1440 }
1441
1442 static int compat_ptrace_hbp_get(unsigned int note_type,
1443                                  struct task_struct *tsk,
1444                                  compat_long_t num,
1445                                  u32 *kdata)
1446 {
1447         u64 addr = 0;
1448         u32 ctrl = 0;
1449
1450         int err, idx = compat_ptrace_hbp_num_to_idx(num);
1451
1452         if (num & 1) {
1453                 err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
1454                 *kdata = (u32)addr;
1455         } else {
1456                 err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl);
1457                 *kdata = ctrl;
1458         }
1459
1460         return err;
1461 }
1462
1463 static int compat_ptrace_hbp_set(unsigned int note_type,
1464                                  struct task_struct *tsk,
1465                                  compat_long_t num,
1466                                  u32 *kdata)
1467 {
1468         u64 addr;
1469         u32 ctrl;
1470
1471         int err, idx = compat_ptrace_hbp_num_to_idx(num);
1472
1473         if (num & 1) {
1474                 addr = *kdata;
1475                 err = ptrace_hbp_set_addr(note_type, tsk, idx, addr);
1476         } else {
1477                 ctrl = *kdata;
1478                 err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl);
1479         }
1480
1481         return err;
1482 }
1483
1484 static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
1485                                     compat_ulong_t __user *data)
1486 {
1487         int ret;
1488         u32 kdata;
1489
1490         /* Watchpoint */
1491         if (num < 0) {
1492                 ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
1493         /* Resource info */
1494         } else if (num == 0) {
1495                 ret = compat_ptrace_hbp_get_resource_info(&kdata);
1496         /* Breakpoint */
1497         } else {
1498                 ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
1499         }
1500
1501         if (!ret)
1502                 ret = put_user(kdata, data);
1503
1504         return ret;
1505 }
1506
1507 static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
1508                                     compat_ulong_t __user *data)
1509 {
1510         int ret;
1511         u32 kdata = 0;
1512
1513         if (num == 0)
1514                 return 0;
1515
1516         ret = get_user(kdata, data);
1517         if (ret)
1518                 return ret;
1519
1520         if (num < 0)
1521                 ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
1522         else
1523                 ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);
1524
1525         return ret;
1526 }
1527 #endif  /* CONFIG_HAVE_HW_BREAKPOINT */
1528
/*
 * compat_arch_ptrace() - arch hook for ptrace requests from 32-bit
 * tracers.  Dispatches the arm32-specific requests locally and defers
 * everything else to the generic compat_ptrace_request().
 */
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;

	switch (request) {
		case PTRACE_PEEKUSR:
			ret = compat_ptrace_read_user(child, addr, datap);
			break;

		case PTRACE_POKEUSR:
			ret = compat_ptrace_write_user(child, addr, data);
			break;

		case COMPAT_PTRACE_GETREGS:
			ret = copy_regset_to_user(child,
						  &user_aarch32_view,
						  REGSET_COMPAT_GPR,
						  0, sizeof(compat_elf_gregset_t),
						  datap);
			break;

		case COMPAT_PTRACE_SETREGS:
			ret = copy_regset_from_user(child,
						    &user_aarch32_view,
						    REGSET_COMPAT_GPR,
						    0, sizeof(compat_elf_gregset_t),
						    datap);
			break;

		case COMPAT_PTRACE_GET_THREAD_AREA:
			ret = put_user((compat_ulong_t)child->thread.uw.tp_value,
				       (compat_ulong_t __user *)datap);
			break;

		case COMPAT_PTRACE_SET_SYSCALL:
			task_pt_regs(child)->syscallno = data;
			ret = 0;
			break;

		case COMPAT_PTRACE_GETVFPREGS:
			ret = copy_regset_to_user(child,
						  &user_aarch32_view,
						  REGSET_COMPAT_VFP,
						  0, VFP_STATE_SIZE,
						  datap);
			break;

		case COMPAT_PTRACE_SETVFPREGS:
			ret = copy_regset_from_user(child,
						    &user_aarch32_view,
						    REGSET_COMPAT_VFP,
						    0, VFP_STATE_SIZE,
						    datap);
			break;

#ifdef CONFIG_HAVE_HW_BREAKPOINT
		case COMPAT_PTRACE_GETHBPREGS:
			ret = compat_ptrace_gethbpregs(child, addr, datap);
			break;

		case COMPAT_PTRACE_SETHBPREGS:
			ret = compat_ptrace_sethbpregs(child, addr, datap);
			break;
#endif

		default:
			ret = compat_ptrace_request(child, request, addr,
						    data);
			break;
	}

	return ret;
}
1606 #endif /* CONFIG_COMPAT */
1607
/* Select the regset view appropriate for @task and the calling tracer. */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	/*
	 * Core dumping of 32-bit tasks or compat ptrace requests must use the
	 * user_aarch32_view compatible with arm32. Native ptrace requests on
	 * 32-bit children use an extended user_aarch32_ptrace_view to allow
	 * access to the TLS register.
	 */
	if (is_compat_task())
		return &user_aarch32_view;
	else if (is_compat_thread(task_thread_info(task)))
		return &user_aarch32_ptrace_view;
#endif
	return &user_aarch64_view;
}
1624
/*
 * arch_ptrace() - arch hook for native ptrace requests; arm64 adds no
 * requests of its own, so everything goes to the generic handler.
 */
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	return ptrace_request(child, request, addr, data);
}
1630
/* Value written to the scratch register to tell the tracer which side
 * of the syscall is being reported. */
enum ptrace_syscall_dir {
	PTRACE_SYSCALL_ENTER = 0,
	PTRACE_SYSCALL_EXIT,
};
1635
/*
 * tracehook_report_syscall() - notify the tracer of syscall entry/exit.
 *
 * The scratch register is temporarily overwritten with the direction
 * value for the duration of the report and restored afterwards; the
 * ordering of restore vs. report differs per branch (see comments).
 */
static void tracehook_report_syscall(struct pt_regs *regs,
				     enum ptrace_syscall_dir dir)
{
	int regno;
	unsigned long saved_reg;

	/*
	 * A scratch register (ip(r12) on AArch32, x7 on AArch64) is
	 * used to denote syscall entry/exit:
	 */
	regno = (is_compat_task() ? 12 : 7);
	saved_reg = regs->regs[regno];
	regs->regs[regno] = dir;

	if (dir == PTRACE_SYSCALL_ENTER) {
		/* A nonzero return from the tracer cancels the syscall: */
		if (tracehook_report_syscall_entry(regs))
			forget_syscall(regs);
		regs->regs[regno] = saved_reg;
	} else if (!test_thread_flag(TIF_SINGLESTEP)) {
		tracehook_report_syscall_exit(regs, 0);
		regs->regs[regno] = saved_reg;
	} else {
		regs->regs[regno] = saved_reg;

		/*
		 * Signal a pseudo-step exception since we are stepping but
		 * tracer modifications to the registers may have rewound the
		 * state machine.
		 */
		tracehook_report_syscall_exit(regs, 1);
	}
}
1668
/*
 * syscall_trace_enter() - syscall-entry tracing hook.
 *
 * Runs ptrace reporting, then seccomp, then the syscall tracepoint and
 * audit, in that order.  Returns the syscall number to execute (a tracer
 * may have rewritten it), or -1 if seccomp rejected the syscall.
 */
int syscall_trace_enter(struct pt_regs *regs)
{
	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);

	/* Do the secure computing after ptrace; failures should be fast. */
	if (secure_computing(NULL) == -1)
		return -1;

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, regs->syscallno);

	audit_syscall_entry(regs->syscallno, regs->orig_x0, regs->regs[1],
			    regs->regs[2], regs->regs[3]);

	return regs->syscallno;
}
1686
/*
 * syscall_trace_exit() - syscall-exit tracing hook: audit, tracepoint,
 * ptrace reporting (also fired for single-step), then rseq.
 */
void syscall_trace_exit(struct pt_regs *regs)
{
	/* Snapshot the flags once so all checks below are consistent. */
	unsigned long flags = READ_ONCE(current_thread_info()->flags);

	audit_syscall_exit(regs);

	if (flags & _TIF_SYSCALL_TRACEPOINT)
		trace_sys_exit(regs, regs_return_value(regs));

	if (flags & (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);

	rseq_syscall(regs);
}
1701
1702 /*
1703  * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487D.a.
1704  * We permit userspace to set SSBS (AArch64 bit 12, AArch32 bit 23) which is
1705  * not described in ARM DDI 0487D.a.
1706  * We treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may
1707  * be allocated an EL0 meaning in future.
1708  * Userspace cannot use these until they have an architectural meaning.
1709  * Note that this follows the SPSR_ELx format, not the AArch32 PSR format.
1710  * We also reserve IL for the kernel; SS is handled dynamically.
1711  */
1712 #define SPSR_EL1_AARCH64_RES0_BITS \
1713         (GENMASK_ULL(63, 32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \
1714          GENMASK_ULL(20, 13) | GENMASK_ULL(11, 10) | GENMASK_ULL(5, 5))
1715 #define SPSR_EL1_AARCH32_RES0_BITS \
1716         (GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20))
1717
/*
 * valid_compat_regs() - sanitize a pstate destined for AArch32 EL0.
 *
 * Clears RES0 bits and pins the endianness bit on !mixed-endian systems.
 * Returns 1 if the state was acceptable as-is (user mode, AArch32, no
 * A/I/F masks set); otherwise forces a default 32-bit EL0t state and
 * returns 0.
 */
static int valid_compat_regs(struct user_pt_regs *regs)
{
	regs->pstate &= ~SPSR_EL1_AARCH32_RES0_BITS;

	if (!system_supports_mixed_endian_el0()) {
		if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
			regs->pstate |= PSR_AA32_E_BIT;
		else
			regs->pstate &= ~PSR_AA32_E_BIT;
	}

	if (user_mode(regs) && (regs->pstate & PSR_MODE32_BIT) &&
	    (regs->pstate & PSR_AA32_A_BIT) == 0 &&
	    (regs->pstate & PSR_AA32_I_BIT) == 0 &&
	    (regs->pstate & PSR_AA32_F_BIT) == 0) {
		return 1;
	}

	/*
	 * Force PSR to a valid 32-bit EL0t, preserving the same bits as
	 * arch/arm.
	 */
	regs->pstate &= PSR_AA32_N_BIT | PSR_AA32_Z_BIT |
			PSR_AA32_C_BIT | PSR_AA32_V_BIT |
			PSR_AA32_Q_BIT | PSR_AA32_IT_MASK |
			PSR_AA32_GE_MASK | PSR_AA32_E_BIT |
			PSR_AA32_T_BIT;
	regs->pstate |= PSR_MODE32_BIT;

	return 0;
}
1749
1750 static int valid_native_regs(struct user_pt_regs *regs)
1751 {
1752         regs->pstate &= ~SPSR_EL1_AARCH64_RES0_BITS;
1753
1754         if (user_mode(regs) && !(regs->pstate & PSR_MODE32_BIT) &&
1755             (regs->pstate & PSR_D_BIT) == 0 &&
1756             (regs->pstate & PSR_A_BIT) == 0 &&
1757             (regs->pstate & PSR_I_BIT) == 0 &&
1758             (regs->pstate & PSR_F_BIT) == 0) {
1759                 return 1;
1760         }
1761
1762         /* Force PSR to a valid 64-bit EL0t */
1763         regs->pstate &= PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT;
1764
1765         return 0;
1766 }
1767
1768 /*
1769  * Are the current registers suitable for user mode? (used to maintain
1770  * security in signal handlers)
1771  */
1772 int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task)
1773 {
1774         /* https://lore.kernel.org/lkml/20191118131525.GA4180@willie-the-truck */
1775         user_regs_reset_single_step(regs, task);
1776
1777         if (is_compat_thread(task_thread_info(task)))
1778                 return valid_compat_regs(regs);
1779         else
1780                 return valid_native_regs(regs);
1781 }