GNU Linux-libre 5.10.217-gnu1
arch/arm64/kernel/ptrace.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Based on arch/arm/kernel/ptrace.c
4  *
5  * By Ross Biro 1/23/92
6  * edited by Linus Torvalds
7  * ARM modifications Copyright (C) 2000 Russell King
8  * Copyright (C) 2012 ARM Ltd.
9  */
10
11 #include <linux/audit.h>
12 #include <linux/compat.h>
13 #include <linux/kernel.h>
14 #include <linux/sched/signal.h>
15 #include <linux/sched/task_stack.h>
16 #include <linux/mm.h>
17 #include <linux/nospec.h>
18 #include <linux/smp.h>
19 #include <linux/ptrace.h>
20 #include <linux/user.h>
21 #include <linux/seccomp.h>
22 #include <linux/security.h>
23 #include <linux/init.h>
24 #include <linux/signal.h>
25 #include <linux/string.h>
26 #include <linux/uaccess.h>
27 #include <linux/perf_event.h>
28 #include <linux/hw_breakpoint.h>
29 #include <linux/regset.h>
30 #include <linux/tracehook.h>
31 #include <linux/elf.h>
32
33 #include <asm/compat.h>
34 #include <asm/cpufeature.h>
35 #include <asm/debug-monitors.h>
36 #include <asm/fpsimd.h>
37 #include <asm/mte.h>
38 #include <asm/pointer_auth.h>
39 #include <asm/stacktrace.h>
40 #include <asm/syscall.h>
41 #include <asm/traps.h>
42 #include <asm/system_misc.h>
43
44 #define CREATE_TRACE_POINTS
45 #include <trace/events/syscalls.h>
46
47 struct pt_regs_offset {
48         const char *name;
49         int offset;
50 };
51
52 #define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
53 #define REG_OFFSET_END {.name = NULL, .offset = 0}
54 #define GPR_OFFSET_NAME(r) \
55         {.name = "x" #r, .offset = offsetof(struct pt_regs, regs[r])}
56
57 static const struct pt_regs_offset regoffset_table[] = {
58         GPR_OFFSET_NAME(0),
59         GPR_OFFSET_NAME(1),
60         GPR_OFFSET_NAME(2),
61         GPR_OFFSET_NAME(3),
62         GPR_OFFSET_NAME(4),
63         GPR_OFFSET_NAME(5),
64         GPR_OFFSET_NAME(6),
65         GPR_OFFSET_NAME(7),
66         GPR_OFFSET_NAME(8),
67         GPR_OFFSET_NAME(9),
68         GPR_OFFSET_NAME(10),
69         GPR_OFFSET_NAME(11),
70         GPR_OFFSET_NAME(12),
71         GPR_OFFSET_NAME(13),
72         GPR_OFFSET_NAME(14),
73         GPR_OFFSET_NAME(15),
74         GPR_OFFSET_NAME(16),
75         GPR_OFFSET_NAME(17),
76         GPR_OFFSET_NAME(18),
77         GPR_OFFSET_NAME(19),
78         GPR_OFFSET_NAME(20),
79         GPR_OFFSET_NAME(21),
80         GPR_OFFSET_NAME(22),
81         GPR_OFFSET_NAME(23),
82         GPR_OFFSET_NAME(24),
83         GPR_OFFSET_NAME(25),
84         GPR_OFFSET_NAME(26),
85         GPR_OFFSET_NAME(27),
86         GPR_OFFSET_NAME(28),
87         GPR_OFFSET_NAME(29),
88         GPR_OFFSET_NAME(30),
89         {.name = "lr", .offset = offsetof(struct pt_regs, regs[30])},
90         REG_OFFSET_NAME(sp),
91         REG_OFFSET_NAME(pc),
92         REG_OFFSET_NAME(pstate),
93         REG_OFFSET_END,
94 };
95
96 /**
97  * regs_query_register_offset() - query register offset from its name
98  * @name:       the name of a register
99  *
100  * regs_query_register_offset() returns the offset of a register in struct
101  * pt_regs from its name. If the name is invalid, this returns -EINVAL.
102  */
103 int regs_query_register_offset(const char *name)
104 {
105         const struct pt_regs_offset *roff;
106
107         for (roff = regoffset_table; roff->name != NULL; roff++)
108                 if (!strcmp(roff->name, name))
109                         return roff->offset;
110         return -EINVAL;
111 }
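/*
 * Example (illustrative sketch): the typical consumer of
 * regs_query_register_offset() is the kprobe/trace-event argument fetch
 * code, which turns a register name into a value read from the trapped
 * pt_regs via the regs_get_register() helper from <asm/ptrace.h>:
 *
 *	int offs = regs_query_register_offset("x2");
 *
 *	if (offs < 0)
 *		return offs;			// unknown register name
 *	return regs_get_register(regs, offs);	// value of x2 at the trap
 */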
112
113 /**
114  * regs_within_kernel_stack() - check the address in the stack
115  * @regs:      pt_regs which contains kernel stack pointer.
116  * @addr:      address which is checked.
117  *
118  * regs_within_kernel_stack() checks whether @addr is within the kernel stack page(s).
119  * If @addr is within the kernel stack, it returns true. If not, returns false.
120  */
121 static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
122 {
123         return ((addr & ~(THREAD_SIZE - 1))  ==
124                 (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
125                 on_irq_stack(addr, NULL);
126 }
127
128 /**
129  * regs_get_kernel_stack_nth() - get Nth entry of the stack
130  * @regs:       pt_regs which contains kernel stack pointer.
131  * @n:          stack entry number.
132  *
133  * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
134  * is specified by @regs. If the @n th entry is NOT in the kernel stack,
135  * this returns 0.
136  */
137 unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
138 {
139         unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
140
141         addr += n;
142         if (regs_within_kernel_stack(regs, (unsigned long)addr))
143                 return *addr;
144         else
145                 return 0;
146 }
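/*
 * Example (illustrative sketch): kprobe event argument fetching uses this
 * helper to read stack-passed arguments, e.g. the first stack slot at the
 * probed function's entry (arguments beyond x0-x7 on arm64):
 *
 *	unsigned long stack_arg = regs_get_kernel_stack_nth(regs, 0);
 *
 * Note that a result of 0 is ambiguous: it may be a real zero or an
 * out-of-stack access, so callers treat the value as best effort.
 */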
147
148 /*
149  * TODO: does not yet catch signals sent when the child dies,
150  * in exit.c or in signal.c.
151  */
152
153 /*
154  * Called by kernel/ptrace.c when detaching..
155  */
156 void ptrace_disable(struct task_struct *child)
157 {
158         /*
159          * This would be better off in core code, but PTRACE_DETACH has
160          * grown its fair share of arch-specific warts and changing it
161          * is likely to cause regressions on obscure architectures.
162          */
163         user_disable_single_step(child);
164 }
165
166 #ifdef CONFIG_HAVE_HW_BREAKPOINT
167 /*
168  * Handle hitting a HW-breakpoint.
169  */
170 static void ptrace_hbptriggered(struct perf_event *bp,
171                                 struct perf_sample_data *data,
172                                 struct pt_regs *regs)
173 {
174         struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
175         const char *desc = "Hardware breakpoint trap (ptrace)";
176
177 #ifdef CONFIG_COMPAT
178         if (is_compat_task()) {
179                 int si_errno = 0;
180                 int i;
181
182                 for (i = 0; i < ARM_MAX_BRP; ++i) {
183                         if (current->thread.debug.hbp_break[i] == bp) {
184                                 si_errno = (i << 1) + 1;
185                                 break;
186                         }
187                 }
188
189                 for (i = 0; i < ARM_MAX_WRP; ++i) {
190                         if (current->thread.debug.hbp_watch[i] == bp) {
191                                 si_errno = -((i << 1) + 1);
192                                 break;
193                         }
194                 }
195                 arm64_force_sig_ptrace_errno_trap(si_errno,
196                                                   (void __user *)bkpt->trigger,
197                                                   desc);
198         }
199 #endif
200         arm64_force_sig_fault(SIGTRAP, TRAP_HWBKPT,
201                               (void __user *)(bkpt->trigger),
202                               desc);
203 }
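/*
 * Example (illustrative sketch): a 32-bit debugger receiving the SIGTRAP
 * delivered above can recover which slot fired from si_errno, mirroring
 * compat_ptrace_hbp_num_to_idx() further down (positive values identify
 * breakpoints, negative values watchpoints). handle_breakpoint() and
 * handle_watchpoint() are hypothetical debugger callbacks:
 *
 *	static void sigtrap_handler(int sig, siginfo_t *si, void *ucontext)
 *	{
 *		int slot = (abs(si->si_errno) - 1) >> 1;
 *
 *		if (si->si_errno > 0)
 *			handle_breakpoint(slot, si->si_addr);
 *		else if (si->si_errno < 0)
 *			handle_watchpoint(slot, si->si_addr);
 *	}
 */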
204
205 /*
206  * Unregister breakpoints from this task and reset the pointers in
207  * the thread_struct.
208  */
209 void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
210 {
211         int i;
212         struct thread_struct *t = &tsk->thread;
213
214         for (i = 0; i < ARM_MAX_BRP; i++) {
215                 if (t->debug.hbp_break[i]) {
216                         unregister_hw_breakpoint(t->debug.hbp_break[i]);
217                         t->debug.hbp_break[i] = NULL;
218                 }
219         }
220
221         for (i = 0; i < ARM_MAX_WRP; i++) {
222                 if (t->debug.hbp_watch[i]) {
223                         unregister_hw_breakpoint(t->debug.hbp_watch[i]);
224                         t->debug.hbp_watch[i] = NULL;
225                 }
226         }
227 }
228
229 void ptrace_hw_copy_thread(struct task_struct *tsk)
230 {
231         memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
232 }
233
234 static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
235                                                struct task_struct *tsk,
236                                                unsigned long idx)
237 {
238         struct perf_event *bp = ERR_PTR(-EINVAL);
239
240         switch (note_type) {
241         case NT_ARM_HW_BREAK:
242                 if (idx >= ARM_MAX_BRP)
243                         goto out;
244                 idx = array_index_nospec(idx, ARM_MAX_BRP);
245                 bp = tsk->thread.debug.hbp_break[idx];
246                 break;
247         case NT_ARM_HW_WATCH:
248                 if (idx >= ARM_MAX_WRP)
249                         goto out;
250                 idx = array_index_nospec(idx, ARM_MAX_WRP);
251                 bp = tsk->thread.debug.hbp_watch[idx];
252                 break;
253         }
254
255 out:
256         return bp;
257 }
258
259 static int ptrace_hbp_set_event(unsigned int note_type,
260                                 struct task_struct *tsk,
261                                 unsigned long idx,
262                                 struct perf_event *bp)
263 {
264         int err = -EINVAL;
265
266         switch (note_type) {
267         case NT_ARM_HW_BREAK:
268                 if (idx >= ARM_MAX_BRP)
269                         goto out;
270                 idx = array_index_nospec(idx, ARM_MAX_BRP);
271                 tsk->thread.debug.hbp_break[idx] = bp;
272                 err = 0;
273                 break;
274         case NT_ARM_HW_WATCH:
275                 if (idx >= ARM_MAX_WRP)
276                         goto out;
277                 idx = array_index_nospec(idx, ARM_MAX_WRP);
278                 tsk->thread.debug.hbp_watch[idx] = bp;
279                 err = 0;
280                 break;
281         }
282
283 out:
284         return err;
285 }
286
287 static struct perf_event *ptrace_hbp_create(unsigned int note_type,
288                                             struct task_struct *tsk,
289                                             unsigned long idx)
290 {
291         struct perf_event *bp;
292         struct perf_event_attr attr;
293         int err, type;
294
295         switch (note_type) {
296         case NT_ARM_HW_BREAK:
297                 type = HW_BREAKPOINT_X;
298                 break;
299         case NT_ARM_HW_WATCH:
300                 type = HW_BREAKPOINT_RW;
301                 break;
302         default:
303                 return ERR_PTR(-EINVAL);
304         }
305
306         ptrace_breakpoint_init(&attr);
307
308         /*
309          * Initialise fields to sane defaults
310          * (i.e. values that will pass validation).
311          */
312         attr.bp_addr    = 0;
313         attr.bp_len     = HW_BREAKPOINT_LEN_4;
314         attr.bp_type    = type;
315         attr.disabled   = 1;
316
317         bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
318         if (IS_ERR(bp))
319                 return bp;
320
321         err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
322         if (err)
323                 return ERR_PTR(err);
324
325         return bp;
326 }
327
328 static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
329                                      struct arch_hw_breakpoint_ctrl ctrl,
330                                      struct perf_event_attr *attr)
331 {
332         int err, len, type, offset, disabled = !ctrl.enabled;
333
334         attr->disabled = disabled;
335         if (disabled)
336                 return 0;
337
338         err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
339         if (err)
340                 return err;
341
342         switch (note_type) {
343         case NT_ARM_HW_BREAK:
344                 if ((type & HW_BREAKPOINT_X) != type)
345                         return -EINVAL;
346                 break;
347         case NT_ARM_HW_WATCH:
348                 if ((type & HW_BREAKPOINT_RW) != type)
349                         return -EINVAL;
350                 break;
351         default:
352                 return -EINVAL;
353         }
354
355         attr->bp_len    = len;
356         attr->bp_type   = type;
357         attr->bp_addr   += offset;
358
359         return 0;
360 }
361
362 static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info)
363 {
364         u8 num;
365         u32 reg = 0;
366
367         switch (note_type) {
368         case NT_ARM_HW_BREAK:
369                 num = hw_breakpoint_slots(TYPE_INST);
370                 break;
371         case NT_ARM_HW_WATCH:
372                 num = hw_breakpoint_slots(TYPE_DATA);
373                 break;
374         default:
375                 return -EINVAL;
376         }
377
378         reg |= debug_monitors_arch();
379         reg <<= 8;
380         reg |= num;
381
382         *info = reg;
383         return 0;
384 }
385
386 static int ptrace_hbp_get_ctrl(unsigned int note_type,
387                                struct task_struct *tsk,
388                                unsigned long idx,
389                                u32 *ctrl)
390 {
391         struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
392
393         if (IS_ERR(bp))
394                 return PTR_ERR(bp);
395
396         *ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
397         return 0;
398 }
399
400 static int ptrace_hbp_get_addr(unsigned int note_type,
401                                struct task_struct *tsk,
402                                unsigned long idx,
403                                u64 *addr)
404 {
405         struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
406
407         if (IS_ERR(bp))
408                 return PTR_ERR(bp);
409
410         *addr = bp ? counter_arch_bp(bp)->address : 0;
411         return 0;
412 }
413
414 static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
415                                                         struct task_struct *tsk,
416                                                         unsigned long idx)
417 {
418         struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
419
420         if (!bp)
421                 bp = ptrace_hbp_create(note_type, tsk, idx);
422
423         return bp;
424 }
425
426 static int ptrace_hbp_set_ctrl(unsigned int note_type,
427                                struct task_struct *tsk,
428                                unsigned long idx,
429                                u32 uctrl)
430 {
431         int err;
432         struct perf_event *bp;
433         struct perf_event_attr attr;
434         struct arch_hw_breakpoint_ctrl ctrl;
435
436         bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
437         if (IS_ERR(bp)) {
438                 err = PTR_ERR(bp);
439                 return err;
440         }
441
442         attr = bp->attr;
443         decode_ctrl_reg(uctrl, &ctrl);
444         err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
445         if (err)
446                 return err;
447
448         return modify_user_hw_breakpoint(bp, &attr);
449 }
450
451 static int ptrace_hbp_set_addr(unsigned int note_type,
452                                struct task_struct *tsk,
453                                unsigned long idx,
454                                u64 addr)
455 {
456         int err;
457         struct perf_event *bp;
458         struct perf_event_attr attr;
459
460         bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
461         if (IS_ERR(bp)) {
462                 err = PTR_ERR(bp);
463                 return err;
464         }
465
466         attr = bp->attr;
467         attr.bp_addr = addr;
468         err = modify_user_hw_breakpoint(bp, &attr);
469         return err;
470 }
471
472 #define PTRACE_HBP_ADDR_SZ      sizeof(u64)
473 #define PTRACE_HBP_CTRL_SZ      sizeof(u32)
474 #define PTRACE_HBP_PAD_SZ       sizeof(u32)
475
476 static int hw_break_get(struct task_struct *target,
477                         const struct user_regset *regset,
478                         struct membuf to)
479 {
480         unsigned int note_type = regset->core_note_type;
481         int ret, idx = 0;
482         u32 info, ctrl;
483         u64 addr;
484
485         /* Resource info */
486         ret = ptrace_hbp_get_resource_info(note_type, &info);
487         if (ret)
488                 return ret;
489
490         membuf_write(&to, &info, sizeof(info));
491         membuf_zero(&to, sizeof(u32));
492         /* (address, ctrl) registers */
493         while (to.left) {
494                 ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
495                 if (ret)
496                         return ret;
497                 ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
498                 if (ret)
499                         return ret;
500                 membuf_store(&to, addr);
501                 membuf_store(&to, ctrl);
502                 membuf_zero(&to, sizeof(u32));
503                 idx++;
504         }
505         return 0;
506 }
507
508 static int hw_break_set(struct task_struct *target,
509                         const struct user_regset *regset,
510                         unsigned int pos, unsigned int count,
511                         const void *kbuf, const void __user *ubuf)
512 {
513         unsigned int note_type = regset->core_note_type;
514         int ret, idx = 0, offset, limit;
515         u32 ctrl;
516         u64 addr;
517
518         /* Resource info and pad */
519         offset = offsetof(struct user_hwdebug_state, dbg_regs);
520         ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
521         if (ret)
522                 return ret;
523
524         /* (address, ctrl) registers */
525         limit = regset->n * regset->size;
526         while (count && offset < limit) {
527                 if (count < PTRACE_HBP_ADDR_SZ)
528                         return -EINVAL;
529                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
530                                          offset, offset + PTRACE_HBP_ADDR_SZ);
531                 if (ret)
532                         return ret;
533                 ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
534                 if (ret)
535                         return ret;
536                 offset += PTRACE_HBP_ADDR_SZ;
537
538                 if (!count)
539                         break;
540                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
541                                          offset, offset + PTRACE_HBP_CTRL_SZ);
542                 if (ret)
543                         return ret;
544                 ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
545                 if (ret)
546                         return ret;
547                 offset += PTRACE_HBP_CTRL_SZ;
548
549                 ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
550                                                 offset,
551                                                 offset + PTRACE_HBP_PAD_SZ);
552                 if (ret)
553                         return ret;
554                 offset += PTRACE_HBP_PAD_SZ;
555                 idx++;
556         }
557
558         return 0;
559 }
560 #endif  /* CONFIG_HAVE_HW_BREAKPOINT */
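/*
 * Example (illustrative sketch): a tracer consumes the regset exposed by
 * hw_break_get()/hw_break_set() above through PTRACE_GETREGSET with the
 * NT_ARM_HW_BREAK (or NT_ARM_HW_WATCH) note type, using the UAPI
 * struct user_hwdebug_state layout of a resource info word, padding, then
 * (addr, ctrl, pad) triples (assumes the usual <sys/ptrace.h>, <sys/uio.h>,
 * <elf.h> and <asm/ptrace.h> userspace headers):
 *
 *	struct user_hwdebug_state dbg;
 *	struct iovec iov = { .iov_base = &dbg, .iov_len = sizeof(dbg) };
 *
 *	if (ptrace(PTRACE_GETREGSET, pid, NT_ARM_HW_BREAK, &iov) == 0) {
 *		int slots  = dbg.dbg_info & 0xff;	// number of breakpoint slots
 *		__u64 addr = dbg.dbg_regs[0].addr;	// slot 0 address
 *		__u32 ctrl = dbg.dbg_regs[0].ctrl;	// slot 0 control word
 *	}
 */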
561
562 static int gpr_get(struct task_struct *target,
563                    const struct user_regset *regset,
564                    struct membuf to)
565 {
566         struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs;
567         return membuf_write(&to, uregs, sizeof(*uregs));
568 }
569
570 static int gpr_set(struct task_struct *target, const struct user_regset *regset,
571                    unsigned int pos, unsigned int count,
572                    const void *kbuf, const void __user *ubuf)
573 {
574         int ret;
575         struct user_pt_regs newregs = task_pt_regs(target)->user_regs;
576
577         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
578         if (ret)
579                 return ret;
580
581         if (!valid_user_regs(&newregs, target))
582                 return -EINVAL;
583
584         task_pt_regs(target)->user_regs = newregs;
585         return 0;
586 }
587
588 static int fpr_active(struct task_struct *target, const struct user_regset *regset)
589 {
590         if (!system_supports_fpsimd())
591                 return -ENODEV;
592         return regset->n;
593 }
594
595 /*
596  * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
597  */
598 static int __fpr_get(struct task_struct *target,
599                      const struct user_regset *regset,
600                      struct membuf to)
601 {
602         struct user_fpsimd_state *uregs;
603
604         sve_sync_to_fpsimd(target);
605
606         uregs = &target->thread.uw.fpsimd_state;
607
608         return membuf_write(&to, uregs, sizeof(*uregs));
609 }
610
611 static int fpr_get(struct task_struct *target, const struct user_regset *regset,
612                    struct membuf to)
613 {
614         if (!system_supports_fpsimd())
615                 return -EINVAL;
616
617         if (target == current)
618                 fpsimd_preserve_current_state();
619
620         return __fpr_get(target, regset, to);
621 }
622
623 static int __fpr_set(struct task_struct *target,
624                      const struct user_regset *regset,
625                      unsigned int pos, unsigned int count,
626                      const void *kbuf, const void __user *ubuf,
627                      unsigned int start_pos)
628 {
629         int ret;
630         struct user_fpsimd_state newstate;
631
632         /*
633          * Ensure target->thread.uw.fpsimd_state is up to date, so that a
634          * short copyin can't resurrect stale data.
635          */
636         sve_sync_to_fpsimd(target);
637
638         newstate = target->thread.uw.fpsimd_state;
639
640         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate,
641                                  start_pos, start_pos + sizeof(newstate));
642         if (ret)
643                 return ret;
644
645         target->thread.uw.fpsimd_state = newstate;
646
647         return ret;
648 }
649
650 static int fpr_set(struct task_struct *target, const struct user_regset *regset,
651                    unsigned int pos, unsigned int count,
652                    const void *kbuf, const void __user *ubuf)
653 {
654         int ret;
655
656         if (!system_supports_fpsimd())
657                 return -EINVAL;
658
659         ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0);
660         if (ret)
661                 return ret;
662
663         sve_sync_from_fpsimd_zeropad(target);
664         fpsimd_flush_task_state(target);
665
666         return ret;
667 }
668
669 static int tls_get(struct task_struct *target, const struct user_regset *regset,
670                    struct membuf to)
671 {
672         if (target == current)
673                 tls_preserve_current_state();
674
675         return membuf_store(&to, target->thread.uw.tp_value);
676 }
677
678 static int tls_set(struct task_struct *target, const struct user_regset *regset,
679                    unsigned int pos, unsigned int count,
680                    const void *kbuf, const void __user *ubuf)
681 {
682         int ret;
683         unsigned long tls = target->thread.uw.tp_value;
684
685         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
686         if (ret)
687                 return ret;
688
689         target->thread.uw.tp_value = tls;
690         return ret;
691 }
692
693 static int system_call_get(struct task_struct *target,
694                            const struct user_regset *regset,
695                            struct membuf to)
696 {
697         return membuf_store(&to, task_pt_regs(target)->syscallno);
698 }
699
700 static int system_call_set(struct task_struct *target,
701                            const struct user_regset *regset,
702                            unsigned int pos, unsigned int count,
703                            const void *kbuf, const void __user *ubuf)
704 {
705         int syscallno = task_pt_regs(target)->syscallno;
706         int ret;
707
708         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
709         if (ret)
710                 return ret;
711
712         task_pt_regs(target)->syscallno = syscallno;
713         return ret;
714 }
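/*
 * Example (illustrative sketch): native AArch64 ptrace has no
 * PTRACE_SET_SYSCALL request (the compat AArch32 interface below keeps
 * one); the NT_ARM_SYSTEM_CALL regset above is the replacement. At a
 * syscall-entry stop a tracer can rewrite or cancel the pending syscall;
 * the conventional way to skip it is to write -1 and place the desired
 * return value in x0 via NT_PRSTATUS:
 *
 *	int scno = -1;
 *	struct iovec iov = { .iov_base = &scno, .iov_len = sizeof(scno) };
 *
 *	ptrace(PTRACE_SETREGSET, pid, NT_ARM_SYSTEM_CALL, &iov);
 */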
715
716 #ifdef CONFIG_ARM64_SVE
717
718 static void sve_init_header_from_task(struct user_sve_header *header,
719                                       struct task_struct *target)
720 {
721         unsigned int vq;
722
723         memset(header, 0, sizeof(*header));
724
725         header->flags = test_tsk_thread_flag(target, TIF_SVE) ?
726                 SVE_PT_REGS_SVE : SVE_PT_REGS_FPSIMD;
727         if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT))
728                 header->flags |= SVE_PT_VL_INHERIT;
729
730         header->vl = target->thread.sve_vl;
731         vq = sve_vq_from_vl(header->vl);
732
733         header->max_vl = sve_max_vl;
734         header->size = SVE_PT_SIZE(vq, header->flags);
735         header->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl),
736                                       SVE_PT_REGS_SVE);
737 }
738
739 static unsigned int sve_size_from_header(struct user_sve_header const *header)
740 {
741         return ALIGN(header->size, SVE_VQ_BYTES);
742 }
743
744 static int sve_get(struct task_struct *target,
745                    const struct user_regset *regset,
746                    struct membuf to)
747 {
748         struct user_sve_header header;
749         unsigned int vq;
750         unsigned long start, end;
751
752         if (!system_supports_sve())
753                 return -EINVAL;
754
755         /* Header */
756         sve_init_header_from_task(&header, target);
757         vq = sve_vq_from_vl(header.vl);
758
759         membuf_write(&to, &header, sizeof(header));
760
761         if (target == current)
762                 fpsimd_preserve_current_state();
763
764         /* Registers: FPSIMD-only case */
765
766         BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
767         if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD)
768                 return __fpr_get(target, regset, to);
769
770         /* Otherwise: full SVE case */
771
772         BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
773         start = SVE_PT_SVE_OFFSET;
774         end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
775         membuf_write(&to, target->thread.sve_state, end - start);
776
777         start = end;
778         end = SVE_PT_SVE_FPSR_OFFSET(vq);
779         membuf_zero(&to, end - start);
780
781         /*
782          * Copy fpsr and fpcr, which must follow contiguously in
783          * struct fpsimd_state:
784          */
785         start = end;
786         end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
787         membuf_write(&to, &target->thread.uw.fpsimd_state.fpsr, end - start);
788
789         start = end;
790         end = sve_size_from_header(&header);
791         return membuf_zero(&to, end - start);
792 }
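/*
 * Example (illustrative sketch): because the NT_ARM_SVE payload size
 * depends on the vector length, a tracer typically reads the fixed-size
 * header first and then decides how much register data follows, using the
 * SVE_PT_* macros from the UAPI <asm/ptrace.h>:
 *
 *	struct user_sve_header hdr;
 *	struct iovec iov = { .iov_base = &hdr, .iov_len = sizeof(hdr) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, NT_ARM_SVE, &iov);
 *	if ((hdr.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_SVE) {
 *		// hdr.size bytes in total: full Z/P/FFR dump at this VL
 *	} else {
 *		// FPSIMD-only: a struct user_fpsimd_state follows the header
 *	}
 */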
793
794 static int sve_set(struct task_struct *target,
795                    const struct user_regset *regset,
796                    unsigned int pos, unsigned int count,
797                    const void *kbuf, const void __user *ubuf)
798 {
799         int ret;
800         struct user_sve_header header;
801         unsigned int vq;
802         unsigned long start, end;
803
804         if (!system_supports_sve())
805                 return -EINVAL;
806
807         /* Header */
808         if (count < sizeof(header))
809                 return -EINVAL;
810         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header,
811                                  0, sizeof(header));
812         if (ret)
813                 goto out;
814
815         /*
816          * Apart from SVE_PT_REGS_MASK, all SVE_PT_* flags are consumed by
817          * sve_set_vector_length(), which will also validate them for us:
818          */
819         ret = sve_set_vector_length(target, header.vl,
820                 ((unsigned long)header.flags & ~SVE_PT_REGS_MASK) << 16);
821         if (ret)
822                 goto out;
823
824         /* Actual VL set may be less than the user asked for: */
825         vq = sve_vq_from_vl(target->thread.sve_vl);
826
827         /* Registers: FPSIMD-only case */
828
829         BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
830         if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD) {
831                 ret = __fpr_set(target, regset, pos, count, kbuf, ubuf,
832                                 SVE_PT_FPSIMD_OFFSET);
833                 clear_tsk_thread_flag(target, TIF_SVE);
834                 goto out;
835         }
836
837         /* Otherwise: full SVE case */
838
839         /*
840          * If the VL actually set differs from the requested VL and there is
841          * register data, the data layout will be wrong: don't even
842          * try to set the registers in this case.
843          */
844         if (count && vq != sve_vq_from_vl(header.vl)) {
845                 ret = -EIO;
846                 goto out;
847         }
848
849         sve_alloc(target);
850
851         /*
852          * Ensure target->thread.sve_state is up to date with target's
853          * FPSIMD regs, so that a short copyin leaves trailing registers
854          * unmodified.
855          */
856         fpsimd_sync_to_sve(target);
857         set_tsk_thread_flag(target, TIF_SVE);
858
859         BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
860         start = SVE_PT_SVE_OFFSET;
861         end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
862         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
863                                  target->thread.sve_state,
864                                  start, end);
865         if (ret)
866                 goto out;
867
868         start = end;
869         end = SVE_PT_SVE_FPSR_OFFSET(vq);
870         ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
871                                         start, end);
872         if (ret)
873                 goto out;
874
875         /*
876          * Copy fpsr and fpcr, which must follow contiguously in
877          * struct fpsimd_state:
878          */
879         start = end;
880         end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
881         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
882                                  &target->thread.uw.fpsimd_state.fpsr,
883                                  start, end);
884
885 out:
886         fpsimd_flush_task_state(target);
887         return ret;
888 }
889
890 #endif /* CONFIG_ARM64_SVE */
891
892 #ifdef CONFIG_ARM64_PTR_AUTH
893 static int pac_mask_get(struct task_struct *target,
894                         const struct user_regset *regset,
895                         struct membuf to)
896 {
897         /*
898          * The PAC bits can differ across data and instruction pointers
899          * depending on TCR_EL1.TBID*, which we may make use of in future, so
900          * we expose separate masks.
901          */
902         unsigned long mask = ptrauth_user_pac_mask();
903         struct user_pac_mask uregs = {
904                 .data_mask = mask,
905                 .insn_mask = mask,
906         };
907
908         if (!system_supports_address_auth())
909                 return -EINVAL;
910
911         return membuf_write(&to, &uregs, sizeof(uregs));
912 }
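/*
 * Example (illustrative sketch): debuggers and unwinders use these masks
 * to strip pointer-authentication bits from signed code pointers before
 * symbolising them. For a user-space (TTBR0) address the PAC bits are
 * simply cleared; raw_lr below stands for a link-register value read via
 * NT_PRSTATUS:
 *
 *	struct user_pac_mask masks;
 *	struct iovec iov = { .iov_base = &masks, .iov_len = sizeof(masks) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, NT_ARM_PAC_MASK, &iov);
 *	__u64 plain_lr = raw_lr & ~masks.insn_mask;
 */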
913
914 #ifdef CONFIG_CHECKPOINT_RESTORE
915 static __uint128_t pac_key_to_user(const struct ptrauth_key *key)
916 {
917         return (__uint128_t)key->hi << 64 | key->lo;
918 }
919
920 static struct ptrauth_key pac_key_from_user(__uint128_t ukey)
921 {
922         struct ptrauth_key key = {
923                 .lo = (unsigned long)ukey,
924                 .hi = (unsigned long)(ukey >> 64),
925         };
926
927         return key;
928 }
929
930 static void pac_address_keys_to_user(struct user_pac_address_keys *ukeys,
931                                      const struct ptrauth_keys_user *keys)
932 {
933         ukeys->apiakey = pac_key_to_user(&keys->apia);
934         ukeys->apibkey = pac_key_to_user(&keys->apib);
935         ukeys->apdakey = pac_key_to_user(&keys->apda);
936         ukeys->apdbkey = pac_key_to_user(&keys->apdb);
937 }
938
939 static void pac_address_keys_from_user(struct ptrauth_keys_user *keys,
940                                        const struct user_pac_address_keys *ukeys)
941 {
942         keys->apia = pac_key_from_user(ukeys->apiakey);
943         keys->apib = pac_key_from_user(ukeys->apibkey);
944         keys->apda = pac_key_from_user(ukeys->apdakey);
945         keys->apdb = pac_key_from_user(ukeys->apdbkey);
946 }
947
948 static int pac_address_keys_get(struct task_struct *target,
949                                 const struct user_regset *regset,
950                                 struct membuf to)
951 {
952         struct ptrauth_keys_user *keys = &target->thread.keys_user;
953         struct user_pac_address_keys user_keys;
954
955         if (!system_supports_address_auth())
956                 return -EINVAL;
957
958         pac_address_keys_to_user(&user_keys, keys);
959
960         return membuf_write(&to, &user_keys, sizeof(user_keys));
961 }
962
963 static int pac_address_keys_set(struct task_struct *target,
964                                 const struct user_regset *regset,
965                                 unsigned int pos, unsigned int count,
966                                 const void *kbuf, const void __user *ubuf)
967 {
968         struct ptrauth_keys_user *keys = &target->thread.keys_user;
969         struct user_pac_address_keys user_keys;
970         int ret;
971
972         if (!system_supports_address_auth())
973                 return -EINVAL;
974
975         pac_address_keys_to_user(&user_keys, keys);
976         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
977                                  &user_keys, 0, -1);
978         if (ret)
979                 return ret;
980         pac_address_keys_from_user(keys, &user_keys);
981
982         return 0;
983 }
984
985 static void pac_generic_keys_to_user(struct user_pac_generic_keys *ukeys,
986                                      const struct ptrauth_keys_user *keys)
987 {
988         ukeys->apgakey = pac_key_to_user(&keys->apga);
989 }
990
991 static void pac_generic_keys_from_user(struct ptrauth_keys_user *keys,
992                                        const struct user_pac_generic_keys *ukeys)
993 {
994         keys->apga = pac_key_from_user(ukeys->apgakey);
995 }
996
997 static int pac_generic_keys_get(struct task_struct *target,
998                                 const struct user_regset *regset,
999                                 struct membuf to)
1000 {
1001         struct ptrauth_keys_user *keys = &target->thread.keys_user;
1002         struct user_pac_generic_keys user_keys;
1003
1004         if (!system_supports_generic_auth())
1005                 return -EINVAL;
1006
1007         pac_generic_keys_to_user(&user_keys, keys);
1008
1009         return membuf_write(&to, &user_keys, sizeof(user_keys));
1010 }
1011
1012 static int pac_generic_keys_set(struct task_struct *target,
1013                                 const struct user_regset *regset,
1014                                 unsigned int pos, unsigned int count,
1015                                 const void *kbuf, const void __user *ubuf)
1016 {
1017         struct ptrauth_keys_user *keys = &target->thread.keys_user;
1018         struct user_pac_generic_keys user_keys;
1019         int ret;
1020
1021         if (!system_supports_generic_auth())
1022                 return -EINVAL;
1023
1024         pac_generic_keys_to_user(&user_keys, keys);
1025         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1026                                  &user_keys, 0, -1);
1027         if (ret)
1028                 return ret;
1029         pac_generic_keys_from_user(keys, &user_keys);
1030
1031         return 0;
1032 }
1033 #endif /* CONFIG_CHECKPOINT_RESTORE */
1034 #endif /* CONFIG_ARM64_PTR_AUTH */
1035
1036 #ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
1037 static int tagged_addr_ctrl_get(struct task_struct *target,
1038                                 const struct user_regset *regset,
1039                                 struct membuf to)
1040 {
1041         long ctrl = get_tagged_addr_ctrl(target);
1042
1043         if (IS_ERR_VALUE(ctrl))
1044                 return ctrl;
1045
1046         return membuf_write(&to, &ctrl, sizeof(ctrl));
1047 }
1048
1049 static int tagged_addr_ctrl_set(struct task_struct *target, const struct
1050                                 user_regset *regset, unsigned int pos,
1051                                 unsigned int count, const void *kbuf, const
1052                                 void __user *ubuf)
1053 {
1054         int ret;
1055         long ctrl;
1056
1057         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, 0, -1);
1058         if (ret)
1059                 return ret;
1060
1061         return set_tagged_addr_ctrl(target, ctrl);
1062 }
1063 #endif
1064
1065 enum aarch64_regset {
1066         REGSET_GPR,
1067         REGSET_FPR,
1068         REGSET_TLS,
1069 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1070         REGSET_HW_BREAK,
1071         REGSET_HW_WATCH,
1072 #endif
1073         REGSET_SYSTEM_CALL,
1074 #ifdef CONFIG_ARM64_SVE
1075         REGSET_SVE,
1076 #endif
1077 #ifdef CONFIG_ARM64_PTR_AUTH
1078         REGSET_PAC_MASK,
1079 #ifdef CONFIG_CHECKPOINT_RESTORE
1080         REGSET_PACA_KEYS,
1081         REGSET_PACG_KEYS,
1082 #endif
1083 #endif
1084 #ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
1085         REGSET_TAGGED_ADDR_CTRL,
1086 #endif
1087 };
1088
1089 static const struct user_regset aarch64_regsets[] = {
1090         [REGSET_GPR] = {
1091                 .core_note_type = NT_PRSTATUS,
1092                 .n = sizeof(struct user_pt_regs) / sizeof(u64),
1093                 .size = sizeof(u64),
1094                 .align = sizeof(u64),
1095                 .regset_get = gpr_get,
1096                 .set = gpr_set
1097         },
1098         [REGSET_FPR] = {
1099                 .core_note_type = NT_PRFPREG,
1100                 .n = sizeof(struct user_fpsimd_state) / sizeof(u32),
1101                 /*
1102                  * We pretend we have 32-bit registers because the fpsr and
1103                  * fpcr are 32 bits wide.
1104                  */
1105                 .size = sizeof(u32),
1106                 .align = sizeof(u32),
1107                 .active = fpr_active,
1108                 .regset_get = fpr_get,
1109                 .set = fpr_set
1110         },
1111         [REGSET_TLS] = {
1112                 .core_note_type = NT_ARM_TLS,
1113                 .n = 1,
1114                 .size = sizeof(void *),
1115                 .align = sizeof(void *),
1116                 .regset_get = tls_get,
1117                 .set = tls_set,
1118         },
1119 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1120         [REGSET_HW_BREAK] = {
1121                 .core_note_type = NT_ARM_HW_BREAK,
1122                 .n = sizeof(struct user_hwdebug_state) / sizeof(u32),
1123                 .size = sizeof(u32),
1124                 .align = sizeof(u32),
1125                 .regset_get = hw_break_get,
1126                 .set = hw_break_set,
1127         },
1128         [REGSET_HW_WATCH] = {
1129                 .core_note_type = NT_ARM_HW_WATCH,
1130                 .n = sizeof(struct user_hwdebug_state) / sizeof(u32),
1131                 .size = sizeof(u32),
1132                 .align = sizeof(u32),
1133                 .regset_get = hw_break_get,
1134                 .set = hw_break_set,
1135         },
1136 #endif
1137         [REGSET_SYSTEM_CALL] = {
1138                 .core_note_type = NT_ARM_SYSTEM_CALL,
1139                 .n = 1,
1140                 .size = sizeof(int),
1141                 .align = sizeof(int),
1142                 .regset_get = system_call_get,
1143                 .set = system_call_set,
1144         },
1145 #ifdef CONFIG_ARM64_SVE
1146         [REGSET_SVE] = { /* Scalable Vector Extension */
1147                 .core_note_type = NT_ARM_SVE,
1148                 .n = DIV_ROUND_UP(SVE_PT_SIZE(SVE_VQ_MAX, SVE_PT_REGS_SVE),
1149                                   SVE_VQ_BYTES),
1150                 .size = SVE_VQ_BYTES,
1151                 .align = SVE_VQ_BYTES,
1152                 .regset_get = sve_get,
1153                 .set = sve_set,
1154         },
1155 #endif
1156 #ifdef CONFIG_ARM64_PTR_AUTH
1157         [REGSET_PAC_MASK] = {
1158                 .core_note_type = NT_ARM_PAC_MASK,
1159                 .n = sizeof(struct user_pac_mask) / sizeof(u64),
1160                 .size = sizeof(u64),
1161                 .align = sizeof(u64),
1162                 .regset_get = pac_mask_get,
1163                 /* this cannot be set dynamically */
1164         },
1165 #ifdef CONFIG_CHECKPOINT_RESTORE
1166         [REGSET_PACA_KEYS] = {
1167                 .core_note_type = NT_ARM_PACA_KEYS,
1168                 .n = sizeof(struct user_pac_address_keys) / sizeof(__uint128_t),
1169                 .size = sizeof(__uint128_t),
1170                 .align = sizeof(__uint128_t),
1171                 .regset_get = pac_address_keys_get,
1172                 .set = pac_address_keys_set,
1173         },
1174         [REGSET_PACG_KEYS] = {
1175                 .core_note_type = NT_ARM_PACG_KEYS,
1176                 .n = sizeof(struct user_pac_generic_keys) / sizeof(__uint128_t),
1177                 .size = sizeof(__uint128_t),
1178                 .align = sizeof(__uint128_t),
1179                 .regset_get = pac_generic_keys_get,
1180                 .set = pac_generic_keys_set,
1181         },
1182 #endif
1183 #endif
1184 #ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
1185         [REGSET_TAGGED_ADDR_CTRL] = {
1186                 .core_note_type = NT_ARM_TAGGED_ADDR_CTRL,
1187                 .n = 1,
1188                 .size = sizeof(long),
1189                 .align = sizeof(long),
1190                 .regset_get = tagged_addr_ctrl_get,
1191                 .set = tagged_addr_ctrl_set,
1192         },
1193 #endif
1194 };
1195
1196 static const struct user_regset_view user_aarch64_view = {
1197         .name = "aarch64", .e_machine = EM_AARCH64,
1198         .regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets)
1199 };
1200
1201 #ifdef CONFIG_COMPAT
1202 enum compat_regset {
1203         REGSET_COMPAT_GPR,
1204         REGSET_COMPAT_VFP,
1205 };
1206
1207 static inline compat_ulong_t compat_get_user_reg(struct task_struct *task, int idx)
1208 {
1209         struct pt_regs *regs = task_pt_regs(task);
1210
1211         switch (idx) {
1212         case 15:
1213                 return regs->pc;
1214         case 16:
1215                 return pstate_to_compat_psr(regs->pstate);
1216         case 17:
1217                 return regs->orig_x0;
1218         default:
1219                 return regs->regs[idx];
1220         }
1221 }
1222
1223 static int compat_gpr_get(struct task_struct *target,
1224                           const struct user_regset *regset,
1225                           struct membuf to)
1226 {
1227         int i = 0;
1228
1229         while (to.left)
1230                 membuf_store(&to, compat_get_user_reg(target, i++));
1231         return 0;
1232 }
1233
1234 static int compat_gpr_set(struct task_struct *target,
1235                           const struct user_regset *regset,
1236                           unsigned int pos, unsigned int count,
1237                           const void *kbuf, const void __user *ubuf)
1238 {
1239         struct pt_regs newregs;
1240         int ret = 0;
1241         unsigned int i, start, num_regs;
1242
1243         /* Calculate the number of AArch32 registers contained in count */
1244         num_regs = count / regset->size;
1245
1246         /* Convert pos into a register number */
1247         start = pos / regset->size;
1248
1249         if (start + num_regs > regset->n)
1250                 return -EIO;
1251
1252         newregs = *task_pt_regs(target);
1253
1254         for (i = 0; i < num_regs; ++i) {
1255                 unsigned int idx = start + i;
1256                 compat_ulong_t reg;
1257
1258                 if (kbuf) {
1259                         memcpy(&reg, kbuf, sizeof(reg));
1260                         kbuf += sizeof(reg);
1261                 } else {
1262                         ret = copy_from_user(&reg, ubuf, sizeof(reg));
1263                         if (ret) {
1264                                 ret = -EFAULT;
1265                                 break;
1266                         }
1267
1268                         ubuf += sizeof(reg);
1269                 }
1270
1271                 switch (idx) {
1272                 case 15:
1273                         newregs.pc = reg;
1274                         break;
1275                 case 16:
1276                         reg = compat_psr_to_pstate(reg);
1277                         newregs.pstate = reg;
1278                         break;
1279                 case 17:
1280                         newregs.orig_x0 = reg;
1281                         break;
1282                 default:
1283                         newregs.regs[idx] = reg;
1284                 }
1285
1286         }
1287
1288         if (valid_user_regs(&newregs.user_regs, target))
1289                 *task_pt_regs(target) = newregs;
1290         else
1291                 ret = -EINVAL;
1292
1293         return ret;
1294 }
1295
1296 static int compat_vfp_get(struct task_struct *target,
1297                           const struct user_regset *regset,
1298                           struct membuf to)
1299 {
1300         struct user_fpsimd_state *uregs;
1301         compat_ulong_t fpscr;
1302
1303         if (!system_supports_fpsimd())
1304                 return -EINVAL;
1305
1306         uregs = &target->thread.uw.fpsimd_state;
1307
1308         if (target == current)
1309                 fpsimd_preserve_current_state();
1310
1311         /*
1312          * The VFP registers are packed into the fpsimd_state, so they all sit
1313          * nicely together for us. We just need to create the fpscr separately.
1314          */
1315         membuf_write(&to, uregs, VFP_STATE_SIZE - sizeof(compat_ulong_t));
1316         fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) |
1317                 (uregs->fpcr & VFP_FPSCR_CTRL_MASK);
1318         return membuf_store(&to, fpscr);
1319 }
1320
1321 static int compat_vfp_set(struct task_struct *target,
1322                           const struct user_regset *regset,
1323                           unsigned int pos, unsigned int count,
1324                           const void *kbuf, const void __user *ubuf)
1325 {
1326         struct user_fpsimd_state *uregs;
1327         compat_ulong_t fpscr;
1328         int ret, vregs_end_pos;
1329
1330         if (!system_supports_fpsimd())
1331                 return -EINVAL;
1332
1333         uregs = &target->thread.uw.fpsimd_state;
1334
1335         vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
1336         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
1337                                  vregs_end_pos);
1338
1339         if (count && !ret) {
1340                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpscr,
1341                                          vregs_end_pos, VFP_STATE_SIZE);
1342                 if (!ret) {
1343                         uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK;
1344                         uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
1345                 }
1346         }
1347
1348         fpsimd_flush_task_state(target);
1349         return ret;
1350 }
1351
1352 static int compat_tls_get(struct task_struct *target,
1353                           const struct user_regset *regset,
1354                           struct membuf to)
1355 {
1356         return membuf_store(&to, (compat_ulong_t)target->thread.uw.tp_value);
1357 }
1358
1359 static int compat_tls_set(struct task_struct *target,
1360                           const struct user_regset *regset, unsigned int pos,
1361                           unsigned int count, const void *kbuf,
1362                           const void __user *ubuf)
1363 {
1364         int ret;
1365         compat_ulong_t tls = target->thread.uw.tp_value;
1366
1367         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
1368         if (ret)
1369                 return ret;
1370
1371         target->thread.uw.tp_value = tls;
1372         return ret;
1373 }
1374
1375 static const struct user_regset aarch32_regsets[] = {
1376         [REGSET_COMPAT_GPR] = {
1377                 .core_note_type = NT_PRSTATUS,
1378                 .n = COMPAT_ELF_NGREG,
1379                 .size = sizeof(compat_elf_greg_t),
1380                 .align = sizeof(compat_elf_greg_t),
1381                 .regset_get = compat_gpr_get,
1382                 .set = compat_gpr_set
1383         },
1384         [REGSET_COMPAT_VFP] = {
1385                 .core_note_type = NT_ARM_VFP,
1386                 .n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
1387                 .size = sizeof(compat_ulong_t),
1388                 .align = sizeof(compat_ulong_t),
1389                 .active = fpr_active,
1390                 .regset_get = compat_vfp_get,
1391                 .set = compat_vfp_set
1392         },
1393 };
1394
1395 static const struct user_regset_view user_aarch32_view = {
1396         .name = "aarch32", .e_machine = EM_ARM,
1397         .regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
1398 };
1399
1400 static const struct user_regset aarch32_ptrace_regsets[] = {
1401         [REGSET_GPR] = {
1402                 .core_note_type = NT_PRSTATUS,
1403                 .n = COMPAT_ELF_NGREG,
1404                 .size = sizeof(compat_elf_greg_t),
1405                 .align = sizeof(compat_elf_greg_t),
1406                 .regset_get = compat_gpr_get,
1407                 .set = compat_gpr_set
1408         },
1409         [REGSET_FPR] = {
1410                 .core_note_type = NT_ARM_VFP,
1411                 .n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
1412                 .size = sizeof(compat_ulong_t),
1413                 .align = sizeof(compat_ulong_t),
1414                 .regset_get = compat_vfp_get,
1415                 .set = compat_vfp_set
1416         },
1417         [REGSET_TLS] = {
1418                 .core_note_type = NT_ARM_TLS,
1419                 .n = 1,
1420                 .size = sizeof(compat_ulong_t),
1421                 .align = sizeof(compat_ulong_t),
1422                 .regset_get = compat_tls_get,
1423                 .set = compat_tls_set,
1424         },
1425 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1426         [REGSET_HW_BREAK] = {
1427                 .core_note_type = NT_ARM_HW_BREAK,
1428                 .n = sizeof(struct user_hwdebug_state) / sizeof(u32),
1429                 .size = sizeof(u32),
1430                 .align = sizeof(u32),
1431                 .regset_get = hw_break_get,
1432                 .set = hw_break_set,
1433         },
1434         [REGSET_HW_WATCH] = {
1435                 .core_note_type = NT_ARM_HW_WATCH,
1436                 .n = sizeof(struct user_hwdebug_state) / sizeof(u32),
1437                 .size = sizeof(u32),
1438                 .align = sizeof(u32),
1439                 .regset_get = hw_break_get,
1440                 .set = hw_break_set,
1441         },
1442 #endif
1443         [REGSET_SYSTEM_CALL] = {
1444                 .core_note_type = NT_ARM_SYSTEM_CALL,
1445                 .n = 1,
1446                 .size = sizeof(int),
1447                 .align = sizeof(int),
1448                 .regset_get = system_call_get,
1449                 .set = system_call_set,
1450         },
1451 };
1452
1453 static const struct user_regset_view user_aarch32_ptrace_view = {
1454         .name = "aarch32", .e_machine = EM_ARM,
1455         .regsets = aarch32_ptrace_regsets, .n = ARRAY_SIZE(aarch32_ptrace_regsets)
1456 };
1457
1458 static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
1459                                    compat_ulong_t __user *ret)
1460 {
1461         compat_ulong_t tmp;
1462
1463         if (off & 3)
1464                 return -EIO;
1465
1466         if (off == COMPAT_PT_TEXT_ADDR)
1467                 tmp = tsk->mm->start_code;
1468         else if (off == COMPAT_PT_DATA_ADDR)
1469                 tmp = tsk->mm->start_data;
1470         else if (off == COMPAT_PT_TEXT_END_ADDR)
1471                 tmp = tsk->mm->end_code;
1472         else if (off < sizeof(compat_elf_gregset_t))
1473                 tmp = compat_get_user_reg(tsk, off >> 2);
1474         else if (off >= COMPAT_USER_SZ)
1475                 return -EIO;
1476         else
1477                 tmp = 0;
1478
1479         return put_user(tmp, ret);
1480 }
1481
1482 static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
1483                                     compat_ulong_t val)
1484 {
1485         struct pt_regs newregs = *task_pt_regs(tsk);
1486         unsigned int idx = off / 4;
1487
1488         if (off & 3 || off >= COMPAT_USER_SZ)
1489                 return -EIO;
1490
1491         if (off >= sizeof(compat_elf_gregset_t))
1492                 return 0;
1493
1494         switch (idx) {
1495         case 15:
1496                 newregs.pc = val;
1497                 break;
1498         case 16:
1499                 newregs.pstate = compat_psr_to_pstate(val);
1500                 break;
1501         case 17:
1502                 newregs.orig_x0 = val;
1503                 break;
1504         default:
1505                 newregs.regs[idx] = val;
1506         }
1507
1508         if (!valid_user_regs(&newregs.user_regs, tsk))
1509                 return -EINVAL;
1510
1511         *task_pt_regs(tsk) = newregs;
1512         return 0;
1513 }
1514
1515 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1516
1517 /*
1518  * Convert a virtual register number into an index for a thread_struct
1519  * breakpoint array. Breakpoints are identified using positive numbers
1520  * whilst watchpoints are negative. The registers are laid out as pairs
1521  * of (address, control), each pair mapping to a unique hw_breakpoint struct.
1522  * Register 0 is reserved for describing resource information.
1523  */
1524 static int compat_ptrace_hbp_num_to_idx(compat_long_t num)
1525 {
1526         return (abs(num) - 1) >> 1;
1527 }
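/*
 * Example (illustrative sketch): the resulting AArch32 register numbering
 * as seen by a compat debugger using PTRACE_GETHBPREGS/PTRACE_SETHBPREGS:
 *
 *	num  0		-> resource information word
 *	num  1,  2	-> breakpoint 0 address, control
 *	num  3,  4	-> breakpoint 1 address, control
 *	num -1, -2	-> watchpoint 0 address, control
 *	num -3, -4	-> watchpoint 1 address, control
 *
 * i.e. idx = (abs(num) - 1) >> 1, with an odd |num| selecting the address
 * half of the pair and an even |num| the control half.
 */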
1528
1529 static int compat_ptrace_hbp_get_resource_info(u32 *kdata)
1530 {
1531         u8 num_brps, num_wrps, debug_arch, wp_len;
1532         u32 reg = 0;
1533
1534         num_brps        = hw_breakpoint_slots(TYPE_INST);
1535         num_wrps        = hw_breakpoint_slots(TYPE_DATA);
1536
1537         debug_arch      = debug_monitors_arch();
1538         wp_len          = 8;
1539         reg             |= debug_arch;
1540         reg             <<= 8;
1541         reg             |= wp_len;
1542         reg             <<= 8;
1543         reg             |= num_wrps;
1544         reg             <<= 8;
1545         reg             |= num_brps;
1546
1547         *kdata = reg;
1548         return 0;
1549 }
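/*
 * Example (illustrative sketch): decoding the packed word on the debugger
 * side, from byte 0 upwards: breakpoint count, watchpoint count, maximum
 * watchpoint length, debug architecture version:
 *
 *	__u32 info;		// value returned for register number 0
 *
 *	int num_brps   = info & 0xff;
 *	int num_wrps   = (info >> 8) & 0xff;
 *	int max_wp_len = (info >> 16) & 0xff;
 *	int debug_arch = (info >> 24) & 0xff;
 */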
1550
1551 static int compat_ptrace_hbp_get(unsigned int note_type,
1552                                  struct task_struct *tsk,
1553                                  compat_long_t num,
1554                                  u32 *kdata)
1555 {
1556         u64 addr = 0;
1557         u32 ctrl = 0;
1558
1559         int err, idx = compat_ptrace_hbp_num_to_idx(num);
1560
1561         if (num & 1) {
1562                 err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
1563                 *kdata = (u32)addr;
1564         } else {
1565                 err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl);
1566                 *kdata = ctrl;
1567         }
1568
1569         return err;
1570 }
1571
1572 static int compat_ptrace_hbp_set(unsigned int note_type,
1573                                  struct task_struct *tsk,
1574                                  compat_long_t num,
1575                                  u32 *kdata)
1576 {
1577         u64 addr;
1578         u32 ctrl;
1579
1580         int err, idx = compat_ptrace_hbp_num_to_idx(num);
1581
1582         if (num & 1) {
1583                 addr = *kdata;
1584                 err = ptrace_hbp_set_addr(note_type, tsk, idx, addr);
1585         } else {
1586                 ctrl = *kdata;
1587                 err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl);
1588         }
1589
1590         return err;
1591 }
1592
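/*
 * A 32-bit tracer typically reads the resource info register first and then
 * individual address/control registers, roughly (illustrative only, using the
 * AArch32 request that COMPAT_PTRACE_GETHBPREGS corresponds to):
 *
 *        u32 info, addr0;
 *        ptrace(PTRACE_GETHBPREGS, pid, (void *)0, &info);   - resource info
 *        ptrace(PTRACE_GETHBPREGS, pid, (void *)1, &addr0);  - bkpt 0 address
 */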
1593 static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
1594                                     compat_ulong_t __user *data)
1595 {
1596         int ret;
1597         u32 kdata;
1598
1599         /* Watchpoint */
1600         if (num < 0) {
1601                 ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
1602         /* Resource info */
1603         } else if (num == 0) {
1604                 ret = compat_ptrace_hbp_get_resource_info(&kdata);
1605         /* Breakpoint */
1606         } else {
1607                 ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
1608         }
1609
1610         if (!ret)
1611                 ret = put_user(kdata, data);
1612
1613         return ret;
1614 }
1615
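/*
 * Register 0 (the resource info register) is read-only: writes to it
 * succeed but are silently ignored.
 */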
1616 static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
1617                                     compat_ulong_t __user *data)
1618 {
1619         int ret;
1620         u32 kdata = 0;
1621
1622         if (num == 0)
1623                 return 0;
1624
1625         ret = get_user(kdata, data);
1626         if (ret)
1627                 return ret;
1628
1629         if (num < 0)
1630                 ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
1631         else
1632                 ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);
1633
1634         return ret;
1635 }
1636 #endif  /* CONFIG_HAVE_HW_BREAKPOINT */
1637
1638 long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
1639                         compat_ulong_t caddr, compat_ulong_t cdata)
1640 {
1641         unsigned long addr = caddr;
1642         unsigned long data = cdata;
1643         void __user *datap = compat_ptr(data);
1644         int ret;
1645
1646         switch (request) {
1647         case PTRACE_PEEKUSR:
1648                 ret = compat_ptrace_read_user(child, addr, datap);
1649                 break;
1650
1651         case PTRACE_POKEUSR:
1652                 ret = compat_ptrace_write_user(child, addr, data);
1653                 break;
1654
1655         case COMPAT_PTRACE_GETREGS:
1656                 ret = copy_regset_to_user(child,
1657                                           &user_aarch32_view,
1658                                           REGSET_COMPAT_GPR,
1659                                           0, sizeof(compat_elf_gregset_t),
1660                                           datap);
1661                 break;
1662
1663         case COMPAT_PTRACE_SETREGS:
1664                 ret = copy_regset_from_user(child,
1665                                             &user_aarch32_view,
1666                                             REGSET_COMPAT_GPR,
1667                                             0, sizeof(compat_elf_gregset_t),
1668                                             datap);
1669                 break;
1670
1671         case COMPAT_PTRACE_GET_THREAD_AREA:
1672                 ret = put_user((compat_ulong_t)child->thread.uw.tp_value,
1673                                (compat_ulong_t __user *)datap);
1674                 break;
1675
1676         case COMPAT_PTRACE_SET_SYSCALL:
1677                 task_pt_regs(child)->syscallno = data;
1678                 ret = 0;
1679                 break;
1680
1681         case COMPAT_PTRACE_GETVFPREGS:
1682                 ret = copy_regset_to_user(child,
1683                                           &user_aarch32_view,
1684                                           REGSET_COMPAT_VFP,
1685                                           0, VFP_STATE_SIZE,
1686                                           datap);
1687                 break;
1688
1689         case COMPAT_PTRACE_SETVFPREGS:
1690                 ret = copy_regset_from_user(child,
1691                                             &user_aarch32_view,
1692                                             REGSET_COMPAT_VFP,
1693                                             0, VFP_STATE_SIZE,
1694                                             datap);
1695                 break;
1696
1697 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1698         case COMPAT_PTRACE_GETHBPREGS:
1699                 ret = compat_ptrace_gethbpregs(child, addr, datap);
1700                 break;
1701
1702         case COMPAT_PTRACE_SETHBPREGS:
1703                 ret = compat_ptrace_sethbpregs(child, addr, datap);
1704                 break;
1705 #endif
1706
1707         default:
1708                 ret = compat_ptrace_request(child, request, addr,
1709                                             data);
1710                 break;
1711         }
1712
1713         return ret;
1714 }
1715 #endif /* CONFIG_COMPAT */
1716
1717 const struct user_regset_view *task_user_regset_view(struct task_struct *task)
1718 {
1719 #ifdef CONFIG_COMPAT
1720         /*
1721          * Core dumping of 32-bit tasks or compat ptrace requests must use the
1722          * user_aarch32_view compatible with arm32. Native ptrace requests on
1723          * 32-bit children use an extended user_aarch32_ptrace_view to allow
1724          * access to the TLS register.
1725          */
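        /*
         * Note that is_compat_task() tests the caller (the tracer or the
         * dumping task itself), while is_compat_thread() tests the target
         * @task.
         */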
1726         if (is_compat_task())
1727                 return &user_aarch32_view;
1728         else if (is_compat_thread(task_thread_info(task)))
1729                 return &user_aarch32_ptrace_view;
1730 #endif
1731         return &user_aarch64_view;
1732 }
1733
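/*
 * PTRACE_PEEKMTETAGS/POKEMTETAGS transfer MTE allocation tags rather than
 * data: @addr is the address in the tracee and @data points to a struct
 * iovec describing the tracer-side tag buffer. A rough (illustrative) use:
 *
 *        struct iovec iov = { .iov_base = tags, .iov_len = sizeof(tags) };
 *        ptrace(PTRACE_PEEKMTETAGS, pid, (void *)remote_addr, &iov);
 */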
1734 long arch_ptrace(struct task_struct *child, long request,
1735                  unsigned long addr, unsigned long data)
1736 {
1737         switch (request) {
1738         case PTRACE_PEEKMTETAGS:
1739         case PTRACE_POKEMTETAGS:
1740                 return mte_ptrace_copy_tags(child, request, addr, data);
1741         }
1742
1743         return ptrace_request(child, request, addr, data);
1744 }
1745
1746 enum ptrace_syscall_dir {
1747         PTRACE_SYSCALL_ENTER = 0,
1748         PTRACE_SYSCALL_EXIT,
1749 };
1750
1751 static void tracehook_report_syscall(struct pt_regs *regs,
1752                                      enum ptrace_syscall_dir dir)
1753 {
1754         int regno;
1755         unsigned long saved_reg;
1756
1757         /*
1758          * We have some ABI weirdness here in the way that we handle syscall
1759          * exit stops because we indicate whether or not the stop has been
1760          * signalled from syscall entry or syscall exit by clobbering a general
1761          * purpose register (ip/r12 for AArch32, x7 for AArch64) in the tracee
1762          * and restoring its old value after the stop. This means that:
1763          *
1764          * - Any writes by the tracer to this register during the stop are
1765          *   ignored/discarded.
1766          *
1767          * - The actual value of the register is not available during the stop,
1768          *   so the tracer cannot save it and restore it later.
1769          *
1770          * - Syscall stops behave differently to seccomp and pseudo-step traps
1771          *   (the latter do not nobble any registers).
1772          */
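        /*
         * Concretely, at a syscall-entry stop the clobbered register reads
         * back as PTRACE_SYSCALL_ENTER (0) and at a syscall-exit stop as
         * PTRACE_SYSCALL_EXIT (1), so a tracer can fetch x7 (or r12 for an
         * AArch32 tracee) via PTRACE_GETREGSET/NT_PRSTATUS to tell the two
         * apart.
         */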
1773         regno = (is_compat_task() ? 12 : 7);
1774         saved_reg = regs->regs[regno];
1775         regs->regs[regno] = dir;
1776
1777         if (dir == PTRACE_SYSCALL_ENTER) {
1778                 if (tracehook_report_syscall_entry(regs))
1779                         forget_syscall(regs);
1780                 regs->regs[regno] = saved_reg;
1781         } else if (!test_thread_flag(TIF_SINGLESTEP)) {
1782                 tracehook_report_syscall_exit(regs, 0);
1783                 regs->regs[regno] = saved_reg;
1784         } else {
1785                 regs->regs[regno] = saved_reg;
1786
1787                 /*
1788                  * Signal a pseudo-step exception since we are stepping but
1789                  * tracer modifications to the registers may have rewound the
1790                  * state machine.
1791                  */
1792                 tracehook_report_syscall_exit(regs, 1);
1793         }
1794 }
1795
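/*
 * Returning NO_SYSCALL below makes the syscall entry code skip the call
 * itself; this is how PTRACE_SYSEMU (_TIF_SYSCALL_EMU) and seccomp denials
 * suppress a system call.
 */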
1796 int syscall_trace_enter(struct pt_regs *regs)
1797 {
1798         unsigned long flags = READ_ONCE(current_thread_info()->flags);
1799
1800         if (flags & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE)) {
1801                 tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
1802                 if (flags & _TIF_SYSCALL_EMU)
1803                         return NO_SYSCALL;
1804         }
1805
1806         /* Do the secure computing after ptrace; failures should be fast. */
1807         if (secure_computing() == -1)
1808                 return NO_SYSCALL;
1809
1810         if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
1811                 trace_sys_enter(regs, regs->syscallno);
1812
1813         audit_syscall_entry(regs->syscallno, regs->orig_x0, regs->regs[1],
1814                             regs->regs[2], regs->regs[3]);
1815
1816         return regs->syscallno;
1817 }
1818
1819 void syscall_trace_exit(struct pt_regs *regs)
1820 {
1821         unsigned long flags = READ_ONCE(current_thread_info()->flags);
1822
1823         audit_syscall_exit(regs);
1824
1825         if (flags & _TIF_SYSCALL_TRACEPOINT)
1826                 trace_sys_exit(regs, syscall_get_return_value(current, regs));
1827
1828         if (flags & (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP))
1829                 tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
1830
1831         rseq_syscall(regs);
1832 }
1833
1834 /*
1835  * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487D.a.
1836  * We permit userspace to set SSBS (AArch64 bit 12, AArch32 bit 23) which is
1837  * not described in ARM DDI 0487D.a.
1838  * We treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may
1839  * be allocated an EL0 meaning in future.
1840  * Userspace cannot use these until they have an architectural meaning.
1841  * Note that this follows the SPSR_ELx format, not the AArch32 PSR format.
1842  * We also reserve IL for the kernel; SS is handled dynamically.
1843  */
1844 #define SPSR_EL1_AARCH64_RES0_BITS \
1845         (GENMASK_ULL(63, 32) | GENMASK_ULL(27, 26) | GENMASK_ULL(23, 22) | \
1846          GENMASK_ULL(20, 13) | GENMASK_ULL(5, 5))
1847 #define SPSR_EL1_AARCH32_RES0_BITS \
1848         (GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20))
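
/*
 * Writes of RES0 bits are silently discarded by valid_{native,compat}_regs()
 * below: for example, a debugger that sets PSTATE.PAN (bit 22) or PSTATE.UAO
 * (bit 23) in an NT_PRSTATUS write will read those bits back as zero.
 */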
1849
1850 static int valid_compat_regs(struct user_pt_regs *regs)
1851 {
1852         regs->pstate &= ~SPSR_EL1_AARCH32_RES0_BITS;
1853
1854         if (!system_supports_mixed_endian_el0()) {
1855                 if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
1856                         regs->pstate |= PSR_AA32_E_BIT;
1857                 else
1858                         regs->pstate &= ~PSR_AA32_E_BIT;
1859         }
1860
1861         if (user_mode(regs) && (regs->pstate & PSR_MODE32_BIT) &&
1862             (regs->pstate & PSR_AA32_A_BIT) == 0 &&
1863             (regs->pstate & PSR_AA32_I_BIT) == 0 &&
1864             (regs->pstate & PSR_AA32_F_BIT) == 0) {
1865                 return 1;
1866         }
1867
1868         /*
1869          * Force PSR to a valid 32-bit EL0t, preserving the same bits as
1870          * arch/arm.
1871          */
1872         regs->pstate &= PSR_AA32_N_BIT | PSR_AA32_Z_BIT |
1873                         PSR_AA32_C_BIT | PSR_AA32_V_BIT |
1874                         PSR_AA32_Q_BIT | PSR_AA32_IT_MASK |
1875                         PSR_AA32_GE_MASK | PSR_AA32_E_BIT |
1876                         PSR_AA32_T_BIT;
1877         regs->pstate |= PSR_MODE32_BIT;
1878
1879         return 0;
1880 }
1881
1882 static int valid_native_regs(struct user_pt_regs *regs)
1883 {
1884         regs->pstate &= ~SPSR_EL1_AARCH64_RES0_BITS;
1885
1886         if (user_mode(regs) && !(regs->pstate & PSR_MODE32_BIT) &&
1887             (regs->pstate & PSR_D_BIT) == 0 &&
1888             (regs->pstate & PSR_A_BIT) == 0 &&
1889             (regs->pstate & PSR_I_BIT) == 0 &&
1890             (regs->pstate & PSR_F_BIT) == 0) {
1891                 return 1;
1892         }
1893
1894         /* Force PSR to a valid 64-bit EL0t */
1895         regs->pstate &= PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT;
1896
1897         return 0;
1898 }
1899
1900 /*
1901  * Are the current registers suitable for user mode? (used to maintain
1902  * security in signal handlers). Returns 1 if so, 0 after sanitizing them.
1903  */
1904 int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task)
1905 {
1906         /* https://lore.kernel.org/lkml/20191118131525.GA4180@willie-the-truck */
1907         user_regs_reset_single_step(regs, task);
1908
1909         if (is_compat_thread(task_thread_info(task)))
1910                 return valid_compat_regs(regs);
1911         else
1912                 return valid_native_regs(regs);
1913 }