/*
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Derived from "arch/m68k/kernel/ptrace.c"
 * Copyright (C) 1994 by Hamish Macdonald
 * Taken from linux/kernel/ptrace.c and modified for M680x0.
 * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
 *
 * Modified by Cort Dougan (cort@hq.fsmlabs.com)
 * and Paul Mackerras (paulus@samba.org).
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License.  See the file README.legal in the main directory of
 * this archive for more details.
 */
18 #include <linux/kernel.h>
19 #include <linux/sched.h>
21 #include <linux/smp.h>
22 #include <linux/errno.h>
23 #include <linux/ptrace.h>
24 #include <linux/regset.h>
25 #include <linux/tracehook.h>
26 #include <linux/elf.h>
27 #include <linux/user.h>
28 #include <linux/security.h>
29 #include <linux/signal.h>
30 #include <linux/seccomp.h>
31 #include <linux/audit.h>
32 #include <trace/syscall.h>
33 #include <linux/hw_breakpoint.h>
34 #include <linux/perf_event.h>
35 #include <linux/context_tracking.h>
37 #include <linux/uaccess.h>
38 #include <linux/pkeys.h>
40 #include <asm/pgtable.h>
41 #include <asm/switch_to.h>
43 #include <asm/asm-prototypes.h>
44 #include <asm/debug.h>
46 #define CREATE_TRACE_POINTS
47 #include <trace/events/syscalls.h>
/*
 * The parameter save area on the stack is used to store arguments being passed
 * to callee function and is located at fixed offset from stack pointer.
 * The offset differs between the 32-bit and 64-bit PowerPC ABIs.
 */
#ifdef CONFIG_PPC32
#define PARAMETER_SAVE_AREA_OFFSET	24  /* bytes */
#else /* CONFIG_PPC32 */
#define PARAMETER_SAVE_AREA_OFFSET	48  /* bytes */
#endif /* CONFIG_PPC32 */
/*
 * struct pt_regs_offset - map a user-visible register name to its byte
 * offset within struct pt_regs.  Used by regs_query_register_offset() /
 * regs_query_register_name() below (kprobes/ftrace helpers).
 */
struct pt_regs_offset {
	const char *name;	/* canonical register name, NULL terminates table */
	int offset;		/* byte offset in struct pt_regs */
};

#define STR(s)	#s			/* convert to string */
#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
/* GPRs are reachable under two names: "rN" and "gprN". */
#define GPR_OFFSET_NAME(num)	\
	{.name = STR(r##num), .offset = offsetof(struct pt_regs, gpr[num])}, \
	{.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])}
#define REG_OFFSET_END {.name = NULL, .offset = 0}

/* Shorthand offsets into the thread state structures, used by BUILD_BUG_ONs. */
#define TVSO(f)	(offsetof(struct thread_vr_state, f))
#define TFSO(f)	(offsetof(struct thread_fp_state, f))
#define TSO(f)	(offsetof(struct thread_struct, f))
75 static const struct pt_regs_offset regoffset_table[] = {
108 REG_OFFSET_NAME(nip),
109 REG_OFFSET_NAME(msr),
110 REG_OFFSET_NAME(ctr),
111 REG_OFFSET_NAME(link),
112 REG_OFFSET_NAME(xer),
113 REG_OFFSET_NAME(ccr),
115 REG_OFFSET_NAME(softe),
119 REG_OFFSET_NAME(trap),
120 REG_OFFSET_NAME(dar),
121 REG_OFFSET_NAME(dsisr),
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Flush the current task's live transactional-memory state out to its
 * thread_struct so ptrace sees a consistent snapshot.
 */
static void flush_tmregs_to_thread(struct task_struct *tsk)
{
	/*
	 * If task is not current, it will have been flushed already to
	 * its thread_struct during __switch_to().
	 *
	 * A reclaim flushes ALL the state or if not in TM save TM SPRs
	 * in the appropriate thread structures from live.
	 */
	if ((!cpu_has_feature(CPU_FTR_TM)) || (tsk != current))
		return;

	if (MSR_TM_SUSPENDED(mfmsr())) {
		tm_reclaim_current(TM_CAUSE_SIGNAL);
	} else {
		tm_enable();
		tm_save_sprs(&(tsk->thread));
	}
}
#else
static inline void flush_tmregs_to_thread(struct task_struct *tsk) { }
#endif
151 * regs_query_register_offset() - query register offset from its name
152 * @name: the name of a register
154 * regs_query_register_offset() returns the offset of a register in struct
155 * pt_regs from its name. If the name is invalid, this returns -EINVAL;
157 int regs_query_register_offset(const char *name)
159 const struct pt_regs_offset *roff;
160 for (roff = regoffset_table; roff->name != NULL; roff++)
161 if (!strcmp(roff->name, name))
167 * regs_query_register_name() - query register name from its offset
168 * @offset: the offset of a register in struct pt_regs.
170 * regs_query_register_name() returns the name of a register from its
171 * offset in struct pt_regs. If the @offset is invalid, this returns NULL;
173 const char *regs_query_register_name(unsigned int offset)
175 const struct pt_regs_offset *roff;
176 for (roff = regoffset_table; roff->name != NULL; roff++)
177 if (roff->offset == offset)
/*
 * does not yet catch signals sent when the child dies.
 * in exit.c or in signal.c.
 */

/*
 * Set of msr bits that gdb can change on behalf of a process.
 * On BookE (ADV_DEBUG_REGS) single-step is driven by the debug registers,
 * so no MSR bits are user-changeable there.
 */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
#define MSR_DEBUGCHANGE	0
#else
#define MSR_DEBUGCHANGE	(MSR_SE | MSR_BE)
#endif

/*
 * Max register writeable via put_reg
 */
#ifdef CONFIG_PPC32
#define PT_MAX_PUT_REG	PT_MQ
#else
#define PT_MAX_PUT_REG	PT_CCR
#endif
205 static unsigned long get_user_msr(struct task_struct *task)
207 return task->thread.regs->msr | task->thread.fpexc_mode;
210 static int set_user_msr(struct task_struct *task, unsigned long msr)
212 task->thread.regs->msr &= ~MSR_DEBUGCHANGE;
213 task->thread.regs->msr |= msr & MSR_DEBUGCHANGE;
217 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
218 static unsigned long get_user_ckpt_msr(struct task_struct *task)
220 return task->thread.ckpt_regs.msr | task->thread.fpexc_mode;
223 static int set_user_ckpt_msr(struct task_struct *task, unsigned long msr)
225 task->thread.ckpt_regs.msr &= ~MSR_DEBUGCHANGE;
226 task->thread.ckpt_regs.msr |= msr & MSR_DEBUGCHANGE;
230 static int set_user_ckpt_trap(struct task_struct *task, unsigned long trap)
232 task->thread.ckpt_regs.trap = trap & 0xfff0;
238 static int get_user_dscr(struct task_struct *task, unsigned long *data)
240 *data = task->thread.dscr;
244 static int set_user_dscr(struct task_struct *task, unsigned long dscr)
246 task->thread.dscr = dscr;
247 task->thread.dscr_inherit = 1;
251 static int get_user_dscr(struct task_struct *task, unsigned long *data)
256 static int set_user_dscr(struct task_struct *task, unsigned long dscr)
263 * We prevent mucking around with the reserved area of trap
264 * which are used internally by the kernel.
266 static int set_user_trap(struct task_struct *task, unsigned long trap)
268 task->thread.regs->trap = trap & 0xfff0;
273 * Get contents of register REGNO in task TASK.
275 int ptrace_get_reg(struct task_struct *task, int regno, unsigned long *data)
277 if ((task->thread.regs == NULL) || !data)
280 if (regno == PT_MSR) {
281 *data = get_user_msr(task);
285 if (regno == PT_DSCR)
286 return get_user_dscr(task, data);
290 * softe copies paca->irq_soft_mask variable state. Since irq_soft_mask is
291 * no more used as a flag, lets force usr to alway see the softe value as 1
292 * which means interrupts are not soft disabled.
294 if (regno == PT_SOFTE) {
300 if (regno < (sizeof(struct pt_regs) / sizeof(unsigned long))) {
301 *data = ((unsigned long *)task->thread.regs)[regno];
309 * Write contents of register REGNO in task TASK.
311 int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data)
313 if (task->thread.regs == NULL)
317 return set_user_msr(task, data);
318 if (regno == PT_TRAP)
319 return set_user_trap(task, data);
320 if (regno == PT_DSCR)
321 return set_user_dscr(task, data);
323 if (regno <= PT_MAX_PUT_REG) {
324 ((unsigned long *)task->thread.regs)[regno] = data;
330 static int gpr_get(struct task_struct *target, const struct user_regset *regset,
331 unsigned int pos, unsigned int count,
332 void *kbuf, void __user *ubuf)
336 if (target->thread.regs == NULL)
339 if (!FULL_REGS(target->thread.regs)) {
340 /* We have a partial register set. Fill 14-31 with bogus values */
341 for (i = 14; i < 32; i++)
342 target->thread.regs->gpr[i] = NV_REG_POISON;
345 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
347 0, offsetof(struct pt_regs, msr));
349 unsigned long msr = get_user_msr(target);
350 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
351 offsetof(struct pt_regs, msr),
352 offsetof(struct pt_regs, msr) +
356 BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
357 offsetof(struct pt_regs, msr) + sizeof(long));
360 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
361 &target->thread.regs->orig_gpr3,
362 offsetof(struct pt_regs, orig_gpr3),
363 sizeof(struct pt_regs));
365 ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
366 sizeof(struct pt_regs), -1);
371 static int gpr_set(struct task_struct *target, const struct user_regset *regset,
372 unsigned int pos, unsigned int count,
373 const void *kbuf, const void __user *ubuf)
378 if (target->thread.regs == NULL)
381 CHECK_FULL_REGS(target->thread.regs);
383 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
385 0, PT_MSR * sizeof(reg));
387 if (!ret && count > 0) {
388 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, ®,
389 PT_MSR * sizeof(reg),
390 (PT_MSR + 1) * sizeof(reg));
392 ret = set_user_msr(target, reg);
395 BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
396 offsetof(struct pt_regs, msr) + sizeof(long));
399 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
400 &target->thread.regs->orig_gpr3,
401 PT_ORIG_R3 * sizeof(reg),
402 (PT_MAX_PUT_REG + 1) * sizeof(reg));
404 if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
405 ret = user_regset_copyin_ignore(
406 &pos, &count, &kbuf, &ubuf,
407 (PT_MAX_PUT_REG + 1) * sizeof(reg),
408 PT_TRAP * sizeof(reg));
410 if (!ret && count > 0) {
411 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, ®,
412 PT_TRAP * sizeof(reg),
413 (PT_TRAP + 1) * sizeof(reg));
415 ret = set_user_trap(target, reg);
419 ret = user_regset_copyin_ignore(
420 &pos, &count, &kbuf, &ubuf,
421 (PT_TRAP + 1) * sizeof(reg), -1);
427 * Regardless of transactions, 'fp_state' holds the current running
428 * value of all FPR registers and 'ckfp_state' holds the last checkpointed
429 * value of all FPR registers for the current transaction.
431 * Userspace interface buffer layout:
438 static int fpr_get(struct task_struct *target, const struct user_regset *regset,
439 unsigned int pos, unsigned int count,
440 void *kbuf, void __user *ubuf)
446 flush_fp_to_thread(target);
448 /* copy to local buffer then write that out */
449 for (i = 0; i < 32 ; i++)
450 buf[i] = target->thread.TS_FPR(i);
451 buf[32] = target->thread.fp_state.fpscr;
452 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
454 BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
455 offsetof(struct thread_fp_state, fpr[32]));
457 flush_fp_to_thread(target);
459 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
460 &target->thread.fp_state, 0, -1);
465 * Regardless of transactions, 'fp_state' holds the current running
466 * value of all FPR registers and 'ckfp_state' holds the last checkpointed
467 * value of all FPR registers for the current transaction.
469 * Userspace interface buffer layout:
477 static int fpr_set(struct task_struct *target, const struct user_regset *regset,
478 unsigned int pos, unsigned int count,
479 const void *kbuf, const void __user *ubuf)
485 flush_fp_to_thread(target);
487 for (i = 0; i < 32 ; i++)
488 buf[i] = target->thread.TS_FPR(i);
489 buf[32] = target->thread.fp_state.fpscr;
491 /* copy to local buffer then write that out */
492 i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
496 for (i = 0; i < 32 ; i++)
497 target->thread.TS_FPR(i) = buf[i];
498 target->thread.fp_state.fpscr = buf[32];
501 BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
502 offsetof(struct thread_fp_state, fpr[32]));
504 flush_fp_to_thread(target);
506 return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
507 &target->thread.fp_state, 0, -1);
511 #ifdef CONFIG_ALTIVEC
513 * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
514 * The transfer totals 34 quadword. Quadwords 0-31 contain the
515 * corresponding vector registers. Quadword 32 contains the vscr as the
516 * last word (offset 12) within that quadword. Quadword 33 contains the
517 * vrsave as the first word (offset 0) within the quadword.
519 * This definition of the VMX state is compatible with the current PPC32
520 * ptrace interface. This allows signal handling and ptrace to use the
521 * same structures. This also simplifies the implementation of a bi-arch
522 * (combined (32- and 64-bit) gdb.
525 static int vr_active(struct task_struct *target,
526 const struct user_regset *regset)
528 flush_altivec_to_thread(target);
529 return target->thread.used_vr ? regset->n : 0;
533 * Regardless of transactions, 'vr_state' holds the current running
534 * value of all the VMX registers and 'ckvr_state' holds the last
535 * checkpointed value of all the VMX registers for the current
536 * transaction to fall back on in case it aborts.
538 * Userspace interface buffer layout:
546 static int vr_get(struct task_struct *target, const struct user_regset *regset,
547 unsigned int pos, unsigned int count,
548 void *kbuf, void __user *ubuf)
552 flush_altivec_to_thread(target);
554 BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
555 offsetof(struct thread_vr_state, vr[32]));
557 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
558 &target->thread.vr_state, 0,
559 33 * sizeof(vector128));
562 * Copy out only the low-order word of vrsave.
569 memset(&vrsave, 0, sizeof(vrsave));
571 vrsave.word = target->thread.vrsave;
573 start = 33 * sizeof(vector128);
574 end = start + sizeof(vrsave);
575 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
583 * Regardless of transactions, 'vr_state' holds the current running
584 * value of all the VMX registers and 'ckvr_state' holds the last
585 * checkpointed value of all the VMX registers for the current
586 * transaction to fall back on in case it aborts.
588 * Userspace interface buffer layout:
596 static int vr_set(struct task_struct *target, const struct user_regset *regset,
597 unsigned int pos, unsigned int count,
598 const void *kbuf, const void __user *ubuf)
602 flush_altivec_to_thread(target);
604 BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
605 offsetof(struct thread_vr_state, vr[32]));
607 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
608 &target->thread.vr_state, 0,
609 33 * sizeof(vector128));
610 if (!ret && count > 0) {
612 * We use only the first word of vrsave.
619 memset(&vrsave, 0, sizeof(vrsave));
621 vrsave.word = target->thread.vrsave;
623 start = 33 * sizeof(vector128);
624 end = start + sizeof(vrsave);
625 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
628 target->thread.vrsave = vrsave.word;
633 #endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
/*
 * Currently to set and and get all the vsx state, you need to call
 * the fp and VMX calls as well.  This only get/sets the lower 32
 * 128bit VSX registers.
 */

static int vsr_active(struct task_struct *target,
		      const struct user_regset *regset)
{
	flush_vsx_to_thread(target);
	return target->thread.used_vsr ? regset->n : 0;
}

/*
 * Regardless of transactions, 'fp_state' holds the current running
 * value of all FPR registers and 'ckfp_state' holds the last
 * checkpointed value of all FPR registers for the current
 * transaction.
 *
 * Userspace interface buffer layout:
 *
 * struct data {
 *	u64	vsx[32];
 * };
 */
static int vsr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	u64 buf[32];
	int ret, i;

	flush_tmregs_to_thread(target);
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);
	flush_vsx_to_thread(target);

	/* Gather the low halves of VSR0-31 (stored alongside the FPRs). */
	for (i = 0; i < 32 ; i++)
		buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  buf, 0, 32 * sizeof(double));

	return ret;
}

/*
 * Regardless of transactions, 'fp_state' holds the current running
 * value of all FPR registers and 'ckfp_state' holds the last
 * checkpointed value of all FPR registers for the current
 * transaction.
 *
 * Userspace interface buffer layout:
 *
 * struct data {
 *	u64	vsx[32];
 * };
 */
static int vsr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	u64 buf[32];
	int ret, i;

	flush_tmregs_to_thread(target);
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);
	flush_vsx_to_thread(target);

	/* Seed with current values so a partial write merges correctly. */
	for (i = 0; i < 32 ; i++)
		buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 buf, 0, 32 * sizeof(double));
	if (!ret)
		for (i = 0; i < 32 ; i++)
			target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];

	return ret;
}
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
/*
 * For get_evrregs/set_evrregs functions 'data' has the following layout:
 *
 * struct {
 *   u32 evr[32];
 *   u64 acc;
 *   u32 spefscr;
 * }
 */

static int evr_active(struct task_struct *target,
		      const struct user_regset *regset)
{
	flush_spe_to_thread(target);
	return target->thread.used_spe ? regset->n : 0;
}

static int evr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	int ret;

	flush_spe_to_thread(target);

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &target->thread.evr,
				  0, sizeof(target->thread.evr));

	/* acc and spefscr must be adjacent so they copy out as one span. */
	BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
		     offsetof(struct thread_struct, spefscr));

	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &target->thread.acc,
					  sizeof(target->thread.evr), -1);

	return ret;
}

static int evr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;

	flush_spe_to_thread(target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.evr,
				 0, sizeof(target->thread.evr));

	BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
		     offsetof(struct thread_struct, spefscr));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.acc,
					 sizeof(target->thread.evr), -1);

	return ret;
}
#endif /* CONFIG_SPE */
785 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
787 * tm_cgpr_active - get active number of registers in CGPR
788 * @target: The target task.
789 * @regset: The user regset structure.
791 * This function checks for the active number of available
792 * regisers in transaction checkpointed GPR category.
794 static int tm_cgpr_active(struct task_struct *target,
795 const struct user_regset *regset)
797 if (!cpu_has_feature(CPU_FTR_TM))
800 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
807 * tm_cgpr_get - get CGPR registers
808 * @target: The target task.
809 * @regset: The user regset structure.
810 * @pos: The buffer position.
811 * @count: Number of bytes to copy.
812 * @kbuf: Kernel buffer to copy from.
813 * @ubuf: User buffer to copy into.
815 * This function gets transaction checkpointed GPR registers.
817 * When the transaction is active, 'ckpt_regs' holds all the checkpointed
818 * GPR register values for the current transaction to fall back on if it
819 * aborts in between. This function gets those checkpointed GPR registers.
820 * The userspace interface buffer layout is as follows.
823 * struct pt_regs ckpt_regs;
826 static int tm_cgpr_get(struct task_struct *target,
827 const struct user_regset *regset,
828 unsigned int pos, unsigned int count,
829 void *kbuf, void __user *ubuf)
833 if (!cpu_has_feature(CPU_FTR_TM))
836 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
839 flush_tmregs_to_thread(target);
840 flush_fp_to_thread(target);
841 flush_altivec_to_thread(target);
843 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
844 &target->thread.ckpt_regs,
845 0, offsetof(struct pt_regs, msr));
847 unsigned long msr = get_user_ckpt_msr(target);
849 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
850 offsetof(struct pt_regs, msr),
851 offsetof(struct pt_regs, msr) +
855 BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
856 offsetof(struct pt_regs, msr) + sizeof(long));
859 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
860 &target->thread.ckpt_regs.orig_gpr3,
861 offsetof(struct pt_regs, orig_gpr3),
862 sizeof(struct pt_regs));
864 ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
865 sizeof(struct pt_regs), -1);
871 * tm_cgpr_set - set the CGPR registers
872 * @target: The target task.
873 * @regset: The user regset structure.
874 * @pos: The buffer position.
875 * @count: Number of bytes to copy.
876 * @kbuf: Kernel buffer to copy into.
877 * @ubuf: User buffer to copy from.
879 * This function sets in transaction checkpointed GPR registers.
881 * When the transaction is active, 'ckpt_regs' holds the checkpointed
882 * GPR register values for the current transaction to fall back on if it
883 * aborts in between. This function sets those checkpointed GPR registers.
884 * The userspace interface buffer layout is as follows.
887 * struct pt_regs ckpt_regs;
890 static int tm_cgpr_set(struct task_struct *target,
891 const struct user_regset *regset,
892 unsigned int pos, unsigned int count,
893 const void *kbuf, const void __user *ubuf)
898 if (!cpu_has_feature(CPU_FTR_TM))
901 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
904 flush_tmregs_to_thread(target);
905 flush_fp_to_thread(target);
906 flush_altivec_to_thread(target);
908 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
909 &target->thread.ckpt_regs,
910 0, PT_MSR * sizeof(reg));
912 if (!ret && count > 0) {
913 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, ®,
914 PT_MSR * sizeof(reg),
915 (PT_MSR + 1) * sizeof(reg));
917 ret = set_user_ckpt_msr(target, reg);
920 BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
921 offsetof(struct pt_regs, msr) + sizeof(long));
924 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
925 &target->thread.ckpt_regs.orig_gpr3,
926 PT_ORIG_R3 * sizeof(reg),
927 (PT_MAX_PUT_REG + 1) * sizeof(reg));
929 if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
930 ret = user_regset_copyin_ignore(
931 &pos, &count, &kbuf, &ubuf,
932 (PT_MAX_PUT_REG + 1) * sizeof(reg),
933 PT_TRAP * sizeof(reg));
935 if (!ret && count > 0) {
936 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, ®,
937 PT_TRAP * sizeof(reg),
938 (PT_TRAP + 1) * sizeof(reg));
940 ret = set_user_ckpt_trap(target, reg);
944 ret = user_regset_copyin_ignore(
945 &pos, &count, &kbuf, &ubuf,
946 (PT_TRAP + 1) * sizeof(reg), -1);
952 * tm_cfpr_active - get active number of registers in CFPR
953 * @target: The target task.
954 * @regset: The user regset structure.
956 * This function checks for the active number of available
957 * regisers in transaction checkpointed FPR category.
959 static int tm_cfpr_active(struct task_struct *target,
960 const struct user_regset *regset)
962 if (!cpu_has_feature(CPU_FTR_TM))
965 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
972 * tm_cfpr_get - get CFPR registers
973 * @target: The target task.
974 * @regset: The user regset structure.
975 * @pos: The buffer position.
976 * @count: Number of bytes to copy.
977 * @kbuf: Kernel buffer to copy from.
978 * @ubuf: User buffer to copy into.
980 * This function gets in transaction checkpointed FPR registers.
982 * When the transaction is active 'ckfp_state' holds the checkpointed
983 * values for the current transaction to fall back on if it aborts
984 * in between. This function gets those checkpointed FPR registers.
985 * The userspace interface buffer layout is as follows.
992 static int tm_cfpr_get(struct task_struct *target,
993 const struct user_regset *regset,
994 unsigned int pos, unsigned int count,
995 void *kbuf, void __user *ubuf)
1000 if (!cpu_has_feature(CPU_FTR_TM))
1003 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1006 flush_tmregs_to_thread(target);
1007 flush_fp_to_thread(target);
1008 flush_altivec_to_thread(target);
1010 /* copy to local buffer then write that out */
1011 for (i = 0; i < 32 ; i++)
1012 buf[i] = target->thread.TS_CKFPR(i);
1013 buf[32] = target->thread.ckfp_state.fpscr;
1014 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
1018 * tm_cfpr_set - set CFPR registers
1019 * @target: The target task.
1020 * @regset: The user regset structure.
1021 * @pos: The buffer position.
1022 * @count: Number of bytes to copy.
1023 * @kbuf: Kernel buffer to copy into.
1024 * @ubuf: User buffer to copy from.
1026 * This function sets in transaction checkpointed FPR registers.
1028 * When the transaction is active 'ckfp_state' holds the checkpointed
1029 * FPR register values for the current transaction to fall back on
1030 * if it aborts in between. This function sets these checkpointed
1031 * FPR registers. The userspace interface buffer layout is as follows.
1038 static int tm_cfpr_set(struct task_struct *target,
1039 const struct user_regset *regset,
1040 unsigned int pos, unsigned int count,
1041 const void *kbuf, const void __user *ubuf)
1046 if (!cpu_has_feature(CPU_FTR_TM))
1049 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1052 flush_tmregs_to_thread(target);
1053 flush_fp_to_thread(target);
1054 flush_altivec_to_thread(target);
1056 for (i = 0; i < 32; i++)
1057 buf[i] = target->thread.TS_CKFPR(i);
1058 buf[32] = target->thread.ckfp_state.fpscr;
1060 /* copy to local buffer then write that out */
1061 i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
1064 for (i = 0; i < 32 ; i++)
1065 target->thread.TS_CKFPR(i) = buf[i];
1066 target->thread.ckfp_state.fpscr = buf[32];
1071 * tm_cvmx_active - get active number of registers in CVMX
1072 * @target: The target task.
1073 * @regset: The user regset structure.
1075 * This function checks for the active number of available
1076 * regisers in checkpointed VMX category.
1078 static int tm_cvmx_active(struct task_struct *target,
1079 const struct user_regset *regset)
1081 if (!cpu_has_feature(CPU_FTR_TM))
1084 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1091 * tm_cvmx_get - get CMVX registers
1092 * @target: The target task.
1093 * @regset: The user regset structure.
1094 * @pos: The buffer position.
1095 * @count: Number of bytes to copy.
1096 * @kbuf: Kernel buffer to copy from.
1097 * @ubuf: User buffer to copy into.
1099 * This function gets in transaction checkpointed VMX registers.
1101 * When the transaction is active 'ckvr_state' and 'ckvrsave' hold
1102 * the checkpointed values for the current transaction to fall
1103 * back on if it aborts in between. The userspace interface buffer
1104 * layout is as follows.
1112 static int tm_cvmx_get(struct task_struct *target,
1113 const struct user_regset *regset,
1114 unsigned int pos, unsigned int count,
1115 void *kbuf, void __user *ubuf)
1119 BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));
1121 if (!cpu_has_feature(CPU_FTR_TM))
1124 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1127 /* Flush the state */
1128 flush_tmregs_to_thread(target);
1129 flush_fp_to_thread(target);
1130 flush_altivec_to_thread(target);
1132 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1133 &target->thread.ckvr_state, 0,
1134 33 * sizeof(vector128));
1137 * Copy out only the low-order word of vrsave.
1143 memset(&vrsave, 0, sizeof(vrsave));
1144 vrsave.word = target->thread.ckvrsave;
1145 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
1146 33 * sizeof(vector128), -1);
1153 * tm_cvmx_set - set CMVX registers
1154 * @target: The target task.
1155 * @regset: The user regset structure.
1156 * @pos: The buffer position.
1157 * @count: Number of bytes to copy.
1158 * @kbuf: Kernel buffer to copy into.
1159 * @ubuf: User buffer to copy from.
1161 * This function sets in transaction checkpointed VMX registers.
1163 * When the transaction is active 'ckvr_state' and 'ckvrsave' hold
1164 * the checkpointed values for the current transaction to fall
1165 * back on if it aborts in between. The userspace interface buffer
1166 * layout is as follows.
1174 static int tm_cvmx_set(struct task_struct *target,
1175 const struct user_regset *regset,
1176 unsigned int pos, unsigned int count,
1177 const void *kbuf, const void __user *ubuf)
1181 BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));
1183 if (!cpu_has_feature(CPU_FTR_TM))
1186 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1189 flush_tmregs_to_thread(target);
1190 flush_fp_to_thread(target);
1191 flush_altivec_to_thread(target);
1193 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1194 &target->thread.ckvr_state, 0,
1195 33 * sizeof(vector128));
1196 if (!ret && count > 0) {
1198 * We use only the low-order word of vrsave.
1204 memset(&vrsave, 0, sizeof(vrsave));
1205 vrsave.word = target->thread.ckvrsave;
1206 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
1207 33 * sizeof(vector128), -1);
1209 target->thread.ckvrsave = vrsave.word;
1216 * tm_cvsx_active - get active number of registers in CVSX
1217 * @target: The target task.
1218 * @regset: The user regset structure.
1220 * This function checks for the active number of available
1221 * regisers in transaction checkpointed VSX category.
1223 static int tm_cvsx_active(struct task_struct *target,
1224 const struct user_regset *regset)
1226 if (!cpu_has_feature(CPU_FTR_TM))
1229 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1232 flush_vsx_to_thread(target);
1233 return target->thread.used_vsr ? regset->n : 0;
1237 * tm_cvsx_get - get CVSX registers
1238 * @target: The target task.
1239 * @regset: The user regset structure.
1240 * @pos: The buffer position.
1241 * @count: Number of bytes to copy.
1242 * @kbuf: Kernel buffer to copy from.
1243 * @ubuf: User buffer to copy into.
1245 * This function gets in transaction checkpointed VSX registers.
1247 * When the transaction is active 'ckfp_state' holds the checkpointed
1248 * values for the current transaction to fall back on if it aborts
1249 * in between. This function gets those checkpointed VSX registers.
1250 * The userspace interface buffer layout is as follows.
1256 static int tm_cvsx_get(struct task_struct *target,
1257 const struct user_regset *regset,
1258 unsigned int pos, unsigned int count,
1259 void *kbuf, void __user *ubuf)
1264 if (!cpu_has_feature(CPU_FTR_TM))
1267 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1270 /* Flush the state */
1271 flush_tmregs_to_thread(target);
1272 flush_fp_to_thread(target);
1273 flush_altivec_to_thread(target);
1274 flush_vsx_to_thread(target);
1276 for (i = 0; i < 32 ; i++)
1277 buf[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
1278 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1279 buf, 0, 32 * sizeof(double));
1285 * tm_cvsx_set - set CFPR registers
1286 * @target: The target task.
1287 * @regset: The user regset structure.
1288 * @pos: The buffer position.
1289 * @count: Number of bytes to copy.
1290 * @kbuf: Kernel buffer to copy into.
1291 * @ubuf: User buffer to copy from.
1293 * This function sets in transaction checkpointed VSX registers.
1295 * When the transaction is active 'ckfp_state' holds the checkpointed
1296 * VSX register values for the current transaction to fall back on
1297 * if it aborts in between. This function sets these checkpointed
1298 * FPR registers. The userspace interface buffer layout is as follows.
1304 static int tm_cvsx_set(struct task_struct *target,
1305 const struct user_regset *regset,
1306 unsigned int pos, unsigned int count,
1307 const void *kbuf, const void __user *ubuf)
1312 if (!cpu_has_feature(CPU_FTR_TM))
1315 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1318 /* Flush the state */
1319 flush_tmregs_to_thread(target);
1320 flush_fp_to_thread(target);
1321 flush_altivec_to_thread(target);
1322 flush_vsx_to_thread(target);
1324 for (i = 0; i < 32 ; i++)
1325 buf[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
1327 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1328 buf, 0, 32 * sizeof(double));
1330 for (i = 0; i < 32 ; i++)
1331 target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
1337 * tm_spr_active - get active number of registers in TM SPR
1338 * @target: The target task.
1339 * @regset: The user regset structure.
1341 * This function checks the active number of available
1342 * regisers in the transactional memory SPR category.
1344 static int tm_spr_active(struct task_struct *target,
1345 const struct user_regset *regset)
1347 if (!cpu_has_feature(CPU_FTR_TM))
1354 * tm_spr_get - get the TM related SPR registers
1355 * @target: The target task.
1356 * @regset: The user regset structure.
1357 * @pos: The buffer position.
1358 * @count: Number of bytes to copy.
1359 * @kbuf: Kernel buffer to copy from.
1360 * @ubuf: User buffer to copy into.
1362 * This function gets transactional memory related SPR registers.
1363 * The userspace interface buffer layout is as follows.
/*
 * Buffer layout (one u64 each): TFHAR at offset 0, TEXASR at 8, TFIAR at 16.
 * The BUILD_BUG_ONs pin this adjacency in struct thread_struct.
 */
1371 static int tm_spr_get(struct task_struct *target,
1372 const struct user_regset *regset,
1373 unsigned int pos, unsigned int count,
1374 void *kbuf, void __user *ubuf)
1379 BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr));
1380 BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar));
1381 BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs));
1383 if (!cpu_has_feature(CPU_FTR_TM))
1386 /* Flush the states */
1387 flush_tmregs_to_thread(target);
1388 flush_fp_to_thread(target);
1389 flush_altivec_to_thread(target);
1391 /* TFHAR register */
1392 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1393 &target->thread.tm_tfhar, 0, sizeof(u64));
1395 /* TEXASR register */
1397 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1398 &target->thread.tm_texasr, sizeof(u64),
1401 /* TFIAR register */
1403 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1404 &target->thread.tm_tfiar,
1405 2 * sizeof(u64), 3 * sizeof(u64));
1410 * tm_spr_set - set the TM related SPR registers
1411 * @target: The target task.
1412 * @regset: The user regset structure.
1413 * @pos: The buffer position.
1414 * @count: Number of bytes to copy.
1415 * @kbuf: Kernel buffer to copy into.
1416 * @ubuf: User buffer to copy from.
1418 * This function sets transactional memory related SPR registers.
1419 * The userspace interface buffer layout is as follows.
/* Mirror of tm_spr_get: copies TFHAR/TEXASR/TFIAR in from offsets 0/8/16. */
1427 static int tm_spr_set(struct task_struct *target,
1428 const struct user_regset *regset,
1429 unsigned int pos, unsigned int count,
1430 const void *kbuf, const void __user *ubuf)
1435 BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr));
1436 BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar));
1437 BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs));
1439 if (!cpu_has_feature(CPU_FTR_TM))
1442 /* Flush the states */
1443 flush_tmregs_to_thread(target);
1444 flush_fp_to_thread(target);
1445 flush_altivec_to_thread(target);
1447 /* TFHAR register */
1448 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1449 &target->thread.tm_tfhar, 0, sizeof(u64));
1451 /* TEXASR register */
1453 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1454 &target->thread.tm_texasr, sizeof(u64),
1457 /* TFIAR register */
1459 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1460 &target->thread.tm_tfiar,
1461 2 * sizeof(u64), 3 * sizeof(u64));
/* .active hook: checkpointed TAR is available only inside an active transaction. */
1465 static int tm_tar_active(struct task_struct *target,
1466 const struct user_regset *regset)
1468 if (!cpu_has_feature(CPU_FTR_TM))
1471 if (MSR_TM_ACTIVE(target->thread.regs->msr))
/*
 * Copies the checkpointed TAR (thread.tm_tar, one u64) out to the regset
 * buffer. Requires CPU_FTR_TM and an active transaction.
 */
1477 static int tm_tar_get(struct task_struct *target,
1478 const struct user_regset *regset,
1479 unsigned int pos, unsigned int count,
1480 void *kbuf, void __user *ubuf)
1484 if (!cpu_has_feature(CPU_FTR_TM))
1487 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1490 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1491 &target->thread.tm_tar, 0, sizeof(u64));
/* Writes the checkpointed TAR (one u64) from the regset buffer; TM must be active. */
1495 static int tm_tar_set(struct task_struct *target,
1496 const struct user_regset *regset,
1497 unsigned int pos, unsigned int count,
1498 const void *kbuf, const void __user *ubuf)
1502 if (!cpu_has_feature(CPU_FTR_TM))
1505 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1508 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1509 &target->thread.tm_tar, 0, sizeof(u64));
/* .active hook: checkpointed PPR is available only inside an active transaction. */
1513 static int tm_ppr_active(struct task_struct *target,
1514 const struct user_regset *regset)
1516 if (!cpu_has_feature(CPU_FTR_TM))
1519 if (MSR_TM_ACTIVE(target->thread.regs->msr))
/* Copies the checkpointed PPR (thread.tm_ppr, one u64) out; TM must be active. */
1526 static int tm_ppr_get(struct task_struct *target,
1527 const struct user_regset *regset,
1528 unsigned int pos, unsigned int count,
1529 void *kbuf, void __user *ubuf)
1533 if (!cpu_has_feature(CPU_FTR_TM))
1536 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1539 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1540 &target->thread.tm_ppr, 0, sizeof(u64));
/* Writes the checkpointed PPR (one u64) from the regset buffer; TM must be active. */
1544 static int tm_ppr_set(struct task_struct *target,
1545 const struct user_regset *regset,
1546 unsigned int pos, unsigned int count,
1547 const void *kbuf, const void __user *ubuf)
1551 if (!cpu_has_feature(CPU_FTR_TM))
1554 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1557 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1558 &target->thread.tm_ppr, 0, sizeof(u64));
/* .active hook: checkpointed DSCR is available only inside an active transaction. */
1562 static int tm_dscr_active(struct task_struct *target,
1563 const struct user_regset *regset)
1565 if (!cpu_has_feature(CPU_FTR_TM))
1568 if (MSR_TM_ACTIVE(target->thread.regs->msr))
/* Copies the checkpointed DSCR (thread.tm_dscr, one u64) out; TM must be active. */
1574 static int tm_dscr_get(struct task_struct *target,
1575 const struct user_regset *regset,
1576 unsigned int pos, unsigned int count,
1577 void *kbuf, void __user *ubuf)
1581 if (!cpu_has_feature(CPU_FTR_TM))
1584 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1587 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1588 &target->thread.tm_dscr, 0, sizeof(u64));
/* Writes the checkpointed DSCR (one u64) from the regset buffer; TM must be active. */
1592 static int tm_dscr_set(struct task_struct *target,
1593 const struct user_regset *regset,
1594 unsigned int pos, unsigned int count,
1595 const void *kbuf, const void __user *ubuf)
1599 if (!cpu_has_feature(CPU_FTR_TM))
1602 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1605 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1606 &target->thread.tm_dscr, 0, sizeof(u64));
1609 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
/* Copies the live PPR (thread.ppr, one u64) out to the regset buffer. */
1612 static int ppr_get(struct task_struct *target,
1613 const struct user_regset *regset,
1614 unsigned int pos, unsigned int count,
1615 void *kbuf, void __user *ubuf)
1617 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1618 &target->thread.ppr, 0, sizeof(u64));
/* Writes the live PPR (one u64) from the regset buffer. */
1621 static int ppr_set(struct task_struct *target,
1622 const struct user_regset *regset,
1623 unsigned int pos, unsigned int count,
1624 const void *kbuf, const void __user *ubuf)
1626 return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1627 &target->thread.ppr, 0, sizeof(u64));
/* Copies the live DSCR (thread.dscr, one u64) out to the regset buffer. */
1630 static int dscr_get(struct task_struct *target,
1631 const struct user_regset *regset,
1632 unsigned int pos, unsigned int count,
1633 void *kbuf, void __user *ubuf)
1635 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1636 &target->thread.dscr, 0, sizeof(u64));
/* Writes the live DSCR (one u64) from the regset buffer. */
1638 static int dscr_set(struct task_struct *target,
1639 const struct user_regset *regset,
1640 unsigned int pos, unsigned int count,
1641 const void *kbuf, const void __user *ubuf)
1643 return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1644 &target->thread.dscr, 0, sizeof(u64));
1647 #ifdef CONFIG_PPC_BOOK3S_64
/* Copies the live TAR (thread.tar, one u64) out to the regset buffer. */
1648 static int tar_get(struct task_struct *target,
1649 const struct user_regset *regset,
1650 unsigned int pos, unsigned int count,
1651 void *kbuf, void __user *ubuf)
1653 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1654 &target->thread.tar, 0, sizeof(u64));
/* Writes the live TAR (one u64) from the regset buffer. */
1656 static int tar_set(struct task_struct *target,
1657 const struct user_regset *regset,
1658 unsigned int pos, unsigned int count,
1659 const void *kbuf, const void __user *ubuf)
1661 return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1662 &target->thread.tar, 0, sizeof(u64));
/*
 * .active hook for the EBB regset: needs ARCH_207S and checks whether the
 * task has used EBB (thread.used_ebb).
 */
1665 static int ebb_active(struct task_struct *target,
1666 const struct user_regset *regset)
1668 if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1671 if (target->thread.used_ebb)
/*
 * Copies EBBRR, EBBHR, BESCR out as three consecutive unsigned longs
 * starting at &thread.ebbrr; the BUILD_BUG_ONs pin that adjacency.
 * Requires ARCH_207S and that the task has used EBB.
 */
1677 static int ebb_get(struct task_struct *target,
1678 const struct user_regset *regset,
1679 unsigned int pos, unsigned int count,
1680 void *kbuf, void __user *ubuf)
1683 BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
1684 BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));
1686 if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1689 if (!target->thread.used_ebb)
1692 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1693 &target->thread.ebbrr, 0, 3 * sizeof(unsigned long));
/*
 * Writes EBBRR, EBBHR, BESCR from the regset buffer (offsets 0/1/2 words).
 * NOTE(review): the used_ebb test at 1710 has its taken-branch body elided
 * by this extraction — confirm its outcome against the full source.
 */
1696 static int ebb_set(struct task_struct *target,
1697 const struct user_regset *regset,
1698 unsigned int pos, unsigned int count,
1699 const void *kbuf, const void __user *ubuf)
1704 BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
1705 BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));
1707 if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1710 if (target->thread.used_ebb)
1713 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1714 &target->thread.ebbrr, 0, sizeof(unsigned long));
1717 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1718 &target->thread.ebbhr, sizeof(unsigned long),
1719 2 * sizeof(unsigned long));
1722 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1723 &target->thread.bescr,
1724 2 * sizeof(unsigned long), 3 * sizeof(unsigned long));
/* .active hook for the PMU regset: only present on ARCH_207S CPUs. */
1728 static int pmu_active(struct task_struct *target,
1729 const struct user_regset *regset)
1731 if (!cpu_has_feature(CPU_FTR_ARCH_207S))
/*
 * Copies SIAR, SDAR, SIER, MMCR2, MMCR0 out as five consecutive unsigned
 * longs starting at &thread.siar; the BUILD_BUG_ONs pin that layout.
 */
1737 static int pmu_get(struct task_struct *target,
1738 const struct user_regset *regset,
1739 unsigned int pos, unsigned int count,
1740 void *kbuf, void __user *ubuf)
1743 BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
1744 BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
1745 BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
1746 BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));
1748 if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1751 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1752 &target->thread.siar, 0,
1753 5 * sizeof(unsigned long));
/*
 * Writes SIAR, SDAR, SIER, MMCR2, MMCR0 one word at a time from buffer
 * offsets 0..4 (in words); mirrors the layout asserted in pmu_get.
 */
1756 static int pmu_set(struct task_struct *target,
1757 const struct user_regset *regset,
1758 unsigned int pos, unsigned int count,
1759 const void *kbuf, const void __user *ubuf)
1764 BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
1765 BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
1766 BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
1767 BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));
1769 if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1772 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1773 &target->thread.siar, 0,
1774 sizeof(unsigned long));
1777 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1778 &target->thread.sdar, sizeof(unsigned long),
1779 2 * sizeof(unsigned long));
1782 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1783 &target->thread.sier, 2 * sizeof(unsigned long),
1784 3 * sizeof(unsigned long));
1787 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1788 &target->thread.mmcr2, 3 * sizeof(unsigned long),
1789 4 * sizeof(unsigned long));
1792 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1793 &target->thread.mmcr0, 4 * sizeof(unsigned long),
1794 5 * sizeof(unsigned long));
1799 #ifdef CONFIG_PPC_MEM_KEYS
/* .active hook for the pkey regset: only when memory protection keys are enabled. */
1800 static int pkey_active(struct task_struct *target,
1801 const struct user_regset *regset)
1803 if (!arch_pkeys_enabled())
/*
 * Copies AMR, IAMR, UAMOR out as ELF_NPKEY consecutive unsigned longs
 * starting at &thread.amr; the BUILD_BUG_ONs pin that adjacency.
 */
1809 static int pkey_get(struct task_struct *target,
1810 const struct user_regset *regset,
1811 unsigned int pos, unsigned int count,
1812 void *kbuf, void __user *ubuf)
1814 BUILD_BUG_ON(TSO(amr) + sizeof(unsigned long) != TSO(iamr));
1815 BUILD_BUG_ON(TSO(iamr) + sizeof(unsigned long) != TSO(uamor));
1817 if (!arch_pkeys_enabled())
1820 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1821 &target->thread.amr, 0,
1822 ELF_NPKEY * sizeof(unsigned long));
/*
 * Writes the AMR only (IAMR/UAMOR are not userspace-settable): the request
 * must cover exactly the AMR word, and the new value is masked by UAMOR so
 * only user-controllable bits change.
 */
1825 static int pkey_set(struct task_struct *target,
1826 const struct user_regset *regset,
1827 unsigned int pos, unsigned int count,
1828 const void *kbuf, const void __user *ubuf)
1833 if (!arch_pkeys_enabled())
1836 /* Only the AMR can be set from userspace */
1837 if (pos != 0 || count != sizeof(new_amr))
1840 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1841 &new_amr, 0, sizeof(new_amr));
1845 /* UAMOR determines which bits of the AMR can be set from userspace. */
1846 target->thread.amr = (new_amr & target->thread.uamor) |
1847 (target->thread.amr & ~target->thread.uamor);
1851 #endif /* CONFIG_PPC_MEM_KEYS */
1854 * These are our native regset flavors.
/* Index constants into native_regsets[] (and, where present, compat_regsets[]). */
1856 enum powerpc_regset {
1859 #ifdef CONFIG_ALTIVEC
1868 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1869 REGSET_TM_CGPR, /* TM checkpointed GPR registers */
1870 REGSET_TM_CFPR, /* TM checkpointed FPR registers */
1871 REGSET_TM_CVMX, /* TM checkpointed VMX registers */
1872 REGSET_TM_CVSX, /* TM checkpointed VSX registers */
1873 REGSET_TM_SPR, /* TM specific SPR registers */
1874 REGSET_TM_CTAR, /* TM checkpointed TAR register */
1875 REGSET_TM_CPPR, /* TM checkpointed PPR register */
1876 REGSET_TM_CDSCR, /* TM checkpointed DSCR register */
1879 REGSET_PPR, /* PPR register */
1880 REGSET_DSCR, /* DSCR register */
1882 #ifdef CONFIG_PPC_BOOK3S_64
1883 REGSET_TAR, /* TAR register */
1884 REGSET_EBB, /* EBB registers */
1885 REGSET_PMR, /* Performance Monitor Registers */
1887 #ifdef CONFIG_PPC_MEM_KEYS
1888 REGSET_PKEY, /* AMR register */
/*
 * Native regset table: one entry per ELF note type, wiring each regset's
 * size/alignment/count to the get/set (and optional .active) handlers above.
 */
1892 static const struct user_regset native_regsets[] = {
1894 .core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
1895 .size = sizeof(long), .align = sizeof(long),
1896 .get = gpr_get, .set = gpr_set
1899 .core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
1900 .size = sizeof(double), .align = sizeof(double),
1901 .get = fpr_get, .set = fpr_set
1903 #ifdef CONFIG_ALTIVEC
1905 .core_note_type = NT_PPC_VMX, .n = 34,
1906 .size = sizeof(vector128), .align = sizeof(vector128),
1907 .active = vr_active, .get = vr_get, .set = vr_set
1912 .core_note_type = NT_PPC_VSX, .n = 32,
1913 .size = sizeof(double), .align = sizeof(double),
1914 .active = vsr_active, .get = vsr_get, .set = vsr_set
1919 .core_note_type = NT_PPC_SPE, .n = 35,
1920 .size = sizeof(u32), .align = sizeof(u32),
1921 .active = evr_active, .get = evr_get, .set = evr_set
1924 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1925 [REGSET_TM_CGPR] = {
1926 .core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
1927 .size = sizeof(long), .align = sizeof(long),
1928 .active = tm_cgpr_active, .get = tm_cgpr_get, .set = tm_cgpr_set
1930 [REGSET_TM_CFPR] = {
1931 .core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
1932 .size = sizeof(double), .align = sizeof(double),
1933 .active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set
1935 [REGSET_TM_CVMX] = {
1936 .core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
1937 .size = sizeof(vector128), .align = sizeof(vector128),
1938 .active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set
1940 [REGSET_TM_CVSX] = {
1941 .core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX,
1942 .size = sizeof(double), .align = sizeof(double),
1943 .active = tm_cvsx_active, .get = tm_cvsx_get, .set = tm_cvsx_set
1946 .core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG,
1947 .size = sizeof(u64), .align = sizeof(u64),
1948 .active = tm_spr_active, .get = tm_spr_get, .set = tm_spr_set
1950 [REGSET_TM_CTAR] = {
1951 .core_note_type = NT_PPC_TM_CTAR, .n = 1,
1952 .size = sizeof(u64), .align = sizeof(u64),
1953 .active = tm_tar_active, .get = tm_tar_get, .set = tm_tar_set
1955 [REGSET_TM_CPPR] = {
1956 .core_note_type = NT_PPC_TM_CPPR, .n = 1,
1957 .size = sizeof(u64), .align = sizeof(u64),
1958 .active = tm_ppr_active, .get = tm_ppr_get, .set = tm_ppr_set
1960 [REGSET_TM_CDSCR] = {
1961 .core_note_type = NT_PPC_TM_CDSCR, .n = 1,
1962 .size = sizeof(u64), .align = sizeof(u64),
1963 .active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set
1968 .core_note_type = NT_PPC_PPR, .n = 1,
1969 .size = sizeof(u64), .align = sizeof(u64),
1970 .get = ppr_get, .set = ppr_set
1973 .core_note_type = NT_PPC_DSCR, .n = 1,
1974 .size = sizeof(u64), .align = sizeof(u64),
1975 .get = dscr_get, .set = dscr_set
1978 #ifdef CONFIG_PPC_BOOK3S_64
1980 .core_note_type = NT_PPC_TAR, .n = 1,
1981 .size = sizeof(u64), .align = sizeof(u64),
1982 .get = tar_get, .set = tar_set
1985 .core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
1986 .size = sizeof(u64), .align = sizeof(u64),
1987 .active = ebb_active, .get = ebb_get, .set = ebb_set
1990 .core_note_type = NT_PPC_PMU, .n = ELF_NPMU,
1991 .size = sizeof(u64), .align = sizeof(u64),
1992 .active = pmu_active, .get = pmu_get, .set = pmu_set
1995 #ifdef CONFIG_PPC_MEM_KEYS
1997 .core_note_type = NT_PPC_PKEY, .n = ELF_NPKEY,
1998 .size = sizeof(u64), .align = sizeof(u64),
1999 .active = pkey_active, .get = pkey_get, .set = pkey_set
/* The native (64-bit or 32-bit matching the kernel) user_regset view. */
2004 static const struct user_regset_view user_ppc_native_view = {
2005 .name = UTS_MACHINE, .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
2006 .regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
2010 #include <linux/compat.h>
/*
 * Shared compat (32-bit view) GPR copy-out: emits each 64-bit reg truncated
 * to compat_ulong_t, with MSR synthesized via get_user_msr() at PT_MSR, then
 * zero-fills the remainder of the regset.
 * NOTE(review): elided lines include the kernel-buffer (k) branches — this
 * extraction shows mainly the __put_user (user-buffer) paths.
 */
2012 static int gpr32_get_common(struct task_struct *target,
2013 const struct user_regset *regset,
2014 unsigned int pos, unsigned int count,
2015 void *kbuf, void __user *ubuf,
2016 unsigned long *regs)
2018 compat_ulong_t *k = kbuf;
2019 compat_ulong_t __user *u = ubuf;
/* Work in units of 32-bit registers, not bytes, inside the copy loops. */
2023 count /= sizeof(reg);
2026 for (; count > 0 && pos < PT_MSR; --count)
2029 for (; count > 0 && pos < PT_MSR; --count)
2030 if (__put_user((compat_ulong_t) regs[pos++], u++))
2033 if (count > 0 && pos == PT_MSR) {
2034 reg = get_user_msr(target);
2037 else if (__put_user(reg, u++))
2044 for (; count > 0 && pos < PT_REGS_COUNT; --count)
2047 for (; count > 0 && pos < PT_REGS_COUNT; --count)
2048 if (__put_user((compat_ulong_t) regs[pos++], u++))
/* Back to bytes: pad anything past PT_REGS_COUNT with zeros. */
2054 count *= sizeof(reg);
2055 return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
2056 PT_REGS_COUNT * sizeof(reg), -1);
/*
 * Shared compat (32-bit view) GPR copy-in: reads compat_ulong_t values into
 * the 64-bit regs array, routing PT_MSR through set_user_msr() and PT_TRAP
 * through set_user_trap(); registers past PT_MAX_PUT_REG up to PT_TRAP are
 * read but their stores are elided here, and the tail is ignored.
 * NOTE(review): extraction omits the kernel-buffer branches and some stores —
 * verify against the full source.
 */
2059 static int gpr32_set_common(struct task_struct *target,
2060 const struct user_regset *regset,
2061 unsigned int pos, unsigned int count,
2062 const void *kbuf, const void __user *ubuf,
2063 unsigned long *regs)
2065 const compat_ulong_t *k = kbuf;
2066 const compat_ulong_t __user *u = ubuf;
/* Work in units of 32-bit registers inside the copy loops. */
2070 count /= sizeof(reg);
2073 for (; count > 0 && pos < PT_MSR; --count)
2076 for (; count > 0 && pos < PT_MSR; --count) {
2077 if (__get_user(reg, u++))
2083 if (count > 0 && pos == PT_MSR) {
2086 else if (__get_user(reg, u++))
2088 set_user_msr(target, reg);
2094 for (; count > 0 && pos <= PT_MAX_PUT_REG; --count)
2096 for (; count > 0 && pos < PT_TRAP; --count, ++pos)
2099 for (; count > 0 && pos <= PT_MAX_PUT_REG; --count) {
2100 if (__get_user(reg, u++))
2104 for (; count > 0 && pos < PT_TRAP; --count, ++pos)
2105 if (__get_user(reg, u++))
2109 if (count > 0 && pos == PT_TRAP) {
2112 else if (__get_user(reg, u++))
2114 set_user_trap(target, reg);
/* Back to bytes: silently ignore anything past PT_TRAP. */
2122 count *= sizeof(reg);
2123 return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
2124 (PT_TRAP + 1) * sizeof(reg), -1);
2127 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/* Compat view of the TM checkpointed GPRs: delegates to gpr32_get_common
 * over ckpt_regs instead of the live regs. */
2128 static int tm_cgpr32_get(struct task_struct *target,
2129 const struct user_regset *regset,
2130 unsigned int pos, unsigned int count,
2131 void *kbuf, void __user *ubuf)
2133 return gpr32_get_common(target, regset, pos, count, kbuf, ubuf,
2134 &target->thread.ckpt_regs.gpr[0]);
/* Compat write of the TM checkpointed GPRs via gpr32_set_common. */
2137 static int tm_cgpr32_set(struct task_struct *target,
2138 const struct user_regset *regset,
2139 unsigned int pos, unsigned int count,
2140 const void *kbuf, const void __user *ubuf)
2142 return gpr32_set_common(target, regset, pos, count, kbuf, ubuf,
2143 &target->thread.ckpt_regs.gpr[0]);
2145 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
/*
 * Compat GPR read for the live registers. If only a partial register set
 * was saved (!FULL_REGS), r14-r31 are poisoned with NV_REG_POISON so the
 * debugger sees recognizably-bogus values rather than stale data.
 */
2147 static int gpr32_get(struct task_struct *target,
2148 const struct user_regset *regset,
2149 unsigned int pos, unsigned int count,
2150 void *kbuf, void __user *ubuf)
2154 if (target->thread.regs == NULL)
2157 if (!FULL_REGS(target->thread.regs)) {
2159 * We have a partial register set.
2160 * Fill 14-31 with bogus values.
2162 for (i = 14; i < 32; i++)
2163 target->thread.regs->gpr[i] = NV_REG_POISON;
2165 return gpr32_get_common(target, regset, pos, count, kbuf, ubuf,
2166 &target->thread.regs->gpr[0]);
/* Compat GPR write for the live registers; requires a full register set. */
2169 static int gpr32_set(struct task_struct *target,
2170 const struct user_regset *regset,
2171 unsigned int pos, unsigned int count,
2172 const void *kbuf, const void __user *ubuf)
2174 if (target->thread.regs == NULL)
2177 CHECK_FULL_REGS(target->thread.regs);
2178 return gpr32_set_common(target, regset, pos, count, kbuf, ubuf,
2179 &target->thread.regs->gpr[0]);
2183 * These are the regset flavors matching the CONFIG_PPC32 native set.
/*
 * Compat regset table for 32-bit tracees on a 64-bit kernel: GPRs go through
 * the gpr32_*/tm_cgpr32_* truncating wrappers; most other regsets reuse the
 * same handlers as the native table.
 */
2185 static const struct user_regset compat_regsets[] = {
2187 .core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
2188 .size = sizeof(compat_long_t), .align = sizeof(compat_long_t),
2189 .get = gpr32_get, .set = gpr32_set
2192 .core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
2193 .size = sizeof(double), .align = sizeof(double),
2194 .get = fpr_get, .set = fpr_set
2196 #ifdef CONFIG_ALTIVEC
2198 .core_note_type = NT_PPC_VMX, .n = 34,
2199 .size = sizeof(vector128), .align = sizeof(vector128),
2200 .active = vr_active, .get = vr_get, .set = vr_set
2205 .core_note_type = NT_PPC_SPE, .n = 35,
2206 .size = sizeof(u32), .align = sizeof(u32),
2207 .active = evr_active, .get = evr_get, .set = evr_set
2210 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2211 [REGSET_TM_CGPR] = {
2212 .core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
2213 .size = sizeof(long), .align = sizeof(long),
2214 .active = tm_cgpr_active,
2215 .get = tm_cgpr32_get, .set = tm_cgpr32_set
2217 [REGSET_TM_CFPR] = {
2218 .core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
2219 .size = sizeof(double), .align = sizeof(double),
2220 .active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set
2222 [REGSET_TM_CVMX] = {
2223 .core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
2224 .size = sizeof(vector128), .align = sizeof(vector128),
2225 .active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set
2227 [REGSET_TM_CVSX] = {
2228 .core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX,
2229 .size = sizeof(double), .align = sizeof(double),
2230 .active = tm_cvsx_active, .get = tm_cvsx_get, .set = tm_cvsx_set
2233 .core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG,
2234 .size = sizeof(u64), .align = sizeof(u64),
2235 .active = tm_spr_active, .get = tm_spr_get, .set = tm_spr_set
2237 [REGSET_TM_CTAR] = {
2238 .core_note_type = NT_PPC_TM_CTAR, .n = 1,
2239 .size = sizeof(u64), .align = sizeof(u64),
2240 .active = tm_tar_active, .get = tm_tar_get, .set = tm_tar_set
2242 [REGSET_TM_CPPR] = {
2243 .core_note_type = NT_PPC_TM_CPPR, .n = 1,
2244 .size = sizeof(u64), .align = sizeof(u64),
2245 .active = tm_ppr_active, .get = tm_ppr_get, .set = tm_ppr_set
2247 [REGSET_TM_CDSCR] = {
2248 .core_note_type = NT_PPC_TM_CDSCR, .n = 1,
2249 .size = sizeof(u64), .align = sizeof(u64),
2250 .active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set
2255 .core_note_type = NT_PPC_PPR, .n = 1,
2256 .size = sizeof(u64), .align = sizeof(u64),
2257 .get = ppr_get, .set = ppr_set
2260 .core_note_type = NT_PPC_DSCR, .n = 1,
2261 .size = sizeof(u64), .align = sizeof(u64),
2262 .get = dscr_get, .set = dscr_set
2265 #ifdef CONFIG_PPC_BOOK3S_64
2267 .core_note_type = NT_PPC_TAR, .n = 1,
2268 .size = sizeof(u64), .align = sizeof(u64),
2269 .get = tar_get, .set = tar_set
2272 .core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
2273 .size = sizeof(u64), .align = sizeof(u64),
2274 .active = ebb_active, .get = ebb_get, .set = ebb_set
/* The 32-bit-compat user_regset view advertised as "ppc"/EM_PPC. */
2279 static const struct user_regset_view user_ppc_compat_view = {
2280 .name = "ppc", .e_machine = EM_PPC, .ei_osabi = ELF_OSABI,
2281 .regsets = compat_regsets, .n = ARRAY_SIZE(compat_regsets)
2283 #endif /* CONFIG_PPC64 */
/* Selects the compat view for TIF_32BIT tasks, otherwise the native view. */
2285 const struct user_regset_view *task_user_regset_view(struct task_struct *task)
2288 if (test_tsk_thread_flag(task, TIF_32BIT))
2289 return &user_ppc_compat_view;
2291 return &user_ppc_native_view;
/*
 * Arms hardware single-step on the tracee: BookE-style debug (DBCR0_IC +
 * MSR_DE) on ADV_DEBUG_REGS parts, classic MSR_SE (clearing branch-trace
 * MSR_BE) otherwise; marks the task TIF_SINGLESTEP either way.
 */
2295 void user_enable_single_step(struct task_struct *task)
2297 struct pt_regs *regs = task->thread.regs;
2300 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2301 task->thread.debug.dbcr0 &= ~DBCR0_BT;
2302 task->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
2303 regs->msr |= MSR_DE;
2305 regs->msr &= ~MSR_BE;
2306 regs->msr |= MSR_SE;
2309 set_tsk_thread_flag(task, TIF_SINGLESTEP);
/*
 * Arms branch-trace ("block step") mode: DBCR0_BT + MSR_DE on ADV_DEBUG_REGS
 * parts (note dbcr0 is assigned, not OR-ed, here), classic MSR_BE (clearing
 * MSR_SE) otherwise; also sets TIF_SINGLESTEP.
 */
2312 void user_enable_block_step(struct task_struct *task)
2314 struct pt_regs *regs = task->thread.regs;
2317 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2318 task->thread.debug.dbcr0 &= ~DBCR0_IC;
2319 task->thread.debug.dbcr0 = DBCR0_IDM | DBCR0_BT;
2320 regs->msr |= MSR_DE;
2322 regs->msr &= ~MSR_SE;
2323 regs->msr |= MSR_BE;
2326 set_tsk_thread_flag(task, TIF_SINGLESTEP);
/*
 * Disarms single/block stepping: clears DBCR0_IC|DBCR0_BT, and if no other
 * debug events remain active also drops DBCR0_IDM and MSR_DE (ADV_DEBUG_REGS);
 * classic parts just clear MSR_SE|MSR_BE. Clears TIF_SINGLESTEP.
 */
2329 void user_disable_single_step(struct task_struct *task)
2331 struct pt_regs *regs = task->thread.regs;
2334 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2336 * The logic to disable single stepping should be as
2337 * simple as turning off the Instruction Complete flag.
2338 * And, after doing so, if all debug flags are off, turn
2339 * off DBCR0(IDM) and MSR(DE) .... Torez
2341 task->thread.debug.dbcr0 &= ~(DBCR0_IC|DBCR0_BT);
2343 * Test to see if any of the DBCR_ACTIVE_EVENTS bits are set.
2345 if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
2346 task->thread.debug.dbcr1)) {
2348 * All debug events were off.....
2350 task->thread.debug.dbcr0 &= ~DBCR0_IDM;
2351 regs->msr &= ~MSR_DE;
2354 regs->msr &= ~(MSR_SE | MSR_BE);
2357 clear_tsk_thread_flag(task, TIF_SINGLESTEP);
2360 #ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
 * perf-event callback fired when a ptrace-installed HW breakpoint hits:
 * implements one-shot semantics by disabling the breakpoint in place.
 */
2361 void ptrace_triggered(struct perf_event *bp,
2362 struct perf_sample_data *data, struct pt_regs *regs)
2364 struct perf_event_attr attr;
2367 * Disable the breakpoint request here since ptrace has defined a
2368 * one-shot behaviour for breakpoint exceptions in PPC64.
2369 * The SIGTRAP signal is generated automatically for us in do_dabr().
2370 * We don't have to do anything about that here
2373 attr.disabled = true;
2374 modify_user_hw_breakpoint(bp, &attr);
2376 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
/*
 * PTRACE_SET_DEBUGREG backend: installs (or clears, when data == 0) the
 * single supported data breakpoint. Three build variants are visible:
 * DABR-style via the perf hw_breakpoint layer, DABR-style written directly
 * to thread.hw_brk, and ADV_DEBUG_REGS DAC1 programming.
 * NOTE(review): this extraction drops many lines (locals, returns, else
 * arms); treat the flow annotations below as partial.
 */
2378 static int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
2381 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2383 struct thread_struct *thread = &(task->thread);
2384 struct perf_event *bp;
2385 struct perf_event_attr attr;
2386 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2387 #ifndef CONFIG_PPC_ADV_DEBUG_REGS
2389 struct arch_hw_breakpoint hw_brk;
2392 /* For ppc64 we support one DABR and no IABR's at the moment (ppc64).
2393 * For embedded processors we support one DAC and no IAC's at the
2399 /* The bottom 3 bits in dabr are flags */
2400 if ((data & ~0x7UL) >= TASK_SIZE)
2403 #ifndef CONFIG_PPC_ADV_DEBUG_REGS
2404 /* For processors using DABR (i.e. 970), the bottom 3 bits are flags.
2405 * It was assumed, on previous implementations, that 3 bits were
2406 * passed together with the data address, fitting the design of the
2407 * DABR register, as follows:
2411 * bit 2: Breakpoint translation
2413 * Thus, we use them here as so.
2416 /* Ensure breakpoint translation bit is set */
2417 if (data && !(data & HW_BRK_TYPE_TRANSLATE))
2419 hw_brk.address = data & (~HW_BRK_TYPE_DABR);
2420 hw_brk.type = (data & HW_BRK_TYPE_DABR) | HW_BRK_TYPE_PRIV_ALL;
2422 set_bp = (data) && (hw_brk.type & HW_BRK_TYPE_RDWR);
2423 #ifdef CONFIG_HAVE_HW_BREAKPOINT
/* Reuse/modify an existing perf breakpoint if one is registered. */
2424 bp = thread->ptrace_bps[0];
2427 unregister_hw_breakpoint(bp);
2428 thread->ptrace_bps[0] = NULL;
2434 attr.bp_addr = hw_brk.address;
2435 arch_bp_generic_fields(hw_brk.type, &attr.bp_type);
2437 /* Enable breakpoint */
2438 attr.disabled = false;
2440 ret = modify_user_hw_breakpoint(bp, &attr);
2444 thread->ptrace_bps[0] = bp;
2445 thread->hw_brk = hw_brk;
2449 /* Create a new breakpoint request if one doesn't exist already */
2450 hw_breakpoint_init(&attr);
2451 attr.bp_addr = hw_brk.address;
2453 arch_bp_generic_fields(hw_brk.type,
2456 thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
2457 ptrace_triggered, NULL, task);
2459 thread->ptrace_bps[0] = NULL;
2463 #else /* !CONFIG_HAVE_HW_BREAKPOINT */
2464 if (set_bp && (!ppc_breakpoint_available()))
2466 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2467 task->thread.hw_brk = hw_brk;
2468 #else /* CONFIG_PPC_ADV_DEBUG_REGS */
2469 /* As described above, it was assumed 3 bits were passed with the data
2470 * address, but we will assume only the mode bits will be passed
2471 * as to not cause alignment restrictions for DAC-based processors.
2474 /* DAC's hold the whole address without any mode flags */
2475 task->thread.debug.dac1 = data & ~0x3UL;
/* dac1 == 0 means "clear": drop DAC1 R/W enables and, if nothing else is
 * active, IDM and MSR_DE too. */
2477 if (task->thread.debug.dac1 == 0) {
2478 dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W);
2479 if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
2480 task->thread.debug.dbcr1)) {
2481 task->thread.regs->msr &= ~MSR_DE;
2482 task->thread.debug.dbcr0 &= ~DBCR0_IDM;
2487 /* Read or Write bits must be set */
2489 if (!(data & 0x3UL))
2492 /* Set the Internal Debugging flag (IDM bit 1) for the DBCR0
2494 task->thread.debug.dbcr0 |= DBCR0_IDM;
2496 /* Check for write and read flags and set DBCR0
2498 dbcr_dac(task) &= ~(DBCR_DAC1R|DBCR_DAC1W);
2500 dbcr_dac(task) |= DBCR_DAC1R;
2502 dbcr_dac(task) |= DBCR_DAC1W;
2503 task->thread.regs->msr |= MSR_DE;
2504 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
2509 * Called by kernel/ptrace.c when detaching..
2511 * Make sure single step bits etc are not set.
2513 void ptrace_disable(struct task_struct *child)
2515 /* make sure the single step bit is not set. */
2516 user_disable_single_step(child);
2519 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * Allocates IAC (instruction address compare) slots for a ppc_hw_breakpoint
 * request. Ranged breakpoints consume an adjacent slot pair (1+2 or 3+4);
 * exact breakpoints take one slot, preferring an allocation that keeps a
 * pair free for later range requests. Finally enables IDM and MSR_DE.
 * NOTE(review): the extraction drops slot-number returns and error returns.
 */
2520 static long set_instruction_bp(struct task_struct *child,
2521 struct ppc_hw_breakpoint *bp_info)
2524 int slot1_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC1) != 0);
2525 int slot2_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC2) != 0);
2526 int slot3_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC3) != 0);
2527 int slot4_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC4) != 0);
/* A pair already configured in range mode counts as both slots busy. */
2529 if (dbcr_iac_range(child) & DBCR_IAC12MODE)
2531 if (dbcr_iac_range(child) & DBCR_IAC34MODE)
2534 if (bp_info->addr >= TASK_SIZE)
2537 if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) {
2539 /* Make sure range is valid. */
2540 if (bp_info->addr2 >= TASK_SIZE)
2543 /* We need a pair of IAC regsisters */
2544 if ((!slot1_in_use) && (!slot2_in_use)) {
2546 child->thread.debug.iac1 = bp_info->addr;
2547 child->thread.debug.iac2 = bp_info->addr2;
2548 child->thread.debug.dbcr0 |= DBCR0_IAC1;
2549 if (bp_info->addr_mode ==
2550 PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
2551 dbcr_iac_range(child) |= DBCR_IAC12X;
2553 dbcr_iac_range(child) |= DBCR_IAC12I;
2554 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
2555 } else if ((!slot3_in_use) && (!slot4_in_use)) {
2557 child->thread.debug.iac3 = bp_info->addr;
2558 child->thread.debug.iac4 = bp_info->addr2;
2559 child->thread.debug.dbcr0 |= DBCR0_IAC3;
2560 if (bp_info->addr_mode ==
2561 PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
2562 dbcr_iac_range(child) |= DBCR_IAC34X;
2564 dbcr_iac_range(child) |= DBCR_IAC34I;
2569 /* We only need one. If possible leave a pair free in
2570 * case a range is needed later
2572 if (!slot1_in_use) {
2574 * Don't use iac1 if iac1-iac2 are free and either
2575 * iac3 or iac4 (but not both) are free
2577 if (slot2_in_use || (slot3_in_use == slot4_in_use)) {
2579 child->thread.debug.iac1 = bp_info->addr;
2580 child->thread.debug.dbcr0 |= DBCR0_IAC1;
2584 if (!slot2_in_use) {
2586 child->thread.debug.iac2 = bp_info->addr;
2587 child->thread.debug.dbcr0 |= DBCR0_IAC2;
2588 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
2589 } else if (!slot3_in_use) {
2591 child->thread.debug.iac3 = bp_info->addr;
2592 child->thread.debug.dbcr0 |= DBCR0_IAC3;
2593 } else if (!slot4_in_use) {
2595 child->thread.debug.iac4 = bp_info->addr;
2596 child->thread.debug.dbcr0 |= DBCR0_IAC4;
2602 child->thread.debug.dbcr0 |= DBCR0_IDM;
2603 child->thread.regs->msr |= MSR_DE;
/*
 * Frees the IAC slot named by 'slot'. Deleting the first slot of a range
 * pair (1 or 3) also clears its partner and the range mode; deleting the
 * second slot of an active range is rejected ("used in a range").
 * NOTE(review): switch/case labels and return values are elided by this
 * extraction — the visible bodies correspond to slots 1..4 in order.
 */
2608 static int del_instruction_bp(struct task_struct *child, int slot)
2612 if ((child->thread.debug.dbcr0 & DBCR0_IAC1) == 0)
2615 if (dbcr_iac_range(child) & DBCR_IAC12MODE) {
2616 /* address range - clear slots 1 & 2 */
2617 child->thread.debug.iac2 = 0;
2618 dbcr_iac_range(child) &= ~DBCR_IAC12MODE;
2620 child->thread.debug.iac1 = 0;
2621 child->thread.debug.dbcr0 &= ~DBCR0_IAC1;
2624 if ((child->thread.debug.dbcr0 & DBCR0_IAC2) == 0)
2627 if (dbcr_iac_range(child) & DBCR_IAC12MODE)
2628 /* used in a range */
2630 child->thread.debug.iac2 = 0;
2631 child->thread.debug.dbcr0 &= ~DBCR0_IAC2;
2633 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
2635 if ((child->thread.debug.dbcr0 & DBCR0_IAC3) == 0)
2638 if (dbcr_iac_range(child) & DBCR_IAC34MODE) {
2639 /* address range - clear slots 3 & 4 */
2640 child->thread.debug.iac4 = 0;
2641 dbcr_iac_range(child) &= ~DBCR_IAC34MODE;
2643 child->thread.debug.iac3 = 0;
2644 child->thread.debug.dbcr0 &= ~DBCR0_IAC3;
2647 if ((child->thread.debug.dbcr0 & DBCR0_IAC4) == 0)
2650 if (dbcr_iac_range(child) & DBCR_IAC34MODE)
2651 /* Used in a range */
2653 child->thread.debug.iac4 = 0;
2654 child->thread.debug.dbcr0 &= ~DBCR0_IAC4;
/*
 * Allocates a DAC (data address compare) slot for a data breakpoint:
 * tries DAC1 first, then DAC2 unless the pair is consumed by a DAC range
 * (DBCR2_DAC12MODE). Programs R/W trigger enables, the compare address,
 * and — when DVCs exist — the data-value-compare byte-enable/mode fields.
 * Requires a condition mode whenever byte enables are given, and the
 * address to be a user address. Finally enables IDM and MSR_DE.
 */
2663 static int set_dac(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
2666 (bp_info->condition_mode >> PPC_BREAKPOINT_CONDITION_BE_SHIFT)
2668 int condition_mode =
2669 bp_info->condition_mode & PPC_BREAKPOINT_CONDITION_MODE;
2672 if (byte_enable && (condition_mode == 0))
2675 if (bp_info->addr >= TASK_SIZE)
2678 if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0) {
2680 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
2681 dbcr_dac(child) |= DBCR_DAC1R;
2682 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
2683 dbcr_dac(child) |= DBCR_DAC1W;
2684 child->thread.debug.dac1 = (unsigned long)bp_info->addr;
2685 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
2687 child->thread.debug.dvc1 =
2688 (unsigned long)bp_info->condition_value;
2689 child->thread.debug.dbcr2 |=
2690 ((byte_enable << DBCR2_DVC1BE_SHIFT) |
2691 (condition_mode << DBCR2_DVC1M_SHIFT));
2694 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2695 } else if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
2696 /* Both dac1 and dac2 are part of a range */
2699 } else if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0) {
2701 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
2702 dbcr_dac(child) |= DBCR_DAC2R;
2703 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
2704 dbcr_dac(child) |= DBCR_DAC2W;
2705 child->thread.debug.dac2 = (unsigned long)bp_info->addr;
2706 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
2708 child->thread.debug.dvc2 =
2709 (unsigned long)bp_info->condition_value;
2710 child->thread.debug.dbcr2 |=
2711 ((byte_enable << DBCR2_DVC2BE_SHIFT) |
2712 (condition_mode << DBCR2_DVC2M_SHIFT));
2717 child->thread.debug.dbcr0 |= DBCR0_IDM;
2718 child->thread.regs->msr |= MSR_DE;
/*
 * del_dac() - free a data address compare (DAC) watchpoint slot.
 * @child: traced task whose debug registers are updated
 * @slot:  DAC slot to release — presumably 1 selects dac1 and 2 selects
 *         dac2 (the caller passes data - 4); TODO confirm against the
 *         elided switch/if scaffolding.
 *
 * Clears the slot's address register and its R/W enable bits in DBCR0.
 * When the pair forms an address range (DBCR2_DAC12MODE) both halves are
 * torn down together, and on cores with DVC registers the condition
 * state (DVC value, byte-enable, mode) is cleared as well.
 */
2723 static int del_dac(struct task_struct *child, int slot)
2726 if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0)
2729 child->thread.debug.dac1 = 0;
2730 dbcr_dac(child) &= ~(DBCR_DAC1R | DBCR_DAC1W);
2731 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
/* Deleting dac1 while in range mode tears down the whole range. */
2732 if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
2733 child->thread.debug.dac2 = 0;
2734 child->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
2736 child->thread.debug.dbcr2 &= ~(DBCR2_DVC1M | DBCR2_DVC1BE);
2738 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
2739 child->thread.debug.dvc1 = 0;
2741 } else if (slot == 2) {
2742 if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0)
2745 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
/* dac2 that is half of an active range cannot be deleted alone. */
2746 if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE)
2747 /* Part of a range */
2749 child->thread.debug.dbcr2 &= ~(DBCR2_DVC2M | DBCR2_DVC2BE);
2751 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
2752 child->thread.debug.dvc2 = 0;
2754 child->thread.debug.dac2 = 0;
2755 dbcr_dac(child) &= ~(DBCR_DAC2R | DBCR_DAC2W);
2761 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
2763 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
/*
 * set_dac_range() - install a ranged or masked data watchpoint using the
 * dac1/dac2 register pair (DAC-range capable cores only).
 * @child:   traced task
 * @bp_info: user request; addr/addr2 bound the range, or addr2 is a bit
 *           mask in PPC_BREAKPOINT_MODE_MASK mode
 *
 * Validates the request (no DVC condition allowed, addresses must stay
 * in user space, the whole pair must be free), then programs dac1/dac2,
 * the matching DBCR2 range mode, and enables MSR_DE.
 *
 * NOTE(review): error returns and the success return value are elided in
 * this excerpt.
 */
2764 static int set_dac_range(struct task_struct *child,
2765 struct ppc_hw_breakpoint *bp_info)
2767 int mode = bp_info->addr_mode & PPC_BREAKPOINT_MODE_MASK;
2769 /* We don't allow range watchpoints to be used with DVC */
2770 if (bp_info->condition_mode)
2774 * Best effort to verify the address range. The user/supervisor bits
2775 * prevent trapping in kernel space, but let's fail on an obvious bad
2776 * range. The simple test on the mask is not fool-proof, and any
2777 * exclusive range will spill over into kernel space.
2779 if (bp_info->addr >= TASK_SIZE)
2781 if (mode == PPC_BREAKPOINT_MODE_MASK) {
2783 * dac2 is a bitmask. Don't allow a mask that makes a
2784 * kernel space address from a valid dac1 value
2786 if (~((unsigned long)bp_info->addr2) >= TASK_SIZE)
2790 * For range breakpoints, addr2 must also be a valid address
2792 if (bp_info->addr2 >= TASK_SIZE)
/* Both DACs are consumed by a range: fail if either is in use. */
2796 if (child->thread.debug.dbcr0 &
2797 (DBCR0_DAC1R | DBCR0_DAC1W | DBCR0_DAC2R | DBCR0_DAC2W))
2800 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
2801 child->thread.debug.dbcr0 |= (DBCR0_DAC1R | DBCR0_IDM);
2802 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
2803 child->thread.debug.dbcr0 |= (DBCR0_DAC1W | DBCR0_IDM);
2804 child->thread.debug.dac1 = bp_info->addr;
2805 child->thread.debug.dac2 = bp_info->addr2;
2806 if (mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
2807 child->thread.debug.dbcr2 |= DBCR2_DAC12M;
2808 else if (mode == PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
2809 child->thread.debug.dbcr2 |= DBCR2_DAC12MX;
2810 else /* PPC_BREAKPOINT_MODE_MASK */
2811 child->thread.debug.dbcr2 |= DBCR2_DAC12MM;
/* Allow debug exceptions to be taken for the traced task. */
2812 child->thread.regs->msr |= MSR_DE;
2816 #endif /* CONFIG_PPC_ADV_DEBUG_DAC_RANGE */
/*
 * ppc_set_hwdebug() - service the PPC_PTRACE_SETHWDEBUG request.
 * @child:   traced task
 * @bp_info: user-supplied hardware breakpoint description (version 1)
 *
 * Validates the request and installs it via whichever facility the core
 * provides: IAC/DAC debug registers on CONFIG_PPC_ADV_DEBUG_REGS cores,
 * the perf hw-breakpoint layer when CONFIG_HAVE_HW_BREAKPOINT, or the
 * classic single DABR-style slot otherwise.
 *
 * NOTE(review): several returns, braces and the slot-handle computation
 * are elided in this excerpt.
 */
2818 static long ppc_set_hwdebug(struct task_struct *child,
2819 struct ppc_hw_breakpoint *bp_info)
2821 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2823 struct thread_struct *thread = &(child->thread);
2824 struct perf_event *bp;
2825 struct perf_event_attr attr;
2826 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2827 #ifndef CONFIG_PPC_ADV_DEBUG_REGS
2828 struct arch_hw_breakpoint brk;
/* Only version 1 of the user ABI struct is understood. */
2831 if (bp_info->version != 1)
2833 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2835 * Check for invalid flags and combinations
2837 if ((bp_info->trigger_type == 0) ||
2838 (bp_info->trigger_type & ~(PPC_BREAKPOINT_TRIGGER_EXECUTE |
2839 PPC_BREAKPOINT_TRIGGER_RW)) ||
2840 (bp_info->addr_mode & ~PPC_BREAKPOINT_MODE_MASK) ||
2841 (bp_info->condition_mode &
2842 ~(PPC_BREAKPOINT_CONDITION_MODE |
2843 PPC_BREAKPOINT_CONDITION_BE_ALL)))
/* Cores without DVC registers cannot honor any condition. */
2845 #if CONFIG_PPC_ADV_DEBUG_DVCS == 0
2846 if (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
/* Execute triggers may not be combined with R/W or conditions. */
2850 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_EXECUTE) {
2851 if ((bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_EXECUTE) ||
2852 (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE))
2854 return set_instruction_bp(child, bp_info);
2856 if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
2857 return set_dac(child, bp_info);
2859 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2860 return set_dac_range(child, bp_info);
2864 #else /* !CONFIG_PPC_ADV_DEBUG_DVCS */
2866 * We only support one data breakpoint
2868 if ((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0 ||
2869 (bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0 ||
2870 bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
2873 if ((unsigned long)bp_info->addr >= TASK_SIZE)
/* DABR-style address is doubleword aligned; low bits carry flags. */
2876 brk.address = bp_info->addr & ~7UL;
2877 brk.type = HW_BRK_TYPE_TRANSLATE;
2879 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
2880 brk.type |= HW_BRK_TYPE_READ;
2881 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
2882 brk.type |= HW_BRK_TYPE_WRITE;
2883 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2885 * Check if the request is for 'range' breakpoints. We can
2886 * support it if range < 8 bytes.
2888 if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
2889 len = bp_info->addr2 - bp_info->addr;
2890 else if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
/* Only one perf-backed ptrace breakpoint slot exists per thread. */
2894 bp = thread->ptrace_bps[0];
2898 /* Create a new breakpoint request if one doesn't exist already */
2899 hw_breakpoint_init(&attr);
2900 attr.bp_addr = (unsigned long)bp_info->addr & ~HW_BREAKPOINT_ALIGN;
2902 arch_bp_generic_fields(brk.type, &attr.bp_type);
2904 thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
2905 ptrace_triggered, NULL, child);
/* Registration failed: drop the stale slot pointer. */
2907 thread->ptrace_bps[0] = NULL;
2912 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2914 if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT)
/* The single slot is busy if an address is already installed. */
2917 if (child->thread.hw_brk.address)
2920 if (!ppc_breakpoint_available())
2923 child->thread.hw_brk = brk;
2926 #endif /* !CONFIG_PPC_ADV_DEBUG_DVCS */
/*
 * ppc_del_hwdebug() - service the PPC_PTRACE_DELHWDEBUG request.
 * @child: traced task
 * @data:  breakpoint handle returned by PPC_PTRACE_SETHWDEBUG
 *
 * On ADV_DEBUG cores, low handles name instruction slots and handles
 * above 4 name data slots (hence the "data - 4" below); once no debug
 * events remain active, internal debug mode and MSR_DE are switched off.
 * Otherwise the perf hw-breakpoint (or the single classic slot) is torn
 * down.  Returns 0 or a negative errno (error paths partly elided here).
 */
2929 static long ppc_del_hwdebug(struct task_struct *child, long data)
2931 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2933 struct thread_struct *thread = &(child->thread);
2934 struct perf_event *bp;
2935 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2936 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2940 rc = del_instruction_bp(child, (int)data);
2942 rc = del_dac(child, (int)data - 4);
/* Disable the debug facility once the last event is gone. */
2945 if (!DBCR_ACTIVE_EVENTS(child->thread.debug.dbcr0,
2946 child->thread.debug.dbcr1)) {
2947 child->thread.debug.dbcr0 &= ~DBCR0_IDM;
2948 child->thread.regs->msr &= ~MSR_DE;
2956 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2957 bp = thread->ptrace_bps[0];
2959 unregister_hw_breakpoint(bp);
2960 thread->ptrace_bps[0] = NULL;
2964 #else /* CONFIG_HAVE_HW_BREAKPOINT */
2965 if (child->thread.hw_brk.address == 0)
2968 child->thread.hw_brk.address = 0;
2969 child->thread.hw_brk.type = 0;
2970 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
/*
 * arch_ptrace() - powerpc-specific ptrace request dispatcher.
 * @child:   task being traced
 * @request: PTRACE_* / PPC_PTRACE_* request code
 * @addr:    request-specific address argument
 * @data:    request-specific data argument (often a user-space pointer)
 *
 * Handles the powerpc-private requests — USER-area peeks/pokes that map
 * onto GPRs/FPRs, hardware debug register management, and regset-based
 * bulk register transfers — then falls back to the generic
 * ptrace_request() for everything else.
 *
 * NOTE(review): the switch statement itself, several braces, `break`s
 * and error returns are elided in this excerpt.
 */
2976 long arch_ptrace(struct task_struct *child, long request,
2977 unsigned long addr, unsigned long data)
2980 void __user *datavp = (void __user *) data;
2981 unsigned long __user *datalp = datavp;
2984 /* read the word at location addr in the USER area. */
2985 case PTRACE_PEEKUSR: {
2986 unsigned long index, tmp;
2989 /* convert to index and check */
/* 32-bit: offsets are 4-byte aligned; 64-bit branch checks 8-byte. */
2992 if ((addr & 3) || (index > PT_FPSCR)
2993 || (child->thread.regs == NULL))
2996 if ((addr & 7) || (index > PT_FPSCR))
3000 CHECK_FULL_REGS(child->thread.regs);
3001 if (index < PT_FPR0) {
3002 ret = ptrace_get_reg(child, (int) index, &tmp);
3006 unsigned int fpidx = index - PT_FPR0;
/* Sync live FP state into thread_struct before reading it. */
3008 flush_fp_to_thread(child);
3009 if (fpidx < (PT_FPSCR - PT_FPR0))
3010 if (IS_ENABLED(CONFIG_PPC32)) {
3011 // On 32-bit the index we are passed refers to 32-bit words
3012 tmp = ((u32 *)child->thread.fp_state.fpr)[fpidx];
3014 memcpy(&tmp, &child->thread.TS_FPR(fpidx),
3018 tmp = child->thread.fp_state.fpscr;
3020 ret = put_user(tmp, datalp);
3024 /* write the word at location addr in the USER area */
3025 case PTRACE_POKEUSR: {
3026 unsigned long index;
3029 /* convert to index and check */
3032 if ((addr & 3) || (index > PT_FPSCR)
3033 || (child->thread.regs == NULL))
3036 if ((addr & 7) || (index > PT_FPSCR))
3040 CHECK_FULL_REGS(child->thread.regs);
3041 if (index < PT_FPR0) {
3042 ret = ptrace_put_reg(child, index, data);
3044 unsigned int fpidx = index - PT_FPR0;
/* Sync live FP state out before overwriting part of it. */
3046 flush_fp_to_thread(child);
3047 if (fpidx < (PT_FPSCR - PT_FPR0))
3048 if (IS_ENABLED(CONFIG_PPC32)) {
3049 // On 32-bit the index we are passed refers to 32-bit words
3050 ((u32 *)child->thread.fp_state.fpr)[fpidx] = data;
3052 memcpy(&child->thread.TS_FPR(fpidx), &data,
3056 child->thread.fp_state.fpscr = data;
/* Describe the hardware debug capabilities of this core to userspace. */
3062 case PPC_PTRACE_GETHWDBGINFO: {
3063 struct ppc_debug_info dbginfo;
3065 dbginfo.version = 1;
3066 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
3067 dbginfo.num_instruction_bps = CONFIG_PPC_ADV_DEBUG_IACS;
3068 dbginfo.num_data_bps = CONFIG_PPC_ADV_DEBUG_DACS;
3069 dbginfo.num_condition_regs = CONFIG_PPC_ADV_DEBUG_DVCS;
3070 dbginfo.data_bp_alignment = 4;
3071 dbginfo.sizeof_condition = 4;
3072 dbginfo.features = PPC_DEBUG_FEATURE_INSN_BP_RANGE |
3073 PPC_DEBUG_FEATURE_INSN_BP_MASK;
3074 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
3076 PPC_DEBUG_FEATURE_DATA_BP_RANGE |
3077 PPC_DEBUG_FEATURE_DATA_BP_MASK;
3079 #else /* !CONFIG_PPC_ADV_DEBUG_REGS */
3080 dbginfo.num_instruction_bps = 0;
3081 if (ppc_breakpoint_available())
3082 dbginfo.num_data_bps = 1;
3084 dbginfo.num_data_bps = 0;
3085 dbginfo.num_condition_regs = 0;
3087 dbginfo.data_bp_alignment = 8;
3089 dbginfo.data_bp_alignment = 4;
3091 dbginfo.sizeof_condition = 0;
3092 #ifdef CONFIG_HAVE_HW_BREAKPOINT
3093 dbginfo.features = PPC_DEBUG_FEATURE_DATA_BP_RANGE;
3094 if (cpu_has_feature(CPU_FTR_DAWR))
3095 dbginfo.features |= PPC_DEBUG_FEATURE_DATA_BP_DAWR;
3097 dbginfo.features = 0;
3098 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
3099 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
3101 if (copy_to_user(datavp, &dbginfo,
3102 sizeof(struct ppc_debug_info)))
3107 case PPC_PTRACE_SETHWDEBUG: {
3108 struct ppc_hw_breakpoint bp_info;
3110 if (copy_from_user(&bp_info, datavp,
3111 sizeof(struct ppc_hw_breakpoint)))
3113 return ppc_set_hwdebug(child, &bp_info);
3116 case PPC_PTRACE_DELHWDEBUG: {
3117 ret = ppc_del_hwdebug(child, data);
3121 case PTRACE_GET_DEBUGREG: {
3122 #ifndef CONFIG_PPC_ADV_DEBUG_REGS
3123 unsigned long dabr_fake;
3126 /* We only support one DABR and no IABRS at the moment */
3129 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
3130 ret = put_user(child->thread.debug.dac1, datalp);
/* Synthesize a DABR-style value from the generic hw_brk state. */
3132 dabr_fake = ((child->thread.hw_brk.address & (~HW_BRK_TYPE_DABR)) |
3133 (child->thread.hw_brk.type & HW_BRK_TYPE_DABR));
3134 ret = put_user(dabr_fake, datalp);
3139 case PTRACE_SET_DEBUGREG:
3140 ret = ptrace_set_debugreg(child, addr, data);
3144 case PTRACE_GETREGS64:
3146 case PTRACE_GETREGS: /* Get all pt_regs from the child. */
3147 return copy_regset_to_user(child, &user_ppc_native_view,
3149 0, sizeof(struct pt_regs),
3153 case PTRACE_SETREGS64:
3155 case PTRACE_SETREGS: /* Set all gp regs in the child. */
3156 return copy_regset_from_user(child, &user_ppc_native_view,
3158 0, sizeof(struct pt_regs),
3161 case PTRACE_GETFPREGS: /* Get the child FPU state (FPR0...31 + FPSCR) */
3162 return copy_regset_to_user(child, &user_ppc_native_view,
3164 0, sizeof(elf_fpregset_t),
3167 case PTRACE_SETFPREGS: /* Set the child FPU state (FPR0...31 + FPSCR) */
3168 return copy_regset_from_user(child, &user_ppc_native_view,
3170 0, sizeof(elf_fpregset_t),
3173 #ifdef CONFIG_ALTIVEC
3174 case PTRACE_GETVRREGS:
3175 return copy_regset_to_user(child, &user_ppc_native_view,
3177 0, (33 * sizeof(vector128) +
3181 case PTRACE_SETVRREGS:
3182 return copy_regset_from_user(child, &user_ppc_native_view,
3184 0, (33 * sizeof(vector128) +
3189 case PTRACE_GETVSRREGS:
3190 return copy_regset_to_user(child, &user_ppc_native_view,
3192 0, 32 * sizeof(double),
3195 case PTRACE_SETVSRREGS:
3196 return copy_regset_from_user(child, &user_ppc_native_view,
3198 0, 32 * sizeof(double),
3202 case PTRACE_GETEVRREGS:
3203 /* Get the child spe register state. */
3204 return copy_regset_to_user(child, &user_ppc_native_view,
3205 REGSET_SPE, 0, 35 * sizeof(u32),
3208 case PTRACE_SETEVRREGS:
3209 /* Set the child spe register state. */
3210 return copy_regset_from_user(child, &user_ppc_native_view,
3211 REGSET_SPE, 0, 35 * sizeof(u32),
/* Anything not handled above goes to the generic implementation. */
3216 ret = ptrace_request(child, request, addr, data);
3222 #ifdef CONFIG_SECCOMP
/*
 * do_seccomp() - run seccomp filtering for the current syscall.
 * @regs: register state of the current task at syscall entry
 *
 * Returns 0 when the syscall may proceed, non-zero when seccomp rejected
 * it (the early-return lines are elided in this excerpt).  The stub
 * variant at the bottom is compiled when CONFIG_SECCOMP is off.
 */
3223 static int do_seccomp(struct pt_regs *regs)
3225 if (!test_thread_flag(TIF_SECCOMP))
3229 * The ABI we present to seccomp tracers is that r3 contains
3230 * the syscall return value and orig_gpr3 contains the first
3231 * syscall parameter. This is different to the ptrace ABI where
3232 * both r3 and orig_gpr3 contain the first syscall parameter.
3234 regs->gpr[3] = -ENOSYS;
3237 * We use the __ version here because we have already checked
3238 * TIF_SECCOMP. If this fails, there is nothing left to do, we
3239 * have already loaded -ENOSYS into r3, or seccomp has put
3240 * something else in r3 (via SECCOMP_RET_ERRNO/TRACE).
3242 if (__secure_computing(NULL))
3246 * The syscall was allowed by seccomp, restore the register
3247 * state to what audit expects.
3248 * Note that we use orig_gpr3, which means a seccomp tracer can
3249 * modify the first syscall parameter (in orig_gpr3) and also
3250 * allow the syscall to proceed.
3252 regs->gpr[3] = regs->orig_gpr3;
3257 static inline int do_seccomp(struct pt_regs *regs) { return 0; }
3258 #endif /* CONFIG_SECCOMP */
3261 * do_syscall_trace_enter() - Do syscall tracing on kernel entry.
3262 * @regs: the pt_regs of the task to trace (current)
3264 * Performs various types of tracing on syscall entry. This includes seccomp,
3265 * ptrace, syscall tracepoints and audit.
3267 * The pt_regs are potentially visible to userspace via ptrace, so their
3270 * One or more of the tracers may modify the contents of pt_regs, in particular
3271 * to modify arguments or even the syscall number itself.
3273 * It's also possible that a tracer can choose to reject the system call. In
3274 * that case this function will return an illegal syscall number, and will put
3275 * an appropriate return value in regs->r3.
3277 * Return: the (possibly changed) syscall number.
3279 long do_syscall_trace_enter(struct pt_regs *regs)
3284 * The tracer may decide to abort the syscall, if so tracehook
3285 * will return !0. Note that the tracer may also just change
3286 * regs->gpr[0] to an invalid syscall number, that is handled
3287 * below on the exit path.
3289 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
3290 tracehook_report_syscall_entry(regs))
3293 /* Run seccomp after ptrace; allow it to set gpr[3]. */
3294 if (do_seccomp(regs))
3297 /* Avoid trace and audit when syscall is invalid. */
3298 if (regs->gpr[0] >= NR_syscalls)
3301 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
3302 trace_sys_enter(regs, regs->gpr[0]);
/* 64-bit tasks: audit the raw argument registers as-is. */
3305 if (!is_32bit_task())
3306 audit_syscall_entry(regs->gpr[0], regs->gpr[3], regs->gpr[4],
3307 regs->gpr[5], regs->gpr[6]);
/* 32-bit tasks: truncate arguments to their low 32 bits for audit. */
3310 audit_syscall_entry(regs->gpr[0],
3311 regs->gpr[3] & 0xffffffff,
3312 regs->gpr[4] & 0xffffffff,
3313 regs->gpr[5] & 0xffffffff,
3314 regs->gpr[6] & 0xffffffff);
3316 /* Return the possibly modified but valid syscall number */
3317 return regs->gpr[0];
3321 * If we are aborting explicitly, or if the syscall number is
3322 * now invalid, set the return value to -ENOSYS.
3324 regs->gpr[3] = -ENOSYS;
3328 void do_syscall_trace_leave(struct pt_regs *regs)
3332 audit_syscall_exit(regs);
3334 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
3335 trace_sys_exit(regs, regs->result);
3337 step = test_thread_flag(TIF_SINGLESTEP);
3338 if (step || test_thread_flag(TIF_SYSCALL_TRACE))
3339 tracehook_report_syscall_exit(regs, step);