GNU Linux-libre 4.14.332-gnu1
arch/s390/kernel/ptrace.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  Ptrace user space interface.
4  *
5  *    Copyright IBM Corp. 1999, 2010
6  *    Author(s): Denis Joseph Barrow
7  *               Martin Schwidefsky (schwidefsky@de.ibm.com)
8  */
9
10 #include <linux/kernel.h>
11 #include <linux/sched.h>
12 #include <linux/sched/task_stack.h>
13 #include <linux/mm.h>
14 #include <linux/smp.h>
15 #include <linux/errno.h>
16 #include <linux/ptrace.h>
17 #include <linux/user.h>
18 #include <linux/security.h>
19 #include <linux/audit.h>
20 #include <linux/signal.h>
21 #include <linux/elf.h>
22 #include <linux/regset.h>
23 #include <linux/tracehook.h>
24 #include <linux/seccomp.h>
25 #include <linux/compat.h>
26 #include <trace/syscall.h>
27 #include <asm/segment.h>
28 #include <asm/page.h>
29 #include <asm/pgtable.h>
30 #include <asm/pgalloc.h>
31 #include <linux/uaccess.h>
32 #include <asm/unistd.h>
33 #include <asm/switch_to.h>
34 #include "entry.h"
35
36 #ifdef CONFIG_COMPAT
37 #include "compat_ptrace.h"
38 #endif
39
40 #define CREATE_TRACE_POINTS
41 #include <trace/events/syscalls.h>
42
43 void update_cr_regs(struct task_struct *task)
44 {
45         struct pt_regs *regs = task_pt_regs(task);
46         struct thread_struct *thread = &task->thread;
47         struct per_regs old, new;
48         unsigned long cr0_old, cr0_new;
49         unsigned long cr2_old, cr2_new;
50         int cr0_changed, cr2_changed;
51
52         __ctl_store(cr0_old, 0, 0);
53         __ctl_store(cr2_old, 2, 2);
54         cr0_new = cr0_old;
55         cr2_new = cr2_old;
56         /* Take care of the enable/disable of transactional execution. */
57         if (MACHINE_HAS_TE) {
58                 /* Set or clear transaction execution TXC bit 8. */
59                 cr0_new |= (1UL << 55);
60                 if (task->thread.per_flags & PER_FLAG_NO_TE)
61                         cr0_new &= ~(1UL << 55);
62                 /* Set or clear transaction execution TDC bits 62 and 63. */
63                 cr2_new &= ~3UL;
64                 if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) {
65                         if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND)
66                                 cr2_new |= 1UL;
67                         else
68                                 cr2_new |= 2UL;
69                 }
70         }
71         /* Take care of enable/disable of guarded storage. */
72         if (MACHINE_HAS_GS) {
73                 cr2_new &= ~(1UL << 4);
74                 if (task->thread.gs_cb)
75                         cr2_new |= (1UL << 4);
76         }
77         /* Load control register 0/2 iff changed */
78         cr0_changed = cr0_new != cr0_old;
79         cr2_changed = cr2_new != cr2_old;
80         if (cr0_changed)
81                 __ctl_load(cr0_new, 0, 0);
82         if (cr2_changed)
83                 __ctl_load(cr2_new, 2, 2);
84         /* Copy user specified PER registers */
85         new.control = thread->per_user.control;
86         new.start = thread->per_user.start;
87         new.end = thread->per_user.end;
88
89         /* merge TIF_SINGLE_STEP into user specified PER registers. */
90         if (test_tsk_thread_flag(task, TIF_SINGLE_STEP) ||
91             test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP)) {
92                 if (test_tsk_thread_flag(task, TIF_BLOCK_STEP))
93                         new.control |= PER_EVENT_BRANCH;
94                 else
95                         new.control |= PER_EVENT_IFETCH;
96                 new.control |= PER_CONTROL_SUSPENSION;
97                 new.control |= PER_EVENT_TRANSACTION_END;
98                 if (test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP))
99                         new.control |= PER_EVENT_IFETCH;
100                 new.start = 0;
101                 new.end = -1UL;
102         }
103
104         /* Take care of the PER enablement bit in the PSW. */
105         if (!(new.control & PER_EVENT_MASK)) {
106                 regs->psw.mask &= ~PSW_MASK_PER;
107                 return;
108         }
109         regs->psw.mask |= PSW_MASK_PER;
110         __ctl_store(old, 9, 11);
111         if (memcmp(&new, &old, sizeof(struct per_regs)) != 0)
112                 __ctl_load(new, 9, 11);
113 }
114
115 void user_enable_single_step(struct task_struct *task)
116 {
117         clear_tsk_thread_flag(task, TIF_BLOCK_STEP);
118         set_tsk_thread_flag(task, TIF_SINGLE_STEP);
119 }
120
121 void user_disable_single_step(struct task_struct *task)
122 {
123         clear_tsk_thread_flag(task, TIF_BLOCK_STEP);
124         clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
125 }
126
127 void user_enable_block_step(struct task_struct *task)
128 {
129         set_tsk_thread_flag(task, TIF_SINGLE_STEP);
130         set_tsk_thread_flag(task, TIF_BLOCK_STEP);
131 }
132
133 /*
134  * Called by kernel/ptrace.c when detaching.
135  *
136  * Clear all debugging related fields.
137  */
138 void ptrace_disable(struct task_struct *task)
139 {
140         memset(&task->thread.per_user, 0, sizeof(task->thread.per_user));
141         memset(&task->thread.per_event, 0, sizeof(task->thread.per_event));
142         clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
143         clear_pt_regs_flag(task_pt_regs(task), PIF_PER_TRAP);
144         task->thread.per_flags = 0;
145 }
146
147 #define __ADDR_MASK 7
148
149 static inline unsigned long __peek_user_per(struct task_struct *child,
150                                             addr_t addr)
151 {
152         struct per_struct_kernel *dummy = NULL;
153
154         if (addr == (addr_t) &dummy->cr9)
155                 /* Control bits of the active per set. */
156                 return test_thread_flag(TIF_SINGLE_STEP) ?
157                         PER_EVENT_IFETCH : child->thread.per_user.control;
158         else if (addr == (addr_t) &dummy->cr10)
159                 /* Start address of the active per set. */
160                 return test_thread_flag(TIF_SINGLE_STEP) ?
161                         0 : child->thread.per_user.start;
162         else if (addr == (addr_t) &dummy->cr11)
163                 /* End address of the active per set. */
164                 return test_thread_flag(TIF_SINGLE_STEP) ?
165                         -1UL : child->thread.per_user.end;
166         else if (addr == (addr_t) &dummy->bits)
167                 /* Single-step bit. */
168                 return test_thread_flag(TIF_SINGLE_STEP) ?
169                         (1UL << (BITS_PER_LONG - 1)) : 0;
170         else if (addr == (addr_t) &dummy->starting_addr)
171                 /* Start address of the user specified per set. */
172                 return child->thread.per_user.start;
173         else if (addr == (addr_t) &dummy->ending_addr)
174                 /* End address of the user specified per set. */
175                 return child->thread.per_user.end;
176         else if (addr == (addr_t) &dummy->perc_atmid)
177                 /* PER code, ATMID and AI of the last PER trap */
178                 return (unsigned long)
179                         child->thread.per_event.cause << (BITS_PER_LONG - 16);
180         else if (addr == (addr_t) &dummy->address)
181                 /* Address of the last PER trap */
182                 return child->thread.per_event.address;
183         else if (addr == (addr_t) &dummy->access_id)
184                 /* Access id of the last PER trap */
185                 return (unsigned long)
186                         child->thread.per_event.paid << (BITS_PER_LONG - 8);
187         return 0;
188 }
189
190 /*
191  * Read the word at offset addr from the user area of a process. The
192  * trouble here is that the information is littered over different
193  * locations. The process registers are found on the kernel stack,
194  * the floating point stuff and the trace settings are stored in
195  * the task structure. In addition the different structures in
196  * struct user contain pad bytes that should be read as zeroes.
197  * Lovely...
198  */
199 static unsigned long __peek_user(struct task_struct *child, addr_t addr)
200 {
201         struct user *dummy = NULL;
202         addr_t offset, tmp;
203
204         if (addr < (addr_t) &dummy->regs.acrs) {
205                 /*
206                  * psw and gprs are stored on the stack
207                  */
208                 tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr);
209                 if (addr == (addr_t) &dummy->regs.psw.mask) {
210                         /* Return a clean psw mask. */
211                         tmp &= PSW_MASK_USER | PSW_MASK_RI;
212                         tmp |= PSW_USER_BITS;
213                 }
214
215         } else if (addr < (addr_t) &dummy->regs.orig_gpr2) {
216                 /*
217                  * access registers are stored in the thread structure
218                  */
219                 offset = addr - (addr_t) &dummy->regs.acrs;
220                 /*
221                  * Very special case: old & broken 64 bit gdb reading
222                  * from acrs[15]. Result is a 64 bit value. Read the
223                  * 32 bit acrs[15] value and shift it by 32. Sick...
224                  */
225                 if (addr == (addr_t) &dummy->regs.acrs[15])
226                         tmp = ((unsigned long) child->thread.acrs[15]) << 32;
227                 else
228                         tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);
229
230         } else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
231                 /*
232                  * orig_gpr2 is stored on the kernel stack
233                  */
234                 tmp = (addr_t) task_pt_regs(child)->orig_gpr2;
235
236         } else if (addr < (addr_t) &dummy->regs.fp_regs) {
237                 /*
238                  * prevent reads of padding hole between
239                  * orig_gpr2 and fp_regs on s390.
240                  */
241                 tmp = 0;
242
243         } else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) {
244                 /*
245                  * floating point control reg. is in the thread structure
246                  */
247                 tmp = child->thread.fpu.fpc;
248                 tmp <<= BITS_PER_LONG - 32;
249
250         } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
251                 /*
252                  * floating point regs. are either in child->thread.fpu
253                  * or the child->thread.fpu.vxrs array
254                  */
255                 offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
256                 if (MACHINE_HAS_VX)
257                         tmp = *(addr_t *)
258                                ((addr_t) child->thread.fpu.vxrs + 2*offset);
259                 else
260                         tmp = *(addr_t *)
261                                ((addr_t) child->thread.fpu.fprs + offset);
262
263         } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
264                 /*
265                  * Handle access to the per_info structure.
266                  */
267                 addr -= (addr_t) &dummy->regs.per_info;
268                 tmp = __peek_user_per(child, addr);
269
270         } else
271                 tmp = 0;
272
273         return tmp;
274 }
275
276 static int
277 peek_user(struct task_struct *child, addr_t addr, addr_t data)
278 {
279         addr_t tmp, mask;
280
281         /*
282          * Stupid gdb peeks/pokes the access registers in 64 bit with
283          * an alignment of 4. Programmers from hell...
284          */
285         mask = __ADDR_MASK;
286         if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
287             addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
288                 mask = 3;
289         if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
290                 return -EIO;
291
292         tmp = __peek_user(child, addr);
293         return put_user(tmp, (addr_t __user *) data);
294 }
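
/*
 * Example (userspace sketch, not taken from this file): a debugger reads a
 * single word of the user area with PTRACE_PEEKUSR and an offset into
 * struct user; the request ends up in peek_user()/__peek_user() above.
 * Header names and the struct user layout below are assumptions based on
 * the glibc and uapi headers.
 *
 *      #include <stddef.h>
 *      #include <errno.h>
 *      #include <sys/ptrace.h>
 *      #include <sys/user.h>                   for struct user
 *
 *      errno = 0;
 *      long psw_mask = ptrace(PTRACE_PEEKUSER, pid,
 *                             offsetof(struct user, regs.psw.mask), 0L);
 *      if (psw_mask == -1 && errno)
 *              perror("PTRACE_PEEKUSER");
 */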
295
296 static inline void __poke_user_per(struct task_struct *child,
297                                    addr_t addr, addr_t data)
298 {
299         struct per_struct_kernel *dummy = NULL;
300
301         /*
302          * There are only three fields in the per_info struct that the
303          * debugger user can write to.
304          * 1) cr9: the debugger wants to set a new PER event mask
305          * 2) starting_addr: the debugger wants to set a new starting
306          *    address to use with the PER event mask.
307          * 3) ending_addr: the debugger wants to set a new ending
308          *    address to use with the PER event mask.
309          * The user specified PER event mask and the start and end
310          * addresses are used only if single stepping is not in effect.
311          * Writes to any other field in per_info are ignored.
312          */
313         if (addr == (addr_t) &dummy->cr9)
314                 /* PER event mask of the user specified per set. */
315                 child->thread.per_user.control =
316                         data & (PER_EVENT_MASK | PER_CONTROL_MASK);
317         else if (addr == (addr_t) &dummy->starting_addr)
318                 /* Starting address of the user specified per set. */
319                 child->thread.per_user.start = data;
320         else if (addr == (addr_t) &dummy->ending_addr)
321                 /* Ending address of the user specified per set. */
322                 child->thread.per_user.end = data;
323 }
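
/*
 * Example (userspace sketch): arming a PER range from a debugger only needs
 * the three writable fields handled above. The offsets assume the 64 bit
 * per_info layout implied by per_struct_kernel (cr9 at offset 0,
 * starting_addr and ending_addr as the 5th and 6th longword); the event
 * mask value shown is an assumption and should be checked against the PER
 * documentation.
 *
 *      unsigned long per = offsetof(struct user, regs.per_info);
 *
 *      ptrace(PTRACE_POKEUSER, pid, per + 0 * 8, 0x20000000UL);  PER event mask (cr9)
 *      ptrace(PTRACE_POKEUSER, pid, per + 4 * 8, start_addr);    user specified start
 *      ptrace(PTRACE_POKEUSER, pid, per + 5 * 8, end_addr);      user specified end
 */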
324
325 static void fixup_int_code(struct task_struct *child, addr_t data)
326 {
327         struct pt_regs *regs = task_pt_regs(child);
328         int ilc = regs->int_code >> 16;
329         u16 insn;
330
331         if (ilc > 6)
332                 return;
333
334         if (ptrace_access_vm(child, regs->psw.addr - (regs->int_code >> 16),
335                         &insn, sizeof(insn), FOLL_FORCE) != sizeof(insn))
336                 return;
337
338         /* double check that tracee stopped on svc instruction */
339         if ((insn >> 8) != 0xa)
340                 return;
341
342         regs->int_code = 0x20000 | (data & 0xffff);
343 }
344 /*
345  * Write a word to the user area of a process at location addr. This
346  * operation does have an additional problem compared to peek_user.
347  * Stores to the program status word and to the floating point
348  * control register need to be checked for validity.
349  */
350 static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
351 {
352         struct user *dummy = NULL;
353         addr_t offset;
354
355
356         if (addr < (addr_t) &dummy->regs.acrs) {
357                 struct pt_regs *regs = task_pt_regs(child);
358                 /*
359                  * psw and gprs are stored on the stack
360                  */
361                 if (addr == (addr_t) &dummy->regs.psw.mask) {
362                         unsigned long mask = PSW_MASK_USER;
363
364                         mask |= is_ri_task(child) ? PSW_MASK_RI : 0;
365                         if ((data ^ PSW_USER_BITS) & ~mask)
366                                 /* Invalid psw mask. */
367                                 return -EINVAL;
368                         if ((data & PSW_MASK_ASC) == PSW_ASC_HOME)
369                                 /* Invalid address-space-control bits */
370                                 return -EINVAL;
371                         if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))
372                                 /* Invalid addressing mode bits */
373                                 return -EINVAL;
374                 }
375
376                 if (test_pt_regs_flag(regs, PIF_SYSCALL) &&
377                         addr == offsetof(struct user, regs.gprs[2]))
378                         fixup_int_code(child, data);
379                 *(addr_t *)((addr_t) &regs->psw + addr) = data;
380
381         } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
382                 /*
383                  * access registers are stored in the thread structure
384                  */
385                 offset = addr - (addr_t) &dummy->regs.acrs;
386                 /*
387                  * Very special case: old & broken 64 bit gdb writing
388                  * to acrs[15] with a 64 bit value. Ignore the lower
389                  * half of the value and write the upper 32 bit to
390                  * acrs[15]. Sick...
391                  */
392                 if (addr == (addr_t) &dummy->regs.acrs[15])
393                         child->thread.acrs[15] = (unsigned int) (data >> 32);
394                 else
395                         *(addr_t *)((addr_t) &child->thread.acrs + offset) = data;
396
397         } else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
398                 /*
399                  * orig_gpr2 is stored on the kernel stack
400                  */
401                 task_pt_regs(child)->orig_gpr2 = data;
402
403         } else if (addr < (addr_t) &dummy->regs.fp_regs) {
404                 /*
405                  * prevent writes of padding hole between
406                  * orig_gpr2 and fp_regs on s390.
407                  */
408                 return 0;
409
410         } else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) {
411                 /*
412                  * floating point control reg. is in the thread structure
413                  */
414                 if ((unsigned int) data != 0 ||
415                     test_fp_ctl(data >> (BITS_PER_LONG - 32)))
416                         return -EINVAL;
417                 child->thread.fpu.fpc = data >> (BITS_PER_LONG - 32);
418
419         } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
420                 /*
421                  * floating point regs. are either in child->thread.fpu
422                  * or the child->thread.fpu.vxrs array
423                  */
424                 offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
425                 if (MACHINE_HAS_VX)
426                         *(addr_t *)((addr_t)
427                                 child->thread.fpu.vxrs + 2*offset) = data;
428                 else
429                         *(addr_t *)((addr_t)
430                                 child->thread.fpu.fprs + offset) = data;
431
432         } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
433                 /*
434                  * Handle access to the per_info structure.
435                  */
436                 addr -= (addr_t) &dummy->regs.per_info;
437                 __poke_user_per(child, addr, data);
438
439         }
440
441         return 0;
442 }
443
444 static int poke_user(struct task_struct *child, addr_t addr, addr_t data)
445 {
446         addr_t mask;
447
448         /*
449          * Stupid gdb peeks/pokes the access registers in 64 bit with
450          * an alignment of 4. Programmers from hell indeed...
451          */
452         mask = __ADDR_MASK;
453         if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
454             addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
455                 mask = 3;
456         if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
457                 return -EIO;
458
459         return __poke_user(child, addr, data);
460 }
461
462 long arch_ptrace(struct task_struct *child, long request,
463                  unsigned long addr, unsigned long data)
464 {
465         ptrace_area parea;
466         int copied, ret;
467
468         switch (request) {
469         case PTRACE_PEEKUSR:
470                 /* read the word at location addr in the USER area. */
471                 return peek_user(child, addr, data);
472
473         case PTRACE_POKEUSR:
474                 /* write the word at location addr in the USER area */
475                 return poke_user(child, addr, data);
476
477         case PTRACE_PEEKUSR_AREA:
478         case PTRACE_POKEUSR_AREA:
479                 if (copy_from_user(&parea, (void __force __user *) addr,
480                                                         sizeof(parea)))
481                         return -EFAULT;
482                 addr = parea.kernel_addr;
483                 data = parea.process_addr;
484                 copied = 0;
485                 while (copied < parea.len) {
486                         if (request == PTRACE_PEEKUSR_AREA)
487                                 ret = peek_user(child, addr, data);
488                         else {
489                                 addr_t utmp;
490                                 if (get_user(utmp,
491                                              (addr_t __force __user *) data))
492                                         return -EFAULT;
493                                 ret = poke_user(child, addr, utmp);
494                         }
495                         if (ret)
496                                 return ret;
497                         addr += sizeof(unsigned long);
498                         data += sizeof(unsigned long);
499                         copied += sizeof(unsigned long);
500                 }
501                 return 0;
502         case PTRACE_GET_LAST_BREAK:
503                 return put_user(child->thread.last_break, (unsigned long __user *)data);
504         case PTRACE_ENABLE_TE:
505                 if (!MACHINE_HAS_TE)
506                         return -EIO;
507                 child->thread.per_flags &= ~PER_FLAG_NO_TE;
508                 return 0;
509         case PTRACE_DISABLE_TE:
510                 if (!MACHINE_HAS_TE)
511                         return -EIO;
512                 child->thread.per_flags |= PER_FLAG_NO_TE;
513                 child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
514                 return 0;
515         case PTRACE_TE_ABORT_RAND:
516                 if (!MACHINE_HAS_TE || (child->thread.per_flags & PER_FLAG_NO_TE))
517                         return -EIO;
518                 switch (data) {
519                 case 0UL:
520                         child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
521                         break;
522                 case 1UL:
523                         child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
524                         child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND_TEND;
525                         break;
526                 case 2UL:
527                         child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
528                         child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND_TEND;
529                         break;
530                 default:
531                         return -EINVAL;
532                 }
533                 return 0;
534         default:
535                 return ptrace_request(child, request, addr, data);
536         }
537 }
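
/*
 * Example (userspace sketch) for the s390 specific PTRACE_PEEKUSR_AREA
 * request handled above: one call copies a whole chunk of the user area
 * instead of one word per ptrace() call. ptrace_area and the request value
 * come from the uapi <asm/ptrace.h>; depending on the libc, the request may
 * need a cast to the libc ptrace request type.
 *
 *      unsigned long gprs[16];
 *      ptrace_area parea = {
 *              .len          = sizeof(gprs),
 *              .kernel_addr  = offsetof(struct user, regs.gprs),
 *              .process_addr = (unsigned long) gprs,
 *      };
 *      if (ptrace(PTRACE_PEEKUSR_AREA, pid, &parea, NULL) == -1)
 *              perror("PTRACE_PEEKUSR_AREA");
 */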
538
539 #ifdef CONFIG_COMPAT
540 /*
541  * Now the fun part starts... a 31 bit program running in the
542  * 31 bit emulation tracing another program. PTRACE_PEEKTEXT,
543  * PTRACE_PEEKDATA, PTRACE_POKETEXT and PTRACE_POKEDATA are easy
544  * to handle, the difference to the 64 bit versions of the requests
545  * is that the access is done in multiples of 4 bytes instead of
546  * 8 bytes (sizeof(unsigned long) on 31/64 bit).
547  * The ugly parts are PTRACE_PEEKUSR, PTRACE_PEEKUSR_AREA,
548  * PTRACE_POKEUSR and PTRACE_POKEUSR_AREA. If the traced program
549  * is a 31 bit program too, the content of struct user can be
550  * emulated. A 31 bit program peeking into the struct user of
551  * a 64 bit program is a no-no.
552  */
553
554 /*
555  * Same as peek_user_per but for a 31 bit program.
556  */
557 static inline __u32 __peek_user_per_compat(struct task_struct *child,
558                                            addr_t addr)
559 {
560         struct compat_per_struct_kernel *dummy32 = NULL;
561
562         if (addr == (addr_t) &dummy32->cr9)
563                 /* Control bits of the active per set. */
564                 return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
565                         PER_EVENT_IFETCH : child->thread.per_user.control;
566         else if (addr == (addr_t) &dummy32->cr10)
567                 /* Start address of the active per set. */
568                 return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
569                         0 : child->thread.per_user.start;
570         else if (addr == (addr_t) &dummy32->cr11)
571                 /* End address of the active per set. */
572                 return test_thread_flag(TIF_SINGLE_STEP) ?
573                         PSW32_ADDR_INSN : child->thread.per_user.end;
574         else if (addr == (addr_t) &dummy32->bits)
575                 /* Single-step bit. */
576                 return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
577                         0x80000000 : 0;
578         else if (addr == (addr_t) &dummy32->starting_addr)
579                 /* Start address of the user specified per set. */
580                 return (__u32) child->thread.per_user.start;
581         else if (addr == (addr_t) &dummy32->ending_addr)
582                 /* End address of the user specified per set. */
583                 return (__u32) child->thread.per_user.end;
584         else if (addr == (addr_t) &dummy32->perc_atmid)
585                 /* PER code, ATMID and AI of the last PER trap */
586                 return (__u32) child->thread.per_event.cause << 16;
587         else if (addr == (addr_t) &dummy32->address)
588                 /* Address of the last PER trap */
589                 return (__u32) child->thread.per_event.address;
590         else if (addr == (addr_t) &dummy32->access_id)
591                 /* Access id of the last PER trap */
592                 return (__u32) child->thread.per_event.paid << 24;
593         return 0;
594 }
595
596 /*
597  * Same as peek_user but for a 31 bit program.
598  */
599 static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
600 {
601         struct compat_user *dummy32 = NULL;
602         addr_t offset;
603         __u32 tmp;
604
605         if (addr < (addr_t) &dummy32->regs.acrs) {
606                 struct pt_regs *regs = task_pt_regs(child);
607                 /*
608                  * psw and gprs are stored on the stack
609                  */
610                 if (addr == (addr_t) &dummy32->regs.psw.mask) {
611                         /* Fake a 31 bit psw mask. */
612                         tmp = (__u32)(regs->psw.mask >> 32);
613                         tmp &= PSW32_MASK_USER | PSW32_MASK_RI;
614                         tmp |= PSW32_USER_BITS;
615                 } else if (addr == (addr_t) &dummy32->regs.psw.addr) {
616                         /* Fake a 31 bit psw address. */
617                         tmp = (__u32) regs->psw.addr |
618                                 (__u32)(regs->psw.mask & PSW_MASK_BA);
619                 } else {
620                         /* gpr 0-15 */
621                         tmp = *(__u32 *)((addr_t) &regs->psw + addr*2 + 4);
622                 }
623         } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
624                 /*
625                  * access registers are stored in the thread structure
626                  */
627                 offset = addr - (addr_t) &dummy32->regs.acrs;
628                 tmp = *(__u32*)((addr_t) &child->thread.acrs + offset);
629
630         } else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
631                 /*
632                  * orig_gpr2 is stored on the kernel stack
633                  */
634                 tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4);
635
636         } else if (addr < (addr_t) &dummy32->regs.fp_regs) {
637                 /*
638                  * prevent reads of padding hole between
639                  * orig_gpr2 and fp_regs on s390.
640                  */
641                 tmp = 0;
642
643         } else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) {
644                 /*
645                  * floating point control reg. is in the thread structure
646                  */
647                 tmp = child->thread.fpu.fpc;
648
649         } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
650                 /*
651                  * floating point regs. are either in child->thread.fpu
652                  * or the child->thread.fpu.vxrs array
653                  */
654                 offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
655                 if (MACHINE_HAS_VX)
656                         tmp = *(__u32 *)
657                                ((addr_t) child->thread.fpu.vxrs + 2*offset);
658                 else
659                         tmp = *(__u32 *)
660                                ((addr_t) child->thread.fpu.fprs + offset);
661
662         } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
663                 /*
664                  * Handle access to the per_info structure.
665                  */
666                 addr -= (addr_t) &dummy32->regs.per_info;
667                 tmp = __peek_user_per_compat(child, addr);
668
669         } else
670                 tmp = 0;
671
672         return tmp;
673 }
674
675 static int peek_user_compat(struct task_struct *child,
676                             addr_t addr, addr_t data)
677 {
678         __u32 tmp;
679
680         if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user) - 3)
681                 return -EIO;
682
683         tmp = __peek_user_compat(child, addr);
684         return put_user(tmp, (__u32 __user *) data);
685 }
686
687 /*
688  * Same as poke_user_per but for a 31 bit program.
689  */
690 static inline void __poke_user_per_compat(struct task_struct *child,
691                                           addr_t addr, __u32 data)
692 {
693         struct compat_per_struct_kernel *dummy32 = NULL;
694
695         if (addr == (addr_t) &dummy32->cr9)
696                 /* PER event mask of the user specified per set. */
697                 child->thread.per_user.control =
698                         data & (PER_EVENT_MASK | PER_CONTROL_MASK);
699         else if (addr == (addr_t) &dummy32->starting_addr)
700                 /* Starting address of the user specified per set. */
701                 child->thread.per_user.start = data;
702         else if (addr == (addr_t) &dummy32->ending_addr)
703                 /* Ending address of the user specified per set. */
704                 child->thread.per_user.end = data;
705 }
706
707 /*
708  * Same as poke_user but for a 31 bit program.
709  */
710 static int __poke_user_compat(struct task_struct *child,
711                               addr_t addr, addr_t data)
712 {
713         struct compat_user *dummy32 = NULL;
714         __u32 tmp = (__u32) data;
715         addr_t offset;
716
717         if (addr < (addr_t) &dummy32->regs.acrs) {
718                 struct pt_regs *regs = task_pt_regs(child);
719                 /*
720                  * psw, gprs, acrs and orig_gpr2 are stored on the stack
721                  * psw and gprs are stored on the stack
722                 if (addr == (addr_t) &dummy32->regs.psw.mask) {
723                         __u32 mask = PSW32_MASK_USER;
724
725                         mask |= is_ri_task(child) ? PSW32_MASK_RI : 0;
726                         /* Build a 64 bit psw mask from 31 bit mask. */
727                         if ((tmp ^ PSW32_USER_BITS) & ~mask)
728                                 /* Invalid psw mask. */
729                                 return -EINVAL;
730                         if ((data & PSW32_MASK_ASC) == PSW32_ASC_HOME)
731                                 /* Invalid address-space-control bits */
732                                 return -EINVAL;
733                         regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
734                                 (regs->psw.mask & PSW_MASK_BA) |
735                                 (__u64)(tmp & mask) << 32;
736                 } else if (addr == (addr_t) &dummy32->regs.psw.addr) {
737                         /* Build a 64 bit psw address from 31 bit address. */
738                         regs->psw.addr = (__u64) tmp & PSW32_ADDR_INSN;
739                         /* Transfer 31 bit amode bit to psw mask. */
740                         regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) |
741                                 (__u64)(tmp & PSW32_ADDR_AMODE);
742                 } else {
743
744                         if (test_pt_regs_flag(regs, PIF_SYSCALL) &&
745                                 addr == offsetof(struct compat_user, regs.gprs[2]))
746                                 fixup_int_code(child, data);
747                         /* gpr 0-15 */
748                         *(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp;
749                 }
750         } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
751                 /*
752                  * access registers are stored in the thread structure
753                  */
754                 offset = addr - (addr_t) &dummy32->regs.acrs;
755                 *(__u32*)((addr_t) &child->thread.acrs + offset) = tmp;
756
757         } else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
758                 /*
759                  * orig_gpr2 is stored on the kernel stack
760                  */
761                 *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp;
762
763         } else if (addr < (addr_t) &dummy32->regs.fp_regs) {
764                 /*
765                  * prevent writes of padding hole between
766                  * orig_gpr2 and fp_regs on s390.
767                  */
768                 return 0;
769
770         } else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) {
771                 /*
772                  * floating point control reg. is in the thread structure
773                  */
774                 if (test_fp_ctl(tmp))
775                         return -EINVAL;
776                 child->thread.fpu.fpc = data;
777
778         } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
779                 /*
780                  * floating point regs. are either in child->thread.fpu
781                  * or the child->thread.fpu.vxrs array
782                  */
783                 offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
784                 if (MACHINE_HAS_VX)
785                         *(__u32 *)((addr_t)
786                                 child->thread.fpu.vxrs + 2*offset) = tmp;
787                 else
788                         *(__u32 *)((addr_t)
789                                 child->thread.fpu.fprs + offset) = tmp;
790
791         } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
792                 /*
793                  * Handle access to the per_info structure.
794                  */
795                 addr -= (addr_t) &dummy32->regs.per_info;
796                 __poke_user_per_compat(child, addr, data);
797         }
798
799         return 0;
800 }
801
802 static int poke_user_compat(struct task_struct *child,
803                             addr_t addr, addr_t data)
804 {
805         if (!is_compat_task() || (addr & 3) ||
806             addr > sizeof(struct compat_user) - 3)
807                 return -EIO;
808
809         return __poke_user_compat(child, addr, data);
810 }
811
812 long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
813                         compat_ulong_t caddr, compat_ulong_t cdata)
814 {
815         unsigned long addr = caddr;
816         unsigned long data = cdata;
817         compat_ptrace_area parea;
818         int copied, ret;
819
820         switch (request) {
821         case PTRACE_PEEKUSR:
822                 /* read the word at location addr in the USER area. */
823                 return peek_user_compat(child, addr, data);
824
825         case PTRACE_POKEUSR:
826                 /* write the word at location addr in the USER area */
827                 return poke_user_compat(child, addr, data);
828
829         case PTRACE_PEEKUSR_AREA:
830         case PTRACE_POKEUSR_AREA:
831                 if (copy_from_user(&parea, (void __force __user *) addr,
832                                                         sizeof(parea)))
833                         return -EFAULT;
834                 addr = parea.kernel_addr;
835                 data = parea.process_addr;
836                 copied = 0;
837                 while (copied < parea.len) {
838                         if (request == PTRACE_PEEKUSR_AREA)
839                                 ret = peek_user_compat(child, addr, data);
840                         else {
841                                 __u32 utmp;
842                                 if (get_user(utmp,
843                                              (__u32 __force __user *) data))
844                                         return -EFAULT;
845                                 ret = poke_user_compat(child, addr, utmp);
846                         }
847                         if (ret)
848                                 return ret;
849                         addr += sizeof(unsigned int);
850                         data += sizeof(unsigned int);
851                         copied += sizeof(unsigned int);
852                 }
853                 return 0;
854         case PTRACE_GET_LAST_BREAK:
855                 return put_user(child->thread.last_break, (unsigned int __user *)data);
856         }
857         return compat_ptrace_request(child, request, addr, data);
858 }
859 #endif
860
861 asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
862 {
863         unsigned long mask = -1UL;
864
865         /*
866          * The sysc_tracesys code in entry.S has stored the system
867          * call number in gprs[2].
868          */
869         if (test_thread_flag(TIF_SYSCALL_TRACE) &&
870             (tracehook_report_syscall_entry(regs) ||
871              regs->gprs[2] >= NR_syscalls)) {
872                 /*
873                  * Tracing decided this syscall should not happen or the
874                  * debugger stored an invalid system call number. Skip
875                  * the system call and the system call restart handling.
876                  */
877                 clear_pt_regs_flag(regs, PIF_SYSCALL);
878                 return -1;
879         }
880
881         /* Do the secure computing check after ptrace. */
882         if (secure_computing(NULL)) {
883                 /* seccomp failures shouldn't expose any additional code. */
884                 return -1;
885         }
886
887         if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
888                 trace_sys_enter(regs, regs->gprs[2]);
889
890         if (is_compat_task())
891                 mask = 0xffffffff;
892
893         audit_syscall_entry(regs->gprs[2], regs->orig_gpr2 & mask,
894                             regs->gprs[3] & mask, regs->gprs[4] & mask,
895                             regs->gprs[5] & mask);
896
897         return regs->gprs[2];
898 }
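
/*
 * Example (sketch of the traditional tracer side use of the check above):
 * since a system call number >= NR_syscalls makes this function return -1
 * and skip the call, a tracer sitting at a syscall entry stop can cancel
 * the pending system call by poking an invalid number into gpr 2 before
 * resuming.
 *
 *      while the tracee is stopped at a PTRACE_SYSCALL entry stop:
 *
 *      ptrace(PTRACE_POKEUSER, pid,
 *             offsetof(struct user, regs.gprs[2]), (unsigned long) -1);
 *      ptrace(PTRACE_SYSCALL, pid, 0, 0);      system call is skipped
 */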
899
900 asmlinkage void do_syscall_trace_exit(struct pt_regs *regs)
901 {
902         audit_syscall_exit(regs);
903
904         if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
905                 trace_sys_exit(regs, regs->gprs[2]);
906
907         if (test_thread_flag(TIF_SYSCALL_TRACE))
908                 tracehook_report_syscall_exit(regs, 0);
909 }
910
911 /*
912  * user_regset definitions.
913  */
914
915 static int s390_regs_get(struct task_struct *target,
916                          const struct user_regset *regset,
917                          unsigned int pos, unsigned int count,
918                          void *kbuf, void __user *ubuf)
919 {
920         if (target == current)
921                 save_access_regs(target->thread.acrs);
922
923         if (kbuf) {
924                 unsigned long *k = kbuf;
925                 while (count > 0) {
926                         *k++ = __peek_user(target, pos);
927                         count -= sizeof(*k);
928                         pos += sizeof(*k);
929                 }
930         } else {
931                 unsigned long __user *u = ubuf;
932                 while (count > 0) {
933                         if (__put_user(__peek_user(target, pos), u++))
934                                 return -EFAULT;
935                         count -= sizeof(*u);
936                         pos += sizeof(*u);
937                 }
938         }
939         return 0;
940 }
941
942 static int s390_regs_set(struct task_struct *target,
943                          const struct user_regset *regset,
944                          unsigned int pos, unsigned int count,
945                          const void *kbuf, const void __user *ubuf)
946 {
947         int rc = 0;
948
949         if (target == current)
950                 save_access_regs(target->thread.acrs);
951
952         if (kbuf) {
953                 const unsigned long *k = kbuf;
954                 while (count > 0 && !rc) {
955                         rc = __poke_user(target, pos, *k++);
956                         count -= sizeof(*k);
957                         pos += sizeof(*k);
958                 }
959         } else {
960                 const unsigned long  __user *u = ubuf;
961                 while (count > 0 && !rc) {
962                         unsigned long word;
963                         rc = __get_user(word, u++);
964                         if (rc)
965                                 break;
966                         rc = __poke_user(target, pos, word);
967                         count -= sizeof(*u);
968                         pos += sizeof(*u);
969                 }
970         }
971
972         if (rc == 0 && target == current)
973                 restore_access_regs(target->thread.acrs);
974
975         return rc;
976 }
977
978 static int s390_fpregs_get(struct task_struct *target,
979                            const struct user_regset *regset, unsigned int pos,
980                            unsigned int count, void *kbuf, void __user *ubuf)
981 {
982         _s390_fp_regs fp_regs;
983
984         if (target == current)
985                 save_fpu_regs();
986
987         fp_regs.fpc = target->thread.fpu.fpc;
988         fpregs_store(&fp_regs, &target->thread.fpu);
989
990         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
991                                    &fp_regs, 0, -1);
992 }
993
994 static int s390_fpregs_set(struct task_struct *target,
995                            const struct user_regset *regset, unsigned int pos,
996                            unsigned int count, const void *kbuf,
997                            const void __user *ubuf)
998 {
999         int rc = 0;
1000         freg_t fprs[__NUM_FPRS];
1001
1002         if (target == current)
1003                 save_fpu_regs();
1004
1005         if (MACHINE_HAS_VX)
1006                 convert_vx_to_fp(fprs, target->thread.fpu.vxrs);
1007         else
1008                 memcpy(&fprs, target->thread.fpu.fprs, sizeof(fprs));
1009
1010         /* If setting FPC, must validate it first. */
1011         if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
1012                 u32 ufpc[2] = { target->thread.fpu.fpc, 0 };
1013                 rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ufpc,
1014                                         0, offsetof(s390_fp_regs, fprs));
1015                 if (rc)
1016                         return rc;
1017                 if (ufpc[1] != 0 || test_fp_ctl(ufpc[0]))
1018                         return -EINVAL;
1019                 target->thread.fpu.fpc = ufpc[0];
1020         }
1021
1022         if (rc == 0 && count > 0)
1023                 rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1024                                         fprs, offsetof(s390_fp_regs, fprs), -1);
1025         if (rc)
1026                 return rc;
1027
1028         if (MACHINE_HAS_VX)
1029                 convert_fp_to_vx(target->thread.fpu.vxrs, fprs);
1030         else
1031                 memcpy(target->thread.fpu.fprs, &fprs, sizeof(fprs));
1032
1033         return rc;
1034 }
1035
1036 static int s390_last_break_get(struct task_struct *target,
1037                                const struct user_regset *regset,
1038                                unsigned int pos, unsigned int count,
1039                                void *kbuf, void __user *ubuf)
1040 {
1041         if (count > 0) {
1042                 if (kbuf) {
1043                         unsigned long *k = kbuf;
1044                         *k = target->thread.last_break;
1045                 } else {
1046                         unsigned long  __user *u = ubuf;
1047                         if (__put_user(target->thread.last_break, u))
1048                                 return -EFAULT;
1049                 }
1050         }
1051         return 0;
1052 }
1053
1054 static int s390_last_break_set(struct task_struct *target,
1055                                const struct user_regset *regset,
1056                                unsigned int pos, unsigned int count,
1057                                const void *kbuf, const void __user *ubuf)
1058 {
1059         return 0;
1060 }
1061
1062 static int s390_tdb_get(struct task_struct *target,
1063                         const struct user_regset *regset,
1064                         unsigned int pos, unsigned int count,
1065                         void *kbuf, void __user *ubuf)
1066 {
1067         struct pt_regs *regs = task_pt_regs(target);
1068         unsigned char *data;
1069
1070         if (!(regs->int_code & 0x200))
1071                 return -ENODATA;
1072         data = target->thread.trap_tdb;
1073         return user_regset_copyout(&pos, &count, &kbuf, &ubuf, data, 0, 256);
1074 }
1075
1076 static int s390_tdb_set(struct task_struct *target,
1077                         const struct user_regset *regset,
1078                         unsigned int pos, unsigned int count,
1079                         const void *kbuf, const void __user *ubuf)
1080 {
1081         return 0;
1082 }
1083
1084 static int s390_vxrs_low_get(struct task_struct *target,
1085                              const struct user_regset *regset,
1086                              unsigned int pos, unsigned int count,
1087                              void *kbuf, void __user *ubuf)
1088 {
1089         __u64 vxrs[__NUM_VXRS_LOW];
1090         int i;
1091
1092         if (!MACHINE_HAS_VX)
1093                 return -ENODEV;
1094         if (target == current)
1095                 save_fpu_regs();
1096         for (i = 0; i < __NUM_VXRS_LOW; i++)
1097                 vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
1098         return user_regset_copyout(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
1099 }
1100
1101 static int s390_vxrs_low_set(struct task_struct *target,
1102                              const struct user_regset *regset,
1103                              unsigned int pos, unsigned int count,
1104                              const void *kbuf, const void __user *ubuf)
1105 {
1106         __u64 vxrs[__NUM_VXRS_LOW];
1107         int i, rc;
1108
1109         if (!MACHINE_HAS_VX)
1110                 return -ENODEV;
1111         if (target == current)
1112                 save_fpu_regs();
1113
1114         for (i = 0; i < __NUM_VXRS_LOW; i++)
1115                 vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
1116
1117         rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
1118         if (rc == 0)
1119                 for (i = 0; i < __NUM_VXRS_LOW; i++)
1120                         *((__u64 *)(target->thread.fpu.vxrs + i) + 1) = vxrs[i];
1121
1122         return rc;
1123 }
1124
1125 static int s390_vxrs_high_get(struct task_struct *target,
1126                               const struct user_regset *regset,
1127                               unsigned int pos, unsigned int count,
1128                               void *kbuf, void __user *ubuf)
1129 {
1130         __vector128 vxrs[__NUM_VXRS_HIGH];
1131
1132         if (!MACHINE_HAS_VX)
1133                 return -ENODEV;
1134         if (target == current)
1135                 save_fpu_regs();
1136         memcpy(vxrs, target->thread.fpu.vxrs + __NUM_VXRS_LOW, sizeof(vxrs));
1137
1138         return user_regset_copyout(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
1139 }
1140
1141 static int s390_vxrs_high_set(struct task_struct *target,
1142                               const struct user_regset *regset,
1143                               unsigned int pos, unsigned int count,
1144                               const void *kbuf, const void __user *ubuf)
1145 {
1146         int rc;
1147
1148         if (!MACHINE_HAS_VX)
1149                 return -ENODEV;
1150         if (target == current)
1151                 save_fpu_regs();
1152
1153         rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1154                                 target->thread.fpu.vxrs + __NUM_VXRS_LOW, 0, -1);
1155         return rc;
1156 }
1157
1158 static int s390_system_call_get(struct task_struct *target,
1159                                 const struct user_regset *regset,
1160                                 unsigned int pos, unsigned int count,
1161                                 void *kbuf, void __user *ubuf)
1162 {
1163         unsigned int *data = &target->thread.system_call;
1164         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1165                                    data, 0, sizeof(unsigned int));
1166 }
1167
1168 static int s390_system_call_set(struct task_struct *target,
1169                                 const struct user_regset *regset,
1170                                 unsigned int pos, unsigned int count,
1171                                 const void *kbuf, const void __user *ubuf)
1172 {
1173         unsigned int *data = &target->thread.system_call;
1174         return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1175                                   data, 0, sizeof(unsigned int));
1176 }
1177
1178 static int s390_gs_cb_get(struct task_struct *target,
1179                           const struct user_regset *regset,
1180                           unsigned int pos, unsigned int count,
1181                           void *kbuf, void __user *ubuf)
1182 {
1183         struct gs_cb *data = target->thread.gs_cb;
1184
1185         if (!MACHINE_HAS_GS)
1186                 return -ENODEV;
1187         if (!data)
1188                 return -ENODATA;
1189         if (target == current)
1190                 save_gs_cb(data);
1191         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1192                                    data, 0, sizeof(struct gs_cb));
1193 }
1194
1195 static int s390_gs_cb_set(struct task_struct *target,
1196                           const struct user_regset *regset,
1197                           unsigned int pos, unsigned int count,
1198                           const void *kbuf, const void __user *ubuf)
1199 {
1200         struct gs_cb gs_cb = { }, *data = NULL;
1201         int rc;
1202
1203         if (!MACHINE_HAS_GS)
1204                 return -ENODEV;
1205         if (!target->thread.gs_cb) {
1206                 data = kzalloc(sizeof(*data), GFP_KERNEL);
1207                 if (!data)
1208                         return -ENOMEM;
1209         }
1210         if (!target->thread.gs_cb)
1211                 gs_cb.gsd = 25;
1212         else if (target == current)
1213                 save_gs_cb(&gs_cb);
1214         else
1215                 gs_cb = *target->thread.gs_cb;
1216         rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1217                                 &gs_cb, 0, sizeof(gs_cb));
1218         if (rc) {
1219                 kfree(data);
1220                 return -EFAULT;
1221         }
1222         preempt_disable();
1223         if (!target->thread.gs_cb)
1224                 target->thread.gs_cb = data;
1225         *target->thread.gs_cb = gs_cb;
1226         if (target == current) {
1227                 __ctl_set_bit(2, 4);
1228                 restore_gs_cb(target->thread.gs_cb);
1229         }
1230         preempt_enable();
1231         return rc;
1232 }
1233
1234 static int s390_gs_bc_get(struct task_struct *target,
1235                           const struct user_regset *regset,
1236                           unsigned int pos, unsigned int count,
1237                           void *kbuf, void __user *ubuf)
1238 {
1239         struct gs_cb *data = target->thread.gs_bc_cb;
1240
1241         if (!MACHINE_HAS_GS)
1242                 return -ENODEV;
1243         if (!data)
1244                 return -ENODATA;
1245         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1246                                    data, 0, sizeof(struct gs_cb));
1247 }
1248
1249 static int s390_gs_bc_set(struct task_struct *target,
1250                           const struct user_regset *regset,
1251                           unsigned int pos, unsigned int count,
1252                           const void *kbuf, const void __user *ubuf)
1253 {
1254         struct gs_cb *data = target->thread.gs_bc_cb;
1255
1256         if (!MACHINE_HAS_GS)
1257                 return -ENODEV;
1258         if (!data) {
1259                 data = kzalloc(sizeof(*data), GFP_KERNEL);
1260                 if (!data)
1261                         return -ENOMEM;
1262                 target->thread.gs_bc_cb = data;
1263         }
1264         return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1265                                   data, 0, sizeof(struct gs_cb));
1266 }
1267
1268 static const struct user_regset s390_regsets[] = {
1269         {
1270                 .core_note_type = NT_PRSTATUS,
1271                 .n = sizeof(s390_regs) / sizeof(long),
1272                 .size = sizeof(long),
1273                 .align = sizeof(long),
1274                 .get = s390_regs_get,
1275                 .set = s390_regs_set,
1276         },
1277         {
1278                 .core_note_type = NT_PRFPREG,
1279                 .n = sizeof(s390_fp_regs) / sizeof(long),
1280                 .size = sizeof(long),
1281                 .align = sizeof(long),
1282                 .get = s390_fpregs_get,
1283                 .set = s390_fpregs_set,
1284         },
1285         {
1286                 .core_note_type = NT_S390_SYSTEM_CALL,
1287                 .n = 1,
1288                 .size = sizeof(unsigned int),
1289                 .align = sizeof(unsigned int),
1290                 .get = s390_system_call_get,
1291                 .set = s390_system_call_set,
1292         },
1293         {
1294                 .core_note_type = NT_S390_LAST_BREAK,
1295                 .n = 1,
1296                 .size = sizeof(long),
1297                 .align = sizeof(long),
1298                 .get = s390_last_break_get,
1299                 .set = s390_last_break_set,
1300         },
1301         {
1302                 .core_note_type = NT_S390_TDB,
1303                 .n = 1,
1304                 .size = 256,
1305                 .align = 1,
1306                 .get = s390_tdb_get,
1307                 .set = s390_tdb_set,
1308         },
1309         {
1310                 .core_note_type = NT_S390_VXRS_LOW,
1311                 .n = __NUM_VXRS_LOW,
1312                 .size = sizeof(__u64),
1313                 .align = sizeof(__u64),
1314                 .get = s390_vxrs_low_get,
1315                 .set = s390_vxrs_low_set,
1316         },
1317         {
1318                 .core_note_type = NT_S390_VXRS_HIGH,
1319                 .n = __NUM_VXRS_HIGH,
1320                 .size = sizeof(__vector128),
1321                 .align = sizeof(__vector128),
1322                 .get = s390_vxrs_high_get,
1323                 .set = s390_vxrs_high_set,
1324         },
1325         {
1326                 .core_note_type = NT_S390_GS_CB,
1327                 .n = sizeof(struct gs_cb) / sizeof(__u64),
1328                 .size = sizeof(__u64),
1329                 .align = sizeof(__u64),
1330                 .get = s390_gs_cb_get,
1331                 .set = s390_gs_cb_set,
1332         },
1333         {
1334                 .core_note_type = NT_S390_GS_BC,
1335                 .n = sizeof(struct gs_cb) / sizeof(__u64),
1336                 .size = sizeof(__u64),
1337                 .align = sizeof(__u64),
1338                 .get = s390_gs_bc_get,
1339                 .set = s390_gs_bc_set,
1340         },
1341 };
1342
1343 static const struct user_regset_view user_s390_view = {
1344         .name = UTS_MACHINE,
1345         .e_machine = EM_S390,
1346         .regsets = s390_regsets,
1347         .n = ARRAY_SIZE(s390_regsets)
1348 };
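
/*
 * Example (userspace sketch): the regsets above are reachable through the
 * generic PTRACE_GETREGSET/PTRACE_SETREGSET requests together with the
 * NT_* note types from <elf.h>, e.g. reading the value exported by
 * s390_system_call_get():
 *
 *      #include <elf.h>                        for NT_S390_SYSTEM_CALL
 *      #include <sys/uio.h>
 *      #include <sys/ptrace.h>
 *
 *      unsigned int sysc;
 *      struct iovec iov = { .iov_base = &sysc, .iov_len = sizeof(sysc) };
 *      ptrace(PTRACE_GETREGSET, pid, NT_S390_SYSTEM_CALL, &iov);
 */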
1349
1350 #ifdef CONFIG_COMPAT
1351 static int s390_compat_regs_get(struct task_struct *target,
1352                                 const struct user_regset *regset,
1353                                 unsigned int pos, unsigned int count,
1354                                 void *kbuf, void __user *ubuf)
1355 {
1356         if (target == current)
1357                 save_access_regs(target->thread.acrs);
1358
1359         if (kbuf) {
1360                 compat_ulong_t *k = kbuf;
1361                 while (count > 0) {
1362                         *k++ = __peek_user_compat(target, pos);
1363                         count -= sizeof(*k);
1364                         pos += sizeof(*k);
1365                 }
1366         } else {
1367                 compat_ulong_t __user *u = ubuf;
1368                 while (count > 0) {
1369                         if (__put_user(__peek_user_compat(target, pos), u++))
1370                                 return -EFAULT;
1371                         count -= sizeof(*u);
1372                         pos += sizeof(*u);
1373                 }
1374         }
1375         return 0;
1376 }
1377
1378 static int s390_compat_regs_set(struct task_struct *target,
1379                                 const struct user_regset *regset,
1380                                 unsigned int pos, unsigned int count,
1381                                 const void *kbuf, const void __user *ubuf)
1382 {
1383         int rc = 0;
1384
1385         if (target == current)
1386                 save_access_regs(target->thread.acrs);
1387
1388         if (kbuf) {
1389                 const compat_ulong_t *k = kbuf;
1390                 while (count > 0 && !rc) {
1391                         rc = __poke_user_compat(target, pos, *k++);
1392                         count -= sizeof(*k);
1393                         pos += sizeof(*k);
1394                 }
1395         } else {
1396                 const compat_ulong_t  __user *u = ubuf;
1397                 while (count > 0 && !rc) {
1398                         compat_ulong_t word;
1399                         rc = __get_user(word, u++);
1400                         if (rc)
1401                                 break;
1402                         rc = __poke_user_compat(target, pos, word);
1403                         count -= sizeof(*u);
1404                         pos += sizeof(*u);
1405                 }
1406         }
1407
1408         if (rc == 0 && target == current)
1409                 restore_access_regs(target->thread.acrs);
1410
1411         return rc;
1412 }
1413
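/*
 * NT_S390_HIGH_GPRS (compat only): a 31-bit tracee's registers are still
 * kept as full 64-bit values in pt_regs, but the compat NT_PRSTATUS view
 * only carries the low 32 bits. The two helpers below expose the upper
 * halves: on big-endian s390, gprs_high initially points at the high word
 * of the first requested 64-bit GPR and is advanced by two compat_ulong_t
 * (i.e. one full register) per iteration.
 */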
1414 static int s390_compat_regs_high_get(struct task_struct *target,
1415                                      const struct user_regset *regset,
1416                                      unsigned int pos, unsigned int count,
1417                                      void *kbuf, void __user *ubuf)
1418 {
1419         compat_ulong_t *gprs_high;
1420
1421         gprs_high = (compat_ulong_t *)
1422                 &task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
1423         if (kbuf) {
1424                 compat_ulong_t *k = kbuf;
1425                 while (count > 0) {
1426                         *k++ = *gprs_high;
1427                         gprs_high += 2;
1428                         count -= sizeof(*k);
1429                 }
1430         } else {
1431                 compat_ulong_t __user *u = ubuf;
1432                 while (count > 0) {
1433                         if (__put_user(*gprs_high, u++))
1434                                 return -EFAULT;
1435                         gprs_high += 2;
1436                         count -= sizeof(*u);
1437                 }
1438         }
1439         return 0;
1440 }
1441
1442 static int s390_compat_regs_high_set(struct task_struct *target,
1443                                      const struct user_regset *regset,
1444                                      unsigned int pos, unsigned int count,
1445                                      const void *kbuf, const void __user *ubuf)
1446 {
1447         compat_ulong_t *gprs_high;
1448         int rc = 0;
1449
1450         gprs_high = (compat_ulong_t *)
1451                 &task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
1452         if (kbuf) {
1453                 const compat_ulong_t *k = kbuf;
1454                 while (count > 0) {
1455                         *gprs_high = *k++;
1456                         gprs_high += 2;
1457                         count -= sizeof(*k);
1458                 }
1459         } else {
1460                 const compat_ulong_t  __user *u = ubuf;
1461                 while (count > 0 && !rc) {
1462                         unsigned long word;
1463                         rc = __get_user(word, u++);
1464                         if (rc)
1465                                 break;
1466                         *gprs_high = word;
1467                         gprs_high += 2;
1468                         count -= sizeof(*u);
1469                 }
1470         }
1471
1472         return rc;
1473 }
1474
1475 static int s390_compat_last_break_get(struct task_struct *target,
1476                                       const struct user_regset *regset,
1477                                       unsigned int pos, unsigned int count,
1478                                       void *kbuf, void __user *ubuf)
1479 {
1480         compat_ulong_t last_break;
1481
1482         if (count > 0) {
1483                 last_break = target->thread.last_break;
1484                 if (kbuf) {
1485                         unsigned long *k = kbuf;
1486                         *k = last_break;
1487                 } else {
1488                         unsigned long  __user *u = ubuf;
1489                         if (__put_user(last_break, u))
1490                                 return -EFAULT;
1491                 }
1492         }
1493         return 0;
1494 }
1495
1496 static int s390_compat_last_break_set(struct task_struct *target,
1497                                       const struct user_regset *regset,
1498                                       unsigned int pos, unsigned int count,
1499                                       const void *kbuf, const void __user *ubuf)
1500 {
1501         return 0;
1502 }
1503
1504 static const struct user_regset s390_compat_regsets[] = {
1505         {
1506                 .core_note_type = NT_PRSTATUS,
1507                 .n = sizeof(s390_compat_regs) / sizeof(compat_long_t),
1508                 .size = sizeof(compat_long_t),
1509                 .align = sizeof(compat_long_t),
1510                 .get = s390_compat_regs_get,
1511                 .set = s390_compat_regs_set,
1512         },
1513         {
1514                 .core_note_type = NT_PRFPREG,
1515                 .n = sizeof(s390_fp_regs) / sizeof(compat_long_t),
1516                 .size = sizeof(compat_long_t),
1517                 .align = sizeof(compat_long_t),
1518                 .get = s390_fpregs_get,
1519                 .set = s390_fpregs_set,
1520         },
1521         {
1522                 .core_note_type = NT_S390_SYSTEM_CALL,
1523                 .n = 1,
1524                 .size = sizeof(compat_uint_t),
1525                 .align = sizeof(compat_uint_t),
1526                 .get = s390_system_call_get,
1527                 .set = s390_system_call_set,
1528         },
1529         {
1530                 .core_note_type = NT_S390_LAST_BREAK,
1531                 .n = 1,
1532                 .size = sizeof(long),
1533                 .align = sizeof(long),
1534                 .get = s390_compat_last_break_get,
1535                 .set = s390_compat_last_break_set,
1536         },
1537         {
1538                 .core_note_type = NT_S390_TDB,
1539                 .n = 1,
1540                 .size = 256,
1541                 .align = 1,
1542                 .get = s390_tdb_get,
1543                 .set = s390_tdb_set,
1544         },
1545         {
1546                 .core_note_type = NT_S390_VXRS_LOW,
1547                 .n = __NUM_VXRS_LOW,
1548                 .size = sizeof(__u64),
1549                 .align = sizeof(__u64),
1550                 .get = s390_vxrs_low_get,
1551                 .set = s390_vxrs_low_set,
1552         },
1553         {
1554                 .core_note_type = NT_S390_VXRS_HIGH,
1555                 .n = __NUM_VXRS_HIGH,
1556                 .size = sizeof(__vector128),
1557                 .align = sizeof(__vector128),
1558                 .get = s390_vxrs_high_get,
1559                 .set = s390_vxrs_high_set,
1560         },
1561         {
1562                 .core_note_type = NT_S390_HIGH_GPRS,
1563                 .n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t),
1564                 .size = sizeof(compat_long_t),
1565                 .align = sizeof(compat_long_t),
1566                 .get = s390_compat_regs_high_get,
1567                 .set = s390_compat_regs_high_set,
1568         },
1569         {
1570                 .core_note_type = NT_S390_GS_CB,
1571                 .n = sizeof(struct gs_cb) / sizeof(__u64),
1572                 .size = sizeof(__u64),
1573                 .align = sizeof(__u64),
1574                 .get = s390_gs_cb_get,
1575                 .set = s390_gs_cb_set,
1576         },
1577 };
1578
1579 static const struct user_regset_view user_s390_compat_view = {
1580         .name = "s390",
1581         .e_machine = EM_S390,
1582         .regsets = s390_compat_regsets,
1583         .n = ARRAY_SIZE(s390_compat_regsets)
1584 };
1585 #endif
1586
1587 const struct user_regset_view *task_user_regset_view(struct task_struct *task)
1588 {
1589 #ifdef CONFIG_COMPAT
1590         if (test_tsk_thread_flag(task, TIF_31BIT))
1591                 return &user_s390_compat_view;
1592 #endif
1593         return &user_s390_view;
1594 }
1595
1596 static const char *gpr_names[NUM_GPRS] = {
1597         "r0", "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
1598         "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
1599 };
1600
1601 unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset)
1602 {
1603         if (offset >= NUM_GPRS)
1604                 return 0;
1605         return regs->gprs[offset];
1606 }
1607
1608 int regs_query_register_offset(const char *name)
1609 {
1610         unsigned long offset;
1611
1612         if (!name || *name != 'r')
1613                 return -EINVAL;
1614         if (kstrtoul(name + 1, 10, &offset))
1615                 return -EINVAL;
1616         if (offset >= NUM_GPRS)
1617                 return -EINVAL;
1618         return offset;
1619 }
1620
1621 const char *regs_query_register_name(unsigned int offset)
1622 {
1623         if (offset >= NUM_GPRS)
1624                 return NULL;
1625         return gpr_names[offset];
1626 }
1627
1628 static int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
1629 {
1630         unsigned long ksp = kernel_stack_pointer(regs);
1631
1632         return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1));
1633 }
1634
1635 /**
1636  * regs_get_kernel_stack_nth() - get Nth entry of the stack
1637  * @regs: pt_regs which contains the kernel stack pointer.
1638  * @n: stack entry number.
1639  *
1640  * regs_get_kernel_stack_nth() returns the @n th entry of the kernel stack
1641  * which is specified by @regs. If the @n th entry is NOT in the kernel stack,
1642  * this returns 0.
1643  */
1644 unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
1645 {
1646         unsigned long addr;
1647
1648         addr = kernel_stack_pointer(regs) + n * sizeof(long);
1649         if (!regs_within_kernel_stack(regs, addr))
1650                 return 0;
1651         return *(unsigned long *)addr;
1652 }
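
/*
 * Illustrative only (not part of this file): the helpers above back the
 * HAVE_REGS_AND_STACK_ACCESS_API interface used e.g. by kprobe event
 * tracing. A caller holding a struct pt_regs could resolve a register by
 * name and read it roughly like this:
 *
 *	int off = regs_query_register_offset("r2");
 *	unsigned long val = (off >= 0) ? regs_get_register(regs, off) : 0;
 *	unsigned long nth = regs_get_kernel_stack_nth(regs, 3);
 */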