arch/s390/kernel/ptrace.c
// SPDX-License-Identifier: GPL-2.0
/*
 *  Ptrace user space interface.
 *
 *    Copyright IBM Corp. 1999, 2010
 *    Author(s): Denis Joseph Barrow
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/elf.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/seccomp.h>
#include <linux/compat.h>
#include <trace/syscall.h>
#include <asm/page.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/switch_to.h>
#include <asm/runtime_instr.h>
#include <asm/facility.h>

#include "entry.h"

#ifdef CONFIG_COMPAT
#include "compat_ptrace.h"
#endif

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

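/*
 * Recompute the control register and PER state for @task: enable or
 * disable transactional execution (CR0/CR2) and guarded storage (CR2)
 * according to the thread's per_flags and gs_cb, and merge the user
 * specified PER registers with single/block stepping requests.
 */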
void update_cr_regs(struct task_struct *task)
{
        struct pt_regs *regs = task_pt_regs(task);
        struct thread_struct *thread = &task->thread;
        struct per_regs old, new;
        union ctlreg0 cr0_old, cr0_new;
        union ctlreg2 cr2_old, cr2_new;
        int cr0_changed, cr2_changed;

        __ctl_store(cr0_old.val, 0, 0);
        __ctl_store(cr2_old.val, 2, 2);
        cr0_new = cr0_old;
        cr2_new = cr2_old;
        /* Take care of the enable/disable of transactional execution. */
        if (MACHINE_HAS_TE) {
                /* Set or clear transaction execution TXC bit 8. */
                cr0_new.tcx = 1;
                if (task->thread.per_flags & PER_FLAG_NO_TE)
                        cr0_new.tcx = 0;
                /* Set or clear transaction execution TDC bits 62 and 63. */
                cr2_new.tdc = 0;
                if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) {
                        if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND)
                                cr2_new.tdc = 1;
                        else
                                cr2_new.tdc = 2;
                }
        }
        /* Take care of enable/disable of guarded storage. */
        if (MACHINE_HAS_GS) {
                cr2_new.gse = 0;
                if (task->thread.gs_cb)
                        cr2_new.gse = 1;
        }
        /* Load control register 0/2 iff changed */
        cr0_changed = cr0_new.val != cr0_old.val;
        cr2_changed = cr2_new.val != cr2_old.val;
        if (cr0_changed)
                __ctl_load(cr0_new.val, 0, 0);
        if (cr2_changed)
                __ctl_load(cr2_new.val, 2, 2);
        /* Copy user specified PER registers */
        new.control = thread->per_user.control;
        new.start = thread->per_user.start;
        new.end = thread->per_user.end;

        /* merge TIF_SINGLE_STEP into user specified PER registers. */
        if (test_tsk_thread_flag(task, TIF_SINGLE_STEP) ||
            test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP)) {
                if (test_tsk_thread_flag(task, TIF_BLOCK_STEP))
                        new.control |= PER_EVENT_BRANCH;
                else
                        new.control |= PER_EVENT_IFETCH;
                new.control |= PER_CONTROL_SUSPENSION;
                new.control |= PER_EVENT_TRANSACTION_END;
                if (test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP))
                        new.control |= PER_EVENT_IFETCH;
                new.start = 0;
                new.end = -1UL;
        }

        /* Take care of the PER enablement bit in the PSW. */
        if (!(new.control & PER_EVENT_MASK)) {
                regs->psw.mask &= ~PSW_MASK_PER;
                return;
        }
        regs->psw.mask |= PSW_MASK_PER;
        __ctl_store(old, 9, 11);
        if (memcmp(&new, &old, sizeof(struct per_regs)) != 0)
                __ctl_load(new, 9, 11);
}

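/*
 * Enable/disable hardware single stepping and block stepping. Called
 * from the generic ptrace code; the flags are picked up and merged
 * into the PER control registers by update_cr_regs().
 */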
void user_enable_single_step(struct task_struct *task)
{
        clear_tsk_thread_flag(task, TIF_BLOCK_STEP);
        set_tsk_thread_flag(task, TIF_SINGLE_STEP);
}

void user_disable_single_step(struct task_struct *task)
{
        clear_tsk_thread_flag(task, TIF_BLOCK_STEP);
        clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
}

void user_enable_block_step(struct task_struct *task)
{
        set_tsk_thread_flag(task, TIF_SINGLE_STEP);
        set_tsk_thread_flag(task, TIF_BLOCK_STEP);
}

/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Clear all debugging related fields.
 */
void ptrace_disable(struct task_struct *task)
{
        memset(&task->thread.per_user, 0, sizeof(task->thread.per_user));
        memset(&task->thread.per_event, 0, sizeof(task->thread.per_event));
        clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
        clear_pt_regs_flag(task_pt_regs(task), PIF_PER_TRAP);
        task->thread.per_flags = 0;
}

#define __ADDR_MASK 7

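/*
 * Read a word from the (pseudo) per_info part of the user area. While
 * single stepping is active the values of the active per set are
 * reported instead of the user specified PER registers.
 */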
static inline unsigned long __peek_user_per(struct task_struct *child,
                                            addr_t addr)
{
        struct per_struct_kernel *dummy = NULL;

        if (addr == (addr_t) &dummy->cr9)
                /* Control bits of the active per set. */
                return test_thread_flag(TIF_SINGLE_STEP) ?
                        PER_EVENT_IFETCH : child->thread.per_user.control;
        else if (addr == (addr_t) &dummy->cr10)
                /* Start address of the active per set. */
                return test_thread_flag(TIF_SINGLE_STEP) ?
                        0 : child->thread.per_user.start;
        else if (addr == (addr_t) &dummy->cr11)
                /* End address of the active per set. */
                return test_thread_flag(TIF_SINGLE_STEP) ?
                        -1UL : child->thread.per_user.end;
        else if (addr == (addr_t) &dummy->bits)
                /* Single-step bit. */
                return test_thread_flag(TIF_SINGLE_STEP) ?
                        (1UL << (BITS_PER_LONG - 1)) : 0;
        else if (addr == (addr_t) &dummy->starting_addr)
                /* Start address of the user specified per set. */
                return child->thread.per_user.start;
        else if (addr == (addr_t) &dummy->ending_addr)
                /* End address of the user specified per set. */
                return child->thread.per_user.end;
        else if (addr == (addr_t) &dummy->perc_atmid)
                /* PER code, ATMID and AI of the last PER trap */
                return (unsigned long)
                        child->thread.per_event.cause << (BITS_PER_LONG - 16);
        else if (addr == (addr_t) &dummy->address)
                /* Address of the last PER trap */
                return child->thread.per_event.address;
        else if (addr == (addr_t) &dummy->access_id)
                /* Access id of the last PER trap */
                return (unsigned long)
                        child->thread.per_event.paid << (BITS_PER_LONG - 8);
        return 0;
}

/*
 * Read the word at offset addr from the user area of a process. The
 * trouble here is that the information is littered over different
 * locations. The process registers are found on the kernel stack,
 * the floating point stuff and the trace settings are stored in
 * the task structure. In addition the different structures in
 * struct user contain pad bytes that should be read as zeroes.
 * Lovely...
 */
static unsigned long __peek_user(struct task_struct *child, addr_t addr)
{
        struct user *dummy = NULL;
        addr_t offset, tmp;

        if (addr < (addr_t) &dummy->regs.acrs) {
                /*
                 * psw and gprs are stored on the stack
                 */
                tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr);
                if (addr == (addr_t) &dummy->regs.psw.mask) {
                        /* Return a clean psw mask. */
                        tmp &= PSW_MASK_USER | PSW_MASK_RI;
                        tmp |= PSW_USER_BITS;
                }

        } else if (addr < (addr_t) &dummy->regs.orig_gpr2) {
                /*
                 * access registers are stored in the thread structure
                 */
                offset = addr - (addr_t) &dummy->regs.acrs;
                /*
                 * Very special case: old & broken 64 bit gdb reading
                 * from acrs[15]. Result is a 64 bit value. Read the
                 * 32 bit acrs[15] value and shift it by 32. Sick...
                 */
                if (addr == (addr_t) &dummy->regs.acrs[15])
                        tmp = ((unsigned long) child->thread.acrs[15]) << 32;
                else
                        tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);

        } else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
                /*
                 * orig_gpr2 is stored on the kernel stack
                 */
                tmp = (addr_t) task_pt_regs(child)->orig_gpr2;

        } else if (addr < (addr_t) &dummy->regs.fp_regs) {
                /*
                 * prevent reads of padding hole between
                 * orig_gpr2 and fp_regs on s390.
                 */
                tmp = 0;

        } else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) {
                /*
                 * floating point control reg. is in the thread structure
                 */
                tmp = child->thread.fpu.fpc;
                tmp <<= BITS_PER_LONG - 32;

        } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
                /*
                 * floating point regs. are either in child->thread.fpu
                 * or the child->thread.fpu.vxrs array
                 */
                offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
                if (MACHINE_HAS_VX)
                        tmp = *(addr_t *)
                               ((addr_t) child->thread.fpu.vxrs + 2*offset);
                else
                        tmp = *(addr_t *)
                               ((addr_t) child->thread.fpu.fprs + offset);

        } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
                /*
                 * Handle access to the per_info structure.
                 */
                addr -= (addr_t) &dummy->regs.per_info;
                tmp = __peek_user_per(child, addr);

        } else
                tmp = 0;

        return tmp;
}

static int
peek_user(struct task_struct *child, addr_t addr, addr_t data)
{
        addr_t tmp, mask;

        /*
         * Stupid gdb peeks/pokes the access registers in 64 bit with
         * an alignment of 4. Programmers from hell...
         */
        mask = __ADDR_MASK;
        if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
            addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
                mask = 3;
        if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
                return -EIO;

        tmp = __peek_user(child, addr);
        return put_user(tmp, (addr_t __user *) data);
}

static inline void __poke_user_per(struct task_struct *child,
                                   addr_t addr, addr_t data)
{
        struct per_struct_kernel *dummy = NULL;

        /*
         * There are only three fields in the per_info struct that the
         * debugger user can write to.
         * 1) cr9: the debugger wants to set a new PER event mask
         * 2) starting_addr: the debugger wants to set a new starting
         *    address to use with the PER event mask.
         * 3) ending_addr: the debugger wants to set a new ending
         *    address to use with the PER event mask.
         * The user specified PER event mask and the start and end
         * addresses are used only if single stepping is not in effect.
         * Writes to any other field in per_info are ignored.
         */
        if (addr == (addr_t) &dummy->cr9)
                /* PER event mask of the user specified per set. */
                child->thread.per_user.control =
                        data & (PER_EVENT_MASK | PER_CONTROL_MASK);
        else if (addr == (addr_t) &dummy->starting_addr)
                /* Starting address of the user specified per set. */
                child->thread.per_user.start = data;
        else if (addr == (addr_t) &dummy->ending_addr)
                /* Ending address of the user specified per set. */
                child->thread.per_user.end = data;
}

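/*
 * The tracer poked a new value into gpr2, i.e. the system call number,
 * while the tracee is stopped in a system call. If the tracee really
 * stopped on an svc instruction, rewrite the lower half of the
 * interruption code so that it matches the new system call number.
 */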
static void fixup_int_code(struct task_struct *child, addr_t data)
{
        struct pt_regs *regs = task_pt_regs(child);
        int ilc = regs->int_code >> 16;
        u16 insn;

        if (ilc > 6)
                return;

        if (ptrace_access_vm(child, regs->psw.addr - (regs->int_code >> 16),
                        &insn, sizeof(insn), FOLL_FORCE) != sizeof(insn))
                return;

        /* double check that tracee stopped on svc instruction */
        if ((insn >> 8) != 0xa)
                return;

        regs->int_code = 0x20000 | (data & 0xffff);
}
/*
 * Write a word to the user area of a process at location addr. This
 * operation does have an additional problem compared to peek_user.
 * Stores to the program status word and to the floating point
 * control register need to be checked for validity.
 */
static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
        struct user *dummy = NULL;
        addr_t offset;

        if (addr < (addr_t) &dummy->regs.acrs) {
                struct pt_regs *regs = task_pt_regs(child);
                /*
                 * psw and gprs are stored on the stack
                 */
                if (addr == (addr_t) &dummy->regs.psw.mask) {
                        unsigned long mask = PSW_MASK_USER;

                        mask |= is_ri_task(child) ? PSW_MASK_RI : 0;
                        if ((data ^ PSW_USER_BITS) & ~mask)
                                /* Invalid psw mask. */
                                return -EINVAL;
                        if ((data & PSW_MASK_ASC) == PSW_ASC_HOME)
                                /* Invalid address-space-control bits */
                                return -EINVAL;
                        if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))
                                /* Invalid addressing mode bits */
                                return -EINVAL;
                }

                if (test_pt_regs_flag(regs, PIF_SYSCALL) &&
                        addr == offsetof(struct user, regs.gprs[2]))
                        fixup_int_code(child, data);
                *(addr_t *)((addr_t) &regs->psw + addr) = data;

        } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
                /*
                 * access registers are stored in the thread structure
                 */
                offset = addr - (addr_t) &dummy->regs.acrs;
                /*
                 * Very special case: old & broken 64 bit gdb writing
                 * to acrs[15] with a 64 bit value. Ignore the lower
                 * half of the value and write the upper 32 bit to
                 * acrs[15]. Sick...
                 */
                if (addr == (addr_t) &dummy->regs.acrs[15])
                        child->thread.acrs[15] = (unsigned int) (data >> 32);
                else
                        *(addr_t *)((addr_t) &child->thread.acrs + offset) = data;

        } else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
                /*
                 * orig_gpr2 is stored on the kernel stack
                 */
                task_pt_regs(child)->orig_gpr2 = data;

        } else if (addr < (addr_t) &dummy->regs.fp_regs) {
                /*
                 * prevent writes of padding hole between
                 * orig_gpr2 and fp_regs on s390.
                 */
                return 0;

        } else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) {
                /*
                 * floating point control reg. is in the thread structure
                 */
                if ((unsigned int) data != 0 ||
                    test_fp_ctl(data >> (BITS_PER_LONG - 32)))
                        return -EINVAL;
                child->thread.fpu.fpc = data >> (BITS_PER_LONG - 32);

        } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
                /*
                 * floating point regs. are either in child->thread.fpu
                 * or the child->thread.fpu.vxrs array
                 */
                offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
                if (MACHINE_HAS_VX)
                        *(addr_t *)((addr_t)
                                child->thread.fpu.vxrs + 2*offset) = data;
                else
                        *(addr_t *)((addr_t)
                                child->thread.fpu.fprs + offset) = data;

        } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
                /*
                 * Handle access to the per_info structure.
                 */
                addr -= (addr_t) &dummy->regs.per_info;
                __poke_user_per(child, addr, data);

        }

        return 0;
}

static int poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
        addr_t mask;

        /*
         * Stupid gdb peeks/pokes the access registers in 64 bit with
         * an alignment of 4. Programmers from hell indeed...
         */
        mask = __ADDR_MASK;
        if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
            addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
                mask = 3;
        if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
                return -EIO;

        return __poke_user(child, addr, data);
}

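/*
 * s390 specific ptrace requests. The *_USR_AREA requests transfer a
 * whole block of the user area in one call. @addr points to a
 * ptrace_area descriptor giving the offset into struct user
 * (kernel_addr), the tracer side buffer (process_addr) and the length.
 * A tracer would use it roughly like this (illustrative sketch only):
 *
 *	ptrace_area parea = {
 *		.len          = sizeof(unsigned long),
 *		.kernel_addr  = 0,
 *		.process_addr = (unsigned long) &buf,
 *	};
 *	ptrace(PTRACE_PEEKUSR_AREA, pid, &parea, NULL);
 */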
long arch_ptrace(struct task_struct *child, long request,
                 unsigned long addr, unsigned long data)
{
        ptrace_area parea;
        int copied, ret;

        switch (request) {
        case PTRACE_PEEKUSR:
                /* read the word at location addr in the USER area. */
                return peek_user(child, addr, data);

        case PTRACE_POKEUSR:
                /* write the word at location addr in the USER area */
                return poke_user(child, addr, data);

        case PTRACE_PEEKUSR_AREA:
        case PTRACE_POKEUSR_AREA:
                if (copy_from_user(&parea, (void __force __user *) addr,
                                                        sizeof(parea)))
                        return -EFAULT;
                addr = parea.kernel_addr;
                data = parea.process_addr;
                copied = 0;
                while (copied < parea.len) {
                        if (request == PTRACE_PEEKUSR_AREA)
                                ret = peek_user(child, addr, data);
                        else {
                                addr_t utmp;
                                if (get_user(utmp,
                                             (addr_t __force __user *) data))
                                        return -EFAULT;
                                ret = poke_user(child, addr, utmp);
                        }
                        if (ret)
                                return ret;
                        addr += sizeof(unsigned long);
                        data += sizeof(unsigned long);
                        copied += sizeof(unsigned long);
                }
                return 0;
        case PTRACE_GET_LAST_BREAK:
                put_user(child->thread.last_break,
                         (unsigned long __user *) data);
                return 0;
        case PTRACE_ENABLE_TE:
                if (!MACHINE_HAS_TE)
                        return -EIO;
                child->thread.per_flags &= ~PER_FLAG_NO_TE;
                return 0;
        case PTRACE_DISABLE_TE:
                if (!MACHINE_HAS_TE)
                        return -EIO;
                child->thread.per_flags |= PER_FLAG_NO_TE;
                child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
                return 0;
        case PTRACE_TE_ABORT_RAND:
                if (!MACHINE_HAS_TE || (child->thread.per_flags & PER_FLAG_NO_TE))
                        return -EIO;
                switch (data) {
                case 0UL:
                        child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
                        break;
                case 1UL:
                        child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
                        child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND_TEND;
                        break;
                case 2UL:
                        child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
                        child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND_TEND;
                        break;
                default:
                        return -EINVAL;
                }
                return 0;
        default:
                return ptrace_request(child, request, addr, data);
        }
}

#ifdef CONFIG_COMPAT
/*
 * Now the fun part starts... a 31 bit program running in the
 * 31 bit emulation tracing another program. PTRACE_PEEKTEXT,
 * PTRACE_PEEKDATA, PTRACE_POKETEXT and PTRACE_POKEDATA are easy
 * to handle, the difference from the 64 bit versions of the requests
 * is that the access is done in multiples of 4 bytes instead of
 * 8 bytes (sizeof(unsigned long) on 31/64 bit).
 * The ugly parts are PTRACE_PEEKUSR, PTRACE_PEEKUSR_AREA,
 * PTRACE_POKEUSR and PTRACE_POKEUSR_AREA. If the traced program
 * is a 31 bit program too, the content of struct user can be
 * emulated. A 31 bit program peeking into the struct user of
 * a 64 bit program is a no-no.
 */

/*
 * Same as peek_user_per but for a 31 bit program.
 */
static inline __u32 __peek_user_per_compat(struct task_struct *child,
                                           addr_t addr)
{
        struct compat_per_struct_kernel *dummy32 = NULL;

        if (addr == (addr_t) &dummy32->cr9)
                /* Control bits of the active per set. */
                return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
                        PER_EVENT_IFETCH : child->thread.per_user.control;
        else if (addr == (addr_t) &dummy32->cr10)
                /* Start address of the active per set. */
                return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
                        0 : child->thread.per_user.start;
        else if (addr == (addr_t) &dummy32->cr11)
                /* End address of the active per set. */
                return test_thread_flag(TIF_SINGLE_STEP) ?
                        PSW32_ADDR_INSN : child->thread.per_user.end;
        else if (addr == (addr_t) &dummy32->bits)
                /* Single-step bit. */
                return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
                        0x80000000 : 0;
        else if (addr == (addr_t) &dummy32->starting_addr)
                /* Start address of the user specified per set. */
                return (__u32) child->thread.per_user.start;
        else if (addr == (addr_t) &dummy32->ending_addr)
                /* End address of the user specified per set. */
                return (__u32) child->thread.per_user.end;
        else if (addr == (addr_t) &dummy32->perc_atmid)
                /* PER code, ATMID and AI of the last PER trap */
                return (__u32) child->thread.per_event.cause << 16;
        else if (addr == (addr_t) &dummy32->address)
                /* Address of the last PER trap */
                return (__u32) child->thread.per_event.address;
        else if (addr == (addr_t) &dummy32->access_id)
                /* Access id of the last PER trap */
                return (__u32) child->thread.per_event.paid << 24;
        return 0;
}

/*
 * Same as peek_user but for a 31 bit program.
 */
static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
{
        struct compat_user *dummy32 = NULL;
        addr_t offset;
        __u32 tmp;

        if (addr < (addr_t) &dummy32->regs.acrs) {
                struct pt_regs *regs = task_pt_regs(child);
                /*
                 * psw and gprs are stored on the stack
                 */
                if (addr == (addr_t) &dummy32->regs.psw.mask) {
                        /* Fake a 31 bit psw mask. */
                        tmp = (__u32)(regs->psw.mask >> 32);
                        tmp &= PSW32_MASK_USER | PSW32_MASK_RI;
                        tmp |= PSW32_USER_BITS;
                } else if (addr == (addr_t) &dummy32->regs.psw.addr) {
                        /* Fake a 31 bit psw address. */
                        tmp = (__u32) regs->psw.addr |
                                (__u32)(regs->psw.mask & PSW_MASK_BA);
                } else {
                        /* gpr 0-15 */
                        tmp = *(__u32 *)((addr_t) &regs->psw + addr*2 + 4);
                }
        } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
                /*
                 * access registers are stored in the thread structure
                 */
                offset = addr - (addr_t) &dummy32->regs.acrs;
                tmp = *(__u32*)((addr_t) &child->thread.acrs + offset);

        } else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
                /*
                 * orig_gpr2 is stored on the kernel stack
                 */
                tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4);

        } else if (addr < (addr_t) &dummy32->regs.fp_regs) {
                /*
                 * prevent reads of padding hole between
                 * orig_gpr2 and fp_regs on s390.
                 */
                tmp = 0;

        } else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) {
                /*
                 * floating point control reg. is in the thread structure
                 */
                tmp = child->thread.fpu.fpc;

        } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
                /*
                 * floating point regs. are either in child->thread.fpu
                 * or the child->thread.fpu.vxrs array
                 */
                offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
                if (MACHINE_HAS_VX)
                        tmp = *(__u32 *)
                               ((addr_t) child->thread.fpu.vxrs + 2*offset);
                else
                        tmp = *(__u32 *)
                               ((addr_t) child->thread.fpu.fprs + offset);

        } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
                /*
                 * Handle access to the per_info structure.
                 */
                addr -= (addr_t) &dummy32->regs.per_info;
                tmp = __peek_user_per_compat(child, addr);

        } else
                tmp = 0;

        return tmp;
}

static int peek_user_compat(struct task_struct *child,
                            addr_t addr, addr_t data)
{
        __u32 tmp;

        if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user) - 3)
                return -EIO;

        tmp = __peek_user_compat(child, addr);
        return put_user(tmp, (__u32 __user *) data);
}

/*
 * Same as poke_user_per but for a 31 bit program.
 */
static inline void __poke_user_per_compat(struct task_struct *child,
                                          addr_t addr, __u32 data)
{
        struct compat_per_struct_kernel *dummy32 = NULL;

        if (addr == (addr_t) &dummy32->cr9)
                /* PER event mask of the user specified per set. */
                child->thread.per_user.control =
                        data & (PER_EVENT_MASK | PER_CONTROL_MASK);
        else if (addr == (addr_t) &dummy32->starting_addr)
                /* Starting address of the user specified per set. */
                child->thread.per_user.start = data;
        else if (addr == (addr_t) &dummy32->ending_addr)
                /* Ending address of the user specified per set. */
                child->thread.per_user.end = data;
}

/*
 * Same as poke_user but for a 31 bit program.
 */
static int __poke_user_compat(struct task_struct *child,
                              addr_t addr, addr_t data)
{
        struct compat_user *dummy32 = NULL;
        __u32 tmp = (__u32) data;
        addr_t offset;

        if (addr < (addr_t) &dummy32->regs.acrs) {
                struct pt_regs *regs = task_pt_regs(child);
                /*
                 * psw, gprs, acrs and orig_gpr2 are stored on the stack
                 */
                if (addr == (addr_t) &dummy32->regs.psw.mask) {
                        __u32 mask = PSW32_MASK_USER;

                        mask |= is_ri_task(child) ? PSW32_MASK_RI : 0;
                        /* Build a 64 bit psw mask from 31 bit mask. */
                        if ((tmp ^ PSW32_USER_BITS) & ~mask)
                                /* Invalid psw mask. */
                                return -EINVAL;
                        if ((data & PSW32_MASK_ASC) == PSW32_ASC_HOME)
                                /* Invalid address-space-control bits */
                                return -EINVAL;
                        regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
                                (regs->psw.mask & PSW_MASK_BA) |
                                (__u64)(tmp & mask) << 32;
                } else if (addr == (addr_t) &dummy32->regs.psw.addr) {
                        /* Build a 64 bit psw address from 31 bit address. */
                        regs->psw.addr = (__u64) tmp & PSW32_ADDR_INSN;
                        /* Transfer 31 bit amode bit to psw mask. */
                        regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) |
                                (__u64)(tmp & PSW32_ADDR_AMODE);
                } else {

                        if (test_pt_regs_flag(regs, PIF_SYSCALL) &&
                                addr == offsetof(struct compat_user, regs.gprs[2]))
                                fixup_int_code(child, data);
                        /* gpr 0-15 */
                        *(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp;
                }
        } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
                /*
                 * access registers are stored in the thread structure
                 */
                offset = addr - (addr_t) &dummy32->regs.acrs;
                *(__u32*)((addr_t) &child->thread.acrs + offset) = tmp;

        } else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
                /*
                 * orig_gpr2 is stored on the kernel stack
                 */
                *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp;

        } else if (addr < (addr_t) &dummy32->regs.fp_regs) {
                /*
                 * prevent writes of padding hole between
                 * orig_gpr2 and fp_regs on s390.
                 */
                return 0;

        } else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) {
                /*
                 * floating point control reg. is in the thread structure
                 */
                if (test_fp_ctl(tmp))
                        return -EINVAL;
                child->thread.fpu.fpc = data;

        } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
                /*
                 * floating point regs. are either in child->thread.fpu
                 * or the child->thread.fpu.vxrs array
                 */
                offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
                if (MACHINE_HAS_VX)
                        *(__u32 *)((addr_t)
                                child->thread.fpu.vxrs + 2*offset) = tmp;
                else
                        *(__u32 *)((addr_t)
                                child->thread.fpu.fprs + offset) = tmp;

        } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
                /*
                 * Handle access to the per_info structure.
                 */
                addr -= (addr_t) &dummy32->regs.per_info;
                __poke_user_per_compat(child, addr, data);
        }

        return 0;
}

static int poke_user_compat(struct task_struct *child,
                            addr_t addr, addr_t data)
{
        if (!is_compat_task() || (addr & 3) ||
            addr > sizeof(struct compat_user) - 3)
                return -EIO;

        return __poke_user_compat(child, addr, data);
}

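/*
 * arch_ptrace for a 31 bit tracer: same requests as above, but all
 * user area accesses are done in 4 byte units on struct compat_user.
 */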
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
                        compat_ulong_t caddr, compat_ulong_t cdata)
{
        unsigned long addr = caddr;
        unsigned long data = cdata;
        compat_ptrace_area parea;
        int copied, ret;

        switch (request) {
        case PTRACE_PEEKUSR:
                /* read the word at location addr in the USER area. */
                return peek_user_compat(child, addr, data);

        case PTRACE_POKEUSR:
                /* write the word at location addr in the USER area */
                return poke_user_compat(child, addr, data);

        case PTRACE_PEEKUSR_AREA:
        case PTRACE_POKEUSR_AREA:
                if (copy_from_user(&parea, (void __force __user *) addr,
                                                        sizeof(parea)))
                        return -EFAULT;
                addr = parea.kernel_addr;
                data = parea.process_addr;
                copied = 0;
                while (copied < parea.len) {
                        if (request == PTRACE_PEEKUSR_AREA)
                                ret = peek_user_compat(child, addr, data);
                        else {
                                __u32 utmp;
                                if (get_user(utmp,
                                             (__u32 __force __user *) data))
                                        return -EFAULT;
                                ret = poke_user_compat(child, addr, utmp);
                        }
                        if (ret)
                                return ret;
                        addr += sizeof(unsigned int);
                        data += sizeof(unsigned int);
                        copied += sizeof(unsigned int);
                }
                return 0;
        case PTRACE_GET_LAST_BREAK:
                put_user(child->thread.last_break,
                         (unsigned int __user *) data);
                return 0;
        }
        return compat_ptrace_request(child, request, addr, data);
}
#endif

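/*
 * Called on system call entry. Returns the system call number to be
 * executed, -ENOSYS for an out of range number, or -1 if the tracer
 * or seccomp decided that the system call should be skipped.
 */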
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
        unsigned long mask = -1UL;
        long ret = -1;

        if (is_compat_task())
                mask = 0xffffffff;

        /*
         * The sysc_tracesys code in entry.S stores the system
         * call number in gprs[2].
         */
        if (test_thread_flag(TIF_SYSCALL_TRACE) &&
            tracehook_report_syscall_entry(regs)) {
                /*
                 * Tracing decided this syscall should not happen. Skip
                 * the system call and the system call restart handling.
                 */
                goto skip;
        }

#ifdef CONFIG_SECCOMP
        /* Do the secure computing check after ptrace. */
        if (unlikely(test_thread_flag(TIF_SECCOMP))) {
                struct seccomp_data sd;

                if (is_compat_task()) {
                        sd.instruction_pointer = regs->psw.addr & 0x7fffffff;
                        sd.arch = AUDIT_ARCH_S390;
                } else {
                        sd.instruction_pointer = regs->psw.addr;
                        sd.arch = AUDIT_ARCH_S390X;
                }

                sd.nr = regs->int_code & 0xffff;
                sd.args[0] = regs->orig_gpr2 & mask;
                sd.args[1] = regs->gprs[3] & mask;
                sd.args[2] = regs->gprs[4] & mask;
                sd.args[3] = regs->gprs[5] & mask;
                sd.args[4] = regs->gprs[6] & mask;
                sd.args[5] = regs->gprs[7] & mask;

                if (__secure_computing(&sd) == -1)
                        goto skip;
        }
#endif /* CONFIG_SECCOMP */

        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_enter(regs, regs->int_code & 0xffff);

        audit_syscall_entry(regs->int_code & 0xffff, regs->orig_gpr2 & mask,
                            regs->gprs[3] & mask, regs->gprs[4] & mask,
                            regs->gprs[5] & mask);

        if ((signed long)regs->gprs[2] >= NR_syscalls) {
                regs->gprs[2] = -ENOSYS;
                ret = -ENOSYS;
        }
        return regs->gprs[2];
skip:
        clear_pt_regs_flag(regs, PIF_SYSCALL);
        return ret;
}

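/*
 * Called on system call exit for auditing, the syscall exit tracepoint
 * and the tracehook exit report.
 */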
asmlinkage void do_syscall_trace_exit(struct pt_regs *regs)
{
        audit_syscall_exit(regs);

        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_exit(regs, regs->gprs[2]);

        if (test_thread_flag(TIF_SYSCALL_TRACE))
                tracehook_report_syscall_exit(regs, 0);
}

/*
 * user_regset definitions.
 */

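/*
 * The general register regset simply reuses the __peek_user and
 * __poke_user helpers above, one word at a time.
 */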
static int s390_regs_get(struct task_struct *target,
                         const struct user_regset *regset,
                         struct membuf to)
{
        unsigned pos;
        if (target == current)
                save_access_regs(target->thread.acrs);

        for (pos = 0; pos < sizeof(s390_regs); pos += sizeof(long))
                membuf_store(&to, __peek_user(target, pos));
        return 0;
}

static int s390_regs_set(struct task_struct *target,
                         const struct user_regset *regset,
                         unsigned int pos, unsigned int count,
                         const void *kbuf, const void __user *ubuf)
{
        int rc = 0;

        if (target == current)
                save_access_regs(target->thread.acrs);

        if (kbuf) {
                const unsigned long *k = kbuf;
                while (count > 0 && !rc) {
                        rc = __poke_user(target, pos, *k++);
                        count -= sizeof(*k);
                        pos += sizeof(*k);
                }
        } else {
                const unsigned long  __user *u = ubuf;
                while (count > 0 && !rc) {
                        unsigned long word;
                        rc = __get_user(word, u++);
                        if (rc)
                                break;
                        rc = __poke_user(target, pos, word);
                        count -= sizeof(*u);
                        pos += sizeof(*u);
                }
        }

        if (rc == 0 && target == current)
                restore_access_regs(target->thread.acrs);

        return rc;
}

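/*
 * Transfer the floating point registers in _s390_fp_regs format. On
 * machines with vector registers the fprs are extracted from or merged
 * back into the corresponding vector registers.
 */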
static int s390_fpregs_get(struct task_struct *target,
                           const struct user_regset *regset,
                           struct membuf to)
{
        _s390_fp_regs fp_regs;

        if (target == current)
                save_fpu_regs();

        fp_regs.fpc = target->thread.fpu.fpc;
        fpregs_store(&fp_regs, &target->thread.fpu);

        return membuf_write(&to, &fp_regs, sizeof(fp_regs));
}

static int s390_fpregs_set(struct task_struct *target,
                           const struct user_regset *regset, unsigned int pos,
                           unsigned int count, const void *kbuf,
                           const void __user *ubuf)
{
        int rc = 0;
        freg_t fprs[__NUM_FPRS];

        if (target == current)
                save_fpu_regs();

        if (MACHINE_HAS_VX)
                convert_vx_to_fp(fprs, target->thread.fpu.vxrs);
        else
                memcpy(&fprs, target->thread.fpu.fprs, sizeof(fprs));

        /* If setting FPC, must validate it first. */
        if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
                u32 ufpc[2] = { target->thread.fpu.fpc, 0 };
                rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ufpc,
                                        0, offsetof(s390_fp_regs, fprs));
                if (rc)
                        return rc;
                if (ufpc[1] != 0 || test_fp_ctl(ufpc[0]))
                        return -EINVAL;
                target->thread.fpu.fpc = ufpc[0];
        }

        if (rc == 0 && count > 0)
                rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                        fprs, offsetof(s390_fp_regs, fprs), -1);
        if (rc)
                return rc;

        if (MACHINE_HAS_VX)
                convert_fp_to_vx(target->thread.fpu.vxrs, fprs);
        else
                memcpy(target->thread.fpu.fprs, &fprs, sizeof(fprs));

        return rc;
}

static int s390_last_break_get(struct task_struct *target,
                               const struct user_regset *regset,
                               struct membuf to)
{
        return membuf_store(&to, target->thread.last_break);
}

static int s390_last_break_set(struct task_struct *target,
                               const struct user_regset *regset,
                               unsigned int pos, unsigned int count,
                               const void *kbuf, const void __user *ubuf)
{
        return 0;
}

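/*
 * The transaction diagnostic block is only available if the last
 * program interruption actually provided one (flagged by 0x200 in
 * int_code).
 */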
static int s390_tdb_get(struct task_struct *target,
                        const struct user_regset *regset,
                        struct membuf to)
{
        struct pt_regs *regs = task_pt_regs(target);

        if (!(regs->int_code & 0x200))
                return -ENODATA;
        return membuf_write(&to, target->thread.trap_tdb, 256);
}

static int s390_tdb_set(struct task_struct *target,
                        const struct user_regset *regset,
                        unsigned int pos, unsigned int count,
                        const void *kbuf, const void __user *ubuf)
{
        return 0;
}

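/*
 * NT_S390_VXRS_LOW transfers only the lower halves of vector registers
 * 0-15; the upper halves are identical to the floating point registers
 * and are handled by the NT_PRFPREG regset.
 */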
static int s390_vxrs_low_get(struct task_struct *target,
                             const struct user_regset *regset,
                             struct membuf to)
{
        __u64 vxrs[__NUM_VXRS_LOW];
        int i;

        if (!MACHINE_HAS_VX)
                return -ENODEV;
        if (target == current)
                save_fpu_regs();
        for (i = 0; i < __NUM_VXRS_LOW; i++)
                vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
        return membuf_write(&to, vxrs, sizeof(vxrs));
}

static int s390_vxrs_low_set(struct task_struct *target,
                             const struct user_regset *regset,
                             unsigned int pos, unsigned int count,
                             const void *kbuf, const void __user *ubuf)
{
        __u64 vxrs[__NUM_VXRS_LOW];
        int i, rc;

        if (!MACHINE_HAS_VX)
                return -ENODEV;
        if (target == current)
                save_fpu_regs();

        for (i = 0; i < __NUM_VXRS_LOW; i++)
                vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);

        rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
        if (rc == 0)
                for (i = 0; i < __NUM_VXRS_LOW; i++)
                        *((__u64 *)(target->thread.fpu.vxrs + i) + 1) = vxrs[i];

        return rc;
}

static int s390_vxrs_high_get(struct task_struct *target,
                              const struct user_regset *regset,
                              struct membuf to)
{
        if (!MACHINE_HAS_VX)
                return -ENODEV;
        if (target == current)
                save_fpu_regs();
        return membuf_write(&to, target->thread.fpu.vxrs + __NUM_VXRS_LOW,
                            __NUM_VXRS_HIGH * sizeof(__vector128));
}

static int s390_vxrs_high_set(struct task_struct *target,
                              const struct user_regset *regset,
                              unsigned int pos, unsigned int count,
                              const void *kbuf, const void __user *ubuf)
{
        int rc;

        if (!MACHINE_HAS_VX)
                return -ENODEV;
        if (target == current)
                save_fpu_regs();

        rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                target->thread.fpu.vxrs + __NUM_VXRS_LOW, 0, -1);
        return rc;
}

static int s390_system_call_get(struct task_struct *target,
                                const struct user_regset *regset,
                                struct membuf to)
{
        return membuf_store(&to, target->thread.system_call);
}

static int s390_system_call_set(struct task_struct *target,
                                const struct user_regset *regset,
                                unsigned int pos, unsigned int count,
                                const void *kbuf, const void __user *ubuf)
{
        unsigned int *data = &target->thread.system_call;
        return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                  data, 0, sizeof(unsigned int));
}

static int s390_gs_cb_get(struct task_struct *target,
                          const struct user_regset *regset,
                          struct membuf to)
{
        struct gs_cb *data = target->thread.gs_cb;

        if (!MACHINE_HAS_GS)
                return -ENODEV;
        if (!data)
                return -ENODATA;
        if (target == current)
                save_gs_cb(data);
        return membuf_write(&to, data, sizeof(struct gs_cb));
}

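/*
 * Install a new guarded storage control block. The block is allocated
 * on first use; for the current task the new content is loaded into
 * the guarded storage registers right away.
 */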
static int s390_gs_cb_set(struct task_struct *target,
                          const struct user_regset *regset,
                          unsigned int pos, unsigned int count,
                          const void *kbuf, const void __user *ubuf)
{
        struct gs_cb gs_cb = { }, *data = NULL;
        int rc;

        if (!MACHINE_HAS_GS)
                return -ENODEV;
        if (!target->thread.gs_cb) {
                data = kzalloc(sizeof(*data), GFP_KERNEL);
                if (!data)
                        return -ENOMEM;
        }
        if (!target->thread.gs_cb)
                gs_cb.gsd = 25;
        else if (target == current)
                save_gs_cb(&gs_cb);
        else
                gs_cb = *target->thread.gs_cb;
        rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                &gs_cb, 0, sizeof(gs_cb));
        if (rc) {
                kfree(data);
                return -EFAULT;
        }
        preempt_disable();
        if (!target->thread.gs_cb)
                target->thread.gs_cb = data;
        *target->thread.gs_cb = gs_cb;
        if (target == current) {
                __ctl_set_bit(2, 4);
                restore_gs_cb(target->thread.gs_cb);
        }
        preempt_enable();
        return rc;
}

static int s390_gs_bc_get(struct task_struct *target,
                          const struct user_regset *regset,
                          struct membuf to)
{
        struct gs_cb *data = target->thread.gs_bc_cb;

        if (!MACHINE_HAS_GS)
                return -ENODEV;
        if (!data)
                return -ENODATA;
        return membuf_write(&to, data, sizeof(struct gs_cb));
}

static int s390_gs_bc_set(struct task_struct *target,
                          const struct user_regset *regset,
                          unsigned int pos, unsigned int count,
                          const void *kbuf, const void __user *ubuf)
{
        struct gs_cb *data = target->thread.gs_bc_cb;

        if (!MACHINE_HAS_GS)
                return -ENODEV;
        if (!data) {
                data = kzalloc(sizeof(*data), GFP_KERNEL);
                if (!data)
                        return -ENOMEM;
                target->thread.gs_bc_cb = data;
        }
        return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                  data, 0, sizeof(struct gs_cb));
}

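/*
 * Sanity check a runtime instrumentation control block provided by
 * user space before it is loaded into the hardware.
 */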
static bool is_ri_cb_valid(struct runtime_instr_cb *cb)
{
        return (cb->rca & 0x1f) == 0 &&
                (cb->roa & 0xfff) == 0 &&
                (cb->rla & 0xfff) == 0xfff &&
                cb->s == 1 &&
                cb->k == 1 &&
                cb->h == 0 &&
                cb->reserved1 == 0 &&
                cb->ps == 1 &&
                cb->qs == 0 &&
                cb->pc == 1 &&
                cb->qc == 0 &&
                cb->reserved2 == 0 &&
                cb->reserved3 == 0 &&
                cb->reserved4 == 0 &&
                cb->reserved5 == 0 &&
                cb->reserved6 == 0 &&
                cb->reserved7 == 0 &&
                cb->reserved8 == 0 &&
                cb->rla >= cb->roa &&
                cb->rca >= cb->roa &&
                cb->rca <= cb->rla+1 &&
                cb->m < 3;
}

static int s390_runtime_instr_get(struct task_struct *target,
                                const struct user_regset *regset,
                                struct membuf to)
{
        struct runtime_instr_cb *data = target->thread.ri_cb;

        if (!test_facility(64))
                return -ENODEV;
        if (!data)
                return -ENODATA;

        return membuf_write(&to, data, sizeof(struct runtime_instr_cb));
}

static int s390_runtime_instr_set(struct task_struct *target,
                                  const struct user_regset *regset,
                                  unsigned int pos, unsigned int count,
                                  const void *kbuf, const void __user *ubuf)
{
        struct runtime_instr_cb ri_cb = { }, *data = NULL;
        int rc;

        if (!test_facility(64))
                return -ENODEV;

        if (!target->thread.ri_cb) {
                data = kzalloc(sizeof(*data), GFP_KERNEL);
                if (!data)
                        return -ENOMEM;
        }

        if (target->thread.ri_cb) {
                if (target == current)
                        store_runtime_instr_cb(&ri_cb);
                else
                        ri_cb = *target->thread.ri_cb;
        }

        rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                &ri_cb, 0, sizeof(struct runtime_instr_cb));
        if (rc) {
                kfree(data);
                return -EFAULT;
        }

        if (!is_ri_cb_valid(&ri_cb)) {
                kfree(data);
                return -EINVAL;
        }
        /*
         * Override access key in any case, since user space should
         * not be able to set it, nor should it care about it.
         */
        ri_cb.key = PAGE_DEFAULT_KEY >> 4;
        preempt_disable();
        if (!target->thread.ri_cb)
                target->thread.ri_cb = data;
        *target->thread.ri_cb = ri_cb;
        if (target == current)
                load_runtime_instr_cb(target->thread.ri_cb);
        preempt_enable();

        return 0;
}

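/*
 * Regsets exported through the native 64 bit user_regset_view, used by
 * PTRACE_GETREGSET/PTRACE_SETREGSET and for core dump notes.
 */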
static const struct user_regset s390_regsets[] = {
        {
                .core_note_type = NT_PRSTATUS,
                .n = sizeof(s390_regs) / sizeof(long),
                .size = sizeof(long),
                .align = sizeof(long),
                .regset_get = s390_regs_get,
                .set = s390_regs_set,
        },
        {
                .core_note_type = NT_PRFPREG,
                .n = sizeof(s390_fp_regs) / sizeof(long),
                .size = sizeof(long),
                .align = sizeof(long),
                .regset_get = s390_fpregs_get,
                .set = s390_fpregs_set,
        },
        {
                .core_note_type = NT_S390_SYSTEM_CALL,
                .n = 1,
                .size = sizeof(unsigned int),
                .align = sizeof(unsigned int),
                .regset_get = s390_system_call_get,
                .set = s390_system_call_set,
        },
        {
                .core_note_type = NT_S390_LAST_BREAK,
                .n = 1,
                .size = sizeof(long),
                .align = sizeof(long),
                .regset_get = s390_last_break_get,
                .set = s390_last_break_set,
        },
        {
                .core_note_type = NT_S390_TDB,
                .n = 1,
                .size = 256,
                .align = 1,
                .regset_get = s390_tdb_get,
                .set = s390_tdb_set,
        },
        {
                .core_note_type = NT_S390_VXRS_LOW,
                .n = __NUM_VXRS_LOW,
                .size = sizeof(__u64),
                .align = sizeof(__u64),
                .regset_get = s390_vxrs_low_get,
                .set = s390_vxrs_low_set,
        },
        {
                .core_note_type = NT_S390_VXRS_HIGH,
                .n = __NUM_VXRS_HIGH,
                .size = sizeof(__vector128),
                .align = sizeof(__vector128),
                .regset_get = s390_vxrs_high_get,
                .set = s390_vxrs_high_set,
        },
        {
                .core_note_type = NT_S390_GS_CB,
                .n = sizeof(struct gs_cb) / sizeof(__u64),
                .size = sizeof(__u64),
                .align = sizeof(__u64),
                .regset_get = s390_gs_cb_get,
                .set = s390_gs_cb_set,
        },
        {
                .core_note_type = NT_S390_GS_BC,
                .n = sizeof(struct gs_cb) / sizeof(__u64),
                .size = sizeof(__u64),
                .align = sizeof(__u64),
                .regset_get = s390_gs_bc_get,
                .set = s390_gs_bc_set,
        },
        {
                .core_note_type = NT_S390_RI_CB,
                .n = sizeof(struct runtime_instr_cb) / sizeof(__u64),
                .size = sizeof(__u64),
                .align = sizeof(__u64),
                .regset_get = s390_runtime_instr_get,
                .set = s390_runtime_instr_set,
        },
};

static const struct user_regset_view user_s390_view = {
        .name = "s390x",
        .e_machine = EM_S390,
        .regsets = s390_regsets,
        .n = ARRAY_SIZE(s390_regsets)
};

#ifdef CONFIG_COMPAT
static int s390_compat_regs_get(struct task_struct *target,
                                const struct user_regset *regset,
                                struct membuf to)
{
        unsigned n;

        if (target == current)
                save_access_regs(target->thread.acrs);

        for (n = 0; n < sizeof(s390_compat_regs); n += sizeof(compat_ulong_t))
                membuf_store(&to, __peek_user_compat(target, n));
        return 0;
}

static int s390_compat_regs_set(struct task_struct *target,
                                const struct user_regset *regset,
                                unsigned int pos, unsigned int count,
                                const void *kbuf, const void __user *ubuf)
1457 {
1458         int rc = 0;
1459
1460         if (target == current)
1461                 save_access_regs(target->thread.acrs);
1462
1463         if (kbuf) {
1464                 const compat_ulong_t *k = kbuf;
1465                 while (count > 0 && !rc) {
1466                         rc = __poke_user_compat(target, pos, *k++);
1467                         count -= sizeof(*k);
1468                         pos += sizeof(*k);
1469                 }
1470         } else {
1471                 const compat_ulong_t  __user *u = ubuf;
1472                 while (count > 0 && !rc) {
1473                         compat_ulong_t word;
1474                         rc = __get_user(word, u++);
1475                         if (rc)
1476                                 break;
1477                         rc = __poke_user_compat(target, pos, word);
1478                         count -= sizeof(*u);
1479                         pos += sizeof(*u);
1480                 }
1481         }
1482
1483         if (rc == 0 && target == current)
1484                 restore_access_regs(target->thread.acrs);
1485
1486         return rc;
1487 }
1488
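     /*
      * NT_S390_HIGH_GPRS exposes the upper 32 bits of the 64-bit GPRs to
      * 31-bit (compat) tracers.  Since s390 is big-endian, the high word is
      * the first compat_ulong_t of each 64-bit register, so the helpers
      * below walk pt_regs->gprs[] with a stride of two 32-bit words.
      */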
1489 static int s390_compat_regs_high_get(struct task_struct *target,
1490                                      const struct user_regset *regset,
1491                                      struct membuf to)
1492 {
1493         compat_ulong_t *gprs_high;
1494         int i;
1495
1496         gprs_high = (compat_ulong_t *)task_pt_regs(target)->gprs;
1497         for (i = 0; i < NUM_GPRS; i++, gprs_high += 2)
1498                 membuf_store(&to, *gprs_high);
1499         return 0;
1500 }
1501
1502 static int s390_compat_regs_high_set(struct task_struct *target,
1503                                      const struct user_regset *regset,
1504                                      unsigned int pos, unsigned int count,
1505                                      const void *kbuf, const void __user *ubuf)
1506 {
1507         compat_ulong_t *gprs_high;
1508         int rc = 0;
1509
1510         gprs_high = (compat_ulong_t *)
1511                 &task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
1512         if (kbuf) {
1513                 const compat_ulong_t *k = kbuf;
1514                 while (count > 0) {
1515                         *gprs_high = *k++;
1516                         gprs_high += 2;
1517                         count -= sizeof(*k);
1518                 }
1519         } else {
1520                 const compat_ulong_t  __user *u = ubuf;
1521                 while (count > 0 && !rc) {
1522                         unsigned long word;
1523                         rc = __get_user(word, u++);
1524                         if (rc)
1525                                 break;
1526                         *gprs_high = word;
1527                         gprs_high += 2;
1528                         count -= sizeof(*u);
1529                 }
1530         }
1531
1532         return rc;
1533 }
1534
1535 static int s390_compat_last_break_get(struct task_struct *target,
1536                                       const struct user_regset *regset,
1537                                       struct membuf to)
1538 {
1539         compat_ulong_t last_break = target->thread.last_break;
1540
1541         return membuf_store(&to, (unsigned long)last_break);
1542 }
1543
1544 static int s390_compat_last_break_set(struct task_struct *target,
1545                                       const struct user_regset *regset,
1546                                       unsigned int pos, unsigned int count,
1547                                       const void *kbuf, const void __user *ubuf)
1548 {
1549         return 0;
1550 }
1551
1552 static const struct user_regset s390_compat_regsets[] = {
1553         {
1554                 .core_note_type = NT_PRSTATUS,
1555                 .n = sizeof(s390_compat_regs) / sizeof(compat_long_t),
1556                 .size = sizeof(compat_long_t),
1557                 .align = sizeof(compat_long_t),
1558                 .regset_get = s390_compat_regs_get,
1559                 .set = s390_compat_regs_set,
1560         },
1561         {
1562                 .core_note_type = NT_PRFPREG,
1563                 .n = sizeof(s390_fp_regs) / sizeof(compat_long_t),
1564                 .size = sizeof(compat_long_t),
1565                 .align = sizeof(compat_long_t),
1566                 .regset_get = s390_fpregs_get,
1567                 .set = s390_fpregs_set,
1568         },
1569         {
1570                 .core_note_type = NT_S390_SYSTEM_CALL,
1571                 .n = 1,
1572                 .size = sizeof(compat_uint_t),
1573                 .align = sizeof(compat_uint_t),
1574                 .regset_get = s390_system_call_get,
1575                 .set = s390_system_call_set,
1576         },
1577         {
1578                 .core_note_type = NT_S390_LAST_BREAK,
1579                 .n = 1,
1580                 .size = sizeof(long),
1581                 .align = sizeof(long),
1582                 .regset_get = s390_compat_last_break_get,
1583                 .set = s390_compat_last_break_set,
1584         },
1585         {
1586                 .core_note_type = NT_S390_TDB,
1587                 .n = 1,
1588                 .size = 256,
1589                 .align = 1,
1590                 .regset_get = s390_tdb_get,
1591                 .set = s390_tdb_set,
1592         },
1593         {
1594                 .core_note_type = NT_S390_VXRS_LOW,
1595                 .n = __NUM_VXRS_LOW,
1596                 .size = sizeof(__u64),
1597                 .align = sizeof(__u64),
1598                 .regset_get = s390_vxrs_low_get,
1599                 .set = s390_vxrs_low_set,
1600         },
1601         {
1602                 .core_note_type = NT_S390_VXRS_HIGH,
1603                 .n = __NUM_VXRS_HIGH,
1604                 .size = sizeof(__vector128),
1605                 .align = sizeof(__vector128),
1606                 .regset_get = s390_vxrs_high_get,
1607                 .set = s390_vxrs_high_set,
1608         },
1609         {
1610                 .core_note_type = NT_S390_HIGH_GPRS,
1611                 .n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t),
1612                 .size = sizeof(compat_long_t),
1613                 .align = sizeof(compat_long_t),
1614                 .regset_get = s390_compat_regs_high_get,
1615                 .set = s390_compat_regs_high_set,
1616         },
1617         {
1618                 .core_note_type = NT_S390_GS_CB,
1619                 .n = sizeof(struct gs_cb) / sizeof(__u64),
1620                 .size = sizeof(__u64),
1621                 .align = sizeof(__u64),
1622                 .regset_get = s390_gs_cb_get,
1623                 .set = s390_gs_cb_set,
1624         },
1625         {
1626                 .core_note_type = NT_S390_GS_BC,
1627                 .n = sizeof(struct gs_cb) / sizeof(__u64),
1628                 .size = sizeof(__u64),
1629                 .align = sizeof(__u64),
1630                 .regset_get = s390_gs_bc_get,
1631                 .set = s390_gs_bc_set,
1632         },
1633         {
1634                 .core_note_type = NT_S390_RI_CB,
1635                 .n = sizeof(struct runtime_instr_cb) / sizeof(__u64),
1636                 .size = sizeof(__u64),
1637                 .align = sizeof(__u64),
1638                 .regset_get = s390_runtime_instr_get,
1639                 .set = s390_runtime_instr_set,
1640         },
1641 };
1642
1643 static const struct user_regset_view user_s390_compat_view = {
1644         .name = "s390",
1645         .e_machine = EM_S390,
1646         .regsets = s390_compat_regsets,
1647         .n = ARRAY_SIZE(s390_compat_regsets)
1648 };
1649 #endif
1650
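     /*
      * Select the regset view that matches the tracee's ABI: 31-bit
      * (compat) tasks get the "s390" view above, all other tasks the
      * 64-bit "s390x" view.  Core dump and ptrace code query this per task.
      */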
1651 const struct user_regset_view *task_user_regset_view(struct task_struct *task)
1652 {
1653 #ifdef CONFIG_COMPAT
1654         if (test_tsk_thread_flag(task, TIF_31BIT))
1655                 return &user_s390_compat_view;
1656 #endif
1657         return &user_s390_view;
1658 }
1659
1660 static const char *gpr_names[NUM_GPRS] = {
1661         "r0", "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
1662         "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
1663 };
1664
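     /*
      * regs_get_register() - read GPR @offset from @regs.  Offsets beyond
      * NUM_GPRS read as 0 rather than faulting.
      */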
1665 unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset)
1666 {
1667         if (offset >= NUM_GPRS)
1668                 return 0;
1669         return regs->gprs[offset];
1670 }
1671
1672 int regs_query_register_offset(const char *name)
1673 {
1674         unsigned long offset;
1675
1676         if (!name || *name != 'r')
1677                 return -EINVAL;
1678         if (kstrtoul(name + 1, 10, &offset))
1679                 return -EINVAL;
1680         if (offset >= NUM_GPRS)
1681                 return -EINVAL;
1682         return offset;
1683 }
1684
1685 const char *regs_query_register_name(unsigned int offset)
1686 {
1687         if (offset >= NUM_GPRS)
1688                 return NULL;
1689         return gpr_names[offset];
1690 }
1691
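     /*
      * An address is considered part of the kernel stack if it lies within
      * the same THREAD_SIZE-aligned block as the kernel stack pointer taken
      * from @regs.
      */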
1692 static int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
1693 {
1694         unsigned long ksp = kernel_stack_pointer(regs);
1695
1696         return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1));
1697 }
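     /*
      * regs_query_register_offset(), regs_query_register_name() and
      * regs_get_kernel_stack_nth() below back the generic register/stack
      * access API (HAVE_REGS_AND_STACK_ACCESS_API), which kprobe-based
      * trace events use to fetch probe arguments by name.  As a rough
      * illustration (the probed symbol and the choice of %r2 for the first
      * argument are assumptions based on the s390 calling convention, not
      * taken from this file):
      *
      *	echo 'p:my_open do_sys_open dfd=%r2 sp3=$stack3' \
      *		> /sys/kernel/tracing/kprobe_events
      *
      * Register names resolve through regs_query_register_offset(), and
      * $stackN entries through regs_get_kernel_stack_nth().
      */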
1698
1699 /**
1700  * regs_get_kernel_stack_nth() - get Nth entry of the stack
1701  * @regs: pt_regs which contains the kernel stack pointer.
1702  * @n: stack entry number.
1703  *
1704  * regs_get_kernel_stack_nth() returns the @n th entry of the kernel stack
1705  * specified by @regs. If the @n th entry is NOT in the kernel stack,
1706  * this returns 0.
1707  */
1708 unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
1709 {
1710         unsigned long addr;
1711
1712         addr = kernel_stack_pointer(regs) + n * sizeof(long);
1713         if (!regs_within_kernel_stack(regs, addr))
1714                 return 0;
1715         return *(unsigned long *)addr;
1716 }