arch/s390/kernel/ptrace.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  Ptrace user space interface.
4  *
5  *    Copyright IBM Corp. 1999, 2010
6  *    Author(s): Denis Joseph Barrow
7  *               Martin Schwidefsky (schwidefsky@de.ibm.com)
8  */
9
10 #include <linux/kernel.h>
11 #include <linux/sched.h>
12 #include <linux/sched/task_stack.h>
13 #include <linux/mm.h>
14 #include <linux/smp.h>
15 #include <linux/errno.h>
16 #include <linux/ptrace.h>
17 #include <linux/user.h>
18 #include <linux/security.h>
19 #include <linux/audit.h>
20 #include <linux/signal.h>
21 #include <linux/elf.h>
22 #include <linux/regset.h>
23 #include <linux/tracehook.h>
24 #include <linux/seccomp.h>
25 #include <linux/compat.h>
26 #include <trace/syscall.h>
27 #include <asm/page.h>
28 #include <asm/pgalloc.h>
29 #include <linux/uaccess.h>
30 #include <asm/unistd.h>
31 #include <asm/switch_to.h>
32 #include <asm/runtime_instr.h>
33 #include <asm/facility.h>
34
35 #include "entry.h"
36
37 #ifdef CONFIG_COMPAT
38 #include "compat_ptrace.h"
39 #endif
40
41 #define CREATE_TRACE_POINTS
42 #include <trace/events/syscalls.h>
43
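/*
 * Recompute the control registers that depend on per-task state: CR0
 * (transactional execution control), CR2 (transaction diagnostic control
 * and guarded storage enablement) and CR9-11 (PER event mask, start and
 * end address), and set or clear the PER bit in the task's PSW mask
 * accordingly.
 */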
44 void update_cr_regs(struct task_struct *task)
45 {
46         struct pt_regs *regs = task_pt_regs(task);
47         struct thread_struct *thread = &task->thread;
48         struct per_regs old, new;
49         union ctlreg0 cr0_old, cr0_new;
50         union ctlreg2 cr2_old, cr2_new;
51         int cr0_changed, cr2_changed;
52
53         __ctl_store(cr0_old.val, 0, 0);
54         __ctl_store(cr2_old.val, 2, 2);
55         cr0_new = cr0_old;
56         cr2_new = cr2_old;
57         /* Take care of the enable/disable of transactional execution. */
58         if (MACHINE_HAS_TE) {
59                 /* Set or clear transaction execution TXC bit 8. */
60                 cr0_new.tcx = 1;
61                 if (task->thread.per_flags & PER_FLAG_NO_TE)
62                         cr0_new.tcx = 0;
63                 /* Set or clear transaction execution TDC bits 62 and 63. */
64                 cr2_new.tdc = 0;
65                 if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) {
66                         if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND)
67                                 cr2_new.tdc = 1;
68                         else
69                                 cr2_new.tdc = 2;
70                 }
71         }
72         /* Take care of enable/disable of guarded storage. */
73         if (MACHINE_HAS_GS) {
74                 cr2_new.gse = 0;
75                 if (task->thread.gs_cb)
76                         cr2_new.gse = 1;
77         }
78         /* Load control register 0/2 iff changed */
79         cr0_changed = cr0_new.val != cr0_old.val;
80         cr2_changed = cr2_new.val != cr2_old.val;
81         if (cr0_changed)
82                 __ctl_load(cr0_new.val, 0, 0);
83         if (cr2_changed)
84                 __ctl_load(cr2_new.val, 2, 2);
85         /* Copy user specified PER registers */
86         new.control = thread->per_user.control;
87         new.start = thread->per_user.start;
88         new.end = thread->per_user.end;
89
90         /* merge TIF_SINGLE_STEP into user specified PER registers. */
91         if (test_tsk_thread_flag(task, TIF_SINGLE_STEP) ||
92             test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP)) {
93                 if (test_tsk_thread_flag(task, TIF_BLOCK_STEP))
94                         new.control |= PER_EVENT_BRANCH;
95                 else
96                         new.control |= PER_EVENT_IFETCH;
97                 new.control |= PER_CONTROL_SUSPENSION;
98                 new.control |= PER_EVENT_TRANSACTION_END;
99                 if (test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP))
100                         new.control |= PER_EVENT_IFETCH;
101                 new.start = 0;
102                 new.end = -1UL;
103         }
104
105         /* Take care of the PER enablement bit in the PSW. */
106         if (!(new.control & PER_EVENT_MASK)) {
107                 regs->psw.mask &= ~PSW_MASK_PER;
108                 return;
109         }
110         regs->psw.mask |= PSW_MASK_PER;
111         __ctl_store(old, 9, 11);
112         if (memcmp(&new, &old, sizeof(struct per_regs)) != 0)
113                 __ctl_load(new, 9, 11);
114 }
115
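/*
 * ptrace single-step and block-step control.  With TIF_SINGLE_STEP set,
 * update_cr_regs() arms a PER instruction-fetch event covering the whole
 * address space; if TIF_BLOCK_STEP is set as well, a successful-branching
 * event is used instead, so the task only stops at branches.
 */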
116 void user_enable_single_step(struct task_struct *task)
117 {
118         clear_tsk_thread_flag(task, TIF_BLOCK_STEP);
119         set_tsk_thread_flag(task, TIF_SINGLE_STEP);
120 }
121
122 void user_disable_single_step(struct task_struct *task)
123 {
124         clear_tsk_thread_flag(task, TIF_BLOCK_STEP);
125         clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
126 }
127
128 void user_enable_block_step(struct task_struct *task)
129 {
130         set_tsk_thread_flag(task, TIF_SINGLE_STEP);
131         set_tsk_thread_flag(task, TIF_BLOCK_STEP);
132 }
133
134 /*
135  * Called by kernel/ptrace.c when detaching.
136  *
137  * Clear all debugging related fields.
138  */
139 void ptrace_disable(struct task_struct *task)
140 {
141         memset(&task->thread.per_user, 0, sizeof(task->thread.per_user));
142         memset(&task->thread.per_event, 0, sizeof(task->thread.per_event));
143         clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
144         clear_pt_regs_flag(task_pt_regs(task), PIF_PER_TRAP);
145         task->thread.per_flags = 0;
146 }
147
148 #define __ADDR_MASK 7
149
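/*
 * The "dummy" NULL pointers used below only serve to compute field
 * offsets within struct per_struct_kernel and struct user, i.e. they act
 * as a hand-rolled offsetof(); the pointers are never dereferenced.
 */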
150 static inline unsigned long __peek_user_per(struct task_struct *child,
151                                             addr_t addr)
152 {
153         struct per_struct_kernel *dummy = NULL;
154
155         if (addr == (addr_t) &dummy->cr9)
156                 /* Control bits of the active per set. */
157                 return test_thread_flag(TIF_SINGLE_STEP) ?
158                         PER_EVENT_IFETCH : child->thread.per_user.control;
159         else if (addr == (addr_t) &dummy->cr10)
160                 /* Start address of the active per set. */
161                 return test_thread_flag(TIF_SINGLE_STEP) ?
162                         0 : child->thread.per_user.start;
163         else if (addr == (addr_t) &dummy->cr11)
164                 /* End address of the active per set. */
165                 return test_thread_flag(TIF_SINGLE_STEP) ?
166                         -1UL : child->thread.per_user.end;
167         else if (addr == (addr_t) &dummy->bits)
168                 /* Single-step bit. */
169                 return test_thread_flag(TIF_SINGLE_STEP) ?
170                         (1UL << (BITS_PER_LONG - 1)) : 0;
171         else if (addr == (addr_t) &dummy->starting_addr)
172                 /* Start address of the user specified per set. */
173                 return child->thread.per_user.start;
174         else if (addr == (addr_t) &dummy->ending_addr)
175                 /* End address of the user specified per set. */
176                 return child->thread.per_user.end;
177         else if (addr == (addr_t) &dummy->perc_atmid)
178                 /* PER code, ATMID and AI of the last PER trap */
179                 return (unsigned long)
180                         child->thread.per_event.cause << (BITS_PER_LONG - 16);
181         else if (addr == (addr_t) &dummy->address)
182                 /* Address of the last PER trap */
183                 return child->thread.per_event.address;
184         else if (addr == (addr_t) &dummy->access_id)
185                 /* Access id of the last PER trap */
186                 return (unsigned long)
187                         child->thread.per_event.paid << (BITS_PER_LONG - 8);
188         return 0;
189 }
190
191 /*
192  * Read the word at offset addr from the user area of a process. The
193  * trouble here is that the information is littered over different
194  * locations. The process registers are found on the kernel stack,
195  * the floating point stuff and the trace settings are stored in
196  * the task structure. In addition the different structures in
197  * struct user contain pad bytes that should be read as zeroes.
198  * Lovely...
199  */
200 static unsigned long __peek_user(struct task_struct *child, addr_t addr)
201 {
202         struct user *dummy = NULL;
203         addr_t offset, tmp;
204
205         if (addr < (addr_t) &dummy->regs.acrs) {
206                 /*
207                  * psw and gprs are stored on the stack
208                  */
209                 tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr);
210                 if (addr == (addr_t) &dummy->regs.psw.mask) {
211                         /* Return a clean psw mask. */
212                         tmp &= PSW_MASK_USER | PSW_MASK_RI;
213                         tmp |= PSW_USER_BITS;
214                 }
215
216         } else if (addr < (addr_t) &dummy->regs.orig_gpr2) {
217                 /*
218                  * access registers are stored in the thread structure
219                  */
220                 offset = addr - (addr_t) &dummy->regs.acrs;
221                 /*
222                  * Very special case: old & broken 64 bit gdb reading
223                  * from acrs[15]. Result is a 64 bit value. Read the
224                  * 32 bit acrs[15] value and shift it by 32. Sick...
225                  */
226                 if (addr == (addr_t) &dummy->regs.acrs[15])
227                         tmp = ((unsigned long) child->thread.acrs[15]) << 32;
228                 else
229                         tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);
230
231         } else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
232                 /*
233                  * orig_gpr2 is stored on the kernel stack
234                  */
235                 tmp = (addr_t) task_pt_regs(child)->orig_gpr2;
236
237         } else if (addr < (addr_t) &dummy->regs.fp_regs) {
238                 /*
239                  * prevent reads of padding hole between
240                  * orig_gpr2 and fp_regs on s390.
241                  */
242                 tmp = 0;
243
244         } else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) {
245                 /*
246                  * floating point control reg. is in the thread structure
247                  */
248                 tmp = child->thread.fpu.fpc;
249                 tmp <<= BITS_PER_LONG - 32;
250
251         } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
252                 /*
253                  * floating point regs. are either in child->thread.fpu
254                  * or the child->thread.fpu.vxrs array
255                  */
256                 offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
257                 if (MACHINE_HAS_VX)
258                         tmp = *(addr_t *)
259                                ((addr_t) child->thread.fpu.vxrs + 2*offset);
260                 else
261                         tmp = *(addr_t *)
262                                ((addr_t) child->thread.fpu.fprs + offset);
263
264         } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
265                 /*
266                  * Handle access to the per_info structure.
267                  */
268                 addr -= (addr_t) &dummy->regs.per_info;
269                 tmp = __peek_user_per(child, addr);
270
271         } else
272                 tmp = 0;
273
274         return tmp;
275 }
276
277 static int
278 peek_user(struct task_struct *child, addr_t addr, addr_t data)
279 {
280         addr_t tmp, mask;
281
282         /*
283          * Stupid gdb peeks/pokes the access registers in 64 bit with
284          * an alignment of 4. Programmers from hell...
285          */
286         mask = __ADDR_MASK;
287         if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
288             addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
289                 mask = 3;
290         if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
291                 return -EIO;
292
293         tmp = __peek_user(child, addr);
294         return put_user(tmp, (addr_t __user *) data);
295 }
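/*
 * Illustrative sketch, not part of this file: a tracer typically reads a
 * word of the user area through the ptrace() wrapper of the C library,
 * e.g. to fetch the PSW mask of a stopped child (pid is assumed to refer
 * to a traced, stopped task; includes omitted):
 *
 *	errno = 0;
 *	long psw_mask = ptrace(PTRACE_PEEKUSER, pid,
 *			       offsetof(struct user, regs.psw.mask), NULL);
 *	if (psw_mask == -1 && errno)
 *		perror("PTRACE_PEEKUSER");
 *
 * The offset passed in addr has to satisfy the alignment checks done in
 * peek_user() above.
 */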
296
297 static inline void __poke_user_per(struct task_struct *child,
298                                    addr_t addr, addr_t data)
299 {
300         struct per_struct_kernel *dummy = NULL;
301
302         /*
303          * There are only three fields in the per_info struct that the
304          * debugger user can write to.
305          * 1) cr9: the debugger wants to set a new PER event mask
306          * 2) starting_addr: the debugger wants to set a new starting
307          *    address to use with the PER event mask.
308          * 3) ending_addr: the debugger wants to set a new ending
309          *    address to use with the PER event mask.
310          * The user specified PER event mask and the start and end
311          * addresses are used only if single stepping is not in effect.
312          * Writes to any other field in per_info are ignored.
313          */
314         if (addr == (addr_t) &dummy->cr9)
315                 /* PER event mask of the user specified per set. */
316                 child->thread.per_user.control =
317                         data & (PER_EVENT_MASK | PER_CONTROL_MASK);
318         else if (addr == (addr_t) &dummy->starting_addr)
319                 /* Starting address of the user specified per set. */
320                 child->thread.per_user.start = data;
321         else if (addr == (addr_t) &dummy->ending_addr)
322                 /* Ending address of the user specified per set. */
323                 child->thread.per_user.end = data;
324 }
325
326 /*
327  * Write a word to the user area of a process at location addr. This
328  * operation does have an additional problem compared to peek_user.
329  * Stores to the program status word and to the floating point
330  * control register need to be checked for validity.
331  */
332 static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
333 {
334         struct user *dummy = NULL;
335         addr_t offset;
336
337         if (addr < (addr_t) &dummy->regs.acrs) {
338                 /*
339                  * psw and gprs are stored on the stack
340                  */
341                 if (addr == (addr_t) &dummy->regs.psw.mask) {
342                         unsigned long mask = PSW_MASK_USER;
343
344                         mask |= is_ri_task(child) ? PSW_MASK_RI : 0;
345                         if ((data ^ PSW_USER_BITS) & ~mask)
346                                 /* Invalid psw mask. */
347                                 return -EINVAL;
348                         if ((data & PSW_MASK_ASC) == PSW_ASC_HOME)
349                                 /* Invalid address-space-control bits */
350                                 return -EINVAL;
351                         if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))
352                                 /* Invalid addressing mode bits */
353                                 return -EINVAL;
354                 }
355                 *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
356
357         } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
358                 /*
359                  * access registers are stored in the thread structure
360                  */
361                 offset = addr - (addr_t) &dummy->regs.acrs;
362                 /*
363                  * Very special case: old & broken 64 bit gdb writing
364                  * to acrs[15] with a 64 bit value. Ignore the lower
365                  * half of the value and write the upper 32 bit to
366                  * acrs[15]. Sick...
367                  */
368                 if (addr == (addr_t) &dummy->regs.acrs[15])
369                         child->thread.acrs[15] = (unsigned int) (data >> 32);
370                 else
371                         *(addr_t *)((addr_t) &child->thread.acrs + offset) = data;
372
373         } else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
374                 /*
375                  * orig_gpr2 is stored on the kernel stack
376                  */
377                 task_pt_regs(child)->orig_gpr2 = data;
378
379         } else if (addr < (addr_t) &dummy->regs.fp_regs) {
380                 /*
381                  * prevent writes of padding hole between
382                  * orig_gpr2 and fp_regs on s390.
383                  */
384                 return 0;
385
386         } else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) {
387                 /*
388                  * floating point control reg. is in the thread structure
389                  */
390                 if ((unsigned int) data != 0 ||
391                     test_fp_ctl(data >> (BITS_PER_LONG - 32)))
392                         return -EINVAL;
393                 child->thread.fpu.fpc = data >> (BITS_PER_LONG - 32);
394
395         } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
396                 /*
397                  * floating point regs. are either in child->thread.fpu
398                  * or the child->thread.fpu.vxrs array
399                  */
400                 offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
401                 if (MACHINE_HAS_VX)
402                         *(addr_t *)((addr_t)
403                                 child->thread.fpu.vxrs + 2*offset) = data;
404                 else
405                         *(addr_t *)((addr_t)
406                                 child->thread.fpu.fprs + offset) = data;
407
408         } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
409                 /*
410                  * Handle access to the per_info structure.
411                  */
412                 addr -= (addr_t) &dummy->regs.per_info;
413                 __poke_user_per(child, addr, data);
414
415         }
416
417         return 0;
418 }
419
420 static int poke_user(struct task_struct *child, addr_t addr, addr_t data)
421 {
422         addr_t mask;
423
424         /*
425          * Stupid gdb peeks/pokes the access registers in 64 bit with
426          * an alignment of 4. Programmers from hell indeed...
427          */
428         mask = __ADDR_MASK;
429         if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
430             addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
431                 mask = 3;
432         if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
433                 return -EIO;
434
435         return __poke_user(child, addr, data);
436 }
437
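/*
 * arch_ptrace() implements the s390 specific ptrace requests: word and
 * area access to the user area (PTRACE_PEEKUSR/POKEUSR and their _AREA
 * variants), PTRACE_GET_LAST_BREAK and the transactional execution
 * controls.  All other requests are handled by the generic
 * ptrace_request().
 */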
438 long arch_ptrace(struct task_struct *child, long request,
439                  unsigned long addr, unsigned long data)
440 {
441         ptrace_area parea; 
442         int copied, ret;
443
444         switch (request) {
445         case PTRACE_PEEKUSR:
446                 /* read the word at location addr in the USER area. */
447                 return peek_user(child, addr, data);
448
449         case PTRACE_POKEUSR:
450                 /* write the word at location addr in the USER area */
451                 return poke_user(child, addr, data);
452
453         case PTRACE_PEEKUSR_AREA:
454         case PTRACE_POKEUSR_AREA:
455                 if (copy_from_user(&parea, (void __force __user *) addr,
456                                                         sizeof(parea)))
457                         return -EFAULT;
458                 addr = parea.kernel_addr;
459                 data = parea.process_addr;
460                 copied = 0;
461                 while (copied < parea.len) {
462                         if (request == PTRACE_PEEKUSR_AREA)
463                                 ret = peek_user(child, addr, data);
464                         else {
465                                 addr_t utmp;
466                                 if (get_user(utmp,
467                                              (addr_t __force __user *) data))
468                                         return -EFAULT;
469                                 ret = poke_user(child, addr, utmp);
470                         }
471                         if (ret)
472                                 return ret;
473                         addr += sizeof(unsigned long);
474                         data += sizeof(unsigned long);
475                         copied += sizeof(unsigned long);
476                 }
477                 return 0;
478         case PTRACE_GET_LAST_BREAK:
479                 put_user(child->thread.last_break,
480                          (unsigned long __user *) data);
481                 return 0;
482         case PTRACE_ENABLE_TE:
483                 if (!MACHINE_HAS_TE)
484                         return -EIO;
485                 child->thread.per_flags &= ~PER_FLAG_NO_TE;
486                 return 0;
487         case PTRACE_DISABLE_TE:
488                 if (!MACHINE_HAS_TE)
489                         return -EIO;
490                 child->thread.per_flags |= PER_FLAG_NO_TE;
491                 child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
492                 return 0;
493         case PTRACE_TE_ABORT_RAND:
494                 if (!MACHINE_HAS_TE || (child->thread.per_flags & PER_FLAG_NO_TE))
495                         return -EIO;
496                 switch (data) {
497                 case 0UL:
498                         child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
499                         break;
500                 case 1UL:
501                         child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
502                         child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND_TEND;
503                         break;
504                 case 2UL:
505                         child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
506                         child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND_TEND;
507                         break;
508                 default:
509                         return -EINVAL;
510                 }
511                 return 0;
512         default:
513                 return ptrace_request(child, request, addr, data);
514         }
515 }
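/*
 * Illustrative sketch, not part of this file: the s390 specific
 * PTRACE_PEEKUSR_AREA / PTRACE_POKEUSR_AREA requests take a ptrace_area
 * descriptor instead of a single offset.  A tracer could read all 16
 * general purpose registers from the user area roughly like this (pid is
 * assumed to refer to a traced, stopped task; error handling omitted):
 *
 *	unsigned long buf[16];
 *	ptrace_area parea = {
 *		.len          = sizeof(buf),
 *		.kernel_addr  = offsetof(struct user, regs.gprs),
 *		.process_addr = (unsigned long) buf,
 *	};
 *	ptrace(PTRACE_PEEKUSR_AREA, pid, (unsigned long) &parea, 0);
 *
 * kernel_addr is the offset into the user area, process_addr points to
 * tracer memory that receives (or, for POKEUSR_AREA, supplies) the data.
 */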
516
517 #ifdef CONFIG_COMPAT
518 /*
519  * Now the fun part starts... a 31 bit program running in the
520  * 31 bit emulation tracing another program. PTRACE_PEEKTEXT,
521  * PTRACE_PEEKDATA, PTRACE_POKETEXT and PTRACE_POKEDATA are easy
522  * to handle; the difference from the 64 bit versions of the requests
523  * is that the access is done in multiples of 4 bytes instead of
524  * 8 bytes (sizeof(unsigned long) on 31/64 bit).
525  * The ugly parts are PTRACE_PEEKUSR, PTRACE_PEEKUSR_AREA,
526  * PTRACE_POKEUSR and PTRACE_POKEUSR_AREA. If the traced program
527  * is a 31 bit program too, the content of struct user can be
528  * emulated. A 31 bit program peeking into the struct user of
529  * a 64 bit program is a no-no.
530  */
531
532 /*
533  * Same as peek_user_per but for a 31 bit program.
534  */
535 static inline __u32 __peek_user_per_compat(struct task_struct *child,
536                                            addr_t addr)
537 {
538         struct compat_per_struct_kernel *dummy32 = NULL;
539
540         if (addr == (addr_t) &dummy32->cr9)
541                 /* Control bits of the active per set. */
542                 return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
543                         PER_EVENT_IFETCH : child->thread.per_user.control;
544         else if (addr == (addr_t) &dummy32->cr10)
545                 /* Start address of the active per set. */
546                 return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
547                         0 : child->thread.per_user.start;
548         else if (addr == (addr_t) &dummy32->cr11)
549                 /* End address of the active per set. */
550                 return test_thread_flag(TIF_SINGLE_STEP) ?
551                         PSW32_ADDR_INSN : child->thread.per_user.end;
552         else if (addr == (addr_t) &dummy32->bits)
553                 /* Single-step bit. */
554                 return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
555                         0x80000000 : 0;
556         else if (addr == (addr_t) &dummy32->starting_addr)
557                 /* Start address of the user specified per set. */
558                 return (__u32) child->thread.per_user.start;
559         else if (addr == (addr_t) &dummy32->ending_addr)
560                 /* End address of the user specified per set. */
561                 return (__u32) child->thread.per_user.end;
562         else if (addr == (addr_t) &dummy32->perc_atmid)
563                 /* PER code, ATMID and AI of the last PER trap */
564                 return (__u32) child->thread.per_event.cause << 16;
565         else if (addr == (addr_t) &dummy32->address)
566                 /* Address of the last PER trap */
567                 return (__u32) child->thread.per_event.address;
568         else if (addr == (addr_t) &dummy32->access_id)
569                 /* Access id of the last PER trap */
570                 return (__u32) child->thread.per_event.paid << 24;
571         return 0;
572 }
573
574 /*
575  * Same as peek_user but for a 31 bit program.
576  */
577 static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
578 {
579         struct compat_user *dummy32 = NULL;
580         addr_t offset;
581         __u32 tmp;
582
583         if (addr < (addr_t) &dummy32->regs.acrs) {
584                 struct pt_regs *regs = task_pt_regs(child);
585                 /*
586                  * psw and gprs are stored on the stack
587                  */
588                 if (addr == (addr_t) &dummy32->regs.psw.mask) {
589                         /* Fake a 31 bit psw mask. */
590                         tmp = (__u32)(regs->psw.mask >> 32);
591                         tmp &= PSW32_MASK_USER | PSW32_MASK_RI;
592                         tmp |= PSW32_USER_BITS;
593                 } else if (addr == (addr_t) &dummy32->regs.psw.addr) {
594                         /* Fake a 31 bit psw address. */
595                         tmp = (__u32) regs->psw.addr |
596                                 (__u32)(regs->psw.mask & PSW_MASK_BA);
597                 } else {
598                         /* gpr 0-15 */
599                         tmp = *(__u32 *)((addr_t) &regs->psw + addr*2 + 4);
600                 }
601         } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
602                 /*
603                  * access registers are stored in the thread structure
604                  */
605                 offset = addr - (addr_t) &dummy32->regs.acrs;
606                 tmp = *(__u32*)((addr_t) &child->thread.acrs + offset);
607
608         } else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
609                 /*
610                  * orig_gpr2 is stored on the kernel stack
611                  */
612                 tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4);
613
614         } else if (addr < (addr_t) &dummy32->regs.fp_regs) {
615                 /*
616                  * prevent reads of padding hole between
617                  * orig_gpr2 and fp_regs on s390.
618                  */
619                 tmp = 0;
620
621         } else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) {
622                 /*
623                  * floating point control reg. is in the thread structure
624                  */
625                 tmp = child->thread.fpu.fpc;
626
627         } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
628                 /*
629                  * floating point regs. are either in child->thread.fpu
630                  * or the child->thread.fpu.vxrs array
631                  */
632                 offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
633                 if (MACHINE_HAS_VX)
634                         tmp = *(__u32 *)
635                                ((addr_t) child->thread.fpu.vxrs + 2*offset);
636                 else
637                         tmp = *(__u32 *)
638                                ((addr_t) child->thread.fpu.fprs + offset);
639
640         } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
641                 /*
642                  * Handle access to the per_info structure.
643                  */
644                 addr -= (addr_t) &dummy32->regs.per_info;
645                 tmp = __peek_user_per_compat(child, addr);
646
647         } else
648                 tmp = 0;
649
650         return tmp;
651 }
652
653 static int peek_user_compat(struct task_struct *child,
654                             addr_t addr, addr_t data)
655 {
656         __u32 tmp;
657
658         if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user) - 3)
659                 return -EIO;
660
661         tmp = __peek_user_compat(child, addr);
662         return put_user(tmp, (__u32 __user *) data);
663 }
664
665 /*
666  * Same as poke_user_per but for a 31 bit program.
667  */
668 static inline void __poke_user_per_compat(struct task_struct *child,
669                                           addr_t addr, __u32 data)
670 {
671         struct compat_per_struct_kernel *dummy32 = NULL;
672
673         if (addr == (addr_t) &dummy32->cr9)
674                 /* PER event mask of the user specified per set. */
675                 child->thread.per_user.control =
676                         data & (PER_EVENT_MASK | PER_CONTROL_MASK);
677         else if (addr == (addr_t) &dummy32->starting_addr)
678                 /* Starting address of the user specified per set. */
679                 child->thread.per_user.start = data;
680         else if (addr == (addr_t) &dummy32->ending_addr)
681                 /* Ending address of the user specified per set. */
682                 child->thread.per_user.end = data;
683 }
684
685 /*
686  * Same as poke_user but for a 31 bit program.
687  */
688 static int __poke_user_compat(struct task_struct *child,
689                               addr_t addr, addr_t data)
690 {
691         struct compat_user *dummy32 = NULL;
692         __u32 tmp = (__u32) data;
693         addr_t offset;
694
695         if (addr < (addr_t) &dummy32->regs.acrs) {
696                 struct pt_regs *regs = task_pt_regs(child);
697                 /*
698                  * psw and gprs are stored on the stack
699                  */
700                 if (addr == (addr_t) &dummy32->regs.psw.mask) {
701                         __u32 mask = PSW32_MASK_USER;
702
703                         mask |= is_ri_task(child) ? PSW32_MASK_RI : 0;
704                         /* Build a 64 bit psw mask from 31 bit mask. */
705                         if ((tmp ^ PSW32_USER_BITS) & ~mask)
706                                 /* Invalid psw mask. */
707                                 return -EINVAL;
708                         if ((data & PSW32_MASK_ASC) == PSW32_ASC_HOME)
709                                 /* Invalid address-space-control bits */
710                                 return -EINVAL;
711                         regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
712                                 (regs->psw.mask & PSW_MASK_BA) |
713                                 (__u64)(tmp & mask) << 32;
714                 } else if (addr == (addr_t) &dummy32->regs.psw.addr) {
715                         /* Build a 64 bit psw address from 31 bit address. */
716                         regs->psw.addr = (__u64) tmp & PSW32_ADDR_INSN;
717                         /* Transfer 31 bit amode bit to psw mask. */
718                         regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) |
719                                 (__u64)(tmp & PSW32_ADDR_AMODE);
720                 } else {
721                         /* gpr 0-15 */
722                         *(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp;
723                 }
724         } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
725                 /*
726                  * access registers are stored in the thread structure
727                  */
728                 offset = addr - (addr_t) &dummy32->regs.acrs;
729                 *(__u32*)((addr_t) &child->thread.acrs + offset) = tmp;
730
731         } else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
732                 /*
733                  * orig_gpr2 is stored on the kernel stack
734                  */
735                 *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp;
736
737         } else if (addr < (addr_t) &dummy32->regs.fp_regs) {
738                 /*
739          * prevent writes of padding hole between
740                  * orig_gpr2 and fp_regs on s390.
741                  */
742                 return 0;
743
744         } else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) {
745                 /*
746                  * floating point control reg. is in the thread structure
747                  */
748                 if (test_fp_ctl(tmp))
749                         return -EINVAL;
750                 child->thread.fpu.fpc = data;
751
752         } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
753                 /*
754                  * floating point regs. are either in child->thread.fpu
755                  * or the child->thread.fpu.vxrs array
756                  */
757                 offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
758                 if (MACHINE_HAS_VX)
759                         *(__u32 *)((addr_t)
760                                 child->thread.fpu.vxrs + 2*offset) = tmp;
761                 else
762                         *(__u32 *)((addr_t)
763                                 child->thread.fpu.fprs + offset) = tmp;
764
765         } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
766                 /*
767                  * Handle access to the per_info structure.
768                  */
769                 addr -= (addr_t) &dummy32->regs.per_info;
770                 __poke_user_per_compat(child, addr, data);
771         }
772
773         return 0;
774 }
775
776 static int poke_user_compat(struct task_struct *child,
777                             addr_t addr, addr_t data)
778 {
779         if (!is_compat_task() || (addr & 3) ||
780             addr > sizeof(struct compat_user) - 3)
781                 return -EIO;
782
783         return __poke_user_compat(child, addr, data);
784 }
785
786 long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
787                         compat_ulong_t caddr, compat_ulong_t cdata)
788 {
789         unsigned long addr = caddr;
790         unsigned long data = cdata;
791         compat_ptrace_area parea;
792         int copied, ret;
793
794         switch (request) {
795         case PTRACE_PEEKUSR:
796                 /* read the word at location addr in the USER area. */
797                 return peek_user_compat(child, addr, data);
798
799         case PTRACE_POKEUSR:
800                 /* write the word at location addr in the USER area */
801                 return poke_user_compat(child, addr, data);
802
803         case PTRACE_PEEKUSR_AREA:
804         case PTRACE_POKEUSR_AREA:
805                 if (copy_from_user(&parea, (void __force __user *) addr,
806                                                         sizeof(parea)))
807                         return -EFAULT;
808                 addr = parea.kernel_addr;
809                 data = parea.process_addr;
810                 copied = 0;
811                 while (copied < parea.len) {
812                         if (request == PTRACE_PEEKUSR_AREA)
813                                 ret = peek_user_compat(child, addr, data);
814                         else {
815                                 __u32 utmp;
816                                 if (get_user(utmp,
817                                              (__u32 __force __user *) data))
818                                         return -EFAULT;
819                                 ret = poke_user_compat(child, addr, utmp);
820                         }
821                         if (ret)
822                                 return ret;
823                         addr += sizeof(unsigned int);
824                         data += sizeof(unsigned int);
825                         copied += sizeof(unsigned int);
826                 }
827                 return 0;
828         case PTRACE_GET_LAST_BREAK:
829                 put_user(child->thread.last_break,
830                          (unsigned int __user *) data);
831                 return 0;
832         }
833         return compat_ptrace_request(child, request, addr, data);
834 }
835 #endif
836
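/*
 * do_syscall_trace_enter() is called from the system call entry path.
 * It gives ptrace, seccomp, the syscall tracepoints and the audit
 * subsystem a chance to inspect (or reject) the call and returns the
 * system call number the entry code should execute, or -1 if the system
 * call is to be skipped.
 */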
837 asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
838 {
839         unsigned long mask = -1UL;
840         long ret = -1;
841
842         if (is_compat_task())
843                 mask = 0xffffffff;
844
845         /*
846          * The sysc_tracesys code in entry.S stored the system
847          * call number in gprs[2].
848          */
849         if (test_thread_flag(TIF_SYSCALL_TRACE) &&
850             (tracehook_report_syscall_entry(regs) ||
851              regs->gprs[2] >= NR_syscalls)) {
852                 /*
853                  * Tracing decided this syscall should not happen or the
854                  * debugger stored an invalid system call number. Skip
855                  * the system call and the system call restart handling.
856                  */
857                 goto skip;
858         }
859
860 #ifdef CONFIG_SECCOMP
861         /* Do the secure computing check after ptrace. */
862         if (unlikely(test_thread_flag(TIF_SECCOMP))) {
863                 struct seccomp_data sd;
864
865                 if (is_compat_task()) {
866                         sd.instruction_pointer = regs->psw.addr & 0x7fffffff;
867                         sd.arch = AUDIT_ARCH_S390;
868                 } else {
869                         sd.instruction_pointer = regs->psw.addr;
870                         sd.arch = AUDIT_ARCH_S390X;
871                 }
872
873                 sd.nr = regs->int_code & 0xffff;
874                 sd.args[0] = regs->orig_gpr2 & mask;
875                 sd.args[1] = regs->gprs[3] & mask;
876                 sd.args[2] = regs->gprs[4] & mask;
877                 sd.args[3] = regs->gprs[5] & mask;
878                 sd.args[4] = regs->gprs[6] & mask;
879                 sd.args[5] = regs->gprs[7] & mask;
880
881                 if (__secure_computing(&sd) == -1)
882                         goto skip;
883         }
884 #endif /* CONFIG_SECCOMP */
885
886         if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
887                 trace_sys_enter(regs, regs->int_code & 0xffff);
888
889
890         audit_syscall_entry(regs->int_code & 0xffff, regs->orig_gpr2 & mask,
891                             regs->gprs[3] & mask, regs->gprs[4] & mask,
892                             regs->gprs[5] & mask);
893
894         if ((signed long)regs->gprs[2] >= NR_syscalls) {
895                 regs->gprs[2] = -ENOSYS;
896                 ret = -ENOSYS;
897         }
898         return regs->gprs[2];
899 skip:
900         clear_pt_regs_flag(regs, PIF_SYSCALL);
901         return ret;
902 }
903
904 asmlinkage void do_syscall_trace_exit(struct pt_regs *regs)
905 {
906         audit_syscall_exit(regs);
907
908         if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
909                 trace_sys_exit(regs, regs->gprs[2]);
910
911         if (test_thread_flag(TIF_SYSCALL_TRACE))
912                 tracehook_report_syscall_exit(regs, 0);
913 }
914
915 /*
916  * user_regset definitions.
917  */
918
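/*
 * Each regset below provides a ->get and ->set callback.  The callbacks
 * transfer register contents between the traced task and either a kernel
 * buffer (kbuf) or a user space buffer (ubuf); they back the ELF core
 * note types listed in s390_regsets[] as well as the PTRACE_GETREGSET
 * and PTRACE_SETREGSET requests.
 */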
919 static int s390_regs_get(struct task_struct *target,
920                          const struct user_regset *regset,
921                          unsigned int pos, unsigned int count,
922                          void *kbuf, void __user *ubuf)
923 {
924         if (target == current)
925                 save_access_regs(target->thread.acrs);
926
927         if (kbuf) {
928                 unsigned long *k = kbuf;
929                 while (count > 0) {
930                         *k++ = __peek_user(target, pos);
931                         count -= sizeof(*k);
932                         pos += sizeof(*k);
933                 }
934         } else {
935                 unsigned long __user *u = ubuf;
936                 while (count > 0) {
937                         if (__put_user(__peek_user(target, pos), u++))
938                                 return -EFAULT;
939                         count -= sizeof(*u);
940                         pos += sizeof(*u);
941                 }
942         }
943         return 0;
944 }
945
946 static int s390_regs_set(struct task_struct *target,
947                          const struct user_regset *regset,
948                          unsigned int pos, unsigned int count,
949                          const void *kbuf, const void __user *ubuf)
950 {
951         int rc = 0;
952
953         if (target == current)
954                 save_access_regs(target->thread.acrs);
955
956         if (kbuf) {
957                 const unsigned long *k = kbuf;
958                 while (count > 0 && !rc) {
959                         rc = __poke_user(target, pos, *k++);
960                         count -= sizeof(*k);
961                         pos += sizeof(*k);
962                 }
963         } else {
964                 const unsigned long  __user *u = ubuf;
965                 while (count > 0 && !rc) {
966                         unsigned long word;
967                         rc = __get_user(word, u++);
968                         if (rc)
969                                 break;
970                         rc = __poke_user(target, pos, word);
971                         count -= sizeof(*u);
972                         pos += sizeof(*u);
973                 }
974         }
975
976         if (rc == 0 && target == current)
977                 restore_access_regs(target->thread.acrs);
978
979         return rc;
980 }
981
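/*
 * With the vector extension installed the floating point registers are
 * the leftmost 64 bits of vector registers 0-15, so the fp regset is
 * assembled from (and written back to) thread.fpu.vxrs; without the
 * vector extension thread.fpu.fprs is used directly.
 */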
982 static int s390_fpregs_get(struct task_struct *target,
983                            const struct user_regset *regset, unsigned int pos,
984                            unsigned int count, void *kbuf, void __user *ubuf)
985 {
986         _s390_fp_regs fp_regs;
987
988         if (target == current)
989                 save_fpu_regs();
990
991         fp_regs.fpc = target->thread.fpu.fpc;
992         fpregs_store(&fp_regs, &target->thread.fpu);
993
994         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
995                                    &fp_regs, 0, -1);
996 }
997
998 static int s390_fpregs_set(struct task_struct *target,
999                            const struct user_regset *regset, unsigned int pos,
1000                            unsigned int count, const void *kbuf,
1001                            const void __user *ubuf)
1002 {
1003         int rc = 0;
1004         freg_t fprs[__NUM_FPRS];
1005
1006         if (target == current)
1007                 save_fpu_regs();
1008
1009         if (MACHINE_HAS_VX)
1010                 convert_vx_to_fp(fprs, target->thread.fpu.vxrs);
1011         else
1012                 memcpy(&fprs, target->thread.fpu.fprs, sizeof(fprs));
1013
1014         /* If setting FPC, must validate it first. */
1015         if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
1016                 u32 ufpc[2] = { target->thread.fpu.fpc, 0 };
1017                 rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ufpc,
1018                                         0, offsetof(s390_fp_regs, fprs));
1019                 if (rc)
1020                         return rc;
1021                 if (ufpc[1] != 0 || test_fp_ctl(ufpc[0]))
1022                         return -EINVAL;
1023                 target->thread.fpu.fpc = ufpc[0];
1024         }
1025
1026         if (rc == 0 && count > 0)
1027                 rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1028                                         fprs, offsetof(s390_fp_regs, fprs), -1);
1029         if (rc)
1030                 return rc;
1031
1032         if (MACHINE_HAS_VX)
1033                 convert_fp_to_vx(target->thread.fpu.vxrs, fprs);
1034         else
1035                 memcpy(target->thread.fpu.fprs, &fprs, sizeof(fprs));
1036
1037         return rc;
1038 }
1039
1040 static int s390_last_break_get(struct task_struct *target,
1041                                const struct user_regset *regset,
1042                                unsigned int pos, unsigned int count,
1043                                void *kbuf, void __user *ubuf)
1044 {
1045         if (count > 0) {
1046                 if (kbuf) {
1047                         unsigned long *k = kbuf;
1048                         *k = target->thread.last_break;
1049                 } else {
1050                         unsigned long  __user *u = ubuf;
1051                         if (__put_user(target->thread.last_break, u))
1052                                 return -EFAULT;
1053                 }
1054         }
1055         return 0;
1056 }
1057
1058 static int s390_last_break_set(struct task_struct *target,
1059                                const struct user_regset *regset,
1060                                unsigned int pos, unsigned int count,
1061                                const void *kbuf, const void __user *ubuf)
1062 {
1063         return 0;
1064 }
1065
1066 static int s390_tdb_get(struct task_struct *target,
1067                         const struct user_regset *regset,
1068                         unsigned int pos, unsigned int count,
1069                         void *kbuf, void __user *ubuf)
1070 {
1071         struct pt_regs *regs = task_pt_regs(target);
1072         unsigned char *data;
1073
1074         if (!(regs->int_code & 0x200))
1075                 return -ENODATA;
1076         data = target->thread.trap_tdb;
1077         return user_regset_copyout(&pos, &count, &kbuf, &ubuf, data, 0, 256);
1078 }
1079
1080 static int s390_tdb_set(struct task_struct *target,
1081                         const struct user_regset *regset,
1082                         unsigned int pos, unsigned int count,
1083                         const void *kbuf, const void __user *ubuf)
1084 {
1085         return 0;
1086 }
1087
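/*
 * NT_S390_VXRS_LOW exports the rightmost 64 bits of vector registers
 * 0-15 (their leftmost halves are already visible through the fp
 * regset); NT_S390_VXRS_HIGH exports the full vector registers 16-31.
 */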
1088 static int s390_vxrs_low_get(struct task_struct *target,
1089                              const struct user_regset *regset,
1090                              unsigned int pos, unsigned int count,
1091                              void *kbuf, void __user *ubuf)
1092 {
1093         __u64 vxrs[__NUM_VXRS_LOW];
1094         int i;
1095
1096         if (!MACHINE_HAS_VX)
1097                 return -ENODEV;
1098         if (target == current)
1099                 save_fpu_regs();
1100         for (i = 0; i < __NUM_VXRS_LOW; i++)
1101                 vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
1102         return user_regset_copyout(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
1103 }
1104
1105 static int s390_vxrs_low_set(struct task_struct *target,
1106                              const struct user_regset *regset,
1107                              unsigned int pos, unsigned int count,
1108                              const void *kbuf, const void __user *ubuf)
1109 {
1110         __u64 vxrs[__NUM_VXRS_LOW];
1111         int i, rc;
1112
1113         if (!MACHINE_HAS_VX)
1114                 return -ENODEV;
1115         if (target == current)
1116                 save_fpu_regs();
1117
1118         for (i = 0; i < __NUM_VXRS_LOW; i++)
1119                 vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
1120
1121         rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
1122         if (rc == 0)
1123                 for (i = 0; i < __NUM_VXRS_LOW; i++)
1124                         *((__u64 *)(target->thread.fpu.vxrs + i) + 1) = vxrs[i];
1125
1126         return rc;
1127 }
1128
1129 static int s390_vxrs_high_get(struct task_struct *target,
1130                               const struct user_regset *regset,
1131                               unsigned int pos, unsigned int count,
1132                               void *kbuf, void __user *ubuf)
1133 {
1134         __vector128 vxrs[__NUM_VXRS_HIGH];
1135
1136         if (!MACHINE_HAS_VX)
1137                 return -ENODEV;
1138         if (target == current)
1139                 save_fpu_regs();
1140         memcpy(vxrs, target->thread.fpu.vxrs + __NUM_VXRS_LOW, sizeof(vxrs));
1141
1142         return user_regset_copyout(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
1143 }
1144
1145 static int s390_vxrs_high_set(struct task_struct *target,
1146                               const struct user_regset *regset,
1147                               unsigned int pos, unsigned int count,
1148                               const void *kbuf, const void __user *ubuf)
1149 {
1150         int rc;
1151
1152         if (!MACHINE_HAS_VX)
1153                 return -ENODEV;
1154         if (target == current)
1155                 save_fpu_regs();
1156
1157         rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1158                                 target->thread.fpu.vxrs + __NUM_VXRS_LOW, 0, -1);
1159         return rc;
1160 }
1161
1162 static int s390_system_call_get(struct task_struct *target,
1163                                 const struct user_regset *regset,
1164                                 unsigned int pos, unsigned int count,
1165                                 void *kbuf, void __user *ubuf)
1166 {
1167         unsigned int *data = &target->thread.system_call;
1168         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1169                                    data, 0, sizeof(unsigned int));
1170 }
1171
1172 static int s390_system_call_set(struct task_struct *target,
1173                                 const struct user_regset *regset,
1174                                 unsigned int pos, unsigned int count,
1175                                 const void *kbuf, const void __user *ubuf)
1176 {
1177         unsigned int *data = &target->thread.system_call;
1178         return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1179                                   data, 0, sizeof(unsigned int));
1180 }
1181
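/*
 * Guarded storage regsets: NT_S390_GS_CB exposes the task's current
 * guarded storage control block (thread.gs_cb), NT_S390_GS_BC the
 * broadcast control block kept in thread.gs_bc_cb.
 */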
1182 static int s390_gs_cb_get(struct task_struct *target,
1183                           const struct user_regset *regset,
1184                           unsigned int pos, unsigned int count,
1185                           void *kbuf, void __user *ubuf)
1186 {
1187         struct gs_cb *data = target->thread.gs_cb;
1188
1189         if (!MACHINE_HAS_GS)
1190                 return -ENODEV;
1191         if (!data)
1192                 return -ENODATA;
1193         if (target == current)
1194                 save_gs_cb(data);
1195         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1196                                    data, 0, sizeof(struct gs_cb));
1197 }
1198
1199 static int s390_gs_cb_set(struct task_struct *target,
1200                           const struct user_regset *regset,
1201                           unsigned int pos, unsigned int count,
1202                           const void *kbuf, const void __user *ubuf)
1203 {
1204         struct gs_cb gs_cb = { }, *data = NULL;
1205         int rc;
1206
1207         if (!MACHINE_HAS_GS)
1208                 return -ENODEV;
1209         if (!target->thread.gs_cb) {
1210                 data = kzalloc(sizeof(*data), GFP_KERNEL);
1211                 if (!data)
1212                         return -ENOMEM;
1213         }
1214         if (!target->thread.gs_cb)
1215                 gs_cb.gsd = 25;
1216         else if (target == current)
1217                 save_gs_cb(&gs_cb);
1218         else
1219                 gs_cb = *target->thread.gs_cb;
1220         rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1221                                 &gs_cb, 0, sizeof(gs_cb));
1222         if (rc) {
1223                 kfree(data);
1224                 return -EFAULT;
1225         }
1226         preempt_disable();
1227         if (!target->thread.gs_cb)
1228                 target->thread.gs_cb = data;
1229         *target->thread.gs_cb = gs_cb;
1230         if (target == current) {
1231                 __ctl_set_bit(2, 4);
1232                 restore_gs_cb(target->thread.gs_cb);
1233         }
1234         preempt_enable();
1235         return rc;
1236 }
1237
1238 static int s390_gs_bc_get(struct task_struct *target,
1239                           const struct user_regset *regset,
1240                           unsigned int pos, unsigned int count,
1241                           void *kbuf, void __user *ubuf)
1242 {
1243         struct gs_cb *data = target->thread.gs_bc_cb;
1244
1245         if (!MACHINE_HAS_GS)
1246                 return -ENODEV;
1247         if (!data)
1248                 return -ENODATA;
1249         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1250                                    data, 0, sizeof(struct gs_cb));
1251 }
1252
1253 static int s390_gs_bc_set(struct task_struct *target,
1254                           const struct user_regset *regset,
1255                           unsigned int pos, unsigned int count,
1256                           const void *kbuf, const void __user *ubuf)
1257 {
1258         struct gs_cb *data = target->thread.gs_bc_cb;
1259
1260         if (!MACHINE_HAS_GS)
1261                 return -ENODEV;
1262         if (!data) {
1263                 data = kzalloc(sizeof(*data), GFP_KERNEL);
1264                 if (!data)
1265                         return -ENOMEM;
1266                 target->thread.gs_bc_cb = data;
1267         }
1268         return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1269                                   data, 0, sizeof(struct gs_cb));
1270 }
1271
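/*
 * A runtime instrumentation control block supplied by the tracer is only
 * accepted if it describes a sane, unprivileged configuration: correctly
 * aligned origin, limit and current addresses, all reserved fields zero
 * and the default storage key.
 */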
1272 static bool is_ri_cb_valid(struct runtime_instr_cb *cb)
1273 {
1274         return (cb->rca & 0x1f) == 0 &&
1275                 (cb->roa & 0xfff) == 0 &&
1276                 (cb->rla & 0xfff) == 0xfff &&
1277                 cb->s == 1 &&
1278                 cb->k == 1 &&
1279                 cb->h == 0 &&
1280                 cb->reserved1 == 0 &&
1281                 cb->ps == 1 &&
1282                 cb->qs == 0 &&
1283                 cb->pc == 1 &&
1284                 cb->qc == 0 &&
1285                 cb->reserved2 == 0 &&
1286                 cb->key == PAGE_DEFAULT_KEY &&
1287                 cb->reserved3 == 0 &&
1288                 cb->reserved4 == 0 &&
1289                 cb->reserved5 == 0 &&
1290                 cb->reserved6 == 0 &&
1291                 cb->reserved7 == 0 &&
1292                 cb->reserved8 == 0 &&
1293                 cb->rla >= cb->roa &&
1294                 cb->rca >= cb->roa &&
1295                 cb->rca <= cb->rla+1 &&
1296                 cb->m < 3;
1297 }
1298
1299 static int s390_runtime_instr_get(struct task_struct *target,
1300                                 const struct user_regset *regset,
1301                                 unsigned int pos, unsigned int count,
1302                                 void *kbuf, void __user *ubuf)
1303 {
1304         struct runtime_instr_cb *data = target->thread.ri_cb;
1305
1306         if (!test_facility(64))
1307                 return -ENODEV;
1308         if (!data)
1309                 return -ENODATA;
1310
1311         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1312                                    data, 0, sizeof(struct runtime_instr_cb));
1313 }
1314
1315 static int s390_runtime_instr_set(struct task_struct *target,
1316                                   const struct user_regset *regset,
1317                                   unsigned int pos, unsigned int count,
1318                                   const void *kbuf, const void __user *ubuf)
1319 {
1320         struct runtime_instr_cb ri_cb = { }, *data = NULL;
1321         int rc;
1322
1323         if (!test_facility(64))
1324                 return -ENODEV;
1325
1326         if (!target->thread.ri_cb) {
1327                 data = kzalloc(sizeof(*data), GFP_KERNEL);
1328                 if (!data)
1329                         return -ENOMEM;
1330         }
1331
1332         if (target->thread.ri_cb) {
1333                 if (target == current)
1334                         store_runtime_instr_cb(&ri_cb);
1335                 else
1336                         ri_cb = *target->thread.ri_cb;
1337         }
1338
1339         rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1340                                 &ri_cb, 0, sizeof(struct runtime_instr_cb));
1341         if (rc) {
1342                 kfree(data);
1343                 return -EFAULT;
1344         }
1345
1346         if (!is_ri_cb_valid(&ri_cb)) {
1347                 kfree(data);
1348                 return -EINVAL;
1349         }
1350
1351         preempt_disable();
1352         if (!target->thread.ri_cb)
1353                 target->thread.ri_cb = data;
1354         *target->thread.ri_cb = ri_cb;
1355         if (target == current)
1356                 load_runtime_instr_cb(target->thread.ri_cb);
1357         preempt_enable();
1358
1359         return 0;
1360 }
1361
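/*
 * The regsets below are exported through the generic regset interface,
 * i.e. PTRACE_GETREGSET/PTRACE_SETREGSET and the corresponding ELF core
 * dump notes.  As an illustration only (user-space sketch, not part of
 * this file; error handling omitted), a tracer could read the runtime
 * instrumentation control block of a stopped tracee like this:
 *
 *	struct runtime_instr_cb ri_cb;
 *	struct iovec iov = { .iov_base = &ri_cb, .iov_len = sizeof(ri_cb) };
 *
 *	if (ptrace(PTRACE_GETREGSET, pid, NT_S390_RI_CB, &iov) == 0)
 *		... inspect ri_cb ...
 *
 * A missing facility shows up as ENODEV and an unused control block as
 * ENODATA, matching s390_runtime_instr_get() above.
 */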
1362 static const struct user_regset s390_regsets[] = {
1363         {
1364                 .core_note_type = NT_PRSTATUS,
1365                 .n = sizeof(s390_regs) / sizeof(long),
1366                 .size = sizeof(long),
1367                 .align = sizeof(long),
1368                 .get = s390_regs_get,
1369                 .set = s390_regs_set,
1370         },
1371         {
1372                 .core_note_type = NT_PRFPREG,
1373                 .n = sizeof(s390_fp_regs) / sizeof(long),
1374                 .size = sizeof(long),
1375                 .align = sizeof(long),
1376                 .get = s390_fpregs_get,
1377                 .set = s390_fpregs_set,
1378         },
1379         {
1380                 .core_note_type = NT_S390_SYSTEM_CALL,
1381                 .n = 1,
1382                 .size = sizeof(unsigned int),
1383                 .align = sizeof(unsigned int),
1384                 .get = s390_system_call_get,
1385                 .set = s390_system_call_set,
1386         },
1387         {
1388                 .core_note_type = NT_S390_LAST_BREAK,
1389                 .n = 1,
1390                 .size = sizeof(long),
1391                 .align = sizeof(long),
1392                 .get = s390_last_break_get,
1393                 .set = s390_last_break_set,
1394         },
1395         {
1396                 .core_note_type = NT_S390_TDB,
1397                 .n = 1,
1398                 .size = 256,
1399                 .align = 1,
1400                 .get = s390_tdb_get,
1401                 .set = s390_tdb_set,
1402         },
1403         {
1404                 .core_note_type = NT_S390_VXRS_LOW,
1405                 .n = __NUM_VXRS_LOW,
1406                 .size = sizeof(__u64),
1407                 .align = sizeof(__u64),
1408                 .get = s390_vxrs_low_get,
1409                 .set = s390_vxrs_low_set,
1410         },
1411         {
1412                 .core_note_type = NT_S390_VXRS_HIGH,
1413                 .n = __NUM_VXRS_HIGH,
1414                 .size = sizeof(__vector128),
1415                 .align = sizeof(__vector128),
1416                 .get = s390_vxrs_high_get,
1417                 .set = s390_vxrs_high_set,
1418         },
1419         {
1420                 .core_note_type = NT_S390_GS_CB,
1421                 .n = sizeof(struct gs_cb) / sizeof(__u64),
1422                 .size = sizeof(__u64),
1423                 .align = sizeof(__u64),
1424                 .get = s390_gs_cb_get,
1425                 .set = s390_gs_cb_set,
1426         },
1427         {
1428                 .core_note_type = NT_S390_GS_BC,
1429                 .n = sizeof(struct gs_cb) / sizeof(__u64),
1430                 .size = sizeof(__u64),
1431                 .align = sizeof(__u64),
1432                 .get = s390_gs_bc_get,
1433                 .set = s390_gs_bc_set,
1434         },
1435         {
1436                 .core_note_type = NT_S390_RI_CB,
1437                 .n = sizeof(struct runtime_instr_cb) / sizeof(__u64),
1438                 .size = sizeof(__u64),
1439                 .align = sizeof(__u64),
1440                 .get = s390_runtime_instr_get,
1441                 .set = s390_runtime_instr_set,
1442         },
1443 };
1444
1445 static const struct user_regset_view user_s390_view = {
1446         .name = "s390x",
1447         .e_machine = EM_S390,
1448         .regsets = s390_regsets,
1449         .n = ARRAY_SIZE(s390_regsets)
1450 };
1451
1452 #ifdef CONFIG_COMPAT
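/*
 * Compat (31-bit) variant of the general register regset.  It reuses the
 * per-word helpers of the compat PTRACE_PEEKUSR/POKEUSR path
 * (__peek_user_compat()/__poke_user_compat()) and copies one 32-bit word
 * at a time in either direction.
 */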
1453 static int s390_compat_regs_get(struct task_struct *target,
1454                                 const struct user_regset *regset,
1455                                 unsigned int pos, unsigned int count,
1456                                 void *kbuf, void __user *ubuf)
1457 {
1458         if (target == current)
1459                 save_access_regs(target->thread.acrs);
1460
1461         if (kbuf) {
1462                 compat_ulong_t *k = kbuf;
1463                 while (count > 0) {
1464                         *k++ = __peek_user_compat(target, pos);
1465                         count -= sizeof(*k);
1466                         pos += sizeof(*k);
1467                 }
1468         } else {
1469                 compat_ulong_t __user *u = ubuf;
1470                 while (count > 0) {
1471                         if (__put_user(__peek_user_compat(target, pos), u++))
1472                                 return -EFAULT;
1473                         count -= sizeof(*u);
1474                         pos += sizeof(*u);
1475                 }
1476         }
1477         return 0;
1478 }
1479
1480 static int s390_compat_regs_set(struct task_struct *target,
1481                                 const struct user_regset *regset,
1482                                 unsigned int pos, unsigned int count,
1483                                 const void *kbuf, const void __user *ubuf)
1484 {
1485         int rc = 0;
1486
1487         if (target == current)
1488                 save_access_regs(target->thread.acrs);
1489
1490         if (kbuf) {
1491                 const compat_ulong_t *k = kbuf;
1492                 while (count > 0 && !rc) {
1493                         rc = __poke_user_compat(target, pos, *k++);
1494                         count -= sizeof(*k);
1495                         pos += sizeof(*k);
1496                 }
1497         } else {
1498                 const compat_ulong_t  __user *u = ubuf;
1499                 while (count > 0 && !rc) {
1500                         compat_ulong_t word;
1501                         rc = __get_user(word, u++);
1502                         if (rc)
1503                                 break;
1504                         rc = __poke_user_compat(target, pos, word);
1505                         count -= sizeof(*u);
1506                         pos += sizeof(*u);
1507                 }
1508         }
1509
1510         if (rc == 0 && target == current)
1511                 restore_access_regs(target->thread.acrs);
1512
1513         return rc;
1514 }
1515
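/*
 * NT_S390_HIGH_GPRS: the upper 32 bits of the 64-bit general purpose
 * registers, which a 31-bit tracee cannot reach through the regular compat
 * register set.  pt_regs stores full 64-bit gprs and s390 is big-endian,
 * so when the gpr array is viewed as 32-bit words the high half of gpr N
 * is word 2*N; hence the stride of two compat words below.
 */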
1516 static int s390_compat_regs_high_get(struct task_struct *target,
1517                                      const struct user_regset *regset,
1518                                      unsigned int pos, unsigned int count,
1519                                      void *kbuf, void __user *ubuf)
1520 {
1521         compat_ulong_t *gprs_high;
1522
1523         gprs_high = (compat_ulong_t *)
1524                 &task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
1525         if (kbuf) {
1526                 compat_ulong_t *k = kbuf;
1527                 while (count > 0) {
1528                         *k++ = *gprs_high;
1529                         gprs_high += 2;
1530                         count -= sizeof(*k);
1531                 }
1532         } else {
1533                 compat_ulong_t __user *u = ubuf;
1534                 while (count > 0) {
1535                         if (__put_user(*gprs_high, u++))
1536                                 return -EFAULT;
1537                         gprs_high += 2;
1538                         count -= sizeof(*u);
1539                 }
1540         }
1541         return 0;
1542 }
1543
1544 static int s390_compat_regs_high_set(struct task_struct *target,
1545                                      const struct user_regset *regset,
1546                                      unsigned int pos, unsigned int count,
1547                                      const void *kbuf, const void __user *ubuf)
1548 {
1549         compat_ulong_t *gprs_high;
1550         int rc = 0;
1551
1552         gprs_high = (compat_ulong_t *)
1553                 &task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
1554         if (kbuf) {
1555                 const compat_ulong_t *k = kbuf;
1556                 while (count > 0) {
1557                         *gprs_high = *k++;
1558                         gprs_high += 2;
1559                         count -= sizeof(*k);
1560                 }
1561         } else {
1562                 const compat_ulong_t  __user *u = ubuf;
1563                 while (count > 0 && !rc) {
1564                         unsigned long word;
1565                         rc = __get_user(word, u++);
1566                         if (rc)
1567                                 break;
1568                         *gprs_high = word;
1569                         gprs_high += 2;
1570                         count -= sizeof(*u);
1571                 }
1572         }
1573
1574         return rc;
1575 }
1576
1577 static int s390_compat_last_break_get(struct task_struct *target,
1578                                       const struct user_regset *regset,
1579                                       unsigned int pos, unsigned int count,
1580                                       void *kbuf, void __user *ubuf)
1581 {
1582         compat_ulong_t last_break;
1583
1584         if (count > 0) {
1585                 last_break = target->thread.last_break;
1586                 if (kbuf) {
1587                         unsigned long *k = kbuf;
1588                         *k = last_break;
1589                 } else {
1590                         unsigned long  __user *u = ubuf;
1591                         if (__put_user(last_break, u))
1592                                 return -EFAULT;
1593                 }
1594         }
1595         return 0;
1596 }
1597
1598 static int s390_compat_last_break_set(struct task_struct *target,
1599                                       const struct user_regset *regset,
1600                                       unsigned int pos, unsigned int count,
1601                                       const void *kbuf, const void __user *ubuf)
1602 {
1603         return 0;
1604 }
1605
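/*
 * Regset table for 31-bit (compat) tasks.  It mirrors the 64-bit table,
 * swapping in the compat accessors where the layout differs and adding
 * NT_S390_HIGH_GPRS for the upper register halves.
 */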
1606 static const struct user_regset s390_compat_regsets[] = {
1607         {
1608                 .core_note_type = NT_PRSTATUS,
1609                 .n = sizeof(s390_compat_regs) / sizeof(compat_long_t),
1610                 .size = sizeof(compat_long_t),
1611                 .align = sizeof(compat_long_t),
1612                 .get = s390_compat_regs_get,
1613                 .set = s390_compat_regs_set,
1614         },
1615         {
1616                 .core_note_type = NT_PRFPREG,
1617                 .n = sizeof(s390_fp_regs) / sizeof(compat_long_t),
1618                 .size = sizeof(compat_long_t),
1619                 .align = sizeof(compat_long_t),
1620                 .get = s390_fpregs_get,
1621                 .set = s390_fpregs_set,
1622         },
1623         {
1624                 .core_note_type = NT_S390_SYSTEM_CALL,
1625                 .n = 1,
1626                 .size = sizeof(compat_uint_t),
1627                 .align = sizeof(compat_uint_t),
1628                 .get = s390_system_call_get,
1629                 .set = s390_system_call_set,
1630         },
1631         {
1632                 .core_note_type = NT_S390_LAST_BREAK,
1633                 .n = 1,
1634                 .size = sizeof(long),
1635                 .align = sizeof(long),
1636                 .get = s390_compat_last_break_get,
1637                 .set = s390_compat_last_break_set,
1638         },
1639         {
1640                 .core_note_type = NT_S390_TDB,
1641                 .n = 1,
1642                 .size = 256,
1643                 .align = 1,
1644                 .get = s390_tdb_get,
1645                 .set = s390_tdb_set,
1646         },
1647         {
1648                 .core_note_type = NT_S390_VXRS_LOW,
1649                 .n = __NUM_VXRS_LOW,
1650                 .size = sizeof(__u64),
1651                 .align = sizeof(__u64),
1652                 .get = s390_vxrs_low_get,
1653                 .set = s390_vxrs_low_set,
1654         },
1655         {
1656                 .core_note_type = NT_S390_VXRS_HIGH,
1657                 .n = __NUM_VXRS_HIGH,
1658                 .size = sizeof(__vector128),
1659                 .align = sizeof(__vector128),
1660                 .get = s390_vxrs_high_get,
1661                 .set = s390_vxrs_high_set,
1662         },
1663         {
1664                 .core_note_type = NT_S390_HIGH_GPRS,
1665                 .n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t),
1666                 .size = sizeof(compat_long_t),
1667                 .align = sizeof(compat_long_t),
1668                 .get = s390_compat_regs_high_get,
1669                 .set = s390_compat_regs_high_set,
1670         },
1671         {
1672                 .core_note_type = NT_S390_GS_CB,
1673                 .n = sizeof(struct gs_cb) / sizeof(__u64),
1674                 .size = sizeof(__u64),
1675                 .align = sizeof(__u64),
1676                 .get = s390_gs_cb_get,
1677                 .set = s390_gs_cb_set,
1678         },
1679         {
1680                 .core_note_type = NT_S390_GS_BC,
1681                 .n = sizeof(struct gs_cb) / sizeof(__u64),
1682                 .size = sizeof(__u64),
1683                 .align = sizeof(__u64),
1684                 .get = s390_gs_bc_get,
1685                 .set = s390_gs_bc_set,
1686         },
1687         {
1688                 .core_note_type = NT_S390_RI_CB,
1689                 .n = sizeof(struct runtime_instr_cb) / sizeof(__u64),
1690                 .size = sizeof(__u64),
1691                 .align = sizeof(__u64),
1692                 .get = s390_runtime_instr_get,
1693                 .set = s390_runtime_instr_set,
1694         },
1695 };
1696
1697 static const struct user_regset_view user_s390_compat_view = {
1698         .name = "s390",
1699         .e_machine = EM_S390,
1700         .regsets = s390_compat_regsets,
1701         .n = ARRAY_SIZE(s390_compat_regsets)
1702 };
1703 #endif
1704
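/*
 * Select the regset view for a task: the 31-bit view for tasks running
 * with TIF_31BIT set (under CONFIG_COMPAT), the 64-bit view otherwise.
 * The generic ptrace and ELF core dump code takes the note types, sizes
 * and accessors from the returned view.
 */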
1705 const struct user_regset_view *task_user_regset_view(struct task_struct *task)
1706 {
1707 #ifdef CONFIG_COMPAT
1708         if (test_tsk_thread_flag(task, TIF_31BIT))
1709                 return &user_s390_compat_view;
1710 #endif
1711         return &user_s390_view;
1712 }
1713
1714 static const char *gpr_names[NUM_GPRS] = {
1715         "r0", "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
1716         "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
1717 };
1718
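/*
 * Helpers backing the generic register/stack access API, used e.g. by
 * kprobe based event tracing to fetch "%rN" arguments by name or offset.
 * A sketch of the intended round trip, assuming a valid register name:
 *
 *	int off = regs_query_register_offset("r2");	// -> 2
 *	unsigned long val = regs_get_register(regs, off);
 */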
1719 unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset)
1720 {
1721         if (offset >= NUM_GPRS)
1722                 return 0;
1723         return regs->gprs[offset];
1724 }
1725
1726 int regs_query_register_offset(const char *name)
1727 {
1728         unsigned long offset;
1729
1730         if (!name || *name != 'r')
1731                 return -EINVAL;
1732         if (kstrtoul(name + 1, 10, &offset))
1733                 return -EINVAL;
1734         if (offset >= NUM_GPRS)
1735                 return -EINVAL;
1736         return offset;
1737 }
1738
1739 const char *regs_query_register_name(unsigned int offset)
1740 {
1741         if (offset >= NUM_GPRS)
1742                 return NULL;
1743         return gpr_names[offset];
1744 }
1745
1746 static int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
1747 {
1748         unsigned long ksp = kernel_stack_pointer(regs);
1749
1750         return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1));
1751 }
1752
1753 /**
1754  * regs_get_kernel_stack_nth() - get Nth entry of the stack
1755  * @regs: pt_regs which contains the kernel stack pointer.
1756  * @n: stack entry number.
1757  *
1758  * regs_get_kernel_stack_nth() returns the @n-th entry of the kernel stack
1759  * specified by @regs. If the @n-th entry is NOT in the kernel stack,
1760  * this returns 0.
1761  */
1762 unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
1763 {
1764         unsigned long addr;
1765
1766         addr = kernel_stack_pointer(regs) + n * sizeof(long);
1767         if (!regs_within_kernel_stack(regs, addr))
1768                 return 0;
1769         return *(unsigned long *)addr;
1770 }
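/*
 * Illustration only (assumes the slots are actually present on the current
 * kernel stack): reading the first two stack slots from a tracing or kprobe
 * handler could look like
 *
 *	unsigned long slot0 = regs_get_kernel_stack_nth(regs, 0);
 *	unsigned long slot1 = regs_get_kernel_stack_nth(regs, 1);
 *
 * Any slot that falls outside the THREAD_SIZE aligned stack of @regs reads
 * back as 0 rather than faulting.
 */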