arch/powerpc/kernel/ptrace.c
1 /*
2  *  PowerPC version
3  *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4  *
5  *  Derived from "arch/m68k/kernel/ptrace.c"
6  *  Copyright (C) 1994 by Hamish Macdonald
7  *  Taken from linux/kernel/ptrace.c and modified for M680x0.
8  *  linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
9  *
10  * Modified by Cort Dougan (cort@hq.fsmlabs.com)
11  * and Paul Mackerras (paulus@samba.org).
12  *
13  * This file is subject to the terms and conditions of the GNU General
14  * Public License.  See the file README.legal in the main directory of
15  * this archive for more details.
16  */
17
18 #include <linux/kernel.h>
19 #include <linux/sched.h>
20 #include <linux/mm.h>
21 #include <linux/smp.h>
22 #include <linux/errno.h>
23 #include <linux/ptrace.h>
24 #include <linux/regset.h>
25 #include <linux/tracehook.h>
26 #include <linux/elf.h>
27 #include <linux/user.h>
28 #include <linux/security.h>
29 #include <linux/signal.h>
30 #include <linux/seccomp.h>
31 #include <linux/audit.h>
32 #include <trace/syscall.h>
33 #include <linux/hw_breakpoint.h>
34 #include <linux/perf_event.h>
35 #include <linux/context_tracking.h>
36
37 #include <linux/uaccess.h>
38 #include <linux/pkeys.h>
39 #include <asm/page.h>
40 #include <asm/pgtable.h>
41 #include <asm/switch_to.h>
42 #include <asm/tm.h>
43 #include <asm/asm-prototypes.h>
44 #include <asm/debug.h>
45
46 #define CREATE_TRACE_POINTS
47 #include <trace/events/syscalls.h>
48
49 /*
50  * The parameter save area on the stack is used to store arguments being passed
51  * to the callee function and is located at a fixed offset from the stack pointer.
52  */
53 #ifdef CONFIG_PPC32
54 #define PARAMETER_SAVE_AREA_OFFSET      24  /* bytes */
55 #else /* CONFIG_PPC32 */
56 #define PARAMETER_SAVE_AREA_OFFSET      48  /* bytes */
57 #endif
58
59 struct pt_regs_offset {
60         const char *name;
61         int offset;
62 };
63
64 #define STR(s)  #s                      /* convert to string */
65 #define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
66 #define GPR_OFFSET_NAME(num)    \
67         {.name = STR(r##num), .offset = offsetof(struct pt_regs, gpr[num])}, \
68         {.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])}
69 #define REG_OFFSET_END {.name = NULL, .offset = 0}
70
71 #define TVSO(f) (offsetof(struct thread_vr_state, f))
72 #define TFSO(f) (offsetof(struct thread_fp_state, f))
73 #define TSO(f)  (offsetof(struct thread_struct, f))
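/*
 * The TVSO/TFSO/TSO macros above are shorthand offsetof() helpers used by the
 * BUILD_BUG_ON() layout checks later in this file.
 */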
74
75 static const struct pt_regs_offset regoffset_table[] = {
76         GPR_OFFSET_NAME(0),
77         GPR_OFFSET_NAME(1),
78         GPR_OFFSET_NAME(2),
79         GPR_OFFSET_NAME(3),
80         GPR_OFFSET_NAME(4),
81         GPR_OFFSET_NAME(5),
82         GPR_OFFSET_NAME(6),
83         GPR_OFFSET_NAME(7),
84         GPR_OFFSET_NAME(8),
85         GPR_OFFSET_NAME(9),
86         GPR_OFFSET_NAME(10),
87         GPR_OFFSET_NAME(11),
88         GPR_OFFSET_NAME(12),
89         GPR_OFFSET_NAME(13),
90         GPR_OFFSET_NAME(14),
91         GPR_OFFSET_NAME(15),
92         GPR_OFFSET_NAME(16),
93         GPR_OFFSET_NAME(17),
94         GPR_OFFSET_NAME(18),
95         GPR_OFFSET_NAME(19),
96         GPR_OFFSET_NAME(20),
97         GPR_OFFSET_NAME(21),
98         GPR_OFFSET_NAME(22),
99         GPR_OFFSET_NAME(23),
100         GPR_OFFSET_NAME(24),
101         GPR_OFFSET_NAME(25),
102         GPR_OFFSET_NAME(26),
103         GPR_OFFSET_NAME(27),
104         GPR_OFFSET_NAME(28),
105         GPR_OFFSET_NAME(29),
106         GPR_OFFSET_NAME(30),
107         GPR_OFFSET_NAME(31),
108         REG_OFFSET_NAME(nip),
109         REG_OFFSET_NAME(msr),
110         REG_OFFSET_NAME(ctr),
111         REG_OFFSET_NAME(link),
112         REG_OFFSET_NAME(xer),
113         REG_OFFSET_NAME(ccr),
114 #ifdef CONFIG_PPC64
115         REG_OFFSET_NAME(softe),
116 #else
117         REG_OFFSET_NAME(mq),
118 #endif
119         REG_OFFSET_NAME(trap),
120         REG_OFFSET_NAME(dar),
121         REG_OFFSET_NAME(dsisr),
122         REG_OFFSET_END,
123 };
124
125 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
126 static void flush_tmregs_to_thread(struct task_struct *tsk)
127 {
128         /*
129          * If the task is not current, it will already have been flushed to
130          * its thread_struct during __switch_to().
131          *
132          * A reclaim flushes ALL the state; if we are not in a transaction,
133          * just save the live TM SPRs into the thread_struct.
134          */
135
136         if ((!cpu_has_feature(CPU_FTR_TM)) || (tsk != current))
137                 return;
138
139         if (MSR_TM_SUSPENDED(mfmsr())) {
140                 tm_reclaim_current(TM_CAUSE_SIGNAL);
141         } else {
142                 tm_enable();
143                 tm_save_sprs(&(tsk->thread));
144         }
145 }
146 #else
147 static inline void flush_tmregs_to_thread(struct task_struct *tsk) { }
148 #endif
149
150 /**
151  * regs_query_register_offset() - query register offset from its name
152  * @name:       the name of a register
153  *
154  * regs_query_register_offset() returns the offset of a register in struct
155  * pt_regs from its name. If the name is invalid, this returns -EINVAL.
156  */
157 int regs_query_register_offset(const char *name)
158 {
159         const struct pt_regs_offset *roff;
160         for (roff = regoffset_table; roff->name != NULL; roff++)
161                 if (!strcmp(roff->name, name))
162                         return roff->offset;
163         return -EINVAL;
164 }
165
166 /**
167  * regs_query_register_name() - query register name from its offset
168  * @offset:     the offset of a register in struct pt_regs.
169  *
170  * regs_query_register_name() returns the name of a register from its
171  * offset in struct pt_regs. If the @offset is invalid, this returns NULL.
172  */
173 const char *regs_query_register_name(unsigned int offset)
174 {
175         const struct pt_regs_offset *roff;
176         for (roff = regoffset_table; roff->name != NULL; roff++)
177                 if (roff->offset == offset)
178                         return roff->name;
179         return NULL;
180 }
181
182 /*
183  * This does not yet catch signals sent when the child dies;
184  * that needs to be done in exit.c or in signal.c.
185  */
186
187 /*
188  * Set of msr bits that gdb can change on behalf of a process.
189  */
190 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
191 #define MSR_DEBUGCHANGE 0
192 #else
193 #define MSR_DEBUGCHANGE (MSR_SE | MSR_BE)
194 #endif
195
196 /*
197  * Max register writeable via put_reg
198  */
199 #ifdef CONFIG_PPC32
200 #define PT_MAX_PUT_REG  PT_MQ
201 #else
202 #define PT_MAX_PUT_REG  PT_CCR
203 #endif
204
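/*
 * Note on the MSR helpers below: the MSR value reported to userspace has the
 * thread's FP exception mode bits folded in, and on writes only the
 * MSR_DEBUGCHANGE bits (single-step / branch-trace enable) are honoured, so a
 * tracer cannot flip arbitrary MSR bits.
 */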
205 static unsigned long get_user_msr(struct task_struct *task)
206 {
207         return task->thread.regs->msr | task->thread.fpexc_mode;
208 }
209
210 static int set_user_msr(struct task_struct *task, unsigned long msr)
211 {
212         task->thread.regs->msr &= ~MSR_DEBUGCHANGE;
213         task->thread.regs->msr |= msr & MSR_DEBUGCHANGE;
214         return 0;
215 }
216
217 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
218 static unsigned long get_user_ckpt_msr(struct task_struct *task)
219 {
220         return task->thread.ckpt_regs.msr | task->thread.fpexc_mode;
221 }
222
223 static int set_user_ckpt_msr(struct task_struct *task, unsigned long msr)
224 {
225         task->thread.ckpt_regs.msr &= ~MSR_DEBUGCHANGE;
226         task->thread.ckpt_regs.msr |= msr & MSR_DEBUGCHANGE;
227         return 0;
228 }
229
230 static int set_user_ckpt_trap(struct task_struct *task, unsigned long trap)
231 {
232         task->thread.ckpt_regs.trap = trap & 0xfff0;
233         return 0;
234 }
235 #endif
236
237 #ifdef CONFIG_PPC64
238 static int get_user_dscr(struct task_struct *task, unsigned long *data)
239 {
240         *data = task->thread.dscr;
241         return 0;
242 }
243
244 static int set_user_dscr(struct task_struct *task, unsigned long dscr)
245 {
246         task->thread.dscr = dscr;
247         task->thread.dscr_inherit = 1;
248         return 0;
249 }
250 #else
251 static int get_user_dscr(struct task_struct *task, unsigned long *data)
252 {
253         return -EIO;
254 }
255
256 static int set_user_dscr(struct task_struct *task, unsigned long dscr)
257 {
258         return -EIO;
259 }
260 #endif
261
262 /*
263  * We prevent mucking around with the reserved bits of trap,
264  * which are used internally by the kernel.
265  */
266 static int set_user_trap(struct task_struct *task, unsigned long trap)
267 {
268         task->thread.regs->trap = trap & 0xfff0;
269         return 0;
270 }
271
272 /*
273  * Get contents of register REGNO in task TASK.
274  */
275 int ptrace_get_reg(struct task_struct *task, int regno, unsigned long *data)
276 {
277         if ((task->thread.regs == NULL) || !data)
278                 return -EIO;
279
280         if (regno == PT_MSR) {
281                 *data = get_user_msr(task);
282                 return 0;
283         }
284
285         if (regno == PT_DSCR)
286                 return get_user_dscr(task, data);
287
288 #ifdef CONFIG_PPC64
289         /*
290          * softe copies the paca->irq_soft_mask variable state. Since irq_soft_mask
291          * is no longer used as a simple flag, force userspace to always see the
292          * softe value as 1, which means interrupts are not soft disabled.
293          */
294         if (regno == PT_SOFTE) {
295                 *data = 1;
296                 return 0;
297         }
298 #endif
299
300         if (regno < (sizeof(struct pt_regs) / sizeof(unsigned long))) {
301                 *data = ((unsigned long *)task->thread.regs)[regno];
302                 return 0;
303         }
304
305         return -EIO;
306 }
307
308 /*
309  * Write contents of register REGNO in task TASK.
310  */
311 int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data)
312 {
313         if (task->thread.regs == NULL)
314                 return -EIO;
315
316         if (regno == PT_MSR)
317                 return set_user_msr(task, data);
318         if (regno == PT_TRAP)
319                 return set_user_trap(task, data);
320         if (regno == PT_DSCR)
321                 return set_user_dscr(task, data);
322
323         if (regno <= PT_MAX_PUT_REG) {
324                 ((unsigned long *)task->thread.regs)[regno] = data;
325                 return 0;
326         }
327         return -EIO;
328 }
329
330 static int gpr_get(struct task_struct *target, const struct user_regset *regset,
331                    unsigned int pos, unsigned int count,
332                    void *kbuf, void __user *ubuf)
333 {
334         int i, ret;
335
336         if (target->thread.regs == NULL)
337                 return -EIO;
338
339         if (!FULL_REGS(target->thread.regs)) {
340                 /* We have a partial register set.  Fill 14-31 with bogus values */
341                 for (i = 14; i < 32; i++)
342                         target->thread.regs->gpr[i] = NV_REG_POISON;
343         }
344
345         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
346                                   target->thread.regs,
347                                   0, offsetof(struct pt_regs, msr));
348         if (!ret) {
349                 unsigned long msr = get_user_msr(target);
350                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
351                                           offsetof(struct pt_regs, msr),
352                                           offsetof(struct pt_regs, msr) +
353                                           sizeof(msr));
354         }
355
356         BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
357                      offsetof(struct pt_regs, msr) + sizeof(long));
358
359         if (!ret)
360                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
361                                           &target->thread.regs->orig_gpr3,
362                                           offsetof(struct pt_regs, orig_gpr3),
363                                           sizeof(struct pt_regs));
364         if (!ret)
365                 ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
366                                                sizeof(struct pt_regs), -1);
367
368         return ret;
369 }
370
371 static int gpr_set(struct task_struct *target, const struct user_regset *regset,
372                    unsigned int pos, unsigned int count,
373                    const void *kbuf, const void __user *ubuf)
374 {
375         unsigned long reg;
376         int ret;
377
378         if (target->thread.regs == NULL)
379                 return -EIO;
380
381         CHECK_FULL_REGS(target->thread.regs);
382
383         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
384                                  target->thread.regs,
385                                  0, PT_MSR * sizeof(reg));
386
387         if (!ret && count > 0) {
388                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
389                                          PT_MSR * sizeof(reg),
390                                          (PT_MSR + 1) * sizeof(reg));
391                 if (!ret)
392                         ret = set_user_msr(target, reg);
393         }
394
395         BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
396                      offsetof(struct pt_regs, msr) + sizeof(long));
397
398         if (!ret)
399                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
400                                          &target->thread.regs->orig_gpr3,
401                                          PT_ORIG_R3 * sizeof(reg),
402                                          (PT_MAX_PUT_REG + 1) * sizeof(reg));
403
404         if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
405                 ret = user_regset_copyin_ignore(
406                         &pos, &count, &kbuf, &ubuf,
407                         (PT_MAX_PUT_REG + 1) * sizeof(reg),
408                         PT_TRAP * sizeof(reg));
409
410         if (!ret && count > 0) {
411                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
412                                          PT_TRAP * sizeof(reg),
413                                          (PT_TRAP + 1) * sizeof(reg));
414                 if (!ret)
415                         ret = set_user_trap(target, reg);
416         }
417
418         if (!ret)
419                 ret = user_regset_copyin_ignore(
420                         &pos, &count, &kbuf, &ubuf,
421                         (PT_TRAP + 1) * sizeof(reg), -1);
422
423         return ret;
424 }
425
426 /*
427  * Regardless of transactions, 'fp_state' holds the current running
428  * value of all FPR registers and 'ckfp_state' holds the last checkpointed
429  * value of all FPR registers for the current transaction.
430  *
431  * Userspace interface buffer layout:
432  *
433  * struct data {
434  *      u64     fpr[32];
435  *      u64     fpscr;
436  * };
437  */
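/*
 * For illustration only (not part of this file): a tracer would typically
 * transfer this layout through the regset interface, e.g.
 *
 *	struct { __u64 fpr[32]; __u64 fpscr; } fp;
 *	struct iovec iov = { .iov_base = &fp, .iov_len = sizeof(fp) };
 *	ptrace(PTRACE_GETREGSET, pid, NT_PRFPREG, &iov);
 */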
438 static int fpr_get(struct task_struct *target, const struct user_regset *regset,
439                    unsigned int pos, unsigned int count,
440                    void *kbuf, void __user *ubuf)
441 {
442 #ifdef CONFIG_VSX
443         u64 buf[33];
444         int i;
445
446         flush_fp_to_thread(target);
447
448         /* copy to local buffer then write that out */
449         for (i = 0; i < 32 ; i++)
450                 buf[i] = target->thread.TS_FPR(i);
451         buf[32] = target->thread.fp_state.fpscr;
452         return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
453 #else
454         BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
455                      offsetof(struct thread_fp_state, fpr[32]));
456
457         flush_fp_to_thread(target);
458
459         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
460                                    &target->thread.fp_state, 0, -1);
461 #endif
462 }
463
464 /*
465  * Regardless of transactions, 'fp_state' holds the current running
466  * value of all FPR registers and 'ckfp_state' holds the last checkpointed
467  * value of all FPR registers for the current transaction.
468  *
469  * Userspace interface buffer layout:
470  *
471  * struct data {
472  *      u64     fpr[32];
473  *      u64     fpscr;
474  * };
475  *
476  */
477 static int fpr_set(struct task_struct *target, const struct user_regset *regset,
478                    unsigned int pos, unsigned int count,
479                    const void *kbuf, const void __user *ubuf)
480 {
481 #ifdef CONFIG_VSX
482         u64 buf[33];
483         int i;
484
485         flush_fp_to_thread(target);
486
487         for (i = 0; i < 32 ; i++)
488                 buf[i] = target->thread.TS_FPR(i);
489         buf[32] = target->thread.fp_state.fpscr;
490
491         /* copy to local buffer then write that out */
492         i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
493         if (i)
494                 return i;
495
496         for (i = 0; i < 32 ; i++)
497                 target->thread.TS_FPR(i) = buf[i];
498         target->thread.fp_state.fpscr = buf[32];
499         return 0;
500 #else
501         BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
502                      offsetof(struct thread_fp_state, fpr[32]));
503
504         flush_fp_to_thread(target);
505
506         return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
507                                   &target->thread.fp_state, 0, -1);
508 #endif
509 }
510
511 #ifdef CONFIG_ALTIVEC
512 /*
513  * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
514  * The transfer totals 34 quadwords.  Quadwords 0-31 contain the
515  * corresponding vector registers.  Quadword 32 contains the vscr as the
516  * last word (offset 12) within that quadword.  Quadword 33 contains the
517  * vrsave as the first word (offset 0) within the quadword.
518  *
519  * This definition of the VMX state is compatible with the current PPC32
520  * ptrace interface.  This allows signal handling and ptrace to use the
521  * same structures.  This also simplifies the implementation of a bi-arch
522  * (combined 32- and 64-bit) gdb.
523  */
524
525 static int vr_active(struct task_struct *target,
526                      const struct user_regset *regset)
527 {
528         flush_altivec_to_thread(target);
529         return target->thread.used_vr ? regset->n : 0;
530 }
531
532 /*
533  * Regardless of transactions, 'vr_state' holds the current running
534  * value of all the VMX registers and 'ckvr_state' holds the last
535  * checkpointed value of all the VMX registers for the current
536  * transaction to fall back on in case it aborts.
537  *
538  * Userspace interface buffer layout:
539  *
540  * struct data {
541  *      vector128       vr[32];
542  *      vector128       vscr;
543  *      vector128       vrsave;
544  * };
545  */
546 static int vr_get(struct task_struct *target, const struct user_regset *regset,
547                   unsigned int pos, unsigned int count,
548                   void *kbuf, void __user *ubuf)
549 {
550         int ret;
551
552         flush_altivec_to_thread(target);
553
554         BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
555                      offsetof(struct thread_vr_state, vr[32]));
556
557         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
558                                   &target->thread.vr_state, 0,
559                                   33 * sizeof(vector128));
560         if (!ret) {
561                 /*
562                  * Copy out only the low-order word of vrsave.
563                  */
564                 union {
565                         elf_vrreg_t reg;
566                         u32 word;
567                 } vrsave;
568                 memset(&vrsave, 0, sizeof(vrsave));
569
570                 vrsave.word = target->thread.vrsave;
571
572                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
573                                           33 * sizeof(vector128), -1);
574         }
575
576         return ret;
577 }
578
579 /*
580  * Regardless of transactions, 'vr_state' holds the current running
581  * value of all the VMX registers and 'ckvr_state' holds the last
582  * checkpointed value of all the VMX registers for the current
583  * transaction to fall back on in case it aborts.
584  *
585  * Userspace interface buffer layout:
586  *
587  * struct data {
588  *      vector128       vr[32];
589  *      vector128       vscr;
590  *      vector128       vrsave;
591  * };
592  */
593 static int vr_set(struct task_struct *target, const struct user_regset *regset,
594                   unsigned int pos, unsigned int count,
595                   const void *kbuf, const void __user *ubuf)
596 {
597         int ret;
598
599         flush_altivec_to_thread(target);
600
601         BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
602                      offsetof(struct thread_vr_state, vr[32]));
603
604         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
605                                  &target->thread.vr_state, 0,
606                                  33 * sizeof(vector128));
607         if (!ret && count > 0) {
608                 /*
609                  * We use only the first word of vrsave.
610                  */
611                 union {
612                         elf_vrreg_t reg;
613                         u32 word;
614                 } vrsave;
615                 memset(&vrsave, 0, sizeof(vrsave));
616
617                 vrsave.word = target->thread.vrsave;
618
619                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
620                                          33 * sizeof(vector128), -1);
621                 if (!ret)
622                         target->thread.vrsave = vrsave.word;
623         }
624
625         return ret;
626 }
627 #endif /* CONFIG_ALTIVEC */
628
629 #ifdef CONFIG_VSX
630 /*
631  * Currently, to set and get all the VSX state, you need to call
632  * the FP and VMX calls as well.  This only gets/sets the lower 32
633  * 128-bit VSX registers.
634  */
635
636 static int vsr_active(struct task_struct *target,
637                       const struct user_regset *regset)
638 {
639         flush_vsx_to_thread(target);
640         return target->thread.used_vsr ? regset->n : 0;
641 }
642
643 /*
644  * Regardless of transactions, 'fp_state' holds the current running
645  * value of all FPR registers and 'ckfp_state' holds the last
646  * checkpointed value of all FPR registers for the current
647  * transaction.
648  *
649  * Userspace interface buffer layout:
650  *
651  * struct data {
652  *      u64     vsx[32];
653  * };
654  */
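/*
 * For illustration only (not part of this file): this corresponds to the
 * NT_PPC_VSX regset. Each u64 is the low doubleword of VSR0-VSR31; the high
 * doublewords are the FPRs (FP regset), and VSR32-VSR63 alias the VMX
 * registers, which is why a debugger needs all three regsets for full state.
 */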
655 static int vsr_get(struct task_struct *target, const struct user_regset *regset,
656                    unsigned int pos, unsigned int count,
657                    void *kbuf, void __user *ubuf)
658 {
659         u64 buf[32];
660         int ret, i;
661
662         flush_tmregs_to_thread(target);
663         flush_fp_to_thread(target);
664         flush_altivec_to_thread(target);
665         flush_vsx_to_thread(target);
666
667         for (i = 0; i < 32 ; i++)
668                 buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
669
670         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
671                                   buf, 0, 32 * sizeof(double));
672
673         return ret;
674 }
675
676 /*
677  * Regardless of transactions, 'fp_state' holds the current running
678  * value of all FPR registers and 'ckfp_state' holds the last
679  * checkpointed value of all FPR registers for the current
680  * transaction.
681  *
682  * Userspace interface buffer layout:
683  *
684  * struct data {
685  *      u64     vsx[32];
686  * };
687  */
688 static int vsr_set(struct task_struct *target, const struct user_regset *regset,
689                    unsigned int pos, unsigned int count,
690                    const void *kbuf, const void __user *ubuf)
691 {
692         u64 buf[32];
693         int ret, i;
694
695         flush_tmregs_to_thread(target);
696         flush_fp_to_thread(target);
697         flush_altivec_to_thread(target);
698         flush_vsx_to_thread(target);
699
700         for (i = 0; i < 32 ; i++)
701                 buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
702
703         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
704                                  buf, 0, 32 * sizeof(double));
705         if (!ret)
706                 for (i = 0; i < 32 ; i++)
707                         target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
708
709         return ret;
710 }
711 #endif /* CONFIG_VSX */
712
713 #ifdef CONFIG_SPE
714
715 /*
716  * For get_evrregs/set_evrregs functions 'data' has the following layout:
717  *
718  * struct {
719  *   u32 evr[32];
720  *   u64 acc;
721  *   u32 spefscr;
722  * }
723  */
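/*
 * For illustration only (not part of this file): this layout is what the SPE
 * regset (NT_PPC_SPE) and the legacy PTRACE_GETEVRREGS request transfer.
 */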
724
725 static int evr_active(struct task_struct *target,
726                       const struct user_regset *regset)
727 {
728         flush_spe_to_thread(target);
729         return target->thread.used_spe ? regset->n : 0;
730 }
731
732 static int evr_get(struct task_struct *target, const struct user_regset *regset,
733                    unsigned int pos, unsigned int count,
734                    void *kbuf, void __user *ubuf)
735 {
736         int ret;
737
738         flush_spe_to_thread(target);
739
740         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
741                                   &target->thread.evr,
742                                   0, sizeof(target->thread.evr));
743
744         BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
745                      offsetof(struct thread_struct, spefscr));
746
747         if (!ret)
748                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
749                                           &target->thread.acc,
750                                           sizeof(target->thread.evr), -1);
751
752         return ret;
753 }
754
755 static int evr_set(struct task_struct *target, const struct user_regset *regset,
756                    unsigned int pos, unsigned int count,
757                    const void *kbuf, const void __user *ubuf)
758 {
759         int ret;
760
761         flush_spe_to_thread(target);
762
763         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
764                                  &target->thread.evr,
765                                  0, sizeof(target->thread.evr));
766
767         BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
768                      offsetof(struct thread_struct, spefscr));
769
770         if (!ret)
771                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
772                                          &target->thread.acc,
773                                          sizeof(target->thread.evr), -1);
774
775         return ret;
776 }
777 #endif /* CONFIG_SPE */
778
779 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
780 /**
781  * tm_cgpr_active - get active number of registers in CGPR
782  * @target:     The target task.
783  * @regset:     The user regset structure.
784  *
785  * This function checks for the active number of available
786  * registers in the transaction checkpointed GPR category.
787  */
788 static int tm_cgpr_active(struct task_struct *target,
789                           const struct user_regset *regset)
790 {
791         if (!cpu_has_feature(CPU_FTR_TM))
792                 return -ENODEV;
793
794         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
795                 return 0;
796
797         return regset->n;
798 }
799
800 /**
801  * tm_cgpr_get - get CGPR registers
802  * @target:     The target task.
803  * @regset:     The user regset structure.
804  * @pos:        The buffer position.
805  * @count:      Number of bytes to copy.
806  * @kbuf:       Kernel buffer to copy from.
807  * @ubuf:       User buffer to copy into.
808  *
809  * This function gets transaction checkpointed GPR registers.
810  *
811  * When the transaction is active, 'ckpt_regs' holds all the checkpointed
812  * GPR register values for the current transaction to fall back on if it
813  * aborts in between. This function gets those checkpointed GPR registers.
814  * The userspace interface buffer layout is as follows.
815  *
816  * struct data {
817  *      struct pt_regs ckpt_regs;
818  * };
819  */
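/*
 * For illustration only (not part of this file): the checkpointed state is
 * exported as its own regset (e.g. NT_PPC_TM_CGPR here), so a debugger can
 * inspect both the live registers and the values that will be restored if
 * the transaction aborts.
 */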
820 static int tm_cgpr_get(struct task_struct *target,
821                         const struct user_regset *regset,
822                         unsigned int pos, unsigned int count,
823                         void *kbuf, void __user *ubuf)
824 {
825         int ret;
826
827         if (!cpu_has_feature(CPU_FTR_TM))
828                 return -ENODEV;
829
830         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
831                 return -ENODATA;
832
833         flush_tmregs_to_thread(target);
834         flush_fp_to_thread(target);
835         flush_altivec_to_thread(target);
836
837         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
838                                   &target->thread.ckpt_regs,
839                                   0, offsetof(struct pt_regs, msr));
840         if (!ret) {
841                 unsigned long msr = get_user_ckpt_msr(target);
842
843                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
844                                           offsetof(struct pt_regs, msr),
845                                           offsetof(struct pt_regs, msr) +
846                                           sizeof(msr));
847         }
848
849         BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
850                      offsetof(struct pt_regs, msr) + sizeof(long));
851
852         if (!ret)
853                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
854                                           &target->thread.ckpt_regs.orig_gpr3,
855                                           offsetof(struct pt_regs, orig_gpr3),
856                                           sizeof(struct pt_regs));
857         if (!ret)
858                 ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
859                                                sizeof(struct pt_regs), -1);
860
861         return ret;
862 }
863
864 /**
865  * tm_cgpr_set - set the CGPR registers
866  * @target:     The target task.
867  * @regset:     The user regset structure.
868  * @pos:        The buffer position.
869  * @count:      Number of bytes to copy.
870  * @kbuf:       Kernel buffer to copy into.
871  * @ubuf:       User buffer to copy from.
872  *
873  * This function sets in transaction checkpointed GPR registers.
874  *
875  * When the transaction is active, 'ckpt_regs' holds the checkpointed
876  * GPR register values for the current transaction to fall back on if it
877  * aborts in between. This function sets those checkpointed GPR registers.
878  * The userspace interface buffer layout is as follows.
879  *
880  * struct data {
881  *      struct pt_regs ckpt_regs;
882  * };
883  */
884 static int tm_cgpr_set(struct task_struct *target,
885                         const struct user_regset *regset,
886                         unsigned int pos, unsigned int count,
887                         const void *kbuf, const void __user *ubuf)
888 {
889         unsigned long reg;
890         int ret;
891
892         if (!cpu_has_feature(CPU_FTR_TM))
893                 return -ENODEV;
894
895         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
896                 return -ENODATA;
897
898         flush_tmregs_to_thread(target);
899         flush_fp_to_thread(target);
900         flush_altivec_to_thread(target);
901
902         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
903                                  &target->thread.ckpt_regs,
904                                  0, PT_MSR * sizeof(reg));
905
906         if (!ret && count > 0) {
907                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
908                                          PT_MSR * sizeof(reg),
909                                          (PT_MSR + 1) * sizeof(reg));
910                 if (!ret)
911                         ret = set_user_ckpt_msr(target, reg);
912         }
913
914         BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
915                      offsetof(struct pt_regs, msr) + sizeof(long));
916
917         if (!ret)
918                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
919                                          &target->thread.ckpt_regs.orig_gpr3,
920                                          PT_ORIG_R3 * sizeof(reg),
921                                          (PT_MAX_PUT_REG + 1) * sizeof(reg));
922
923         if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
924                 ret = user_regset_copyin_ignore(
925                         &pos, &count, &kbuf, &ubuf,
926                         (PT_MAX_PUT_REG + 1) * sizeof(reg),
927                         PT_TRAP * sizeof(reg));
928
929         if (!ret && count > 0) {
930                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
931                                          PT_TRAP * sizeof(reg),
932                                          (PT_TRAP + 1) * sizeof(reg));
933                 if (!ret)
934                         ret = set_user_ckpt_trap(target, reg);
935         }
936
937         if (!ret)
938                 ret = user_regset_copyin_ignore(
939                         &pos, &count, &kbuf, &ubuf,
940                         (PT_TRAP + 1) * sizeof(reg), -1);
941
942         return ret;
943 }
944
945 /**
946  * tm_cfpr_active - get active number of registers in CFPR
947  * @target:     The target task.
948  * @regset:     The user regset structure.
949  *
950  * This function checks for the active number of available
951  * registers in the transaction checkpointed FPR category.
952  */
953 static int tm_cfpr_active(struct task_struct *target,
954                                 const struct user_regset *regset)
955 {
956         if (!cpu_has_feature(CPU_FTR_TM))
957                 return -ENODEV;
958
959         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
960                 return 0;
961
962         return regset->n;
963 }
964
965 /**
966  * tm_cfpr_get - get CFPR registers
967  * @target:     The target task.
968  * @regset:     The user regset structure.
969  * @pos:        The buffer position.
970  * @count:      Number of bytes to copy.
971  * @kbuf:       Kernel buffer to copy from.
972  * @ubuf:       User buffer to copy into.
973  *
974  * This function gets in transaction checkpointed FPR registers.
975  *
976  * When the transaction is active 'ckfp_state' holds the checkpointed
977  * values for the current transaction to fall back on if it aborts
978  * in between. This function gets those checkpointed FPR registers.
979  * The userspace interface buffer layout is as follows.
980  *
981  * struct data {
982  *      u64     fpr[32];
983  *      u64     fpscr;
984  * };
985  */
986 static int tm_cfpr_get(struct task_struct *target,
987                         const struct user_regset *regset,
988                         unsigned int pos, unsigned int count,
989                         void *kbuf, void __user *ubuf)
990 {
991         u64 buf[33];
992         int i;
993
994         if (!cpu_has_feature(CPU_FTR_TM))
995                 return -ENODEV;
996
997         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
998                 return -ENODATA;
999
1000         flush_tmregs_to_thread(target);
1001         flush_fp_to_thread(target);
1002         flush_altivec_to_thread(target);
1003
1004         /* copy to local buffer then write that out */
1005         for (i = 0; i < 32 ; i++)
1006                 buf[i] = target->thread.TS_CKFPR(i);
1007         buf[32] = target->thread.ckfp_state.fpscr;
1008         return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
1009 }
1010
1011 /**
1012  * tm_cfpr_set - set CFPR registers
1013  * @target:     The target task.
1014  * @regset:     The user regset structure.
1015  * @pos:        The buffer position.
1016  * @count:      Number of bytes to copy.
1017  * @kbuf:       Kernel buffer to copy into.
1018  * @ubuf:       User buffer to copy from.
1019  *
1020  * This function sets in transaction checkpointed FPR registers.
1021  *
1022  * When the transaction is active 'ckfp_state' holds the checkpointed
1023  * FPR register values for the current transaction to fall back on
1024  * if it aborts in between. This function sets these checkpointed
1025  * FPR registers. The userspace interface buffer layout is as follows.
1026  *
1027  * struct data {
1028  *      u64     fpr[32];
1029  *      u64     fpscr;
1030  * };
1031  */
1032 static int tm_cfpr_set(struct task_struct *target,
1033                         const struct user_regset *regset,
1034                         unsigned int pos, unsigned int count,
1035                         const void *kbuf, const void __user *ubuf)
1036 {
1037         u64 buf[33];
1038         int i;
1039
1040         if (!cpu_has_feature(CPU_FTR_TM))
1041                 return -ENODEV;
1042
1043         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1044                 return -ENODATA;
1045
1046         flush_tmregs_to_thread(target);
1047         flush_fp_to_thread(target);
1048         flush_altivec_to_thread(target);
1049
1050         for (i = 0; i < 32; i++)
1051                 buf[i] = target->thread.TS_CKFPR(i);
1052         buf[32] = target->thread.ckfp_state.fpscr;
1053
1054         /* copy to local buffer then write that out */
1055         i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
1056         if (i)
1057                 return i;
1058         for (i = 0; i < 32 ; i++)
1059                 target->thread.TS_CKFPR(i) = buf[i];
1060         target->thread.ckfp_state.fpscr = buf[32];
1061         return 0;
1062 }
1063
1064 /**
1065  * tm_cvmx_active - get active number of registers in CVMX
1066  * @target:     The target task.
1067  * @regset:     The user regset structure.
1068  *
1069  * This function checks for the active number of available
1070  * registers in the checkpointed VMX category.
1071  */
1072 static int tm_cvmx_active(struct task_struct *target,
1073                                 const struct user_regset *regset)
1074 {
1075         if (!cpu_has_feature(CPU_FTR_TM))
1076                 return -ENODEV;
1077
1078         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1079                 return 0;
1080
1081         return regset->n;
1082 }
1083
1084 /**
1085  * tm_cvmx_get - get CVMX registers
1086  * @target:     The target task.
1087  * @regset:     The user regset structure.
1088  * @pos:        The buffer position.
1089  * @count:      Number of bytes to copy.
1090  * @kbuf:       Kernel buffer to copy from.
1091  * @ubuf:       User buffer to copy into.
1092  *
1093  * This function gets in transaction checkpointed VMX registers.
1094  *
1095  * When the transaction is active 'ckvr_state' and 'ckvrsave' hold
1096  * the checkpointed values for the current transaction to fall
1097  * back on if it aborts in between. The userspace interface buffer
1098  * layout is as follows.
1099  *
1100  * struct data {
1101  *      vector128       vr[32];
1102  *      vector128       vscr;
1103  *      vector128       vrsave;
1104  * };
1105  */
1106 static int tm_cvmx_get(struct task_struct *target,
1107                         const struct user_regset *regset,
1108                         unsigned int pos, unsigned int count,
1109                         void *kbuf, void __user *ubuf)
1110 {
1111         int ret;
1112
1113         BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));
1114
1115         if (!cpu_has_feature(CPU_FTR_TM))
1116                 return -ENODEV;
1117
1118         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1119                 return -ENODATA;
1120
1121         /* Flush the state */
1122         flush_tmregs_to_thread(target);
1123         flush_fp_to_thread(target);
1124         flush_altivec_to_thread(target);
1125
1126         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1127                                         &target->thread.ckvr_state, 0,
1128                                         33 * sizeof(vector128));
1129         if (!ret) {
1130                 /*
1131                  * Copy out only the low-order word of vrsave.
1132                  */
1133                 union {
1134                         elf_vrreg_t reg;
1135                         u32 word;
1136                 } vrsave;
1137                 memset(&vrsave, 0, sizeof(vrsave));
1138                 vrsave.word = target->thread.ckvrsave;
1139                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
1140                                                 33 * sizeof(vector128), -1);
1141         }
1142
1143         return ret;
1144 }
1145
1146 /**
1147  * tm_cvmx_set - set CVMX registers
1148  * @target:     The target task.
1149  * @regset:     The user regset structure.
1150  * @pos:        The buffer position.
1151  * @count:      Number of bytes to copy.
1152  * @kbuf:       Kernel buffer to copy into.
1153  * @ubuf:       User buffer to copy from.
1154  *
1155  * This function sets in transaction checkpointed VMX registers.
1156  *
1157  * When the transaction is active 'ckvr_state' and 'ckvrsave' hold
1158  * the checkpointed values for the current transaction to fall
1159  * back on if it aborts in between. The userspace interface buffer
1160  * layout is as follows.
1161  *
1162  * struct data {
1163  *      vector128       vr[32];
1164  *      vector128       vscr;
1165  *      vector128       vrsave;
1166  * };
1167  */
1168 static int tm_cvmx_set(struct task_struct *target,
1169                         const struct user_regset *regset,
1170                         unsigned int pos, unsigned int count,
1171                         const void *kbuf, const void __user *ubuf)
1172 {
1173         int ret;
1174
1175         BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));
1176
1177         if (!cpu_has_feature(CPU_FTR_TM))
1178                 return -ENODEV;
1179
1180         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1181                 return -ENODATA;
1182
1183         flush_tmregs_to_thread(target);
1184         flush_fp_to_thread(target);
1185         flush_altivec_to_thread(target);
1186
1187         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1188                                         &target->thread.ckvr_state, 0,
1189                                         33 * sizeof(vector128));
1190         if (!ret && count > 0) {
1191                 /*
1192                  * We use only the low-order word of vrsave.
1193                  */
1194                 union {
1195                         elf_vrreg_t reg;
1196                         u32 word;
1197                 } vrsave;
1198                 memset(&vrsave, 0, sizeof(vrsave));
1199                 vrsave.word = target->thread.ckvrsave;
1200                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
1201                                                 33 * sizeof(vector128), -1);
1202                 if (!ret)
1203                         target->thread.ckvrsave = vrsave.word;
1204         }
1205
1206         return ret;
1207 }
1208
1209 /**
1210  * tm_cvsx_active - get active number of registers in CVSX
1211  * @target:     The target task.
1212  * @regset:     The user regset structure.
1213  *
1214  * This function checks for the active number of available
1215  * registers in the transaction checkpointed VSX category.
1216  */
1217 static int tm_cvsx_active(struct task_struct *target,
1218                                 const struct user_regset *regset)
1219 {
1220         if (!cpu_has_feature(CPU_FTR_TM))
1221                 return -ENODEV;
1222
1223         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1224                 return 0;
1225
1226         flush_vsx_to_thread(target);
1227         return target->thread.used_vsr ? regset->n : 0;
1228 }
1229
1230 /**
1231  * tm_cvsx_get - get CVSX registers
1232  * @target:     The target task.
1233  * @regset:     The user regset structure.
1234  * @pos:        The buffer position.
1235  * @count:      Number of bytes to copy.
1236  * @kbuf:       Kernel buffer to copy from.
1237  * @ubuf:       User buffer to copy into.
1238  *
1239  * This function gets in transaction checkpointed VSX registers.
1240  *
1241  * When the transaction is active 'ckfp_state' holds the checkpointed
1242  * values for the current transaction to fall back on if it aborts
1243  * in between. This function gets those checkpointed VSX registers.
1244  * The userspace interface buffer layout is as follows.
1245  *
1246  * struct data {
1247  *      u64     vsx[32];
1248  * };
1249  */
1250 static int tm_cvsx_get(struct task_struct *target,
1251                         const struct user_regset *regset,
1252                         unsigned int pos, unsigned int count,
1253                         void *kbuf, void __user *ubuf)
1254 {
1255         u64 buf[32];
1256         int ret, i;
1257
1258         if (!cpu_has_feature(CPU_FTR_TM))
1259                 return -ENODEV;
1260
1261         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1262                 return -ENODATA;
1263
1264         /* Flush the state */
1265         flush_tmregs_to_thread(target);
1266         flush_fp_to_thread(target);
1267         flush_altivec_to_thread(target);
1268         flush_vsx_to_thread(target);
1269
1270         for (i = 0; i < 32 ; i++)
1271                 buf[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
1272         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1273                                   buf, 0, 32 * sizeof(double));
1274
1275         return ret;
1276 }
1277
1278 /**
1279  * tm_cvsx_set - set CVSX registers
1280  * @target:     The target task.
1281  * @regset:     The user regset structure.
1282  * @pos:        The buffer position.
1283  * @count:      Number of bytes to copy.
1284  * @kbuf:       Kernel buffer to copy into.
1285  * @ubuf:       User buffer to copy from.
1286  *
1287  * This function sets in transaction checkpointed VSX registers.
1288  *
1289  * When the transaction is active 'ckfp_state' holds the checkpointed
1290  * VSX register values for the current transaction to fall back on
1291  * if it aborts in between. This function sets these checkpointed
1292  * FPR registers. The userspace interface buffer layout is as follows.
1293  *
1294  * struct data {
1295  *      u64     vsx[32];
1296  * };
1297  */
1298 static int tm_cvsx_set(struct task_struct *target,
1299                         const struct user_regset *regset,
1300                         unsigned int pos, unsigned int count,
1301                         const void *kbuf, const void __user *ubuf)
1302 {
1303         u64 buf[32];
1304         int ret, i;
1305
1306         if (!cpu_has_feature(CPU_FTR_TM))
1307                 return -ENODEV;
1308
1309         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1310                 return -ENODATA;
1311
1312         /* Flush the state */
1313         flush_tmregs_to_thread(target);
1314         flush_fp_to_thread(target);
1315         flush_altivec_to_thread(target);
1316         flush_vsx_to_thread(target);
1317
1318         for (i = 0; i < 32 ; i++)
1319                 buf[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
1320
1321         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1322                                  buf, 0, 32 * sizeof(double));
1323         if (!ret)
1324                 for (i = 0; i < 32 ; i++)
1325                         target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
1326
1327         return ret;
1328 }
1329
1330 /**
1331  * tm_spr_active - get active number of registers in TM SPR
1332  * @target:     The target task.
1333  * @regset:     The user regset structure.
1334  *
1335  * This function checks the active number of available
1336  * registers in the transactional memory SPR category.
1337  */
1338 static int tm_spr_active(struct task_struct *target,
1339                          const struct user_regset *regset)
1340 {
1341         if (!cpu_has_feature(CPU_FTR_TM))
1342                 return -ENODEV;
1343
1344         return regset->n;
1345 }
1346
1347 /**
1348  * tm_spr_get - get the TM related SPR registers
1349  * @target:     The target task.
1350  * @regset:     The user regset structure.
1351  * @pos:        The buffer position.
1352  * @count:      Number of bytes to copy.
1353  * @kbuf:       Kernel buffer to copy from.
1354  * @ubuf:       User buffer to copy into.
1355  *
1356  * This function gets transactional memory related SPR registers.
1357  * The userspace interface buffer layout is as follows.
1358  *
1359  * struct {
1360  *      u64             tm_tfhar;
1361  *      u64             tm_texasr;
1362  *      u64             tm_tfiar;
1363  * };
1364  */
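/*
 * For illustration only (not part of this file): TFHAR, TEXASR and TFIAR are
 * the TM failure-handling SPRs (failure handler address, failure cause /
 * status summary, and failing instruction address respectively), exported
 * here as three consecutive u64s.
 */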
1365 static int tm_spr_get(struct task_struct *target,
1366                       const struct user_regset *regset,
1367                       unsigned int pos, unsigned int count,
1368                       void *kbuf, void __user *ubuf)
1369 {
1370         int ret;
1371
1372         /* Build tests */
1373         BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr));
1374         BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar));
1375         BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs));
1376
1377         if (!cpu_has_feature(CPU_FTR_TM))
1378                 return -ENODEV;
1379
1380         /* Flush the states */
1381         flush_tmregs_to_thread(target);
1382         flush_fp_to_thread(target);
1383         flush_altivec_to_thread(target);
1384
1385         /* TFHAR register */
1386         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1387                                 &target->thread.tm_tfhar, 0, sizeof(u64));
1388
1389         /* TEXASR register */
1390         if (!ret)
1391                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1392                                 &target->thread.tm_texasr, sizeof(u64),
1393                                 2 * sizeof(u64));
1394
1395         /* TFIAR register */
1396         if (!ret)
1397                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1398                                 &target->thread.tm_tfiar,
1399                                 2 * sizeof(u64), 3 * sizeof(u64));
1400         return ret;
1401 }
1402
1403 /**
1404  * tm_spr_set - set the TM related SPR registers
1405  * @target:     The target task.
1406  * @regset:     The user regset structure.
1407  * @pos:        The buffer position.
1408  * @count:      Number of bytes to copy.
1409  * @kbuf:       Kernel buffer to copy into.
1410  * @ubuf:       User buffer to copy from.
1411  *
1412  * This function sets transactional memory related SPR registers.
1413  * The userspace interface buffer layout is as follows.
1414  *
1415  * struct {
1416  *      u64             tm_tfhar;
1417  *      u64             tm_texasr;
1418  *      u64             tm_tfiar;
1419  * };
1420  */
1421 static int tm_spr_set(struct task_struct *target,
1422                       const struct user_regset *regset,
1423                       unsigned int pos, unsigned int count,
1424                       const void *kbuf, const void __user *ubuf)
1425 {
1426         int ret;
1427
1428         /* Build tests */
1429         BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr));
1430         BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar));
1431         BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs));
1432
1433         if (!cpu_has_feature(CPU_FTR_TM))
1434                 return -ENODEV;
1435
1436         /* Flush the states */
1437         flush_tmregs_to_thread(target);
1438         flush_fp_to_thread(target);
1439         flush_altivec_to_thread(target);
1440
1441         /* TFHAR register */
1442         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1443                                 &target->thread.tm_tfhar, 0, sizeof(u64));
1444
1445         /* TEXASR register */
1446         if (!ret)
1447                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1448                                 &target->thread.tm_texasr, sizeof(u64),
1449                                 2 * sizeof(u64));
1450
1451         /* TFIAR register */
1452         if (!ret)
1453                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1454                                 &target->thread.tm_tfiar,
1455                                  2 * sizeof(u64), 3 * sizeof(u64));
1456         return ret;
1457 }
1458
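/*
 * The tm_tar/tm_ppr/tm_dscr fields of the thread_struct hold the checkpointed
 * TAR, PPR and DSCR values for the current transaction; the regsets below
 * expose them only while a transaction is active.
 */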
1459 static int tm_tar_active(struct task_struct *target,
1460                          const struct user_regset *regset)
1461 {
1462         if (!cpu_has_feature(CPU_FTR_TM))
1463                 return -ENODEV;
1464
1465         if (MSR_TM_ACTIVE(target->thread.regs->msr))
1466                 return regset->n;
1467
1468         return 0;
1469 }
1470
1471 static int tm_tar_get(struct task_struct *target,
1472                       const struct user_regset *regset,
1473                       unsigned int pos, unsigned int count,
1474                       void *kbuf, void __user *ubuf)
1475 {
1476         int ret;
1477
1478         if (!cpu_has_feature(CPU_FTR_TM))
1479                 return -ENODEV;
1480
1481         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1482                 return -ENODATA;
1483
1484         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1485                                 &target->thread.tm_tar, 0, sizeof(u64));
1486         return ret;
1487 }
1488
1489 static int tm_tar_set(struct task_struct *target,
1490                       const struct user_regset *regset,
1491                       unsigned int pos, unsigned int count,
1492                       const void *kbuf, const void __user *ubuf)
1493 {
1494         int ret;
1495
1496         if (!cpu_has_feature(CPU_FTR_TM))
1497                 return -ENODEV;
1498
1499         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1500                 return -ENODATA;
1501
1502         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1503                                 &target->thread.tm_tar, 0, sizeof(u64));
1504         return ret;
1505 }
1506
1507 static int tm_ppr_active(struct task_struct *target,
1508                          const struct user_regset *regset)
1509 {
1510         if (!cpu_has_feature(CPU_FTR_TM))
1511                 return -ENODEV;
1512
1513         if (MSR_TM_ACTIVE(target->thread.regs->msr))
1514                 return regset->n;
1515
1516         return 0;
1517 }
1518
1519
1520 static int tm_ppr_get(struct task_struct *target,
1521                       const struct user_regset *regset,
1522                       unsigned int pos, unsigned int count,
1523                       void *kbuf, void __user *ubuf)
1524 {
1525         int ret;
1526
1527         if (!cpu_has_feature(CPU_FTR_TM))
1528                 return -ENODEV;
1529
1530         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1531                 return -ENODATA;
1532
1533         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1534                                 &target->thread.tm_ppr, 0, sizeof(u64));
1535         return ret;
1536 }
1537
1538 static int tm_ppr_set(struct task_struct *target,
1539                       const struct user_regset *regset,
1540                       unsigned int pos, unsigned int count,
1541                       const void *kbuf, const void __user *ubuf)
1542 {
1543         int ret;
1544
1545         if (!cpu_has_feature(CPU_FTR_TM))
1546                 return -ENODEV;
1547
1548         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1549                 return -ENODATA;
1550
1551         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1552                                 &target->thread.tm_ppr, 0, sizeof(u64));
1553         return ret;
1554 }
1555
1556 static int tm_dscr_active(struct task_struct *target,
1557                          const struct user_regset *regset)
1558 {
1559         if (!cpu_has_feature(CPU_FTR_TM))
1560                 return -ENODEV;
1561
1562         if (MSR_TM_ACTIVE(target->thread.regs->msr))
1563                 return regset->n;
1564
1565         return 0;
1566 }
1567
1568 static int tm_dscr_get(struct task_struct *target,
1569                       const struct user_regset *regset,
1570                       unsigned int pos, unsigned int count,
1571                       void *kbuf, void __user *ubuf)
1572 {
1573         int ret;
1574
1575         if (!cpu_has_feature(CPU_FTR_TM))
1576                 return -ENODEV;
1577
1578         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1579                 return -ENODATA;
1580
1581         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1582                                 &target->thread.tm_dscr, 0, sizeof(u64));
1583         return ret;
1584 }
1585
1586 static int tm_dscr_set(struct task_struct *target,
1587                       const struct user_regset *regset,
1588                       unsigned int pos, unsigned int count,
1589                       const void *kbuf, const void __user *ubuf)
1590 {
1591         int ret;
1592
1593         if (!cpu_has_feature(CPU_FTR_TM))
1594                 return -ENODEV;
1595
1596         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1597                 return -ENODATA;
1598
1599         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1600                                 &target->thread.tm_dscr, 0, sizeof(u64));
1601         return ret;
1602 }
1603 #endif  /* CONFIG_PPC_TRANSACTIONAL_MEM */
1604
1605 #ifdef CONFIG_PPC64
1606 static int ppr_get(struct task_struct *target,
1607                       const struct user_regset *regset,
1608                       unsigned int pos, unsigned int count,
1609                       void *kbuf, void __user *ubuf)
1610 {
1611         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1612                                    &target->thread.ppr, 0, sizeof(u64));
1613 }
1614
1615 static int ppr_set(struct task_struct *target,
1616                       const struct user_regset *regset,
1617                       unsigned int pos, unsigned int count,
1618                       const void *kbuf, const void __user *ubuf)
1619 {
1620         return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1621                                   &target->thread.ppr, 0, sizeof(u64));
1622 }
1623
1624 static int dscr_get(struct task_struct *target,
1625                       const struct user_regset *regset,
1626                       unsigned int pos, unsigned int count,
1627                       void *kbuf, void __user *ubuf)
1628 {
1629         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1630                                    &target->thread.dscr, 0, sizeof(u64));
1631 }
1632 static int dscr_set(struct task_struct *target,
1633                       const struct user_regset *regset,
1634                       unsigned int pos, unsigned int count,
1635                       const void *kbuf, const void __user *ubuf)
1636 {
1637         return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1638                                   &target->thread.dscr, 0, sizeof(u64));
1639 }
1640 #endif /* CONFIG_PPC64 */
1641 #ifdef CONFIG_PPC_BOOK3S_64
1642 static int tar_get(struct task_struct *target,
1643                       const struct user_regset *regset,
1644                       unsigned int pos, unsigned int count,
1645                       void *kbuf, void __user *ubuf)
1646 {
1647         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1648                                    &target->thread.tar, 0, sizeof(u64));
1649 }
1650 static int tar_set(struct task_struct *target,
1651                       const struct user_regset *regset,
1652                       unsigned int pos, unsigned int count,
1653                       const void *kbuf, const void __user *ubuf)
1654 {
1655         return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1656                                   &target->thread.tar, 0, sizeof(u64));
1657 }
1658
1659 static int ebb_active(struct task_struct *target,
1660                          const struct user_regset *regset)
1661 {
1662         if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1663                 return -ENODEV;
1664
1665         if (target->thread.used_ebb)
1666                 return regset->n;
1667
1668         return 0;
1669 }
1670
1671 static int ebb_get(struct task_struct *target,
1672                       const struct user_regset *regset,
1673                       unsigned int pos, unsigned int count,
1674                       void *kbuf, void __user *ubuf)
1675 {
1676         /* Build tests */
1677         BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
1678         BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));
1679
1680         if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1681                 return -ENODEV;
1682
1683         if (!target->thread.used_ebb)
1684                 return -ENODATA;
1685
1686         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1687                         &target->thread.ebbrr, 0, 3 * sizeof(unsigned long));
1688 }
1689
1690 static int ebb_set(struct task_struct *target,
1691                       const struct user_regset *regset,
1692                       unsigned int pos, unsigned int count,
1693                       const void *kbuf, const void __user *ubuf)
1694 {
1695         int ret = 0;
1696
1697         /* Build tests */
1698         BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
1699         BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));
1700
1701         if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1702                 return -ENODEV;
1703
1704         if (target->thread.used_ebb)
1705                 return -ENODATA;
1706
1707         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1708                         &target->thread.ebbrr, 0, sizeof(unsigned long));
1709
1710         if (!ret)
1711                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1712                         &target->thread.ebbhr, sizeof(unsigned long),
1713                         2 * sizeof(unsigned long));
1714
1715         if (!ret)
1716                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1717                         &target->thread.bescr,
1718                         2 * sizeof(unsigned long), 3 * sizeof(unsigned long));
1719
1720         return ret;
1721 }
1722 static int pmu_active(struct task_struct *target,
1723                          const struct user_regset *regset)
1724 {
1725         if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1726                 return -ENODEV;
1727
1728         return regset->n;
1729 }
1730
1731 static int pmu_get(struct task_struct *target,
1732                       const struct user_regset *regset,
1733                       unsigned int pos, unsigned int count,
1734                       void *kbuf, void __user *ubuf)
1735 {
1736         /* Build tests */
1737         BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
1738         BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
1739         BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
1740         BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));
1741
1742         if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1743                 return -ENODEV;
1744
1745         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1746                         &target->thread.siar, 0,
1747                         5 * sizeof(unsigned long));
1748 }
1749
1750 static int pmu_set(struct task_struct *target,
1751                       const struct user_regset *regset,
1752                       unsigned int pos, unsigned int count,
1753                       const void *kbuf, const void __user *ubuf)
1754 {
1755         int ret = 0;
1756
1757         /* Build tests */
1758         BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
1759         BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
1760         BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
1761         BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));
1762
1763         if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1764                 return -ENODEV;
1765
1766         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1767                         &target->thread.siar, 0,
1768                         sizeof(unsigned long));
1769
1770         if (!ret)
1771                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1772                         &target->thread.sdar, sizeof(unsigned long),
1773                         2 * sizeof(unsigned long));
1774
1775         if (!ret)
1776                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1777                         &target->thread.sier, 2 * sizeof(unsigned long),
1778                         3 * sizeof(unsigned long));
1779
1780         if (!ret)
1781                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1782                         &target->thread.mmcr2, 3 * sizeof(unsigned long),
1783                         4 * sizeof(unsigned long));
1784
1785         if (!ret)
1786                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1787                         &target->thread.mmcr0, 4 * sizeof(unsigned long),
1788                         5 * sizeof(unsigned long));
1789         return ret;
1790 }
1791 #endif /* CONFIG_PPC_BOOK3S_64 */
1792
1793 #ifdef CONFIG_PPC_MEM_KEYS
1794 static int pkey_active(struct task_struct *target,
1795                        const struct user_regset *regset)
1796 {
1797         if (!arch_pkeys_enabled())
1798                 return -ENODEV;
1799
1800         return regset->n;
1801 }
1802
1803 static int pkey_get(struct task_struct *target,
1804                     const struct user_regset *regset,
1805                     unsigned int pos, unsigned int count,
1806                     void *kbuf, void __user *ubuf)
1807 {
1808         BUILD_BUG_ON(TSO(amr) + sizeof(unsigned long) != TSO(iamr));
1809         BUILD_BUG_ON(TSO(iamr) + sizeof(unsigned long) != TSO(uamor));
1810
1811         if (!arch_pkeys_enabled())
1812                 return -ENODEV;
1813
1814         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1815                                    &target->thread.amr, 0,
1816                                    ELF_NPKEY * sizeof(unsigned long));
1817 }
1818
1819 static int pkey_set(struct task_struct *target,
1820                       const struct user_regset *regset,
1821                       unsigned int pos, unsigned int count,
1822                       const void *kbuf, const void __user *ubuf)
1823 {
1824         u64 new_amr;
1825         int ret;
1826
1827         if (!arch_pkeys_enabled())
1828                 return -ENODEV;
1829
1830         /* Only the AMR can be set from userspace */
1831         if (pos != 0 || count != sizeof(new_amr))
1832                 return -EINVAL;
1833
1834         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1835                                  &new_amr, 0, sizeof(new_amr));
1836         if (ret)
1837                 return ret;
1838
1839         /* UAMOR determines which bits of the AMR can be set from userspace. */
1840         target->thread.amr = (new_amr & target->thread.uamor) |
1841                 (target->thread.amr & ~target->thread.uamor);
1842
1843         return 0;
1844 }
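/*
 * Illustration (not from the original source): a worked example of the
 * UAMOR masking above.  If, say, uamor is 0xF0, the tracer writes 0xFF
 * and the old amr is 0x0A, the new amr becomes
 * (0xFF & 0xF0) | (0x0A & ~0xF0) = 0xF0 | 0x0A = 0xFA: only the bits
 * UAMOR marks as user-modifiable are taken from the tracer's value, the
 * remaining bits keep their previous contents.
 */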
1845 #endif /* CONFIG_PPC_MEM_KEYS */
1846
1847 /*
1848  * These are our native regset flavors.
1849  */
1850 enum powerpc_regset {
1851         REGSET_GPR,
1852         REGSET_FPR,
1853 #ifdef CONFIG_ALTIVEC
1854         REGSET_VMX,
1855 #endif
1856 #ifdef CONFIG_VSX
1857         REGSET_VSX,
1858 #endif
1859 #ifdef CONFIG_SPE
1860         REGSET_SPE,
1861 #endif
1862 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1863         REGSET_TM_CGPR,         /* TM checkpointed GPR registers */
1864         REGSET_TM_CFPR,         /* TM checkpointed FPR registers */
1865         REGSET_TM_CVMX,         /* TM checkpointed VMX registers */
1866         REGSET_TM_CVSX,         /* TM checkpointed VSX registers */
1867         REGSET_TM_SPR,          /* TM specific SPR registers */
1868         REGSET_TM_CTAR,         /* TM checkpointed TAR register */
1869         REGSET_TM_CPPR,         /* TM checkpointed PPR register */
1870         REGSET_TM_CDSCR,        /* TM checkpointed DSCR register */
1871 #endif
1872 #ifdef CONFIG_PPC64
1873         REGSET_PPR,             /* PPR register */
1874         REGSET_DSCR,            /* DSCR register */
1875 #endif
1876 #ifdef CONFIG_PPC_BOOK3S_64
1877         REGSET_TAR,             /* TAR register */
1878         REGSET_EBB,             /* EBB registers */
1879         REGSET_PMR,             /* Performance Monitor Registers */
1880 #endif
1881 #ifdef CONFIG_PPC_MEM_KEYS
1882         REGSET_PKEY,            /* AMR register */
1883 #endif
1884 };
1885
1886 static const struct user_regset native_regsets[] = {
1887         [REGSET_GPR] = {
1888                 .core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
1889                 .size = sizeof(long), .align = sizeof(long),
1890                 .get = gpr_get, .set = gpr_set
1891         },
1892         [REGSET_FPR] = {
1893                 .core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
1894                 .size = sizeof(double), .align = sizeof(double),
1895                 .get = fpr_get, .set = fpr_set
1896         },
1897 #ifdef CONFIG_ALTIVEC
1898         [REGSET_VMX] = {
1899                 .core_note_type = NT_PPC_VMX, .n = 34,
1900                 .size = sizeof(vector128), .align = sizeof(vector128),
1901                 .active = vr_active, .get = vr_get, .set = vr_set
1902         },
1903 #endif
1904 #ifdef CONFIG_VSX
1905         [REGSET_VSX] = {
1906                 .core_note_type = NT_PPC_VSX, .n = 32,
1907                 .size = sizeof(double), .align = sizeof(double),
1908                 .active = vsr_active, .get = vsr_get, .set = vsr_set
1909         },
1910 #endif
1911 #ifdef CONFIG_SPE
1912         [REGSET_SPE] = {
1913                 .core_note_type = NT_PPC_SPE, .n = 35,
1914                 .size = sizeof(u32), .align = sizeof(u32),
1915                 .active = evr_active, .get = evr_get, .set = evr_set
1916         },
1917 #endif
1918 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1919         [REGSET_TM_CGPR] = {
1920                 .core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
1921                 .size = sizeof(long), .align = sizeof(long),
1922                 .active = tm_cgpr_active, .get = tm_cgpr_get, .set = tm_cgpr_set
1923         },
1924         [REGSET_TM_CFPR] = {
1925                 .core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
1926                 .size = sizeof(double), .align = sizeof(double),
1927                 .active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set
1928         },
1929         [REGSET_TM_CVMX] = {
1930                 .core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
1931                 .size = sizeof(vector128), .align = sizeof(vector128),
1932                 .active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set
1933         },
1934         [REGSET_TM_CVSX] = {
1935                 .core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX,
1936                 .size = sizeof(double), .align = sizeof(double),
1937                 .active = tm_cvsx_active, .get = tm_cvsx_get, .set = tm_cvsx_set
1938         },
1939         [REGSET_TM_SPR] = {
1940                 .core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG,
1941                 .size = sizeof(u64), .align = sizeof(u64),
1942                 .active = tm_spr_active, .get = tm_spr_get, .set = tm_spr_set
1943         },
1944         [REGSET_TM_CTAR] = {
1945                 .core_note_type = NT_PPC_TM_CTAR, .n = 1,
1946                 .size = sizeof(u64), .align = sizeof(u64),
1947                 .active = tm_tar_active, .get = tm_tar_get, .set = tm_tar_set
1948         },
1949         [REGSET_TM_CPPR] = {
1950                 .core_note_type = NT_PPC_TM_CPPR, .n = 1,
1951                 .size = sizeof(u64), .align = sizeof(u64),
1952                 .active = tm_ppr_active, .get = tm_ppr_get, .set = tm_ppr_set
1953         },
1954         [REGSET_TM_CDSCR] = {
1955                 .core_note_type = NT_PPC_TM_CDSCR, .n = 1,
1956                 .size = sizeof(u64), .align = sizeof(u64),
1957                 .active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set
1958         },
1959 #endif
1960 #ifdef CONFIG_PPC64
1961         [REGSET_PPR] = {
1962                 .core_note_type = NT_PPC_PPR, .n = 1,
1963                 .size = sizeof(u64), .align = sizeof(u64),
1964                 .get = ppr_get, .set = ppr_set
1965         },
1966         [REGSET_DSCR] = {
1967                 .core_note_type = NT_PPC_DSCR, .n = 1,
1968                 .size = sizeof(u64), .align = sizeof(u64),
1969                 .get = dscr_get, .set = dscr_set
1970         },
1971 #endif
1972 #ifdef CONFIG_PPC_BOOK3S_64
1973         [REGSET_TAR] = {
1974                 .core_note_type = NT_PPC_TAR, .n = 1,
1975                 .size = sizeof(u64), .align = sizeof(u64),
1976                 .get = tar_get, .set = tar_set
1977         },
1978         [REGSET_EBB] = {
1979                 .core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
1980                 .size = sizeof(u64), .align = sizeof(u64),
1981                 .active = ebb_active, .get = ebb_get, .set = ebb_set
1982         },
1983         [REGSET_PMR] = {
1984                 .core_note_type = NT_PPC_PMU, .n = ELF_NPMU,
1985                 .size = sizeof(u64), .align = sizeof(u64),
1986                 .active = pmu_active, .get = pmu_get, .set = pmu_set
1987         },
1988 #endif
1989 #ifdef CONFIG_PPC_MEM_KEYS
1990         [REGSET_PKEY] = {
1991                 .core_note_type = NT_PPC_PKEY, .n = ELF_NPKEY,
1992                 .size = sizeof(u64), .align = sizeof(u64),
1993                 .active = pkey_active, .get = pkey_get, .set = pkey_set
1994         },
1995 #endif
1996 };
1997
1998 static const struct user_regset_view user_ppc_native_view = {
1999         .name = UTS_MACHINE, .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
2000         .regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
2001 };
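/*
 * Illustrative sketch (not part of the original source): these regsets are
 * what a debugger reaches through PTRACE_GETREGSET/PTRACE_SETREGSET and
 * what becomes the ELF notes of a core dump.  Assuming a ppc64 tracee and
 * the NT_PPC_TAR note defined in <elf.h>, reading the TAR could look
 * roughly like this:
 *
 *	#include <elf.h>
 *	#include <inttypes.h>
 *	#include <stdio.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/uio.h>
 *
 *	uint64_t tar;
 *	struct iovec iov = { .iov_base = &tar, .iov_len = sizeof(tar) };
 *
 *	if (ptrace(PTRACE_GETREGSET, pid, NT_PPC_TAR, &iov) == 0)
 *		printf("TAR = 0x%" PRIx64 "\n", tar);
 *
 * The .active hook (where present) decides whether the regset currently
 * has data to report, e.g. tm_tar_active() above.
 */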
2002
2003 #ifdef CONFIG_PPC64
2004 #include <linux/compat.h>
2005
2006 static int gpr32_get_common(struct task_struct *target,
2007                      const struct user_regset *regset,
2008                      unsigned int pos, unsigned int count,
2009                             void *kbuf, void __user *ubuf,
2010                             unsigned long *regs)
2011 {
2012         compat_ulong_t *k = kbuf;
2013         compat_ulong_t __user *u = ubuf;
2014         compat_ulong_t reg;
2015
2016         pos /= sizeof(reg);
2017         count /= sizeof(reg);
2018
2019         if (kbuf)
2020                 for (; count > 0 && pos < PT_MSR; --count)
2021                         *k++ = regs[pos++];
2022         else
2023                 for (; count > 0 && pos < PT_MSR; --count)
2024                         if (__put_user((compat_ulong_t) regs[pos++], u++))
2025                                 return -EFAULT;
2026
2027         if (count > 0 && pos == PT_MSR) {
2028                 reg = get_user_msr(target);
2029                 if (kbuf)
2030                         *k++ = reg;
2031                 else if (__put_user(reg, u++))
2032                         return -EFAULT;
2033                 ++pos;
2034                 --count;
2035         }
2036
2037         if (kbuf)
2038                 for (; count > 0 && pos < PT_REGS_COUNT; --count)
2039                         *k++ = regs[pos++];
2040         else
2041                 for (; count > 0 && pos < PT_REGS_COUNT; --count)
2042                         if (__put_user((compat_ulong_t) regs[pos++], u++))
2043                                 return -EFAULT;
2044
2045         kbuf = k;
2046         ubuf = u;
2047         pos *= sizeof(reg);
2048         count *= sizeof(reg);
2049         return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
2050                                         PT_REGS_COUNT * sizeof(reg), -1);
2051 }
2052
2053 static int gpr32_set_common(struct task_struct *target,
2054                      const struct user_regset *regset,
2055                      unsigned int pos, unsigned int count,
2056                      const void *kbuf, const void __user *ubuf,
2057                      unsigned long *regs)
2058 {
2059         const compat_ulong_t *k = kbuf;
2060         const compat_ulong_t __user *u = ubuf;
2061         compat_ulong_t reg;
2062
2063         pos /= sizeof(reg);
2064         count /= sizeof(reg);
2065
2066         if (kbuf)
2067                 for (; count > 0 && pos < PT_MSR; --count)
2068                         regs[pos++] = *k++;
2069         else
2070                 for (; count > 0 && pos < PT_MSR; --count) {
2071                         if (__get_user(reg, u++))
2072                                 return -EFAULT;
2073                         regs[pos++] = reg;
2074                 }
2075
2076
2077         if (count > 0 && pos == PT_MSR) {
2078                 if (kbuf)
2079                         reg = *k++;
2080                 else if (__get_user(reg, u++))
2081                         return -EFAULT;
2082                 set_user_msr(target, reg);
2083                 ++pos;
2084                 --count;
2085         }
2086
2087         if (kbuf) {
2088                 for (; count > 0 && pos <= PT_MAX_PUT_REG; --count)
2089                         regs[pos++] = *k++;
2090                 for (; count > 0 && pos < PT_TRAP; --count, ++pos)
2091                         ++k;
2092         } else {
2093                 for (; count > 0 && pos <= PT_MAX_PUT_REG; --count) {
2094                         if (__get_user(reg, u++))
2095                                 return -EFAULT;
2096                         regs[pos++] = reg;
2097                 }
2098                 for (; count > 0 && pos < PT_TRAP; --count, ++pos)
2099                         if (__get_user(reg, u++))
2100                                 return -EFAULT;
2101         }
2102
2103         if (count > 0 && pos == PT_TRAP) {
2104                 if (kbuf)
2105                         reg = *k++;
2106                 else if (__get_user(reg, u++))
2107                         return -EFAULT;
2108                 set_user_trap(target, reg);
2109                 ++pos;
2110                 --count;
2111         }
2112
2113         kbuf = k;
2114         ubuf = u;
2115         pos *= sizeof(reg);
2116         count *= sizeof(reg);
2117         return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
2118                                          (PT_TRAP + 1) * sizeof(reg), -1);
2119 }
2120
2121 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2122 static int tm_cgpr32_get(struct task_struct *target,
2123                      const struct user_regset *regset,
2124                      unsigned int pos, unsigned int count,
2125                      void *kbuf, void __user *ubuf)
2126 {
2127         return gpr32_get_common(target, regset, pos, count, kbuf, ubuf,
2128                         &target->thread.ckpt_regs.gpr[0]);
2129 }
2130
2131 static int tm_cgpr32_set(struct task_struct *target,
2132                      const struct user_regset *regset,
2133                      unsigned int pos, unsigned int count,
2134                      const void *kbuf, const void __user *ubuf)
2135 {
2136         return gpr32_set_common(target, regset, pos, count, kbuf, ubuf,
2137                         &target->thread.ckpt_regs.gpr[0]);
2138 }
2139 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
2140
2141 static int gpr32_get(struct task_struct *target,
2142                      const struct user_regset *regset,
2143                      unsigned int pos, unsigned int count,
2144                      void *kbuf, void __user *ubuf)
2145 {
2146         int i;
2147
2148         if (target->thread.regs == NULL)
2149                 return -EIO;
2150
2151         if (!FULL_REGS(target->thread.regs)) {
2152                 /*
2153                  * We have a partial register set.
2154                  * Fill 14-31 with bogus values.
2155                  */
2156                 for (i = 14; i < 32; i++)
2157                         target->thread.regs->gpr[i] = NV_REG_POISON;
2158         }
2159         return gpr32_get_common(target, regset, pos, count, kbuf, ubuf,
2160                         &target->thread.regs->gpr[0]);
2161 }
2162
2163 static int gpr32_set(struct task_struct *target,
2164                      const struct user_regset *regset,
2165                      unsigned int pos, unsigned int count,
2166                      const void *kbuf, const void __user *ubuf)
2167 {
2168         if (target->thread.regs == NULL)
2169                 return -EIO;
2170
2171         CHECK_FULL_REGS(target->thread.regs);
2172         return gpr32_set_common(target, regset, pos, count, kbuf, ubuf,
2173                         &target->thread.regs->gpr[0]);
2174 }
2175
2176 /*
2177  * These are the regset flavors matching the CONFIG_PPC32 native set.
2178  */
2179 static const struct user_regset compat_regsets[] = {
2180         [REGSET_GPR] = {
2181                 .core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
2182                 .size = sizeof(compat_long_t), .align = sizeof(compat_long_t),
2183                 .get = gpr32_get, .set = gpr32_set
2184         },
2185         [REGSET_FPR] = {
2186                 .core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
2187                 .size = sizeof(double), .align = sizeof(double),
2188                 .get = fpr_get, .set = fpr_set
2189         },
2190 #ifdef CONFIG_ALTIVEC
2191         [REGSET_VMX] = {
2192                 .core_note_type = NT_PPC_VMX, .n = 34,
2193                 .size = sizeof(vector128), .align = sizeof(vector128),
2194                 .active = vr_active, .get = vr_get, .set = vr_set
2195         },
2196 #endif
2197 #ifdef CONFIG_SPE
2198         [REGSET_SPE] = {
2199                 .core_note_type = NT_PPC_SPE, .n = 35,
2200                 .size = sizeof(u32), .align = sizeof(u32),
2201                 .active = evr_active, .get = evr_get, .set = evr_set
2202         },
2203 #endif
2204 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2205         [REGSET_TM_CGPR] = {
2206                 .core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
2207                 .size = sizeof(long), .align = sizeof(long),
2208                 .active = tm_cgpr_active,
2209                 .get = tm_cgpr32_get, .set = tm_cgpr32_set
2210         },
2211         [REGSET_TM_CFPR] = {
2212                 .core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
2213                 .size = sizeof(double), .align = sizeof(double),
2214                 .active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set
2215         },
2216         [REGSET_TM_CVMX] = {
2217                 .core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
2218                 .size = sizeof(vector128), .align = sizeof(vector128),
2219                 .active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set
2220         },
2221         [REGSET_TM_CVSX] = {
2222                 .core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX,
2223                 .size = sizeof(double), .align = sizeof(double),
2224                 .active = tm_cvsx_active, .get = tm_cvsx_get, .set = tm_cvsx_set
2225         },
2226         [REGSET_TM_SPR] = {
2227                 .core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG,
2228                 .size = sizeof(u64), .align = sizeof(u64),
2229                 .active = tm_spr_active, .get = tm_spr_get, .set = tm_spr_set
2230         },
2231         [REGSET_TM_CTAR] = {
2232                 .core_note_type = NT_PPC_TM_CTAR, .n = 1,
2233                 .size = sizeof(u64), .align = sizeof(u64),
2234                 .active = tm_tar_active, .get = tm_tar_get, .set = tm_tar_set
2235         },
2236         [REGSET_TM_CPPR] = {
2237                 .core_note_type = NT_PPC_TM_CPPR, .n = 1,
2238                 .size = sizeof(u64), .align = sizeof(u64),
2239                 .active = tm_ppr_active, .get = tm_ppr_get, .set = tm_ppr_set
2240         },
2241         [REGSET_TM_CDSCR] = {
2242                 .core_note_type = NT_PPC_TM_CDSCR, .n = 1,
2243                 .size = sizeof(u64), .align = sizeof(u64),
2244                 .active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set
2245         },
2246 #endif
2247 #ifdef CONFIG_PPC64
2248         [REGSET_PPR] = {
2249                 .core_note_type = NT_PPC_PPR, .n = 1,
2250                 .size = sizeof(u64), .align = sizeof(u64),
2251                 .get = ppr_get, .set = ppr_set
2252         },
2253         [REGSET_DSCR] = {
2254                 .core_note_type = NT_PPC_DSCR, .n = 1,
2255                 .size = sizeof(u64), .align = sizeof(u64),
2256                 .get = dscr_get, .set = dscr_set
2257         },
2258 #endif
2259 #ifdef CONFIG_PPC_BOOK3S_64
2260         [REGSET_TAR] = {
2261                 .core_note_type = NT_PPC_TAR, .n = 1,
2262                 .size = sizeof(u64), .align = sizeof(u64),
2263                 .get = tar_get, .set = tar_set
2264         },
2265         [REGSET_EBB] = {
2266                 .core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
2267                 .size = sizeof(u64), .align = sizeof(u64),
2268                 .active = ebb_active, .get = ebb_get, .set = ebb_set
2269         },
2270 #endif
2271 };
2272
2273 static const struct user_regset_view user_ppc_compat_view = {
2274         .name = "ppc", .e_machine = EM_PPC, .ei_osabi = ELF_OSABI,
2275         .regsets = compat_regsets, .n = ARRAY_SIZE(compat_regsets)
2276 };
2277 #endif  /* CONFIG_PPC64 */
2278
2279 const struct user_regset_view *task_user_regset_view(struct task_struct *task)
2280 {
2281 #ifdef CONFIG_PPC64
2282         if (test_tsk_thread_flag(task, TIF_32BIT))
2283                 return &user_ppc_compat_view;
2284 #endif
2285         return &user_ppc_native_view;
2286 }
2287
2288
2289 void user_enable_single_step(struct task_struct *task)
2290 {
2291         struct pt_regs *regs = task->thread.regs;
2292
2293         if (regs != NULL) {
2294 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2295                 task->thread.debug.dbcr0 &= ~DBCR0_BT;
2296                 task->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
2297                 regs->msr |= MSR_DE;
2298 #else
2299                 regs->msr &= ~MSR_BE;
2300                 regs->msr |= MSR_SE;
2301 #endif
2302         }
2303         set_tsk_thread_flag(task, TIF_SINGLESTEP);
2304 }
2305
2306 void user_enable_block_step(struct task_struct *task)
2307 {
2308         struct pt_regs *regs = task->thread.regs;
2309
2310         if (regs != NULL) {
2311 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2312                 task->thread.debug.dbcr0 &= ~DBCR0_IC;
2313                 task->thread.debug.dbcr0 = DBCR0_IDM | DBCR0_BT;
2314                 regs->msr |= MSR_DE;
2315 #else
2316                 regs->msr &= ~MSR_SE;
2317                 regs->msr |= MSR_BE;
2318 #endif
2319         }
2320         set_tsk_thread_flag(task, TIF_SINGLESTEP);
2321 }
2322
2323 void user_disable_single_step(struct task_struct *task)
2324 {
2325         struct pt_regs *regs = task->thread.regs;
2326
2327         if (regs != NULL) {
2328 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2329                 /*
2330                  * The logic to disable single stepping should be as
2331                  * simple as turning off the Instruction Complete flag.
2332                  * And, after doing so, if all debug flags are off, turn
2333                  * off DBCR0(IDM) and MSR(DE).
2334                  */
2335                 task->thread.debug.dbcr0 &= ~(DBCR0_IC|DBCR0_BT);
2336                 /*
2337                  * Test to see if any of the DBCR_ACTIVE_EVENTS bits are set.
2338                  */
2339                 if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
2340                                         task->thread.debug.dbcr1)) {
2341                         /*
2342                          * All debug events were off.
2343                          */
2344                         task->thread.debug.dbcr0 &= ~DBCR0_IDM;
2345                         regs->msr &= ~MSR_DE;
2346                 }
2347 #else
2348                 regs->msr &= ~(MSR_SE | MSR_BE);
2349 #endif
2350         }
2351         clear_tsk_thread_flag(task, TIF_SINGLESTEP);
2352 }
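/*
 * Usage note (illustrative, not from the original source): these helpers
 * are driven by the generic ptrace core.  A tracer issuing
 * ptrace(PTRACE_SINGLESTEP, pid, 0, 0) causes user_enable_single_step()
 * to run on the tracee before it is resumed; the step state is torn down
 * again via user_disable_single_step() when the tracer resumes the task
 * without stepping or detaches (see ptrace_disable() below).  Where block
 * stepping is supported, PTRACE_SINGLEBLOCK maps onto
 * user_enable_block_step() in the same way.
 */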
2353
2354 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2355 void ptrace_triggered(struct perf_event *bp,
2356                       struct perf_sample_data *data, struct pt_regs *regs)
2357 {
2358         struct perf_event_attr attr;
2359
2360         /*
2361          * Disable the breakpoint request here since ptrace has defined a
2362          * one-shot behaviour for breakpoint exceptions in PPC64.
2363          * The SIGTRAP signal is generated automatically for us in do_dabr().
2364          * We don't have to do anything about that here.
2365          */
2366         attr = bp->attr;
2367         attr.disabled = true;
2368         modify_user_hw_breakpoint(bp, &attr);
2369 }
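/*
 * Illustrative note (not from the original source): because of the
 * one-shot behaviour described above, a tracer that wants a persistent
 * watchpoint has to re-arm it after handling the SIGTRAP stop, e.g. by
 * issuing another PTRACE_SET_DEBUGREG or PPC_PTRACE_SETHWDEBUG request
 * before resuming the tracee.
 */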
2370 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2371
2372 static int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
2373                                unsigned long data)
2374 {
2375 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2376         int ret;
2377         struct thread_struct *thread = &(task->thread);
2378         struct perf_event *bp;
2379         struct perf_event_attr attr;
2380 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2381 #ifndef CONFIG_PPC_ADV_DEBUG_REGS
2382         bool set_bp = true;
2383         struct arch_hw_breakpoint hw_brk;
2384 #endif
2385
2386         /* For ppc64 we support one DABR and no IABRs at the moment.
2387          *  For embedded processors we support one DAC and no IACs at the
2388          *  moment.
2389          */
2390         if (addr > 0)
2391                 return -EINVAL;
2392
2393         /* The bottom 3 bits in dabr are flags */
2394         if ((data & ~0x7UL) >= TASK_SIZE)
2395                 return -EIO;
2396
2397 #ifndef CONFIG_PPC_ADV_DEBUG_REGS
2398         /* For processors using DABR (e.g. the 970), the bottom 3 bits are flags.
2399          *  It was assumed, on previous implementations, that 3 bits were
2400          *  passed together with the data address, fitting the design of the
2401          *  DABR register, as follows:
2402          *
2403          *  bit 0: Read flag
2404          *  bit 1: Write flag
2405          *  bit 2: Breakpoint translation
2406          *
2407          *  Thus, we interpret the flags here accordingly.
2408          */
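        /*
         * Illustration (not from the original source): a tracer wanting a
         * write watchpoint on the doubleword at addr would therefore pass
         * something like
         *
         *	data = (addr & ~7UL) | 0x4 | 0x2;   (translate + write bits)
         *	ptrace(PTRACE_SET_DEBUGREG, pid, 0, data);
         *
         * The translation bit must be set for any non-zero request, as
         * checked just below, and passing data == 0 clears the breakpoint.
         */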
2409
2410         /* Ensure breakpoint translation bit is set */
2411         if (data && !(data & HW_BRK_TYPE_TRANSLATE))
2412                 return -EIO;
2413         hw_brk.address = data & (~HW_BRK_TYPE_DABR);
2414         hw_brk.type = (data & HW_BRK_TYPE_DABR) | HW_BRK_TYPE_PRIV_ALL;
2415         hw_brk.len = 8;
2416         set_bp = (data) && (hw_brk.type & HW_BRK_TYPE_RDWR);
2417 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2418         bp = thread->ptrace_bps[0];
2419         if (!set_bp) {
2420                 if (bp) {
2421                         unregister_hw_breakpoint(bp);
2422                         thread->ptrace_bps[0] = NULL;
2423                 }
2424                 return 0;
2425         }
2426         if (bp) {
2427                 attr = bp->attr;
2428                 attr.bp_addr = hw_brk.address;
2429                 arch_bp_generic_fields(hw_brk.type, &attr.bp_type);
2430
2431                 /* Enable breakpoint */
2432                 attr.disabled = false;
2433
2434                 ret = modify_user_hw_breakpoint(bp, &attr);
2435                 if (ret) {
2436                         return ret;
2437                 }
2438                 thread->ptrace_bps[0] = bp;
2439                 thread->hw_brk = hw_brk;
2440                 return 0;
2441         }
2442
2443         /* Create a new breakpoint request if one doesn't exist already */
2444         hw_breakpoint_init(&attr);
2445         attr.bp_addr = hw_brk.address;
2446         arch_bp_generic_fields(hw_brk.type,
2447                                &attr.bp_type);
2448
2449         thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
2450                                                ptrace_triggered, NULL, task);
2451         if (IS_ERR(bp)) {
2452                 thread->ptrace_bps[0] = NULL;
2453                 return PTR_ERR(bp);
2454         }
2455
2456 #else /* !CONFIG_HAVE_HW_BREAKPOINT */
2457         if (set_bp && (!ppc_breakpoint_available()))
2458                 return -ENODEV;
2459 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2460         task->thread.hw_brk = hw_brk;
2461 #else /* CONFIG_PPC_ADV_DEBUG_REGS */
2462         /* As described above, it was assumed 3 bits were passed with the data
2463          *  address, but we will assume only the mode bits will be passed
2464          *  so as not to cause alignment restrictions for DAC-based processors.
2465          */
2466
2467         /* DACs hold the whole address without any mode flags */
2468         task->thread.debug.dac1 = data & ~0x3UL;
2469
2470         if (task->thread.debug.dac1 == 0) {
2471                 dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W);
2472                 if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
2473                                         task->thread.debug.dbcr1)) {
2474                         task->thread.regs->msr &= ~MSR_DE;
2475                         task->thread.debug.dbcr0 &= ~DBCR0_IDM;
2476                 }
2477                 return 0;
2478         }
2479
2480         /* Read or Write bits must be set */
2481
2482         if (!(data & 0x3UL))
2483                 return -EINVAL;
2484
2485         /* Set the Internal Debugging flag (IDM bit 1) for the DBCR0
2486            register */
2487         task->thread.debug.dbcr0 |= DBCR0_IDM;
2488
2489         /* Check for write and read flags and set DBCR0
2490            accordingly */
2491         dbcr_dac(task) &= ~(DBCR_DAC1R|DBCR_DAC1W);
2492         if (data & 0x1UL)
2493                 dbcr_dac(task) |= DBCR_DAC1R;
2494         if (data & 0x2UL)
2495                 dbcr_dac(task) |= DBCR_DAC1W;
2496         task->thread.regs->msr |= MSR_DE;
2497 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
2498         return 0;
2499 }
2500
2501 /*
2502  * Called by kernel/ptrace.c when detaching..
2503  * Called by kernel/ptrace.c when detaching.
2504  *
2505  * Make sure the single-step bits etc. are not set.
2506 void ptrace_disable(struct task_struct *child)
2507 {
2508         /* make sure the single step bit is not set. */
2509         user_disable_single_step(child);
2510 }
2511
2512 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2513 static long set_instruction_bp(struct task_struct *child,
2514                               struct ppc_hw_breakpoint *bp_info)
2515 {
2516         int slot;
2517         int slot1_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC1) != 0);
2518         int slot2_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC2) != 0);
2519         int slot3_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC3) != 0);
2520         int slot4_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC4) != 0);
2521
2522         if (dbcr_iac_range(child) & DBCR_IAC12MODE)
2523                 slot2_in_use = 1;
2524         if (dbcr_iac_range(child) & DBCR_IAC34MODE)
2525                 slot4_in_use = 1;
2526
2527         if (bp_info->addr >= TASK_SIZE)
2528                 return -EIO;
2529
2530         if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) {
2531
2532                 /* Make sure range is valid. */
2533                 if (bp_info->addr2 >= TASK_SIZE)
2534                         return -EIO;
2535
2536                 /* We need a pair of IAC registers */
2537                 if ((!slot1_in_use) && (!slot2_in_use)) {
2538                         slot = 1;
2539                         child->thread.debug.iac1 = bp_info->addr;
2540                         child->thread.debug.iac2 = bp_info->addr2;
2541                         child->thread.debug.dbcr0 |= DBCR0_IAC1;
2542                         if (bp_info->addr_mode ==
2543                                         PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
2544                                 dbcr_iac_range(child) |= DBCR_IAC12X;
2545                         else
2546                                 dbcr_iac_range(child) |= DBCR_IAC12I;
2547 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
2548                 } else if ((!slot3_in_use) && (!slot4_in_use)) {
2549                         slot = 3;
2550                         child->thread.debug.iac3 = bp_info->addr;
2551                         child->thread.debug.iac4 = bp_info->addr2;
2552                         child->thread.debug.dbcr0 |= DBCR0_IAC3;
2553                         if (bp_info->addr_mode ==
2554                                         PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
2555                                 dbcr_iac_range(child) |= DBCR_IAC34X;
2556                         else
2557                                 dbcr_iac_range(child) |= DBCR_IAC34I;
2558 #endif
2559                 } else
2560                         return -ENOSPC;
2561         } else {
2562                 /* We only need one.  If possible leave a pair free in
2563                  * case a range is needed later.
2564                  */
2565                 if (!slot1_in_use) {
2566                         /*
2567                          * Don't use iac1 if iac1-iac2 are free and either
2568                          * iac3 or iac4 (but not both) are free
2569                          */
2570                         if (slot2_in_use || (slot3_in_use == slot4_in_use)) {
2571                                 slot = 1;
2572                                 child->thread.debug.iac1 = bp_info->addr;
2573                                 child->thread.debug.dbcr0 |= DBCR0_IAC1;
2574                                 goto out;
2575                         }
2576                 }
2577                 if (!slot2_in_use) {
2578                         slot = 2;
2579                         child->thread.debug.iac2 = bp_info->addr;
2580                         child->thread.debug.dbcr0 |= DBCR0_IAC2;
2581 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
2582                 } else if (!slot3_in_use) {
2583                         slot = 3;
2584                         child->thread.debug.iac3 = bp_info->addr;
2585                         child->thread.debug.dbcr0 |= DBCR0_IAC3;
2586                 } else if (!slot4_in_use) {
2587                         slot = 4;
2588                         child->thread.debug.iac4 = bp_info->addr;
2589                         child->thread.debug.dbcr0 |= DBCR0_IAC4;
2590 #endif
2591                 } else
2592                         return -ENOSPC;
2593         }
2594 out:
2595         child->thread.debug.dbcr0 |= DBCR0_IDM;
2596         child->thread.regs->msr |= MSR_DE;
2597
2598         return slot;
2599 }
2600
2601 static int del_instruction_bp(struct task_struct *child, int slot)
2602 {
2603         switch (slot) {
2604         case 1:
2605                 if ((child->thread.debug.dbcr0 & DBCR0_IAC1) == 0)
2606                         return -ENOENT;
2607
2608                 if (dbcr_iac_range(child) & DBCR_IAC12MODE) {
2609                         /* address range - clear slots 1 & 2 */
2610                         child->thread.debug.iac2 = 0;
2611                         dbcr_iac_range(child) &= ~DBCR_IAC12MODE;
2612                 }
2613                 child->thread.debug.iac1 = 0;
2614                 child->thread.debug.dbcr0 &= ~DBCR0_IAC1;
2615                 break;
2616         case 2:
2617                 if ((child->thread.debug.dbcr0 & DBCR0_IAC2) == 0)
2618                         return -ENOENT;
2619
2620                 if (dbcr_iac_range(child) & DBCR_IAC12MODE)
2621                         /* used in a range */
2622                         return -EINVAL;
2623                 child->thread.debug.iac2 = 0;
2624                 child->thread.debug.dbcr0 &= ~DBCR0_IAC2;
2625                 break;
2626 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
2627         case 3:
2628                 if ((child->thread.debug.dbcr0 & DBCR0_IAC3) == 0)
2629                         return -ENOENT;
2630
2631                 if (dbcr_iac_range(child) & DBCR_IAC34MODE) {
2632                         /* address range - clear slots 3 & 4 */
2633                         child->thread.debug.iac4 = 0;
2634                         dbcr_iac_range(child) &= ~DBCR_IAC34MODE;
2635                 }
2636                 child->thread.debug.iac3 = 0;
2637                 child->thread.debug.dbcr0 &= ~DBCR0_IAC3;
2638                 break;
2639         case 4:
2640                 if ((child->thread.debug.dbcr0 & DBCR0_IAC4) == 0)
2641                         return -ENOENT;
2642
2643                 if (dbcr_iac_range(child) & DBCR_IAC34MODE)
2644                         /* Used in a range */
2645                         return -EINVAL;
2646                 child->thread.debug.iac4 = 0;
2647                 child->thread.debug.dbcr0 &= ~DBCR0_IAC4;
2648                 break;
2649 #endif
2650         default:
2651                 return -EINVAL;
2652         }
2653         return 0;
2654 }
2655
2656 static int set_dac(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
2657 {
2658         int byte_enable =
2659                 (bp_info->condition_mode >> PPC_BREAKPOINT_CONDITION_BE_SHIFT)
2660                 & 0xf;
2661         int condition_mode =
2662                 bp_info->condition_mode & PPC_BREAKPOINT_CONDITION_MODE;
2663         int slot;
2664
2665         if (byte_enable && (condition_mode == 0))
2666                 return -EINVAL;
2667
2668         if (bp_info->addr >= TASK_SIZE)
2669                 return -EIO;
2670
2671         if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0) {
2672                 slot = 1;
2673                 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
2674                         dbcr_dac(child) |= DBCR_DAC1R;
2675                 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
2676                         dbcr_dac(child) |= DBCR_DAC1W;
2677                 child->thread.debug.dac1 = (unsigned long)bp_info->addr;
2678 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
2679                 if (byte_enable) {
2680                         child->thread.debug.dvc1 =
2681                                 (unsigned long)bp_info->condition_value;
2682                         child->thread.debug.dbcr2 |=
2683                                 ((byte_enable << DBCR2_DVC1BE_SHIFT) |
2684                                  (condition_mode << DBCR2_DVC1M_SHIFT));
2685                 }
2686 #endif
2687 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2688         } else if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
2689                 /* Both dac1 and dac2 are part of a range */
2690                 return -ENOSPC;
2691 #endif
2692         } else if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0) {
2693                 slot = 2;
2694                 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
2695                         dbcr_dac(child) |= DBCR_DAC2R;
2696                 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
2697                         dbcr_dac(child) |= DBCR_DAC2W;
2698                 child->thread.debug.dac2 = (unsigned long)bp_info->addr;
2699 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
2700                 if (byte_enable) {
2701                         child->thread.debug.dvc2 =
2702                                 (unsigned long)bp_info->condition_value;
2703                         child->thread.debug.dbcr2 |=
2704                                 ((byte_enable << DBCR2_DVC2BE_SHIFT) |
2705                                  (condition_mode << DBCR2_DVC2M_SHIFT));
2706                 }
2707 #endif
2708         } else
2709                 return -ENOSPC;
2710         child->thread.debug.dbcr0 |= DBCR0_IDM;
2711         child->thread.regs->msr |= MSR_DE;
2712
2713         return slot + 4;
2714 }
2715
2716 static int del_dac(struct task_struct *child, int slot)
2717 {
2718         if (slot == 1) {
2719                 if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0)
2720                         return -ENOENT;
2721
2722                 child->thread.debug.dac1 = 0;
2723                 dbcr_dac(child) &= ~(DBCR_DAC1R | DBCR_DAC1W);
2724 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2725                 if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
2726                         child->thread.debug.dac2 = 0;
2727                         child->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
2728                 }
2729                 child->thread.debug.dbcr2 &= ~(DBCR2_DVC1M | DBCR2_DVC1BE);
2730 #endif
2731 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
2732                 child->thread.debug.dvc1 = 0;
2733 #endif
2734         } else if (slot == 2) {
2735                 if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0)
2736                         return -ENOENT;
2737
2738 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2739                 if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE)
2740                         /* Part of a range */
2741                         return -EINVAL;
2742                 child->thread.debug.dbcr2 &= ~(DBCR2_DVC2M | DBCR2_DVC2BE);
2743 #endif
2744 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
2745                 child->thread.debug.dvc2 = 0;
2746 #endif
2747                 child->thread.debug.dac2 = 0;
2748                 dbcr_dac(child) &= ~(DBCR_DAC2R | DBCR_DAC2W);
2749         } else
2750                 return -EINVAL;
2751
2752         return 0;
2753 }
2754 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
2755
2756 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2757 static int set_dac_range(struct task_struct *child,
2758                          struct ppc_hw_breakpoint *bp_info)
2759 {
2760         int mode = bp_info->addr_mode & PPC_BREAKPOINT_MODE_MASK;
2761
2762         /* We don't allow range watchpoints to be used with DVC */
2763         if (bp_info->condition_mode)
2764                 return -EINVAL;
2765
2766         /*
2767          * Best effort to verify the address range.  The user/supervisor bits
2768          * prevent trapping in kernel space, but let's fail on an obvious bad
2769          * range.  The simple test on the mask is not fool-proof, and any
2770          * exclusive range will spill over into kernel space.
2771          */
2772         if (bp_info->addr >= TASK_SIZE)
2773                 return -EIO;
2774         if (mode == PPC_BREAKPOINT_MODE_MASK) {
2775                 /*
2776                  * dac2 is a bitmask.  Don't allow a mask that makes a
2777                  * kernel space address from a valid dac1 value
2778                  */
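                /*
                 * Reasoning sketch (not from the original source): in mask
                 * mode the watchpoint matches every address A for which
                 * (A & addr2) == (dac1 & addr2), so bits cleared in the
                 * mask are "don't care".  If ~addr2 >= TASK_SIZE, a
                 * matching address above TASK_SIZE can be constructed even
                 * from a valid dac1, hence the -EIO.
                 */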
2779                 if (~((unsigned long)bp_info->addr2) >= TASK_SIZE)
2780                         return -EIO;
2781         } else {
2782                 /*
2783                  * For range breakpoints, addr2 must also be a valid address
2784                  */
2785                 if (bp_info->addr2 >= TASK_SIZE)
2786                         return -EIO;
2787         }
2788
2789         if (child->thread.debug.dbcr0 &
2790             (DBCR0_DAC1R | DBCR0_DAC1W | DBCR0_DAC2R | DBCR0_DAC2W))
2791                 return -ENOSPC;
2792
2793         if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
2794                 child->thread.debug.dbcr0 |= (DBCR0_DAC1R | DBCR0_IDM);
2795         if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
2796                 child->thread.debug.dbcr0 |= (DBCR0_DAC1W | DBCR0_IDM);
2797         child->thread.debug.dac1 = bp_info->addr;
2798         child->thread.debug.dac2 = bp_info->addr2;
2799         if (mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
2800                 child->thread.debug.dbcr2  |= DBCR2_DAC12M;
2801         else if (mode == PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
2802                 child->thread.debug.dbcr2  |= DBCR2_DAC12MX;
2803         else    /* PPC_BREAKPOINT_MODE_MASK */
2804                 child->thread.debug.dbcr2  |= DBCR2_DAC12MM;
2805         child->thread.regs->msr |= MSR_DE;
2806
2807         return 5;
2808 }
2809 #endif /* CONFIG_PPC_ADV_DEBUG_DAC_RANGE */
2810
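/*
 * Illustrative sketch (not part of the original source): ppc_set_hwdebug()
 * below backs the PPC_PTRACE_SETHWDEBUG request.  Assuming the uapi
 * definitions from <asm/ptrace.h>, a tracer could install and later remove
 * a write watchpoint roughly as follows:
 *
 *	struct ppc_hw_breakpoint bp = {
 *		.version        = 1,
 *		.trigger_type   = PPC_BREAKPOINT_TRIGGER_WRITE,
 *		.addr_mode      = PPC_BREAKPOINT_MODE_EXACT,
 *		.condition_mode = PPC_BREAKPOINT_CONDITION_NONE,
 *		.addr           = (__u64)(unsigned long)watch_addr,
 *	};
 *	long handle = ptrace(PPC_PTRACE_SETHWDEBUG, pid, 0, &bp);
 *
 *	ptrace(PPC_PTRACE_DELHWDEBUG, pid, 0, handle);
 *
 * A positive return value is the slot handle that ppc_del_hwdebug()
 * expects back; watch_addr here is a hypothetical address in the tracee.
 */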
2811 static long ppc_set_hwdebug(struct task_struct *child,
2812                      struct ppc_hw_breakpoint *bp_info)
2813 {
2814 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2815         int len = 0;
2816         struct thread_struct *thread = &(child->thread);
2817         struct perf_event *bp;
2818         struct perf_event_attr attr;
2819 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2820 #ifndef CONFIG_PPC_ADV_DEBUG_REGS
2821         struct arch_hw_breakpoint brk;
2822 #endif
2823
2824         if (bp_info->version != 1)
2825                 return -ENOTSUPP;
2826 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2827         /*
2828          * Check for invalid flags and combinations
2829          */
2830         if ((bp_info->trigger_type == 0) ||
2831             (bp_info->trigger_type & ~(PPC_BREAKPOINT_TRIGGER_EXECUTE |
2832                                        PPC_BREAKPOINT_TRIGGER_RW)) ||
2833             (bp_info->addr_mode & ~PPC_BREAKPOINT_MODE_MASK) ||
2834             (bp_info->condition_mode &
2835              ~(PPC_BREAKPOINT_CONDITION_MODE |
2836                PPC_BREAKPOINT_CONDITION_BE_ALL)))
2837                 return -EINVAL;
2838 #if CONFIG_PPC_ADV_DEBUG_DVCS == 0
2839         if (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
2840                 return -EINVAL;
2841 #endif
2842
2843         if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_EXECUTE) {
2844                 if ((bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_EXECUTE) ||
2845                     (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE))
2846                         return -EINVAL;
2847                 return set_instruction_bp(child, bp_info);
2848         }
2849         if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
2850                 return set_dac(child, bp_info);
2851
2852 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2853         return set_dac_range(child, bp_info);
2854 #else
2855         return -EINVAL;
2856 #endif
2857 #else /* !CONFIG_PPC_ADV_DEBUG_REGS */
2858         /*
2859          * We only support one data breakpoint
2860          */
2861         if ((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0 ||
2862             (bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0 ||
2863             bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
2864                 return -EINVAL;
2865
2866         if ((unsigned long)bp_info->addr >= TASK_SIZE)
2867                 return -EIO;
2868
2869         brk.address = bp_info->addr & ~7UL;
2870         brk.type = HW_BRK_TYPE_TRANSLATE;
2871         brk.len = 8;
2872         if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
2873                 brk.type |= HW_BRK_TYPE_READ;
2874         if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
2875                 brk.type |= HW_BRK_TYPE_WRITE;
2876 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2877         /*
2878          * Check if the request is for 'range' breakpoints. We can
2879          * support it if range < 8 bytes.
2880          */
2881         if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
2882                 len = bp_info->addr2 - bp_info->addr;
2883         else if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
2884                 len = 1;
2885         else
2886                 return -EINVAL;
2887         bp = thread->ptrace_bps[0];
2888         if (bp)
2889                 return -ENOSPC;
2890
2891         /* Create a new breakpoint request if one doesn't exist already */
2892         hw_breakpoint_init(&attr);
2893         attr.bp_addr = (unsigned long)bp_info->addr & ~HW_BREAKPOINT_ALIGN;
2894         attr.bp_len = len;
2895         arch_bp_generic_fields(brk.type, &attr.bp_type);
2896
2897         thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
2898                                                ptrace_triggered, NULL, child);
2899         if (IS_ERR(bp)) {
2900                 thread->ptrace_bps[0] = NULL;
2901                 return PTR_ERR(bp);
2902         }
2903
2904         return 1;
2905 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2906
2907         if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT)
2908                 return -EINVAL;
2909
2910         if (child->thread.hw_brk.address)
2911                 return -ENOSPC;
2912
2913         if (!ppc_breakpoint_available())
2914                 return -ENODEV;
2915
2916         child->thread.hw_brk = brk;
2917
2918         return 1;
2919 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
2920 }
2921
2922 static long ppc_del_hwdebug(struct task_struct *child, long data)
2923 {
2924 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2925         int ret = 0;
2926         struct thread_struct *thread = &(child->thread);
2927         struct perf_event *bp;
2928 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2929 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2930         int rc;
2931
2932         if (data <= 4)
2933                 rc = del_instruction_bp(child, (int)data);
2934         else
2935                 rc = del_dac(child, (int)data - 4);
2936
2937         if (!rc) {
2938                 if (!DBCR_ACTIVE_EVENTS(child->thread.debug.dbcr0,
2939                                         child->thread.debug.dbcr1)) {
2940                         child->thread.debug.dbcr0 &= ~DBCR0_IDM;
2941                         child->thread.regs->msr &= ~MSR_DE;
2942                 }
2943         }
2944         return rc;
2945 #else
2946         if (data != 1)
2947                 return -EINVAL;
2948
2949 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2950         bp = thread->ptrace_bps[0];
2951         if (bp) {
2952                 unregister_hw_breakpoint(bp);
2953                 thread->ptrace_bps[0] = NULL;
2954         } else
2955                 ret = -ENOENT;
2956         return ret;
2957 #else /* CONFIG_HAVE_HW_BREAKPOINT */
2958         if (child->thread.hw_brk.address == 0)
2959                 return -ENOENT;
2960
2961         child->thread.hw_brk.address = 0;
2962         child->thread.hw_brk.type = 0;
2963 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2964
2965         return 0;
2966 #endif
2967 }
2968
2969 long arch_ptrace(struct task_struct *child, long request,
2970                  unsigned long addr, unsigned long data)
2971 {
2972         int ret = -EPERM;
2973         void __user *datavp = (void __user *) data;
2974         unsigned long __user *datalp = datavp;
2975
2976         switch (request) {
2977         /* read the word at location addr in the USER area. */
2978         case PTRACE_PEEKUSR: {
2979                 unsigned long index, tmp;
2980
2981                 ret = -EIO;
2982                 /* convert to index and check */
2983 #ifdef CONFIG_PPC32
2984                 index = addr >> 2;
2985                 if ((addr & 3) || (index > PT_FPSCR)
2986                     || (child->thread.regs == NULL))
2987 #else
2988                 index = addr >> 3;
2989                 if ((addr & 7) || (index > PT_FPSCR))
2990 #endif
2991                         break;
2992
2993                 CHECK_FULL_REGS(child->thread.regs);
2994                 if (index < PT_FPR0) {
2995                         ret = ptrace_get_reg(child, (int) index, &tmp);
2996                         if (ret)
2997                                 break;
2998                 } else {
2999                         unsigned int fpidx = index - PT_FPR0;
3000
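                             /*
                              * Make sure the child's live FP register state
                              * has been saved into thread.fp_state before we
                              * read it.
                              */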
3001                         flush_fp_to_thread(child);
3002                         if (fpidx < (PT_FPSCR - PT_FPR0))
3003                                 memcpy(&tmp, &child->thread.TS_FPR(fpidx),
3004                                        sizeof(long));
3005                         else
3006                                 tmp = child->thread.fp_state.fpscr;
3007                 }
3008                 ret = put_user(tmp, datalp);
3009                 break;
3010         }
3011
3012         /* write the word at location addr in the USER area */
3013         case PTRACE_POKEUSR: {
3014                 unsigned long index;
3015
3016                 ret = -EIO;
3017                 /* convert to index and check */
3018 #ifdef CONFIG_PPC32
3019                 index = addr >> 2;
3020                 if ((addr & 3) || (index > PT_FPSCR)
3021                     || (child->thread.regs == NULL))
3022 #else
3023                 index = addr >> 3;
3024                 if ((addr & 7) || (index > PT_FPSCR))
3025 #endif
3026                         break;
3027
3028                 CHECK_FULL_REGS(child->thread.regs);
3029                 if (index < PT_FPR0) {
3030                         ret = ptrace_put_reg(child, index, data);
3031                 } else {
3032                         unsigned int fpidx = index - PT_FPR0;
3033
3034                         flush_fp_to_thread(child);
3035                         if (fpidx < (PT_FPSCR - PT_FPR0))
3036                                 memcpy(&child->thread.TS_FPR(fpidx), &data,
3037                                        sizeof(long));
3038                         else
3039                                 child->thread.fp_state.fpscr = data;
3040                         ret = 0;
3041                 }
3042                 break;
3043         }
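             /*
              * For both PTRACE_PEEKUSR and PTRACE_POKEUSR the addr argument
              * is a byte offset into the pseudo USER area: a register index
              * (PT_R0...PT_R31, PT_FPR0..., PT_FPSCR) multiplied by
              * sizeof(long).  Illustrative sketch for reading the stack
              * pointer (r1) of a stopped child from userspace:
              *
              *	errno = 0;
              *	sp = ptrace(PTRACE_PEEKUSER, pid,
              *		    (void *)(PT_R1 * sizeof(unsigned long)), NULL);
              */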
3044
3045         case PPC_PTRACE_GETHWDBGINFO: {
3046                 struct ppc_debug_info dbginfo;
3047
3048                 dbginfo.version = 1;
3049 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
3050                 dbginfo.num_instruction_bps = CONFIG_PPC_ADV_DEBUG_IACS;
3051                 dbginfo.num_data_bps = CONFIG_PPC_ADV_DEBUG_DACS;
3052                 dbginfo.num_condition_regs = CONFIG_PPC_ADV_DEBUG_DVCS;
3053                 dbginfo.data_bp_alignment = 4;
3054                 dbginfo.sizeof_condition = 4;
3055                 dbginfo.features = PPC_DEBUG_FEATURE_INSN_BP_RANGE |
3056                                    PPC_DEBUG_FEATURE_INSN_BP_MASK;
3057 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
3058                 dbginfo.features |=
3059                                    PPC_DEBUG_FEATURE_DATA_BP_RANGE |
3060                                    PPC_DEBUG_FEATURE_DATA_BP_MASK;
3061 #endif
3062 #else /* !CONFIG_PPC_ADV_DEBUG_REGS */
3063                 dbginfo.num_instruction_bps = 0;
3064                 if (ppc_breakpoint_available())
3065                         dbginfo.num_data_bps = 1;
3066                 else
3067                         dbginfo.num_data_bps = 0;
3068                 dbginfo.num_condition_regs = 0;
3069 #ifdef CONFIG_PPC64
3070                 dbginfo.data_bp_alignment = 8;
3071 #else
3072                 dbginfo.data_bp_alignment = 4;
3073 #endif
3074                 dbginfo.sizeof_condition = 0;
3075 #ifdef CONFIG_HAVE_HW_BREAKPOINT
3076                 dbginfo.features = PPC_DEBUG_FEATURE_DATA_BP_RANGE;
3077                 if (cpu_has_feature(CPU_FTR_DAWR))
3078                         dbginfo.features |= PPC_DEBUG_FEATURE_DATA_BP_DAWR;
3079 #else
3080                 dbginfo.features = 0;
3081 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
3082 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
3083
3084                 if (!access_ok(VERIFY_WRITE, datavp,
3085                                sizeof(struct ppc_debug_info)))
3086                         return -EFAULT;
3087                 ret = __copy_to_user(datavp, &dbginfo,
3088                                      sizeof(struct ppc_debug_info)) ?
3089                       -EFAULT : 0;
3090                 break;
3091         }
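             /*
              * Illustrative userspace sketch for querying the debug
              * capabilities (assumes the powerpc uapi <asm/ptrace.h>
              * definitions; error handling omitted):
              *
              *	struct ppc_debug_info info;
              *
              *	if (ptrace(PPC_PTRACE_GETHWDBGINFO, pid, 0, &info) == 0)
              *		printf("%u data breakpoint(s)\n", info.num_data_bps);
              */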
3092
3093         case PPC_PTRACE_SETHWDEBUG: {
3094                 struct ppc_hw_breakpoint bp_info;
3095
3096                 if (!access_ok(VERIFY_READ, datavp,
3097                                sizeof(struct ppc_hw_breakpoint)))
3098                         return -EFAULT;
3099                 ret = __copy_from_user(&bp_info, datavp,
3100                                        sizeof(struct ppc_hw_breakpoint)) ?
3101                       -EFAULT : 0;
3102                 if (!ret)
3103                         ret = ppc_set_hwdebug(child, &bp_info);
3104                 break;
3105         }
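             /*
              * Illustrative userspace sketch for setting a write watchpoint
              * on a single address (error handling omitted; the field values
              * are the uapi PPC_BREAKPOINT_* constants checked above):
              *
              *	struct ppc_hw_breakpoint bp = {
              *		.version	= 1,
              *		.trigger_type	= PPC_BREAKPOINT_TRIGGER_WRITE,
              *		.addr_mode	= PPC_BREAKPOINT_MODE_EXACT,
              *		.condition_mode	= PPC_BREAKPOINT_CONDITION_NONE,
              *		.addr		= (__u64)(unsigned long)watch_addr,
              *	};
              *	int handle = ptrace(PPC_PTRACE_SETHWDEBUG, pid, 0, &bp);
              */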
3106
3107         case PPC_PTRACE_DELHWDEBUG: {
3108                 ret = ppc_del_hwdebug(child, data);
3109                 break;
3110         }
3111
3112         case PTRACE_GET_DEBUGREG: {
3113 #ifndef CONFIG_PPC_ADV_DEBUG_REGS
3114                 unsigned long dabr_fake;
3115 #endif
3116                 ret = -EINVAL;
3117                 /* We only support one DABR and no IABRs at the moment */
3118                 if (addr > 0)
3119                         break;
3120 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
3121                 ret = put_user(child->thread.debug.dac1, datalp);
3122 #else
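                     /*
                      * Fake up a DABR value from the generic hw_brk: the
                      * legacy DABR keeps its read/write/translate enable bits
                      * in the low-order bits of the address word.
                      */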
3123                 dabr_fake = ((child->thread.hw_brk.address & (~HW_BRK_TYPE_DABR)) |
3124                              (child->thread.hw_brk.type & HW_BRK_TYPE_DABR));
3125                 ret = put_user(dabr_fake, datalp);
3126 #endif
3127                 break;
3128         }
3129
3130         case PTRACE_SET_DEBUGREG:
3131                 ret = ptrace_set_debugreg(child, addr, data);
3132                 break;
3133
3134 #ifdef CONFIG_PPC64
3135         case PTRACE_GETREGS64:
3136 #endif
3137         case PTRACE_GETREGS:    /* Get all pt_regs from the child. */
3138                 return copy_regset_to_user(child, &user_ppc_native_view,
3139                                            REGSET_GPR,
3140                                            0, sizeof(struct pt_regs),
3141                                            datavp);
3142
3143 #ifdef CONFIG_PPC64
3144         case PTRACE_SETREGS64:
3145 #endif
3146         case PTRACE_SETREGS:    /* Set all gp regs in the child. */
3147                 return copy_regset_from_user(child, &user_ppc_native_view,
3148                                              REGSET_GPR,
3149                                              0, sizeof(struct pt_regs),
3150                                              datavp);
3151
3152         case PTRACE_GETFPREGS: /* Get the child FPU state (FPR0...31 + FPSCR) */
3153                 return copy_regset_to_user(child, &user_ppc_native_view,
3154                                            REGSET_FPR,
3155                                            0, sizeof(elf_fpregset_t),
3156                                            datavp);
3157
3158         case PTRACE_SETFPREGS: /* Set the child FPU state (FPR0...31 + FPSCR) */
3159                 return copy_regset_from_user(child, &user_ppc_native_view,
3160                                              REGSET_FPR,
3161                                              0, sizeof(elf_fpregset_t),
3162                                              datavp);
3163
3164 #ifdef CONFIG_ALTIVEC
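             /*
              * The VMX transfer below is 33 vector128 slots (vr0-vr31 plus
              * VSCR) followed by a 32-bit VRSAVE, hence the size expression.
              */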
3165         case PTRACE_GETVRREGS:
3166                 return copy_regset_to_user(child, &user_ppc_native_view,
3167                                            REGSET_VMX,
3168                                            0, (33 * sizeof(vector128) +
3169                                                sizeof(u32)),
3170                                            datavp);
3171
3172         case PTRACE_SETVRREGS:
3173                 return copy_regset_from_user(child, &user_ppc_native_view,
3174                                              REGSET_VMX,
3175                                              0, (33 * sizeof(vector128) +
3176                                                  sizeof(u32)),
3177                                              datavp);
3178 #endif
3179 #ifdef CONFIG_VSX
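             /*
              * The VSX regset carries only the second doubleword of
              * vsr0-vsr31; the first doublewords overlap the FPRs and
              * vsr32-vsr63 overlap the VMX registers, hence 32 doubles here.
              */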
3180         case PTRACE_GETVSRREGS:
3181                 return copy_regset_to_user(child, &user_ppc_native_view,
3182                                            REGSET_VSX,
3183                                            0, 32 * sizeof(double),
3184                                            datavp);
3185
3186         case PTRACE_SETVSRREGS:
3187                 return copy_regset_from_user(child, &user_ppc_native_view,
3188                                              REGSET_VSX,
3189                                              0, 32 * sizeof(double),
3190                                              datavp);
3191 #endif
3192 #ifdef CONFIG_SPE
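             /*
              * The SPE transfer is 35 32-bit words: the 32 upper halves of
              * the GPRs (evr0-evr31), the 64-bit accumulator and SPEFSCR.
              */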
3193         case PTRACE_GETEVRREGS:
3194                 /* Get the child spe register state. */
3195                 return copy_regset_to_user(child, &user_ppc_native_view,
3196                                            REGSET_SPE, 0, 35 * sizeof(u32),
3197                                            datavp);
3198
3199         case PTRACE_SETEVRREGS:
3200                 /* Set the child spe register state. */
3201                 return copy_regset_from_user(child, &user_ppc_native_view,
3202                                              REGSET_SPE, 0, 35 * sizeof(u32),
3203                                              datavp);
3204 #endif
3205
3206         default:
3207                 ret = ptrace_request(child, request, addr, data);
3208                 break;
3209         }
3210         return ret;
3211 }
3212
3213 #ifdef CONFIG_SECCOMP
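     /*
      * Returns 0 if the syscall should proceed, or -1 if seccomp rejected it;
      * in the latter case regs->gpr[3] already holds the value userspace will
      * see as the syscall return.
      */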
3214 static int do_seccomp(struct pt_regs *regs)
3215 {
3216         if (!test_thread_flag(TIF_SECCOMP))
3217                 return 0;
3218
3219         /*
3220          * The ABI we present to seccomp tracers is that r3 contains
3221          * the syscall return value and orig_gpr3 contains the first
3222          * syscall parameter. This differs from the ptrace ABI, where
3223          * both r3 and orig_gpr3 contain the first syscall parameter.
3224          */
3225         regs->gpr[3] = -ENOSYS;
3226
3227         /*
3228          * We use the __ version here because we have already checked
3229          * TIF_SECCOMP. If this fails, there is nothing left to do: we
3230          * have already loaded -ENOSYS into r3, or seccomp has put
3231          * something else in r3 (via SECCOMP_RET_ERRNO/TRACE).
3232          */
3233         if (__secure_computing(NULL))
3234                 return -1;
3235
3236         /*
3237          * The syscall was allowed by seccomp, so restore the register
3238          * state to what audit expects.
3239          * Note that we use orig_gpr3, which means a seccomp tracer can
3240          * modify the first syscall parameter (in orig_gpr3) and also
3241          * allow the syscall to proceed.
3242          */
3243         regs->gpr[3] = regs->orig_gpr3;
3244
3245         return 0;
3246 }
3247 #else
3248 static inline int do_seccomp(struct pt_regs *regs) { return 0; }
3249 #endif /* CONFIG_SECCOMP */
3250
3251 /**
3252  * do_syscall_trace_enter() - Do syscall tracing on kernel entry.
3253  * @regs: the pt_regs of the task to trace (current)
3254  *
3255  * Performs various types of tracing on syscall entry. This includes seccomp,
3256  * ptrace, syscall tracepoints and audit.
3257  *
3258  * The pt_regs are potentially visible to userspace via ptrace, so their
3259  * contents are ABI.
3260  *
3261  * One or more of the tracers may modify the contents of pt_regs, in particular
3262  * to modify arguments or even the syscall number itself.
3263  *
3264  * It's also possible that a tracer can choose to reject the system call. In
3265  * that case this function will return an illegal syscall number, and will put
3266  * an appropriate return value in regs->gpr[3].
3267  *
3268  * Return: the (possibly changed) syscall number.
3269  */
3270 long do_syscall_trace_enter(struct pt_regs *regs)
3271 {
3272         user_exit();
3273
3274         /*
3275          * The tracer may decide to abort the syscall; if so,
3276          * tracehook_report_syscall_entry() returns non-zero. Note that the
3277          * tracer may also just change regs->gpr[0] to an invalid syscall
3278          * number; that is handled below on the exit path.
3279          */
3280         if (test_thread_flag(TIF_SYSCALL_TRACE) &&
3281             tracehook_report_syscall_entry(regs))
3282                 goto skip;
3283
3284         /* Run seccomp after ptrace; allow it to set gpr[3]. */
3285         if (do_seccomp(regs))
3286                 return -1;
3287
3288         /* Avoid trace and audit when syscall is invalid. */
3289         if (regs->gpr[0] >= NR_syscalls)
3290                 goto skip;
3291
3292         if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
3293                 trace_sys_enter(regs, regs->gpr[0]);
3294
3295 #ifdef CONFIG_PPC64
3296         if (!is_32bit_task())
3297                 audit_syscall_entry(regs->gpr[0], regs->gpr[3], regs->gpr[4],
3298                                     regs->gpr[5], regs->gpr[6]);
3299         else
3300 #endif
3301                 audit_syscall_entry(regs->gpr[0],
3302                                     regs->gpr[3] & 0xffffffff,
3303                                     regs->gpr[4] & 0xffffffff,
3304                                     regs->gpr[5] & 0xffffffff,
3305                                     regs->gpr[6] & 0xffffffff);
3306
3307         /* Return the possibly modified but valid syscall number */
3308         return regs->gpr[0];
3309
3310 skip:
3311         /*
3312          * If we are aborting explicitly, or if the syscall number is
3313          * now invalid, set the return value to -ENOSYS.
3314          */
3315         regs->gpr[3] = -ENOSYS;
3316         return -1;
3317 }
3318
3319 void do_syscall_trace_leave(struct pt_regs *regs)
3320 {
3321         int step;
3322
3323         audit_syscall_exit(regs);
3324
3325         if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
3326                 trace_sys_exit(regs, regs->result);
3327
3328         step = test_thread_flag(TIF_SINGLESTEP);
3329         if (step || test_thread_flag(TIF_SYSCALL_TRACE))
3330                 tracehook_report_syscall_exit(regs, step);
3331
3332         user_enter();
3333 }