1 // SPDX-License-Identifier: GPL-2.0-or-later
5 * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
7 #include <linux/kernel.h>
8 #include <linux/kprobes.h>
9 #include <linux/ptrace.h>
10 #include <linux/prefetch.h>
11 #include <asm/sstep.h>
12 #include <asm/processor.h>
13 #include <linux/uaccess.h>
14 #include <asm/cpu_has_feature.h>
15 #include <asm/cputable.h>
16 #include <asm/disassemble.h>
18 extern char system_call_common[];
19 extern char system_call_vectored_emulate[];
22 /* Bits in SRR1 that are copied from MSR */
23 #define MSR_MASK 0xffffffff87c0ffffUL
25 #define MSR_MASK 0x87c0ffff
29 #define XER_SO 0x80000000U
30 #define XER_OV 0x40000000U
31 #define XER_CA 0x20000000U
32 #define XER_OV32 0x00080000U
33 #define XER_CA32 0x00040000U
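/* OV32 and CA32 are the 32-bit overflow/carry bits added in ISA v3.0 (POWER9). */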
37 * Functions in ldstfp.S
39 extern void get_fpr(int rn, double *p);
40 extern void put_fpr(int rn, const double *p);
41 extern void get_vr(int rn, __vector128 *p);
42 extern void put_vr(int rn, __vector128 *p);
43 extern void load_vsrn(int vsr, const void *p);
44 extern void store_vsrn(int vsr, void *p);
45 extern void conv_sp_to_dp(const float *sp, double *dp);
46 extern void conv_dp_to_sp(const double *dp, float *sp);
53 extern int do_lq(unsigned long ea, unsigned long *regs);
54 extern int do_stq(unsigned long ea, unsigned long val0, unsigned long val1);
55 extern int do_lqarx(unsigned long ea, unsigned long *regs);
56 extern int do_stqcx(unsigned long ea, unsigned long val0, unsigned long val1,
60 #ifdef __LITTLE_ENDIAN__
69 * Emulate the truncation of 64-bit values in 32-bit mode.
71 static nokprobe_inline unsigned long truncate_if_32bit(unsigned long msr,
75 if ((msr & MSR_64BIT) == 0)
82 * Determine whether a conditional branch instruction would branch.
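 * In the BO field: if bit 0x04 is clear, CTR is decremented and tested
 * (bit 0x02 selects branch-if-zero vs branch-if-nonzero); if bit 0x10 is
 * clear, the CR bit selected by BI must equal bit 0x08 of BO.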
84 static nokprobe_inline int branch_taken(unsigned int instr,
85 const struct pt_regs *regs,
86 struct instruction_op *op)
88 unsigned int bo = (instr >> 21) & 0x1f;
92 /* decrement counter */
94 if (((bo >> 1) & 1) ^ (regs->ctr == 1))
97 if ((bo & 0x10) == 0) {
98 /* check bit from CR */
99 bi = (instr >> 16) & 0x1f;
100 if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))
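/*
 * address_ok() sanity-checks an effective address: kernel-mode accesses are
 * always allowed, user-mode accesses must fit below the user address limit,
 * and a range that starts in bounds but runs past the end records the limit
 * in regs->dar so the caller reports a sensible faulting address.
 */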
106 static nokprobe_inline long address_ok(struct pt_regs *regs,
107 unsigned long ea, int nb)
109 if (!user_mode(regs))
111 if (__access_ok(ea, nb, USER_DS))
113 if (__access_ok(ea, 1, USER_DS))
114 /* Access overlaps the end of the user region */
115 regs->dar = USER_DS.seg;
122 * Calculate effective address for a D-form instruction
124 static nokprobe_inline unsigned long dform_ea(unsigned int instr,
125 const struct pt_regs *regs)
130 ra = (instr >> 16) & 0x1f;
131 ea = (signed short) instr; /* sign-extend */
140 * Calculate effective address for a DS-form instruction
142 static nokprobe_inline unsigned long dsform_ea(unsigned int instr,
143 const struct pt_regs *regs)
148 ra = (instr >> 16) & 0x1f;
149 ea = (signed short) (instr & ~3); /* sign-extend */
157 * Calculate effective address for a DQ-form instruction
159 static nokprobe_inline unsigned long dqform_ea(unsigned int instr,
160 const struct pt_regs *regs)
165 ra = (instr >> 16) & 0x1f;
166 ea = (signed short) (instr & ~0xf); /* sign-extend */
172 #endif /* __powerpc64__ */
175 * Calculate effective address for an X-form instruction
177 static nokprobe_inline unsigned long xform_ea(unsigned int instr,
178 const struct pt_regs *regs)
183 ra = (instr >> 16) & 0x1f;
184 rb = (instr >> 11) & 0x1f;
193 * Calculate effective address for a MLS:D-form / 8LS:D-form
194 * prefixed instruction
196 static nokprobe_inline unsigned long mlsd_8lsd_ea(unsigned int instr,
198 const struct pt_regs *regs)
202 unsigned long ea, d0, d1, d;
204 prefix_r = instr & (1ul << 20);
205 ra = (suffix >> 16) & 0x1f;
207 d0 = instr & 0x3ffff;
208 d1 = suffix & 0xffff;
212 * Sign-extend a 34-bit number
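 * (d >> 2 fits in 32 bits, so casting it to signed int and shifting back
 * left by two, re-attaching the low two bits, sign-extends the full 34-bit
 * displacement).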
214 dd = (unsigned int)(d >> 2);
216 ea = (ea << 2) | (d & 0x3);
220 else if (!prefix_r && !ra)
221 ; /* Leave ea as is */
222 else if (prefix_r && !ra)
224 else if (prefix_r && ra)
225 ; /* Invalid form. Should already be checked for by caller! */
231 * Return the largest power of 2, not greater than sizeof(unsigned long),
232 * such that x is a multiple of it.
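 * For example, max_align(6) == 2, max_align(12) == 4 and max_align(16) == 8
 * on a 64-bit kernel.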
234 static nokprobe_inline unsigned long max_align(unsigned long x)
236 x |= sizeof(unsigned long);
237 return x & -x; /* isolates rightmost bit */
240 static nokprobe_inline unsigned long byterev_2(unsigned long x)
242 return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
245 static nokprobe_inline unsigned long byterev_4(unsigned long x)
247 return ((x >> 24) & 0xff) | ((x >> 8) & 0xff00) |
248 ((x & 0xff00) << 8) | ((x & 0xff) << 24);
252 static nokprobe_inline unsigned long byterev_8(unsigned long x)
254 return (byterev_4(x) << 32) | byterev_4(x >> 32);
258 static nokprobe_inline void do_byte_reverse(void *ptr, int nb)
262 *(u16 *)ptr = byterev_2(*(u16 *)ptr);
265 *(u32 *)ptr = byterev_4(*(u32 *)ptr);
269 *(unsigned long *)ptr = byterev_8(*(unsigned long *)ptr);
272 unsigned long *up = (unsigned long *)ptr;
274 tmp = byterev_8(up[0]);
275 up[0] = byterev_8(up[1]);
285 static nokprobe_inline int read_mem_aligned(unsigned long *dest,
286 unsigned long ea, int nb,
287 struct pt_regs *regs)
294 err = __get_user(x, (unsigned char __user *) ea);
297 err = __get_user(x, (unsigned short __user *) ea);
300 err = __get_user(x, (unsigned int __user *) ea);
304 err = __get_user(x, (unsigned long __user *) ea);
316 * Copy from userspace to a buffer, using the largest possible
317 * aligned accesses, up to sizeof(long).
319 static nokprobe_inline int copy_mem_in(u8 *dest, unsigned long ea, int nb,
320 struct pt_regs *regs)
325 for (; nb > 0; nb -= c) {
331 err = __get_user(*dest, (unsigned char __user *) ea);
334 err = __get_user(*(u16 *)dest,
335 (unsigned short __user *) ea);
338 err = __get_user(*(u32 *)dest,
339 (unsigned int __user *) ea);
343 err = __get_user(*(unsigned long *)dest,
344 (unsigned long __user *) ea);
358 static nokprobe_inline int read_mem_unaligned(unsigned long *dest,
359 unsigned long ea, int nb,
360 struct pt_regs *regs)
364 u8 b[sizeof(unsigned long)];
370 i = IS_BE ? sizeof(unsigned long) - nb : 0;
371 err = copy_mem_in(&u.b[i], ea, nb, regs);
378 * Read memory at address ea for nb bytes, return 0 for success
379 * or -EFAULT if an error occurred. N.B. nb must be 1, 2, 4 or 8.
380 * If nb < sizeof(long), the result is right-justified on BE systems.
382 static int read_mem(unsigned long *dest, unsigned long ea, int nb,
383 struct pt_regs *regs)
385 if (!address_ok(regs, ea, nb))
387 if ((ea & (nb - 1)) == 0)
388 return read_mem_aligned(dest, ea, nb, regs);
389 return read_mem_unaligned(dest, ea, nb, regs);
391 NOKPROBE_SYMBOL(read_mem);
393 static nokprobe_inline int write_mem_aligned(unsigned long val,
394 unsigned long ea, int nb,
395 struct pt_regs *regs)
401 err = __put_user(val, (unsigned char __user *) ea);
404 err = __put_user(val, (unsigned short __user *) ea);
407 err = __put_user(val, (unsigned int __user *) ea);
411 err = __put_user(val, (unsigned long __user *) ea);
421 * Copy from a buffer to userspace, using the largest possible
422 * aligned accesses, up to sizeof(long).
424 static nokprobe_inline int copy_mem_out(u8 *dest, unsigned long ea, int nb,
425 struct pt_regs *regs)
430 for (; nb > 0; nb -= c) {
436 err = __put_user(*dest, (unsigned char __user *) ea);
439 err = __put_user(*(u16 *)dest,
440 (unsigned short __user *) ea);
443 err = __put_user(*(u32 *)dest,
444 (unsigned int __user *) ea);
448 err = __put_user(*(unsigned long *)dest,
449 (unsigned long __user *) ea);
463 static nokprobe_inline int write_mem_unaligned(unsigned long val,
464 unsigned long ea, int nb,
465 struct pt_regs *regs)
469 u8 b[sizeof(unsigned long)];
474 i = IS_BE ? sizeof(unsigned long) - nb : 0;
475 return copy_mem_out(&u.b[i], ea, nb, regs);
479 * Write memory at address ea for nb bytes, return 0 for success
480 * or -EFAULT if an error occurred. N.B. nb must be 1, 2, 4 or 8.
482 static int write_mem(unsigned long val, unsigned long ea, int nb,
483 struct pt_regs *regs)
485 if (!address_ok(regs, ea, nb))
487 if ((ea & (nb - 1)) == 0)
488 return write_mem_aligned(val, ea, nb, regs);
489 return write_mem_unaligned(val, ea, nb, regs);
491 NOKPROBE_SYMBOL(write_mem);
493 #ifdef CONFIG_PPC_FPU
495 * These access either the real FP register or the image in the
496 * thread_struct, depending on regs->msr & MSR_FP.
498 static int do_fp_load(struct instruction_op *op, unsigned long ea,
499 struct pt_regs *regs, bool cross_endian)
508 u8 b[2 * sizeof(double)];
511 nb = GETSIZE(op->type);
512 if (!address_ok(regs, ea, nb))
515 err = copy_mem_in(u.b, ea, nb, regs);
518 if (unlikely(cross_endian)) {
519 do_byte_reverse(u.b, min(nb, 8));
521 do_byte_reverse(&u.b[8], 8);
525 if (op->type & FPCONV)
526 conv_sp_to_dp(&u.f, &u.d[0]);
527 else if (op->type & SIGNEXT)
532 if (regs->msr & MSR_FP)
533 put_fpr(rn, &u.d[0]);
535 current->thread.TS_FPR(rn) = u.l[0];
539 if (regs->msr & MSR_FP)
540 put_fpr(rn, &u.d[1]);
542 current->thread.TS_FPR(rn) = u.l[1];
547 NOKPROBE_SYMBOL(do_fp_load);
549 static int do_fp_store(struct instruction_op *op, unsigned long ea,
550 struct pt_regs *regs, bool cross_endian)
558 u8 b[2 * sizeof(double)];
561 nb = GETSIZE(op->type);
562 if (!address_ok(regs, ea, nb))
566 if (regs->msr & MSR_FP)
567 get_fpr(rn, &u.d[0]);
569 u.l[0] = current->thread.TS_FPR(rn);
571 if (op->type & FPCONV)
572 conv_dp_to_sp(&u.d[0], &u.f);
578 if (regs->msr & MSR_FP)
579 get_fpr(rn, &u.d[1]);
581 u.l[1] = current->thread.TS_FPR(rn);
584 if (unlikely(cross_endian)) {
585 do_byte_reverse(u.b, min(nb, 8));
587 do_byte_reverse(&u.b[8], 8);
589 return copy_mem_out(u.b, ea, nb, regs);
591 NOKPROBE_SYMBOL(do_fp_store);
594 #ifdef CONFIG_ALTIVEC
595 /* For Altivec/VMX, no need to worry about alignment */
596 static nokprobe_inline int do_vec_load(int rn, unsigned long ea,
597 int size, struct pt_regs *regs,
603 u8 b[sizeof(__vector128)];
606 if (!address_ok(regs, ea & ~0xfUL, 16))
608 /* align to multiple of size */
610 err = copy_mem_in(&u.b[ea & 0xf], ea, size, regs);
613 if (unlikely(cross_endian))
614 do_byte_reverse(&u.b[ea & 0xf], size);
616 if (regs->msr & MSR_VEC)
619 current->thread.vr_state.vr[rn] = u.v;
624 static nokprobe_inline int do_vec_store(int rn, unsigned long ea,
625 int size, struct pt_regs *regs,
630 u8 b[sizeof(__vector128)];
633 if (!address_ok(regs, ea & ~0xfUL, 16))
635 /* align to multiple of size */
639 if (regs->msr & MSR_VEC)
642 u.v = current->thread.vr_state.vr[rn];
644 if (unlikely(cross_endian))
645 do_byte_reverse(&u.b[ea & 0xf], size);
646 return copy_mem_out(&u.b[ea & 0xf], ea, size, regs);
648 #endif /* CONFIG_ALTIVEC */
651 static nokprobe_inline int emulate_lq(struct pt_regs *regs, unsigned long ea,
652 int reg, bool cross_endian)
656 if (!address_ok(regs, ea, 16))
658 /* if aligned, should be atomic */
659 if ((ea & 0xf) == 0) {
660 err = do_lq(ea, &regs->gpr[reg]);
662 err = read_mem(&regs->gpr[reg + IS_LE], ea, 8, regs);
664 err = read_mem(&regs->gpr[reg + IS_BE], ea + 8, 8, regs);
666 if (!err && unlikely(cross_endian))
667 do_byte_reverse(&regs->gpr[reg], 16);
671 static nokprobe_inline int emulate_stq(struct pt_regs *regs, unsigned long ea,
672 int reg, bool cross_endian)
675 unsigned long vals[2];
677 if (!address_ok(regs, ea, 16))
679 vals[0] = regs->gpr[reg];
680 vals[1] = regs->gpr[reg + 1];
681 if (unlikely(cross_endian))
682 do_byte_reverse(vals, 16);
684 /* if aligned, should be atomic */
686 return do_stq(ea, vals[0], vals[1]);
688 err = write_mem(vals[IS_LE], ea, 8, regs);
690 err = write_mem(vals[IS_BE], ea + 8, 8, regs);
693 #endif /* __powerpc64__ */
696 void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
697 const void *mem, bool rev)
701 const unsigned int *wp;
702 const unsigned short *hp;
703 const unsigned char *bp;
705 size = GETSIZE(op->type);
706 reg->d[0] = reg->d[1] = 0;
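/*
 * element_size describes how the data is laid out in the VSX register:
 * 16 = the whole vector, 8 = doubleword scalars or pairs, and 4/2/1 =
 * word/halfword/byte elements that must be placed according to the host
 * endianness.
 */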
708 switch (op->element_size) {
710 /* whole vector; lxv[x] or lxvl[l] */
713 memcpy(reg, mem, size);
714 if (IS_LE && (op->vsx_flags & VSX_LDLEFT))
717 do_byte_reverse(reg, 16);
720 /* scalar loads, lxvd2x, lxvdsx */
721 read_size = (size >= 8) ? 8 : size;
722 i = IS_LE ? 8 : 8 - read_size;
723 memcpy(&reg->b[i], mem, read_size);
725 do_byte_reverse(&reg->b[i], 8);
727 if (op->type & SIGNEXT) {
728 /* size == 4 is the only case here */
729 reg->d[IS_LE] = (signed int) reg->d[IS_LE];
730 } else if (op->vsx_flags & VSX_FPCONV) {
732 conv_sp_to_dp(&reg->fp[1 + IS_LE],
738 unsigned long v = *(unsigned long *)(mem + 8);
739 reg->d[IS_BE] = !rev ? v : byterev_8(v);
740 } else if (op->vsx_flags & VSX_SPLAT)
741 reg->d[IS_BE] = reg->d[IS_LE];
747 for (j = 0; j < size / 4; ++j) {
748 i = IS_LE ? 3 - j : j;
749 reg->w[i] = !rev ? *wp++ : byterev_4(*wp++);
751 if (op->vsx_flags & VSX_SPLAT) {
752 u32 val = reg->w[IS_LE ? 3 : 0];
754 i = IS_LE ? 3 - j : j;
762 for (j = 0; j < size / 2; ++j) {
763 i = IS_LE ? 7 - j : j;
764 reg->h[i] = !rev ? *hp++ : byterev_2(*hp++);
770 for (j = 0; j < size; ++j) {
771 i = IS_LE ? 15 - j : j;
777 EXPORT_SYMBOL_GPL(emulate_vsx_load);
778 NOKPROBE_SYMBOL(emulate_vsx_load);
780 void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg,
783 int size, write_size;
790 size = GETSIZE(op->type);
792 switch (op->element_size) {
794 /* stxv, stxvx, stxvl, stxvll */
797 if (IS_LE && (op->vsx_flags & VSX_LDLEFT))
800 /* reverse 16 bytes */
801 buf.d[0] = byterev_8(reg->d[1]);
802 buf.d[1] = byterev_8(reg->d[0]);
805 memcpy(mem, reg, size);
808 /* scalar stores, stxvd2x */
809 write_size = (size >= 8) ? 8 : size;
810 i = IS_LE ? 8 : 8 - write_size;
811 if (size < 8 && op->vsx_flags & VSX_FPCONV) {
812 buf.d[0] = buf.d[1] = 0;
814 conv_dp_to_sp(&reg->dp[IS_LE], &buf.fp[1 + IS_LE]);
818 memcpy(mem, &reg->b[i], write_size);
820 memcpy(mem + 8, &reg->d[IS_BE], 8);
822 do_byte_reverse(mem, write_size);
824 do_byte_reverse(mem + 8, 8);
830 for (j = 0; j < size / 4; ++j) {
831 i = IS_LE ? 3 - j : j;
832 *wp++ = !rev ? reg->w[i] : byterev_4(reg->w[i]);
838 for (j = 0; j < size / 2; ++j) {
839 i = IS_LE ? 7 - j : j;
840 *hp++ = !rev ? reg->h[i] : byterev_2(reg->h[i]);
846 for (j = 0; j < size; ++j) {
847 i = IS_LE ? 15 - j : j;
853 EXPORT_SYMBOL_GPL(emulate_vsx_store);
854 NOKPROBE_SYMBOL(emulate_vsx_store);
856 static nokprobe_inline int do_vsx_load(struct instruction_op *op,
857 unsigned long ea, struct pt_regs *regs,
863 int size = GETSIZE(op->type);
865 if (!address_ok(regs, ea, size) || copy_mem_in(mem, ea, size, regs))
868 emulate_vsx_load(op, &buf, mem, cross_endian);
871 /* FP regs + extensions */
872 if (regs->msr & MSR_FP) {
873 load_vsrn(reg, &buf);
875 current->thread.fp_state.fpr[reg][0] = buf.d[0];
876 current->thread.fp_state.fpr[reg][1] = buf.d[1];
879 if (regs->msr & MSR_VEC)
880 load_vsrn(reg, &buf);
882 current->thread.vr_state.vr[reg - 32] = buf.v;
888 static nokprobe_inline int do_vsx_store(struct instruction_op *op,
889 unsigned long ea, struct pt_regs *regs,
895 int size = GETSIZE(op->type);
897 if (!address_ok(regs, ea, size))
902 /* FP regs + extensions */
903 if (regs->msr & MSR_FP) {
904 store_vsrn(reg, &buf);
906 buf.d[0] = current->thread.fp_state.fpr[reg][0];
907 buf.d[1] = current->thread.fp_state.fpr[reg][1];
910 if (regs->msr & MSR_VEC)
911 store_vsrn(reg, &buf);
913 buf.v = current->thread.vr_state.vr[reg - 32];
916 emulate_vsx_store(op, &buf, mem, cross_endian);
917 return copy_mem_out(mem, ea, size, regs);
919 #endif /* CONFIG_VSX */
921 int emulate_dcbz(unsigned long ea, struct pt_regs *regs)
924 unsigned long i, size;
927 size = ppc64_caches.l1d.block_size;
928 if (!(regs->msr & MSR_64BIT))
931 size = L1_CACHE_BYTES;
934 if (!address_ok(regs, ea, size))
936 for (i = 0; i < size; i += sizeof(long)) {
937 err = __put_user(0, (unsigned long __user *) (ea + i));
945 NOKPROBE_SYMBOL(emulate_dcbz);
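/*
 * The __put_user_asmx(), __get_user_asmx() and __cacheop_user_asmx() helpers
 * below wrap a single load-reserve/store-conditional or cache-management
 * instruction with a fixup, so that a fault sets 'err' to -EFAULT instead of
 * oopsing; __put_user_asmx() also returns the resulting CR value for
 * stcx.-style instructions.
 */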
947 #define __put_user_asmx(x, addr, err, op, cr) \
948 __asm__ __volatile__( \
949 "1: " op " %2,0,%3\n" \
952 ".section .fixup,\"ax\"\n" \
957 : "=r" (err), "=r" (cr) \
958 : "r" (x), "r" (addr), "i" (-EFAULT), "0" (err))
960 #define __get_user_asmx(x, addr, err, op) \
961 __asm__ __volatile__( \
962 "1: "op" %1,0,%2\n" \
964 ".section .fixup,\"ax\"\n" \
969 : "=r" (err), "=r" (x) \
970 : "r" (addr), "i" (-EFAULT), "0" (err))
972 #define __cacheop_user_asmx(addr, err, op) \
973 __asm__ __volatile__( \
976 ".section .fixup,\"ax\"\n" \
982 : "r" (addr), "i" (-EFAULT), "0" (err))
984 static nokprobe_inline void set_cr0(const struct pt_regs *regs,
985 struct instruction_op *op)
990 op->ccval = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
992 if (!(regs->msr & MSR_64BIT))
996 op->ccval |= 0x80000000;
998 op->ccval |= 0x40000000;
1000 op->ccval |= 0x20000000;
1003 static nokprobe_inline void set_ca32(struct instruction_op *op, bool val)
1005 if (cpu_has_feature(CPU_FTR_ARCH_300)) {
1007 op->xerval |= XER_CA32;
1009 op->xerval &= ~XER_CA32;
1013 static nokprobe_inline void add_with_carry(const struct pt_regs *regs,
1014 struct instruction_op *op, int rd,
1015 unsigned long val1, unsigned long val2,
1016 unsigned long carry_in)
1018 unsigned long val = val1 + val2;
1022 op->type = COMPUTE + SETREG + SETXER;
1025 #ifdef __powerpc64__
1026 if (!(regs->msr & MSR_64BIT)) {
1027 val = (unsigned int) val;
1028 val1 = (unsigned int) val1;
1031 op->xerval = regs->xer;
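/*
 * Unsigned carry out of the addition occurred iff the (possibly truncated)
 * result is below the first addend, or equal to it when a carry came in.
 */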
1032 if (val < val1 || (carry_in && val == val1))
1033 op->xerval |= XER_CA;
1035 op->xerval &= ~XER_CA;
1037 set_ca32(op, (unsigned int)val < (unsigned int)val1 ||
1038 (carry_in && (unsigned int)val == (unsigned int)val1));
1041 static nokprobe_inline void do_cmp_signed(const struct pt_regs *regs,
1042 struct instruction_op *op,
1043 long v1, long v2, int crfld)
1045 unsigned int crval, shift;
1047 op->type = COMPUTE + SETCC;
1048 crval = (regs->xer >> 31) & 1; /* get SO bit */
1055 shift = (7 - crfld) * 4;
1056 op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift);
1059 static nokprobe_inline void do_cmp_unsigned(const struct pt_regs *regs,
1060 struct instruction_op *op,
1062 unsigned long v2, int crfld)
1064 unsigned int crval, shift;
1066 op->type = COMPUTE + SETCC;
1067 crval = (regs->xer >> 31) & 1; /* get SO bit */
1074 shift = (7 - crfld) * 4;
1075 op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift);
1078 static nokprobe_inline void do_cmpb(const struct pt_regs *regs,
1079 struct instruction_op *op,
1080 unsigned long v1, unsigned long v2)
1082 unsigned long long out_val, mask;
1086 for (i = 0; i < 8; i++) {
1087 mask = 0xffUL << (i * 8);
1088 if ((v1 & mask) == (v2 & mask))
1095 * The size parameter is used to adjust the equivalent popcnt instruction.
1096 * popcntb = 8, popcntw = 32, popcntd = 64
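 * The bit-twiddling below is the usual SWAR population count: each step sums
 * adjacent bit-fields in parallel, so per-byte counts fall out for popcntb
 * and the wider sums for popcntw/popcntd.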
1098 static nokprobe_inline void do_popcnt(const struct pt_regs *regs,
1099 struct instruction_op *op,
1100 unsigned long v1, int size)
1102 unsigned long long out = v1;
1104 out -= (out >> 1) & 0x5555555555555555ULL;
1105 out = (0x3333333333333333ULL & out) +
1106 (0x3333333333333333ULL & (out >> 2));
1107 out = (out + (out >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
1109 if (size == 8) { /* popcntb */
1115 if (size == 32) { /* popcntw */
1116 op->val = out & 0x0000003f0000003fULL;
1120 out = (out + (out >> 32)) & 0x7f;
1121 op->val = out; /* popcntd */
1125 static nokprobe_inline void do_bpermd(const struct pt_regs *regs,
1126 struct instruction_op *op,
1127 unsigned long v1, unsigned long v2)
1129 unsigned char perm, idx;
1133 for (i = 0; i < 8; i++) {
1134 idx = (v1 >> (i * 8)) & 0xff;
1136 if (v2 & PPC_BIT(idx))
1141 #endif /* CONFIG_PPC64 */
1143 * The size parameter adjusts the equivalent prty instruction.
1144 * prtyw = 32, prtyd = 64
1146 static nokprobe_inline void do_prty(const struct pt_regs *regs,
1147 struct instruction_op *op,
1148 unsigned long v, int size)
1150 unsigned long long res = v ^ (v >> 8);
1153 if (size == 32) { /* prtyw */
1154 op->val = res & 0x0000000100000001ULL;
1159 op->val = res & 1; /* prtyd */
1162 static nokprobe_inline int trap_compare(long v1, long v2)
1172 if ((unsigned long)v1 < (unsigned long)v2)
1174 else if ((unsigned long)v1 > (unsigned long)v2)
1180 * Elements of 32-bit rotate and mask instructions.
1182 #define MASK32(mb, me) ((0xffffffffUL >> (mb)) + \
1183 ((signed long)-0x80000000L >> (me)) + ((me) >= (mb)))
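/*
 * MASK32(mb, me) builds the rotate-and-mask mask with 1s from bit mb through
 * bit me in IBM (MSB = bit 0) numbering; e.g. MASK32(24, 31) == 0xff.  When
 * me < mb the mask wraps around, as the ISA specifies.
 */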
1184 #ifdef __powerpc64__
1185 #define MASK64_L(mb) (~0UL >> (mb))
1186 #define MASK64_R(me) ((signed long)-0x8000000000000000L >> (me))
1187 #define MASK64(mb, me) (MASK64_L(mb) + MASK64_R(me) + ((me) >= (mb)))
1188 #define DATA32(x) (((x) & 0xffffffffUL) | (((x) & 0xffffffffUL) << 32))
1190 #define DATA32(x) (x)
1192 #define ROTATE(x, n) ((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x))
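/*
 * The "(n) ? ... : (x)" guard in ROTATE() avoids an undefined shift by the
 * full register width when the rotate count is zero.
 */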
1195 * Decode an instruction, and return information about it in *op
1196 * without changing *regs.
1197 * Integer arithmetic and logical instructions, branches, and barrier
1198 * instructions can be emulated just using the information in *op.
1200 * Return value is 1 if the instruction can be emulated just by
1201 * updating *regs with the information in *op, -1 if we need the
1202 * GPRs but *regs doesn't contain the full register set, or 0
1205 int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
1206 struct ppc_inst instr)
1209 unsigned int suffixopcode, prefixtype, prefix_r;
1211 unsigned int opcode, ra, rb, rc, rd, spr, u;
1212 unsigned long int imm;
1213 unsigned long int val, val2;
1214 unsigned int mb, me, sh;
1215 unsigned int word, suffix;
1218 word = ppc_inst_val(instr);
1219 suffix = ppc_inst_suffix(instr);
1223 opcode = ppc_inst_primary_opcode(instr);
1227 imm = (signed short)(word & 0xfffc);
1228 if ((word & 2) == 0)
1230 op->val = truncate_if_32bit(regs->msr, imm);
1233 if (branch_taken(word, regs, op))
1234 op->type |= BRTAKEN;
1238 if ((word & 0xfe2) == 2)
1240 else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
1241 (word & 0xfe3) == 1)
1242 op->type = SYSCALL_VECTORED_0;
1248 op->type = BRANCH | BRTAKEN;
1249 imm = word & 0x03fffffc;
1250 if (imm & 0x02000000)
1252 if ((word & 2) == 0)
1254 op->val = truncate_if_32bit(regs->msr, imm);
1259 switch ((word >> 1) & 0x3ff) {
1261 op->type = COMPUTE + SETCC;
1262 rd = 7 - ((word >> 23) & 0x7);
1263 ra = 7 - ((word >> 18) & 0x7);
1266 val = (regs->ccr >> ra) & 0xf;
1267 op->ccval = (regs->ccr & ~(0xfUL << rd)) | (val << rd);
1271 case 528: /* bcctr */
1273 imm = (word & 0x400)? regs->ctr: regs->link;
1274 op->val = truncate_if_32bit(regs->msr, imm);
1277 if (branch_taken(word, regs, op))
1278 op->type |= BRTAKEN;
1281 case 18: /* rfid, scary */
1282 if (regs->msr & MSR_PR)
1287 case 150: /* isync */
1288 op->type = BARRIER | BARRIER_ISYNC;
1291 case 33: /* crnor */
1292 case 129: /* crandc */
1293 case 193: /* crxor */
1294 case 225: /* crnand */
1295 case 257: /* crand */
1296 case 289: /* creqv */
1297 case 417: /* crorc */
1298 case 449: /* cror */
1299 op->type = COMPUTE + SETCC;
1300 ra = (word >> 16) & 0x1f;
1301 rb = (word >> 11) & 0x1f;
1302 rd = (word >> 21) & 0x1f;
1303 ra = (regs->ccr >> (31 - ra)) & 1;
1304 rb = (regs->ccr >> (31 - rb)) & 1;
1305 val = (word >> (6 + ra * 2 + rb)) & 1;
1306 op->ccval = (regs->ccr & ~(1UL << (31 - rd))) |
1312 switch ((word >> 1) & 0x3ff) {
1313 case 598: /* sync */
1314 op->type = BARRIER + BARRIER_SYNC;
1315 #ifdef __powerpc64__
1316 switch ((word >> 21) & 3) {
1317 case 1: /* lwsync */
1318 op->type = BARRIER + BARRIER_LWSYNC;
1320 case 2: /* ptesync */
1321 op->type = BARRIER + BARRIER_PTESYNC;
1327 case 854: /* eieio */
1328 op->type = BARRIER + BARRIER_EIEIO;
1334 /* Following cases refer to regs->gpr[], so we need all regs */
1335 if (!FULL_REGS(regs))
1338 rd = (word >> 21) & 0x1f;
1339 ra = (word >> 16) & 0x1f;
1340 rb = (word >> 11) & 0x1f;
1341 rc = (word >> 6) & 0x1f;
1344 #ifdef __powerpc64__
1346 prefix_r = word & (1ul << 20);
1347 ra = (suffix >> 16) & 0x1f;
1348 rd = (suffix >> 21) & 0x1f;
1350 op->val = regs->gpr[rd];
1351 suffixopcode = get_op(suffix);
1352 prefixtype = (word >> 24) & 0x3;
1353 switch (prefixtype) {
1357 switch (suffixopcode) {
1358 case 14: /* paddi */
1359 op->type = COMPUTE | PREFIXED;
1360 op->val = mlsd_8lsd_ea(word, suffix, regs);
1366 if (rd & trap_compare(regs->gpr[ra], (short) word))
1371 if (rd & trap_compare((int)regs->gpr[ra], (short) word))
1375 #ifdef __powerpc64__
1377 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1380 switch (word & 0x3f) {
1381 case 48: /* maddhd */
1382 asm volatile(PPC_MADDHD(%0, %1, %2, %3) :
1383 "=r" (op->val) : "r" (regs->gpr[ra]),
1384 "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
1387 case 49: /* maddhdu */
1388 asm volatile(PPC_MADDHDU(%0, %1, %2, %3) :
1389 "=r" (op->val) : "r" (regs->gpr[ra]),
1390 "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
1393 case 51: /* maddld */
1394 asm volatile(PPC_MADDLD(%0, %1, %2, %3) :
1395 "=r" (op->val) : "r" (regs->gpr[ra]),
1396 "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
1401 * There are other instructions from ISA 3.0 with the same
1402 * primary opcode which do not have emulation support yet.
1408 op->val = regs->gpr[ra] * (short) word;
1411 case 8: /* subfic */
1413 add_with_carry(regs, op, rd, ~regs->gpr[ra], imm, 1);
1416 case 10: /* cmpli */
1417 imm = (unsigned short) word;
1418 val = regs->gpr[ra];
1419 #ifdef __powerpc64__
1421 val = (unsigned int) val;
1423 do_cmp_unsigned(regs, op, val, imm, rd >> 2);
1428 val = regs->gpr[ra];
1429 #ifdef __powerpc64__
1433 do_cmp_signed(regs, op, val, imm, rd >> 2);
1436 case 12: /* addic */
1438 add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
1441 case 13: /* addic. */
1443 add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
1450 imm += regs->gpr[ra];
1454 case 15: /* addis */
1455 imm = ((short) word) << 16;
1457 imm += regs->gpr[ra];
1462 if (((word >> 1) & 0x1f) == 2) {
1464 imm = (short) (word & 0xffc1); /* d0 + d2 fields */
1465 imm |= (word >> 15) & 0x3e; /* d1 field */
1466 op->val = regs->nip + (imm << 16) + 4;
1472 case 20: /* rlwimi */
1473 mb = (word >> 6) & 0x1f;
1474 me = (word >> 1) & 0x1f;
1475 val = DATA32(regs->gpr[rd]);
1476 imm = MASK32(mb, me);
1477 op->val = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm);
1480 case 21: /* rlwinm */
1481 mb = (word >> 6) & 0x1f;
1482 me = (word >> 1) & 0x1f;
1483 val = DATA32(regs->gpr[rd]);
1484 op->val = ROTATE(val, rb) & MASK32(mb, me);
1487 case 23: /* rlwnm */
1488 mb = (word >> 6) & 0x1f;
1489 me = (word >> 1) & 0x1f;
1490 rb = regs->gpr[rb] & 0x1f;
1491 val = DATA32(regs->gpr[rd]);
1492 op->val = ROTATE(val, rb) & MASK32(mb, me);
1496 op->val = regs->gpr[rd] | (unsigned short) word;
1497 goto logical_done_nocc;
1500 imm = (unsigned short) word;
1501 op->val = regs->gpr[rd] | (imm << 16);
1502 goto logical_done_nocc;
1505 op->val = regs->gpr[rd] ^ (unsigned short) word;
1506 goto logical_done_nocc;
1508 case 27: /* xoris */
1509 imm = (unsigned short) word;
1510 op->val = regs->gpr[rd] ^ (imm << 16);
1511 goto logical_done_nocc;
1513 case 28: /* andi. */
1514 op->val = regs->gpr[rd] & (unsigned short) word;
1516 goto logical_done_nocc;
1518 case 29: /* andis. */
1519 imm = (unsigned short) word;
1520 op->val = regs->gpr[rd] & (imm << 16);
1522 goto logical_done_nocc;
1524 #ifdef __powerpc64__
1526 mb = ((word >> 6) & 0x1f) | (word & 0x20);
1527 val = regs->gpr[rd];
1528 if ((word & 0x10) == 0) {
1529 sh = rb | ((word & 2) << 4);
1530 val = ROTATE(val, sh);
1531 switch ((word >> 2) & 3) {
1532 case 0: /* rldicl */
1533 val &= MASK64_L(mb);
1535 case 1: /* rldicr */
1536 val &= MASK64_R(mb);
1539 val &= MASK64(mb, 63 - sh);
1541 case 3: /* rldimi */
1542 imm = MASK64(mb, 63 - sh);
1543 val = (regs->gpr[ra] & ~imm) |
1549 sh = regs->gpr[rb] & 0x3f;
1550 val = ROTATE(val, sh);
1551 switch ((word >> 1) & 7) {
1553 op->val = val & MASK64_L(mb);
1556 op->val = val & MASK64_R(mb);
1561 op->type = UNKNOWN; /* illegal instruction */
1565 /* isel occupies 32 minor opcodes */
1566 if (((word >> 1) & 0x1f) == 15) {
1567 mb = (word >> 6) & 0x1f; /* bc field */
1568 val = (regs->ccr >> (31 - mb)) & 1;
1569 val2 = (ra) ? regs->gpr[ra] : 0;
1571 op->val = (val) ? val2 : regs->gpr[rb];
1575 switch ((word >> 1) & 0x3ff) {
1578 (rd & trap_compare((int)regs->gpr[ra],
1579 (int)regs->gpr[rb])))
1582 #ifdef __powerpc64__
1584 if (rd & trap_compare(regs->gpr[ra], regs->gpr[rb]))
1588 case 83: /* mfmsr */
1589 if (regs->msr & MSR_PR)
1594 case 146: /* mtmsr */
1595 if (regs->msr & MSR_PR)
1599 op->val = 0xffffffff & ~(MSR_ME | MSR_LE);
1602 case 178: /* mtmsrd */
1603 if (regs->msr & MSR_PR)
1607 /* only MSR_EE and MSR_RI get changed if bit 15 set */
1608 /* mtmsrd doesn't change MSR_HV, MSR_ME or MSR_LE */
1609 imm = (word & 0x10000)? 0x8002: 0xefffffffffffeffeUL;
1616 if ((word >> 20) & 1) {
1618 for (sh = 0; sh < 8; ++sh) {
1619 if (word & (0x80000 >> sh))
1624 op->val = regs->ccr & imm;
1627 case 144: /* mtcrf */
1628 op->type = COMPUTE + SETCC;
1630 val = regs->gpr[rd];
1631 op->ccval = regs->ccr;
1632 for (sh = 0; sh < 8; ++sh) {
1633 if (word & (0x80000 >> sh))
1634 op->ccval = (op->ccval & ~imm) |
1640 case 339: /* mfspr */
1641 spr = ((word >> 16) & 0x1f) | ((word >> 6) & 0x3e0);
1645 if (spr == SPRN_XER || spr == SPRN_LR ||
1650 case 467: /* mtspr */
1651 spr = ((word >> 16) & 0x1f) | ((word >> 6) & 0x3e0);
1653 op->val = regs->gpr[rd];
1655 if (spr == SPRN_XER || spr == SPRN_LR ||
1661 * Compare instructions
1664 val = regs->gpr[ra];
1665 val2 = regs->gpr[rb];
1666 #ifdef __powerpc64__
1667 if ((rd & 1) == 0) {
1668 /* word (32-bit) compare */
1673 do_cmp_signed(regs, op, val, val2, rd >> 2);
1677 val = regs->gpr[ra];
1678 val2 = regs->gpr[rb];
1679 #ifdef __powerpc64__
1680 if ((rd & 1) == 0) {
1681 /* word (32-bit) compare */
1682 val = (unsigned int) val;
1683 val2 = (unsigned int) val2;
1686 do_cmp_unsigned(regs, op, val, val2, rd >> 2);
1689 case 508: /* cmpb */
1690 do_cmpb(regs, op, regs->gpr[rd], regs->gpr[rb]);
1691 goto logical_done_nocc;
1694 * Arithmetic instructions
1697 add_with_carry(regs, op, rd, ~regs->gpr[ra],
1700 #ifdef __powerpc64__
1701 case 9: /* mulhdu */
1702 asm("mulhdu %0,%1,%2" : "=r" (op->val) :
1703 "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1707 add_with_carry(regs, op, rd, regs->gpr[ra],
1711 case 11: /* mulhwu */
1712 asm("mulhwu %0,%1,%2" : "=r" (op->val) :
1713 "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1717 op->val = regs->gpr[rb] - regs->gpr[ra];
1719 #ifdef __powerpc64__
1720 case 73: /* mulhd */
1721 asm("mulhd %0,%1,%2" : "=r" (op->val) :
1722 "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1725 case 75: /* mulhw */
1726 asm("mulhw %0,%1,%2" : "=r" (op->val) :
1727 "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1731 op->val = -regs->gpr[ra];
1734 case 136: /* subfe */
1735 add_with_carry(regs, op, rd, ~regs->gpr[ra],
1736 regs->gpr[rb], regs->xer & XER_CA);
1739 case 138: /* adde */
1740 add_with_carry(regs, op, rd, regs->gpr[ra],
1741 regs->gpr[rb], regs->xer & XER_CA);
1744 case 200: /* subfze */
1745 add_with_carry(regs, op, rd, ~regs->gpr[ra], 0L,
1746 regs->xer & XER_CA);
1749 case 202: /* addze */
1750 add_with_carry(regs, op, rd, regs->gpr[ra], 0L,
1751 regs->xer & XER_CA);
1754 case 232: /* subfme */
1755 add_with_carry(regs, op, rd, ~regs->gpr[ra], -1L,
1756 regs->xer & XER_CA);
1758 #ifdef __powerpc64__
1759 case 233: /* mulld */
1760 op->val = regs->gpr[ra] * regs->gpr[rb];
1763 case 234: /* addme */
1764 add_with_carry(regs, op, rd, regs->gpr[ra], -1L,
1765 regs->xer & XER_CA);
1768 case 235: /* mullw */
1769 op->val = (long)(int) regs->gpr[ra] *
1770 (int) regs->gpr[rb];
1773 #ifdef __powerpc64__
1774 case 265: /* modud */
1775 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1777 op->val = regs->gpr[ra] % regs->gpr[rb];
1781 op->val = regs->gpr[ra] + regs->gpr[rb];
1784 case 267: /* moduw */
1785 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1787 op->val = (unsigned int) regs->gpr[ra] %
1788 (unsigned int) regs->gpr[rb];
1790 #ifdef __powerpc64__
1791 case 457: /* divdu */
1792 op->val = regs->gpr[ra] / regs->gpr[rb];
1795 case 459: /* divwu */
1796 op->val = (unsigned int) regs->gpr[ra] /
1797 (unsigned int) regs->gpr[rb];
1799 #ifdef __powerpc64__
1800 case 489: /* divd */
1801 op->val = (long int) regs->gpr[ra] /
1802 (long int) regs->gpr[rb];
1805 case 491: /* divw */
1806 op->val = (int) regs->gpr[ra] /
1807 (int) regs->gpr[rb];
1810 case 755: /* darn */
1811 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1815 /* 32-bit conditioned */
1816 asm volatile(PPC_DARN(%0, 0) : "=r" (op->val));
1820 /* 64-bit conditioned */
1821 asm volatile(PPC_DARN(%0, 1) : "=r" (op->val));
1826 asm volatile(PPC_DARN(%0, 2) : "=r" (op->val));
1831 #ifdef __powerpc64__
1832 case 777: /* modsd */
1833 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1835 op->val = (long int) regs->gpr[ra] %
1836 (long int) regs->gpr[rb];
1839 case 779: /* modsw */
1840 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1842 op->val = (int) regs->gpr[ra] %
1843 (int) regs->gpr[rb];
1848 * Logical instructions
1850 case 26: /* cntlzw */
1851 val = (unsigned int) regs->gpr[rd];
1852 op->val = ( val ? __builtin_clz(val) : 32 );
1854 #ifdef __powerpc64__
1855 case 58: /* cntlzd */
1856 val = regs->gpr[rd];
1857 op->val = ( val ? __builtin_clzl(val) : 64 );
1861 op->val = regs->gpr[rd] & regs->gpr[rb];
1865 op->val = regs->gpr[rd] & ~regs->gpr[rb];
1868 case 122: /* popcntb */
1869 do_popcnt(regs, op, regs->gpr[rd], 8);
1870 goto logical_done_nocc;
1873 op->val = ~(regs->gpr[rd] | regs->gpr[rb]);
1876 case 154: /* prtyw */
1877 do_prty(regs, op, regs->gpr[rd], 32);
1878 goto logical_done_nocc;
1880 case 186: /* prtyd */
1881 do_prty(regs, op, regs->gpr[rd], 64);
1882 goto logical_done_nocc;
1884 case 252: /* bpermd */
1885 do_bpermd(regs, op, regs->gpr[rd], regs->gpr[rb]);
1886 goto logical_done_nocc;
1889 op->val = ~(regs->gpr[rd] ^ regs->gpr[rb]);
1893 op->val = regs->gpr[rd] ^ regs->gpr[rb];
1896 case 378: /* popcntw */
1897 do_popcnt(regs, op, regs->gpr[rd], 32);
1898 goto logical_done_nocc;
1901 op->val = regs->gpr[rd] | ~regs->gpr[rb];
1905 op->val = regs->gpr[rd] | regs->gpr[rb];
1908 case 476: /* nand */
1909 op->val = ~(regs->gpr[rd] & regs->gpr[rb]);
1912 case 506: /* popcntd */
1913 do_popcnt(regs, op, regs->gpr[rd], 64);
1914 goto logical_done_nocc;
1916 case 538: /* cnttzw */
1917 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1919 val = (unsigned int) regs->gpr[rd];
1920 op->val = (val ? __builtin_ctz(val) : 32);
1922 #ifdef __powerpc64__
1923 case 570: /* cnttzd */
1924 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1926 val = regs->gpr[rd];
1927 op->val = (val ? __builtin_ctzl(val) : 64);
1930 case 922: /* extsh */
1931 op->val = (signed short) regs->gpr[rd];
1934 case 954: /* extsb */
1935 op->val = (signed char) regs->gpr[rd];
1937 #ifdef __powerpc64__
1938 case 986: /* extsw */
1939 op->val = (signed int) regs->gpr[rd];
1944 * Shift instructions
1947 sh = regs->gpr[rb] & 0x3f;
1949 op->val = (regs->gpr[rd] << sh) & 0xffffffffUL;
1955 sh = regs->gpr[rb] & 0x3f;
1957 op->val = (regs->gpr[rd] & 0xffffffffUL) >> sh;
1962 case 792: /* sraw */
1963 op->type = COMPUTE + SETREG + SETXER;
1964 sh = regs->gpr[rb] & 0x3f;
1965 ival = (signed int) regs->gpr[rd];
1966 op->val = ival >> (sh < 32 ? sh : 31);
1967 op->xerval = regs->xer;
1968 if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0))
1969 op->xerval |= XER_CA;
1971 op->xerval &= ~XER_CA;
1972 set_ca32(op, op->xerval & XER_CA);
1975 case 824: /* srawi */
1976 op->type = COMPUTE + SETREG + SETXER;
1978 ival = (signed int) regs->gpr[rd];
1979 op->val = ival >> sh;
1980 op->xerval = regs->xer;
1981 if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
1982 op->xerval |= XER_CA;
1984 op->xerval &= ~XER_CA;
1985 set_ca32(op, op->xerval & XER_CA);
1988 #ifdef __powerpc64__
1990 sh = regs->gpr[rb] & 0x7f;
1992 op->val = regs->gpr[rd] << sh;
1998 sh = regs->gpr[rb] & 0x7f;
2000 op->val = regs->gpr[rd] >> sh;
2005 case 794: /* srad */
2006 op->type = COMPUTE + SETREG + SETXER;
2007 sh = regs->gpr[rb] & 0x7f;
2008 ival = (signed long int) regs->gpr[rd];
2009 op->val = ival >> (sh < 64 ? sh : 63);
2010 op->xerval = regs->xer;
2011 if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0))
2012 op->xerval |= XER_CA;
2014 op->xerval &= ~XER_CA;
2015 set_ca32(op, op->xerval & XER_CA);
2018 case 826: /* sradi with sh_5 = 0 */
2019 case 827: /* sradi with sh_5 = 1 */
2020 op->type = COMPUTE + SETREG + SETXER;
2021 sh = rb | ((word & 2) << 4);
2022 ival = (signed long int) regs->gpr[rd];
2023 op->val = ival >> sh;
2024 op->xerval = regs->xer;
2025 if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
2026 op->xerval |= XER_CA;
2028 op->xerval &= ~XER_CA;
2029 set_ca32(op, op->xerval & XER_CA);
2032 case 890: /* extswsli with sh_5 = 0 */
2033 case 891: /* extswsli with sh_5 = 1 */
2034 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2036 op->type = COMPUTE + SETREG;
2037 sh = rb | ((word & 2) << 4);
2038 val = (signed int) regs->gpr[rd];
2040 op->val = ROTATE(val, sh) & MASK64(0, 63 - sh);
2045 #endif /* __powerpc64__ */
2048 * Cache instructions
2050 case 54: /* dcbst */
2051 op->type = MKOP(CACHEOP, DCBST, 0);
2052 op->ea = xform_ea(word, regs);
2056 op->type = MKOP(CACHEOP, DCBF, 0);
2057 op->ea = xform_ea(word, regs);
2060 case 246: /* dcbtst */
2061 op->type = MKOP(CACHEOP, DCBTST, 0);
2062 op->ea = xform_ea(word, regs);
2066 case 278: /* dcbt */
2067 op->type = MKOP(CACHEOP, DCBT, 0);
2068 op->ea = xform_ea(word, regs);
2072 case 982: /* icbi */
2073 op->type = MKOP(CACHEOP, ICBI, 0);
2074 op->ea = xform_ea(word, regs);
2077 case 1014: /* dcbz */
2078 op->type = MKOP(CACHEOP, DCBZ, 0);
2079 op->ea = xform_ea(word, regs);
2089 op->update_reg = ra;
2091 op->val = regs->gpr[rd];
2092 u = (word >> 20) & UPDATE;
2098 op->ea = xform_ea(word, regs);
2099 switch ((word >> 1) & 0x3ff) {
2100 case 20: /* lwarx */
2101 op->type = MKOP(LARX, 0, 4);
2104 case 150: /* stwcx. */
2105 op->type = MKOP(STCX, 0, 4);
2108 #ifdef __powerpc64__
2109 case 84: /* ldarx */
2110 op->type = MKOP(LARX, 0, 8);
2113 case 214: /* stdcx. */
2114 op->type = MKOP(STCX, 0, 8);
2117 case 52: /* lbarx */
2118 op->type = MKOP(LARX, 0, 1);
2121 case 694: /* stbcx. */
2122 op->type = MKOP(STCX, 0, 1);
2125 case 116: /* lharx */
2126 op->type = MKOP(LARX, 0, 2);
2129 case 726: /* sthcx. */
2130 op->type = MKOP(STCX, 0, 2);
2133 case 276: /* lqarx */
2134 if (!((rd & 1) || rd == ra || rd == rb))
2135 op->type = MKOP(LARX, 0, 16);
2138 case 182: /* stqcx. */
2140 op->type = MKOP(STCX, 0, 16);
2145 case 55: /* lwzux */
2146 op->type = MKOP(LOAD, u, 4);
2150 case 119: /* lbzux */
2151 op->type = MKOP(LOAD, u, 1);
2154 #ifdef CONFIG_ALTIVEC
2156 * Note: for the load/store vector element instructions,
2157 * bits of the EA say which field of the VMX register to use.
2160 op->type = MKOP(LOAD_VMX, 0, 1);
2161 op->element_size = 1;
2164 case 39: /* lvehx */
2165 op->type = MKOP(LOAD_VMX, 0, 2);
2166 op->element_size = 2;
2169 case 71: /* lvewx */
2170 op->type = MKOP(LOAD_VMX, 0, 4);
2171 op->element_size = 4;
2175 case 359: /* lvxl */
2176 op->type = MKOP(LOAD_VMX, 0, 16);
2177 op->element_size = 16;
2180 case 135: /* stvebx */
2181 op->type = MKOP(STORE_VMX, 0, 1);
2182 op->element_size = 1;
2185 case 167: /* stvehx */
2186 op->type = MKOP(STORE_VMX, 0, 2);
2187 op->element_size = 2;
2190 case 199: /* stvewx */
2191 op->type = MKOP(STORE_VMX, 0, 4);
2192 op->element_size = 4;
2195 case 231: /* stvx */
2196 case 487: /* stvxl */
2197 op->type = MKOP(STORE_VMX, 0, 16);
2199 #endif /* CONFIG_ALTIVEC */
2201 #ifdef __powerpc64__
2204 op->type = MKOP(LOAD, u, 8);
2207 case 149: /* stdx */
2208 case 181: /* stdux */
2209 op->type = MKOP(STORE, u, 8);
2213 case 151: /* stwx */
2214 case 183: /* stwux */
2215 op->type = MKOP(STORE, u, 4);
2218 case 215: /* stbx */
2219 case 247: /* stbux */
2220 op->type = MKOP(STORE, u, 1);
2223 case 279: /* lhzx */
2224 case 311: /* lhzux */
2225 op->type = MKOP(LOAD, u, 2);
2228 #ifdef __powerpc64__
2229 case 341: /* lwax */
2230 case 373: /* lwaux */
2231 op->type = MKOP(LOAD, SIGNEXT | u, 4);
2235 case 343: /* lhax */
2236 case 375: /* lhaux */
2237 op->type = MKOP(LOAD, SIGNEXT | u, 2);
2240 case 407: /* sthx */
2241 case 439: /* sthux */
2242 op->type = MKOP(STORE, u, 2);
2245 #ifdef __powerpc64__
2246 case 532: /* ldbrx */
2247 op->type = MKOP(LOAD, BYTEREV, 8);
2251 case 533: /* lswx */
2252 op->type = MKOP(LOAD_MULTI, 0, regs->xer & 0x7f);
2255 case 534: /* lwbrx */
2256 op->type = MKOP(LOAD, BYTEREV, 4);
2259 case 597: /* lswi */
2261 rb = 32; /* # bytes to load */
2262 op->type = MKOP(LOAD_MULTI, 0, rb);
2263 op->ea = ra ? regs->gpr[ra] : 0;
2266 #ifdef CONFIG_PPC_FPU
2267 case 535: /* lfsx */
2268 case 567: /* lfsux */
2269 op->type = MKOP(LOAD_FP, u | FPCONV, 4);
2272 case 599: /* lfdx */
2273 case 631: /* lfdux */
2274 op->type = MKOP(LOAD_FP, u, 8);
2277 case 663: /* stfsx */
2278 case 695: /* stfsux */
2279 op->type = MKOP(STORE_FP, u | FPCONV, 4);
2282 case 727: /* stfdx */
2283 case 759: /* stfdux */
2284 op->type = MKOP(STORE_FP, u, 8);
2287 #ifdef __powerpc64__
2288 case 791: /* lfdpx */
2289 op->type = MKOP(LOAD_FP, 0, 16);
2292 case 855: /* lfiwax */
2293 op->type = MKOP(LOAD_FP, SIGNEXT, 4);
2296 case 887: /* lfiwzx */
2297 op->type = MKOP(LOAD_FP, 0, 4);
2300 case 919: /* stfdpx */
2301 op->type = MKOP(STORE_FP, 0, 16);
2304 case 983: /* stfiwx */
2305 op->type = MKOP(STORE_FP, 0, 4);
2307 #endif /* __powerpc64__ */
2308 #endif /* CONFIG_PPC_FPU */
2310 #ifdef __powerpc64__
2311 case 660: /* stdbrx */
2312 op->type = MKOP(STORE, BYTEREV, 8);
2313 op->val = byterev_8(regs->gpr[rd]);
2317 case 661: /* stswx */
2318 op->type = MKOP(STORE_MULTI, 0, regs->xer & 0x7f);
2321 case 662: /* stwbrx */
2322 op->type = MKOP(STORE, BYTEREV, 4);
2323 op->val = byterev_4(regs->gpr[rd]);
2326 case 725: /* stswi */
2328 rb = 32; /* # bytes to store */
2329 op->type = MKOP(STORE_MULTI, 0, rb);
2330 op->ea = ra ? regs->gpr[ra] : 0;
2333 case 790: /* lhbrx */
2334 op->type = MKOP(LOAD, BYTEREV, 2);
2337 case 918: /* sthbrx */
2338 op->type = MKOP(STORE, BYTEREV, 2);
2339 op->val = byterev_2(regs->gpr[rd]);
2343 case 12: /* lxsiwzx */
2344 op->reg = rd | ((word & 1) << 5);
2345 op->type = MKOP(LOAD_VSX, 0, 4);
2346 op->element_size = 8;
2349 case 76: /* lxsiwax */
2350 op->reg = rd | ((word & 1) << 5);
2351 op->type = MKOP(LOAD_VSX, SIGNEXT, 4);
2352 op->element_size = 8;
2355 case 140: /* stxsiwx */
2356 op->reg = rd | ((word & 1) << 5);
2357 op->type = MKOP(STORE_VSX, 0, 4);
2358 op->element_size = 8;
2361 case 268: /* lxvx */
2362 op->reg = rd | ((word & 1) << 5);
2363 op->type = MKOP(LOAD_VSX, 0, 16);
2364 op->element_size = 16;
2365 op->vsx_flags = VSX_CHECK_VEC;
2368 case 269: /* lxvl */
2369 case 301: { /* lxvll */
2371 op->reg = rd | ((word & 1) << 5);
2372 op->ea = ra ? regs->gpr[ra] : 0;
2373 nb = regs->gpr[rb] & 0xff;
2376 op->type = MKOP(LOAD_VSX, 0, nb);
2377 op->element_size = 16;
2378 op->vsx_flags = ((word & 0x20) ? VSX_LDLEFT : 0) |
2382 case 332: /* lxvdsx */
2383 op->reg = rd | ((word & 1) << 5);
2384 op->type = MKOP(LOAD_VSX, 0, 8);
2385 op->element_size = 8;
2386 op->vsx_flags = VSX_SPLAT;
2389 case 364: /* lxvwsx */
2390 op->reg = rd | ((word & 1) << 5);
2391 op->type = MKOP(LOAD_VSX, 0, 4);
2392 op->element_size = 4;
2393 op->vsx_flags = VSX_SPLAT | VSX_CHECK_VEC;
2396 case 396: /* stxvx */
2397 op->reg = rd | ((word & 1) << 5);
2398 op->type = MKOP(STORE_VSX, 0, 16);
2399 op->element_size = 16;
2400 op->vsx_flags = VSX_CHECK_VEC;
2403 case 397: /* stxvl */
2404 case 429: { /* stxvll */
2406 op->reg = rd | ((word & 1) << 5);
2407 op->ea = ra ? regs->gpr[ra] : 0;
2408 nb = regs->gpr[rb] & 0xff;
2411 op->type = MKOP(STORE_VSX, 0, nb);
2412 op->element_size = 16;
2413 op->vsx_flags = ((word & 0x20) ? VSX_LDLEFT : 0) |
2417 case 524: /* lxsspx */
2418 op->reg = rd | ((word & 1) << 5);
2419 op->type = MKOP(LOAD_VSX, 0, 4);
2420 op->element_size = 8;
2421 op->vsx_flags = VSX_FPCONV;
2424 case 588: /* lxsdx */
2425 op->reg = rd | ((word & 1) << 5);
2426 op->type = MKOP(LOAD_VSX, 0, 8);
2427 op->element_size = 8;
2430 case 652: /* stxsspx */
2431 op->reg = rd | ((word & 1) << 5);
2432 op->type = MKOP(STORE_VSX, 0, 4);
2433 op->element_size = 8;
2434 op->vsx_flags = VSX_FPCONV;
2437 case 716: /* stxsdx */
2438 op->reg = rd | ((word & 1) << 5);
2439 op->type = MKOP(STORE_VSX, 0, 8);
2440 op->element_size = 8;
2443 case 780: /* lxvw4x */
2444 op->reg = rd | ((word & 1) << 5);
2445 op->type = MKOP(LOAD_VSX, 0, 16);
2446 op->element_size = 4;
2449 case 781: /* lxsibzx */
2450 op->reg = rd | ((word & 1) << 5);
2451 op->type = MKOP(LOAD_VSX, 0, 1);
2452 op->element_size = 8;
2453 op->vsx_flags = VSX_CHECK_VEC;
2456 case 812: /* lxvh8x */
2457 op->reg = rd | ((word & 1) << 5);
2458 op->type = MKOP(LOAD_VSX, 0, 16);
2459 op->element_size = 2;
2460 op->vsx_flags = VSX_CHECK_VEC;
2463 case 813: /* lxsihzx */
2464 op->reg = rd | ((word & 1) << 5);
2465 op->type = MKOP(LOAD_VSX, 0, 2);
2466 op->element_size = 8;
2467 op->vsx_flags = VSX_CHECK_VEC;
2470 case 844: /* lxvd2x */
2471 op->reg = rd | ((word & 1) << 5);
2472 op->type = MKOP(LOAD_VSX, 0, 16);
2473 op->element_size = 8;
2476 case 876: /* lxvb16x */
2477 op->reg = rd | ((word & 1) << 5);
2478 op->type = MKOP(LOAD_VSX, 0, 16);
2479 op->element_size = 1;
2480 op->vsx_flags = VSX_CHECK_VEC;
2483 case 908: /* stxvw4x */
2484 op->reg = rd | ((word & 1) << 5);
2485 op->type = MKOP(STORE_VSX, 0, 16);
2486 op->element_size = 4;
2489 case 909: /* stxsibx */
2490 op->reg = rd | ((word & 1) << 5);
2491 op->type = MKOP(STORE_VSX, 0, 1);
2492 op->element_size = 8;
2493 op->vsx_flags = VSX_CHECK_VEC;
2496 case 940: /* stxvh8x */
2497 op->reg = rd | ((word & 1) << 5);
2498 op->type = MKOP(STORE_VSX, 0, 16);
2499 op->element_size = 2;
2500 op->vsx_flags = VSX_CHECK_VEC;
2503 case 941: /* stxsihx */
2504 op->reg = rd | ((word & 1) << 5);
2505 op->type = MKOP(STORE_VSX, 0, 2);
2506 op->element_size = 8;
2507 op->vsx_flags = VSX_CHECK_VEC;
2510 case 972: /* stxvd2x */
2511 op->reg = rd | ((word & 1) << 5);
2512 op->type = MKOP(STORE_VSX, 0, 16);
2513 op->element_size = 8;
2516 case 1004: /* stxvb16x */
2517 op->reg = rd | ((word & 1) << 5);
2518 op->type = MKOP(STORE_VSX, 0, 16);
2519 op->element_size = 1;
2520 op->vsx_flags = VSX_CHECK_VEC;
2523 #endif /* CONFIG_VSX */
2529 op->type = MKOP(LOAD, u, 4);
2530 op->ea = dform_ea(word, regs);
2535 op->type = MKOP(LOAD, u, 1);
2536 op->ea = dform_ea(word, regs);
2541 op->type = MKOP(STORE, u, 4);
2542 op->ea = dform_ea(word, regs);
2547 op->type = MKOP(STORE, u, 1);
2548 op->ea = dform_ea(word, regs);
2553 op->type = MKOP(LOAD, u, 2);
2554 op->ea = dform_ea(word, regs);
2559 op->type = MKOP(LOAD, SIGNEXT | u, 2);
2560 op->ea = dform_ea(word, regs);
2565 op->type = MKOP(STORE, u, 2);
2566 op->ea = dform_ea(word, regs);
2571 break; /* invalid form, ra in range to load */
2572 op->type = MKOP(LOAD_MULTI, 0, 4 * (32 - rd));
2573 op->ea = dform_ea(word, regs);
2577 op->type = MKOP(STORE_MULTI, 0, 4 * (32 - rd));
2578 op->ea = dform_ea(word, regs);
2581 #ifdef CONFIG_PPC_FPU
2584 op->type = MKOP(LOAD_FP, u | FPCONV, 4);
2585 op->ea = dform_ea(word, regs);
2590 op->type = MKOP(LOAD_FP, u, 8);
2591 op->ea = dform_ea(word, regs);
2595 case 53: /* stfsu */
2596 op->type = MKOP(STORE_FP, u | FPCONV, 4);
2597 op->ea = dform_ea(word, regs);
2601 case 55: /* stfdu */
2602 op->type = MKOP(STORE_FP, u, 8);
2603 op->ea = dform_ea(word, regs);
2607 #ifdef __powerpc64__
2609 if (!((rd & 1) || (rd == ra)))
2610 op->type = MKOP(LOAD, 0, 16);
2611 op->ea = dqform_ea(word, regs);
2616 case 57: /* lfdp, lxsd, lxssp */
2617 op->ea = dsform_ea(word, regs);
2621 break; /* reg must be even */
2622 op->type = MKOP(LOAD_FP, 0, 16);
2626 op->type = MKOP(LOAD_VSX, 0, 8);
2627 op->element_size = 8;
2628 op->vsx_flags = VSX_CHECK_VEC;
2632 op->type = MKOP(LOAD_VSX, 0, 4);
2633 op->element_size = 8;
2634 op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2638 #endif /* CONFIG_VSX */
2640 #ifdef __powerpc64__
2641 case 58: /* ld[u], lwa */
2642 op->ea = dsform_ea(word, regs);
2645 op->type = MKOP(LOAD, 0, 8);
2648 op->type = MKOP(LOAD, UPDATE, 8);
2651 op->type = MKOP(LOAD, SIGNEXT, 4);
2658 case 61: /* stfdp, lxv, stxsd, stxssp, stxv */
2660 case 0: /* stfdp with LSB of DS field = 0 */
2661 case 4: /* stfdp with LSB of DS field = 1 */
2662 op->ea = dsform_ea(word, regs);
2663 op->type = MKOP(STORE_FP, 0, 16);
2667 op->ea = dqform_ea(word, regs);
2670 op->type = MKOP(LOAD_VSX, 0, 16);
2671 op->element_size = 16;
2672 op->vsx_flags = VSX_CHECK_VEC;
2675 case 2: /* stxsd with LSB of DS field = 0 */
2676 case 6: /* stxsd with LSB of DS field = 1 */
2677 op->ea = dsform_ea(word, regs);
2679 op->type = MKOP(STORE_VSX, 0, 8);
2680 op->element_size = 8;
2681 op->vsx_flags = VSX_CHECK_VEC;
2684 case 3: /* stxssp with LSB of DS field = 0 */
2685 case 7: /* stxssp with LSB of DS field = 1 */
2686 op->ea = dsform_ea(word, regs);
2688 op->type = MKOP(STORE_VSX, 0, 4);
2689 op->element_size = 8;
2690 op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2694 op->ea = dqform_ea(word, regs);
2697 op->type = MKOP(STORE_VSX, 0, 16);
2698 op->element_size = 16;
2699 op->vsx_flags = VSX_CHECK_VEC;
2703 #endif /* CONFIG_VSX */
2705 #ifdef __powerpc64__
2706 case 62: /* std[u] */
2707 op->ea = dsform_ea(word, regs);
2710 op->type = MKOP(STORE, 0, 8);
2713 op->type = MKOP(STORE, UPDATE, 8);
2717 op->type = MKOP(STORE, 0, 16);
2721 case 1: /* Prefixed instructions */
2722 prefix_r = word & (1ul << 20);
2723 ra = (suffix >> 16) & 0x1f;
2724 op->update_reg = ra;
2725 rd = (suffix >> 21) & 0x1f;
2727 op->val = regs->gpr[rd];
2729 suffixopcode = get_op(suffix);
2730 prefixtype = (word >> 24) & 0x3;
2731 switch (prefixtype) {
2732 case 0: /* Type 00 Eight-Byte Load/Store */
2735 op->ea = mlsd_8lsd_ea(word, suffix, regs);
2736 switch (suffixopcode) {
2738 op->type = MKOP(LOAD, PREFIXED | SIGNEXT, 4);
2740 case 42: /* plxsd */
2742 op->type = MKOP(LOAD_VSX, PREFIXED, 8);
2743 op->element_size = 8;
2744 op->vsx_flags = VSX_CHECK_VEC;
2746 case 43: /* plxssp */
2748 op->type = MKOP(LOAD_VSX, PREFIXED, 4);
2749 op->element_size = 8;
2750 op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2752 case 46: /* pstxsd */
2754 op->type = MKOP(STORE_VSX, PREFIXED, 8);
2755 op->element_size = 8;
2756 op->vsx_flags = VSX_CHECK_VEC;
2758 case 47: /* pstxssp */
2760 op->type = MKOP(STORE_VSX, PREFIXED, 4);
2761 op->element_size = 8;
2762 op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2764 case 51: /* plxv1 */
2767 case 50: /* plxv0 */
2768 op->type = MKOP(LOAD_VSX, PREFIXED, 16);
2769 op->element_size = 16;
2770 op->vsx_flags = VSX_CHECK_VEC;
2772 case 55: /* pstxv1 */
2775 case 54: /* pstxv0 */
2776 op->type = MKOP(STORE_VSX, PREFIXED, 16);
2777 op->element_size = 16;
2778 op->vsx_flags = VSX_CHECK_VEC;
2781 op->type = MKOP(LOAD, PREFIXED, 16);
2784 op->type = MKOP(LOAD, PREFIXED, 8);
2787 op->type = MKOP(STORE, PREFIXED, 16);
2790 op->type = MKOP(STORE, PREFIXED, 8);
2794 case 1: /* Type 01 Eight-Byte Register-to-Register */
2796 case 2: /* Type 10 Modified Load/Store */
2799 op->ea = mlsd_8lsd_ea(word, suffix, regs);
2800 switch (suffixopcode) {
2802 op->type = MKOP(LOAD, PREFIXED, 4);
2805 op->type = MKOP(LOAD, PREFIXED, 1);
2808 op->type = MKOP(STORE, PREFIXED, 4);
2811 op->type = MKOP(STORE, PREFIXED, 1);
2814 op->type = MKOP(LOAD, PREFIXED, 2);
2817 op->type = MKOP(LOAD, PREFIXED | SIGNEXT, 2);
2820 op->type = MKOP(STORE, PREFIXED, 2);
2823 op->type = MKOP(LOAD_FP, PREFIXED | FPCONV, 4);
2826 op->type = MKOP(LOAD_FP, PREFIXED, 8);
2828 case 52: /* pstfs */
2829 op->type = MKOP(STORE_FP, PREFIXED | FPCONV, 4);
2831 case 54: /* pstfd */
2832 op->type = MKOP(STORE_FP, PREFIXED, 8);
2836 case 3: /* Type 11 Modified Register-to-Register */
2839 #endif /* __powerpc64__ */
2844 if ((GETTYPE(op->type) == LOAD_VSX ||
2845 GETTYPE(op->type) == STORE_VSX) &&
2846 !cpu_has_feature(CPU_FTR_VSX)) {
2849 #endif /* CONFIG_VSX */
2870 op->type = INTERRUPT | 0x700;
2871 op->val = SRR1_PROGPRIV;
2875 op->type = INTERRUPT | 0x700;
2876 op->val = SRR1_PROGTRAP;
2879 EXPORT_SYMBOL_GPL(analyse_instr);
2880 NOKPROBE_SYMBOL(analyse_instr);
2883 * For PPC32 we always use stwu with r1 to change the stack pointer.
2884 * So this emulated store may corrupt the exception frame; we
2885 * have to provide the exception frame trampoline, which is pushed
2886 * below the kprobed function stack. So we only update gpr[1] and
2887 * don't emulate the real store operation. We will do the real store
2888 * operation safely in the exception return code by checking this flag.
2890 static nokprobe_inline int handle_stack_update(unsigned long ea, struct pt_regs *regs)
2894 * Check if this store would overflow the kernel stack
2896 if (ea - STACK_INT_FRAME_SIZE <= current->thread.ksp_limit) {
2897 printk(KERN_CRIT "Can't kprobe this since kernel stack would overflow.\n");
2900 #endif /* CONFIG_PPC32 */
2902 * Check if the flag is already set, since that would mean we'd
2903 * lose the previous value.
2905 WARN_ON(test_thread_flag(TIF_EMULATE_STACK_STORE));
2906 set_thread_flag(TIF_EMULATE_STACK_STORE);
2910 static nokprobe_inline void do_signext(unsigned long *valp, int size)
2914 *valp = (signed short) *valp;
2917 *valp = (signed int) *valp;
2922 static nokprobe_inline void do_byterev(unsigned long *valp, int size)
2926 *valp = byterev_2(*valp);
2929 *valp = byterev_4(*valp);
2931 #ifdef __powerpc64__
2933 *valp = byterev_8(*valp);
2940 * Emulate an instruction that can be executed just by updating
2943 void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op)
2945 unsigned long next_pc;
2947 next_pc = truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op->type));
2948 switch (GETTYPE(op->type)) {
2950 if (op->type & SETREG)
2951 regs->gpr[op->reg] = op->val;
2952 if (op->type & SETCC)
2953 regs->ccr = op->ccval;
2954 if (op->type & SETXER)
2955 regs->xer = op->xerval;
2959 if (op->type & SETLK)
2960 regs->link = next_pc;
2961 if (op->type & BRTAKEN)
2963 if (op->type & DECCTR)
2968 switch (op->type & BARRIER_MASK) {
2978 case BARRIER_LWSYNC:
2979 asm volatile("lwsync" : : : "memory");
2981 case BARRIER_PTESYNC:
2982 asm volatile("ptesync" : : : "memory");
2990 regs->gpr[op->reg] = regs->xer & 0xffffffffUL;
2993 regs->gpr[op->reg] = regs->link;
2996 regs->gpr[op->reg] = regs->ctr;
3006 regs->xer = op->val & 0xffffffffUL;
3009 regs->link = op->val;
3012 regs->ctr = op->val;
3022 regs->nip = next_pc;
3024 NOKPROBE_SYMBOL(emulate_update_regs);
3027 * Emulate a previously-analysed load or store instruction.
3028 * Return values are:
3029 * 0 = instruction emulated successfully
3030 * -EFAULT = address out of range or access faulted (regs->dar
3031 * contains the faulting address)
3032 * -EACCES = misaligned access, instruction requires alignment
3033 * -EINVAL = unknown operation in *op
3035 int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op)
3037 int err, size, type;
3045 size = GETSIZE(op->type);
3046 type = GETTYPE(op->type);
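/*
 * cross_endian is true when the context being emulated runs with the
 * opposite endianness from the kernel; data moved by the emulated access
 * then has to be byte-reversed.
 */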
3047 cross_endian = (regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE);
3048 ea = truncate_if_32bit(regs->msr, op->ea);
3052 if (ea & (size - 1))
3053 return -EACCES; /* can't handle misaligned */
3054 if (!address_ok(regs, ea, size))
3059 #ifdef __powerpc64__
3061 __get_user_asmx(val, ea, err, "lbarx");
3064 __get_user_asmx(val, ea, err, "lharx");
3068 __get_user_asmx(val, ea, err, "lwarx");
3070 #ifdef __powerpc64__
3072 __get_user_asmx(val, ea, err, "ldarx");
3075 err = do_lqarx(ea, &regs->gpr[op->reg]);
3086 regs->gpr[op->reg] = val;
3090 if (ea & (size - 1))
3091 return -EACCES; /* can't handle misaligned */
3092 if (!address_ok(regs, ea, size))
3096 #ifdef __powerpc64__
3098 __put_user_asmx(op->val, ea, err, "stbcx.", cr);
3101 __put_user_asmx(op->val, ea, err, "stbcx.", cr);
3105 __put_user_asmx(op->val, ea, err, "stwcx.", cr);
3107 #ifdef __powerpc64__
3109 __put_user_asmx(op->val, ea, err, "stdcx.", cr);
3112 err = do_stqcx(ea, regs->gpr[op->reg],
3113 regs->gpr[op->reg + 1], &cr);
3120 regs->ccr = (regs->ccr & 0x0fffffff) |
3122 ((regs->xer >> 3) & 0x10000000);
3128 #ifdef __powerpc64__
3130 err = emulate_lq(regs, ea, op->reg, cross_endian);
3134 err = read_mem(&regs->gpr[op->reg], ea, size, regs);
3136 if (op->type & SIGNEXT)
3137 do_signext(&regs->gpr[op->reg], size);
3138 if ((op->type & BYTEREV) == (cross_endian ? 0 : BYTEREV))
3139 do_byterev(&regs->gpr[op->reg], size);
3143 #ifdef CONFIG_PPC_FPU
3146 * If the instruction is in userspace, we can emulate it even
3147 * if the FP/VMX/VSX state is not live, because we have the state
3148 * stored in the thread_struct. If the instruction is in
3149 * the kernel, we must not touch the state in the thread_struct.
3151 if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP))
3153 err = do_fp_load(op, ea, regs, cross_endian);
3156 #ifdef CONFIG_ALTIVEC
3158 if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC))
3160 err = do_vec_load(op->reg, ea, size, regs, cross_endian);
3165 unsigned long msrbit = MSR_VSX;
3168 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
3169 * when the target of the instruction is a vector register.
3171 if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC))
3173 if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit))
3175 err = do_vsx_load(op, ea, regs, cross_endian);
3180 if (!address_ok(regs, ea, size))
3183 for (i = 0; i < size; i += 4) {
3184 unsigned int v32 = 0;
3189 err = copy_mem_in((u8 *) &v32, ea, nb, regs);
3192 if (unlikely(cross_endian))
3193 v32 = byterev_4(v32);
3194 regs->gpr[rd] = v32;
3196 /* reg number wraps from 31 to 0 for lsw[ix] */
3197 rd = (rd + 1) & 0x1f;
3202 #ifdef __powerpc64__
3204 err = emulate_stq(regs, ea, op->reg, cross_endian);
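/*
 * A kernel-mode stwu/stdu of r1 close to the current stack pointer is the
 * probed function pushing its stack frame; defer it via
 * handle_stack_update() (see the comment above that function).
 */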
3208 if ((op->type & UPDATE) && size == sizeof(long) &&
3209 op->reg == 1 && op->update_reg == 1 &&
3210 !(regs->msr & MSR_PR) &&
3211 ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) {
3212 err = handle_stack_update(ea, regs);
3215 if (unlikely(cross_endian))
3216 do_byterev(&op->val, size);
3217 err = write_mem(op->val, ea, size, regs);
3220 #ifdef CONFIG_PPC_FPU
3222 if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP))
3224 err = do_fp_store(op, ea, regs, cross_endian);
3227 #ifdef CONFIG_ALTIVEC
3229 if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC))
3231 err = do_vec_store(op->reg, ea, size, regs, cross_endian);
3236 unsigned long msrbit = MSR_VSX;
3239 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
3240 * when the target of the instruction is a vector register.
3242 if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC))
3244 if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit))
3246 err = do_vsx_store(op, ea, regs, cross_endian);
3251 if (!address_ok(regs, ea, size))
3254 for (i = 0; i < size; i += 4) {
3255 unsigned int v32 = regs->gpr[rd];
3260 if (unlikely(cross_endian))
3261 v32 = byterev_4(v32);
3262 err = copy_mem_out((u8 *) &v32, ea, nb, regs);
3266 /* reg number wraps from 31 to 0 for stsw[ix] */
3267 rd = (rd + 1) & 0x1f;
3278 if (op->type & UPDATE)
3279 regs->gpr[op->update_reg] = op->ea;
3283 NOKPROBE_SYMBOL(emulate_loadstore);
3286 * Emulate instructions that cause a transfer of control,
3287 * loads and stores, and a few other instructions.
3288 * Returns 1 if the step was emulated, 0 if not,
3289 * or -1 if the instruction is one that should not be stepped,
3290 * such as an rfid, or a mtmsrd that would clear MSR_RI.
3292 int emulate_step(struct pt_regs *regs, struct ppc_inst instr)
3294 struct instruction_op op;
3299 r = analyse_instr(&op, regs, instr);
3303 emulate_update_regs(regs, &op);
3308 type = GETTYPE(op.type);
3310 if (OP_IS_LOAD_STORE(type)) {
3311 err = emulate_loadstore(regs, &op);
3319 ea = truncate_if_32bit(regs->msr, op.ea);
3320 if (!address_ok(regs, ea, 8))
3322 switch (op.type & CACHEOP_MASK) {
3324 __cacheop_user_asmx(ea, err, "dcbst");
3327 __cacheop_user_asmx(ea, err, "dcbf");
3331 prefetchw((void *) ea);
3335 prefetch((void *) ea);
3338 __cacheop_user_asmx(ea, err, "icbi");
3341 err = emulate_dcbz(ea, regs);
3351 regs->gpr[op.reg] = regs->msr & MSR_MASK;
3355 val = regs->gpr[op.reg];
3356 if ((val & MSR_RI) == 0)
3357 /* can't step mtmsr[d] that would clear MSR_RI */
3359 /* here op.val is the mask of bits to change */
3360 regs->msr = (regs->msr & ~op.val) | (val & op.val);
3364 case SYSCALL: /* sc */
3366 * N.B. this uses knowledge about how the syscall
3367 * entry code works. If that is changed, this will
3368 * need to be changed also.
3370 if (IS_ENABLED(CONFIG_PPC_FAST_ENDIAN_SWITCH) &&
3371 cpu_has_feature(CPU_FTR_REAL_LE) &&
3372 regs->gpr[0] == 0x1ebe) {
3373 regs->msr ^= MSR_LE;
3376 regs->gpr[9] = regs->gpr[13];
3377 regs->gpr[10] = MSR_KERNEL;
3378 regs->gpr[11] = regs->nip + 4;
3379 regs->gpr[12] = regs->msr & MSR_MASK;
3380 regs->gpr[13] = (unsigned long) get_paca();
3381 regs->nip = (unsigned long) &system_call_common;
3382 regs->msr = MSR_KERNEL;
3385 #ifdef CONFIG_PPC_BOOK3S_64
3386 case SYSCALL_VECTORED_0: /* scv 0 */
3387 regs->gpr[9] = regs->gpr[13];
3388 regs->gpr[10] = MSR_KERNEL;
3389 regs->gpr[11] = regs->nip + 4;
3390 regs->gpr[12] = regs->msr & MSR_MASK;
3391 regs->gpr[13] = (unsigned long) get_paca();
3392 regs->nip = (unsigned long) &system_call_vectored_emulate;
3393 regs->msr = MSR_KERNEL;
3404 regs->nip = truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op.type));
3407 NOKPROBE_SYMBOL(emulate_step);