1 // SPDX-License-Identifier: GPL-2.0-or-later
5 * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
7 #include <linux/kernel.h>
8 #include <linux/kprobes.h>
9 #include <linux/ptrace.h>
10 #include <linux/prefetch.h>
11 #include <asm/sstep.h>
12 #include <asm/processor.h>
13 #include <linux/uaccess.h>
14 #include <asm/cpu_has_feature.h>
15 #include <asm/cputable.h>
16 #include <asm/disassemble.h>
18 extern char system_call_common[];
19 extern char system_call_vectored_emulate[];
22 /* Bits in SRR1 that are copied from MSR */
23 #define MSR_MASK 0xffffffff87c0ffffUL
25 #define MSR_MASK 0x87c0ffff
29 #define XER_SO 0x80000000U
30 #define XER_OV 0x40000000U
31 #define XER_CA 0x20000000U
32 #define XER_OV32 0x00080000U
33 #define XER_CA32 0x00040000U
36 #define VSX_REGISTER_XTP(rd) ((((rd) & 1) << 5) | ((rd) & 0xfe))
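/*
 * Worked examples of the XTP mapping above (straight arithmetic on the
 * macro, for illustration):
 *   VSX_REGISTER_XTP(2)  == 2
 *   VSX_REGISTER_XTP(3)  == 34
 *   VSX_REGISTER_XTP(31) == 62
 * The low bit of the 5-bit field selects VSRs 32-63 and the remaining
 * bits give the even-numbered first register of the pair.
 */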
41 * Functions in ldstfp.S
43 extern void get_fpr(int rn, double *p);
44 extern void put_fpr(int rn, const double *p);
45 extern void get_vr(int rn, __vector128 *p);
46 extern void put_vr(int rn, __vector128 *p);
47 extern void load_vsrn(int vsr, const void *p);
48 extern void store_vsrn(int vsr, void *p);
49 extern void conv_sp_to_dp(const float *sp, double *dp);
50 extern void conv_dp_to_sp(const double *dp, float *sp);
57 extern int do_lq(unsigned long ea, unsigned long *regs);
58 extern int do_stq(unsigned long ea, unsigned long val0, unsigned long val1);
59 extern int do_lqarx(unsigned long ea, unsigned long *regs);
60 extern int do_stqcx(unsigned long ea, unsigned long val0, unsigned long val1,
64 #ifdef __LITTLE_ENDIAN__
73 * Emulate the truncation of 64-bit values in 32-bit mode.
75 static nokprobe_inline unsigned long truncate_if_32bit(unsigned long msr,
79 if ((msr & MSR_64BIT) == 0)
86 * Determine whether a conditional branch instruction would branch.
88 static nokprobe_inline int branch_taken(unsigned int instr,
89 const struct pt_regs *regs,
90 struct instruction_op *op)
92 unsigned int bo = (instr >> 21) & 0x1f;
96 /* decrement counter */
98 if (((bo >> 1) & 1) ^ (regs->ctr == 1))
101 if ((bo & 0x10) == 0) {
102 /* check bit from CR */
103 bi = (instr >> 16) & 0x1f;
104 if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))
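/*
 * Illustrative example (not from the original source): for bdnz
 * (bc 16,0,target), bo == 0x10, so the CTR is used ((bo & 4) == 0) and
 * the CR test is skipped ((bo & 0x10) != 0); the branch is taken
 * whenever the decremented CTR is non-zero.
 */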
110 static nokprobe_inline long address_ok(struct pt_regs *regs,
111 unsigned long ea, int nb)
113 if (!user_mode(regs))
115 if (__access_ok(ea, nb))
117 if (__access_ok(ea, 1))
118 /* Access overlaps the end of the user region */
119 regs->dar = TASK_SIZE_MAX - 1;
126 * Calculate effective address for a D-form instruction
128 static nokprobe_inline unsigned long dform_ea(unsigned int instr,
129 const struct pt_regs *regs)
134 ra = (instr >> 16) & 0x1f;
135 ea = (signed short) instr; /* sign-extend */
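/*
 * Illustrative example: for lwz r3,8(r4) the instruction word is
 * 0x80640008, so ra == 4 and the displacement sign-extends to 8,
 * giving ea == regs->gpr[4] + 8 (ra == 0 would mean a base of zero).
 */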
144 * Calculate effective address for a DS-form instruction
146 static nokprobe_inline unsigned long dsform_ea(unsigned int instr,
147 const struct pt_regs *regs)
152 ra = (instr >> 16) & 0x1f;
153 ea = (signed short) (instr & ~3); /* sign-extend */
161 * Calculate effective address for a DQ-form instruction
163 static nokprobe_inline unsigned long dqform_ea(unsigned int instr,
164 const struct pt_regs *regs)
169 ra = (instr >> 16) & 0x1f;
170 ea = (signed short) (instr & ~0xf); /* sign-extend */
176 #endif /* __powerpc64__ */
179 * Calculate effective address for an X-form instruction
181 static nokprobe_inline unsigned long xform_ea(unsigned int instr,
182 const struct pt_regs *regs)
187 ra = (instr >> 16) & 0x1f;
188 rb = (instr >> 11) & 0x1f;
197 * Calculate effective address for a MLS:D-form / 8LS:D-form
198 * prefixed instruction
200 static nokprobe_inline unsigned long mlsd_8lsd_ea(unsigned int instr,
202 const struct pt_regs *regs)
206 unsigned long ea, d0, d1, d;
208 prefix_r = GET_PREFIX_R(instr);
209 ra = GET_PREFIX_RA(suffix);
211 d0 = instr & 0x3ffff;
212 d1 = suffix & 0xffff;
216 * sign extend a 34 bit number
218 dd = (unsigned int)(d >> 2);
220 ea = (ea << 2) | (d & 0x3);
224 else if (!prefix_r && !ra)
225 ; /* Leave ea as is */
230 * (prefix_r && ra) is an invalid form. Should already be
231 * checked for by caller!
238 * Return the largest power of 2, not greater than sizeof(unsigned long),
239 * such that x is a multiple of it.
241 static nokprobe_inline unsigned long max_align(unsigned long x)
243 x |= sizeof(unsigned long);
244 return x & -x; /* isolates rightmost bit */
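/*
 * Worked examples, assuming a 64-bit unsigned long:
 *   max_align(0x1003) == 1, max_align(0x1006) == 2,
 *   max_align(0x1004) == 4, max_align(0x1000) == 8.
 */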
247 static nokprobe_inline unsigned long byterev_2(unsigned long x)
249 return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
252 static nokprobe_inline unsigned long byterev_4(unsigned long x)
254 return ((x >> 24) & 0xff) | ((x >> 8) & 0xff00) |
255 ((x & 0xff00) << 8) | ((x & 0xff) << 24);
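/* Examples: byterev_2(0x1122) == 0x2211, byterev_4(0x11223344) == 0x44332211. */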
259 static nokprobe_inline unsigned long byterev_8(unsigned long x)
261 return (byterev_4(x) << 32) | byterev_4(x >> 32);
265 static nokprobe_inline void do_byte_reverse(void *ptr, int nb)
269 *(u16 *)ptr = byterev_2(*(u16 *)ptr);
272 *(u32 *)ptr = byterev_4(*(u32 *)ptr);
276 *(unsigned long *)ptr = byterev_8(*(unsigned long *)ptr);
279 unsigned long *up = (unsigned long *)ptr;
281 tmp = byterev_8(up[0]);
282 up[0] = byterev_8(up[1]);
287 unsigned long *up = (unsigned long *)ptr;
290 tmp = byterev_8(up[0]);
291 up[0] = byterev_8(up[3]);
293 tmp = byterev_8(up[2]);
294 up[2] = byterev_8(up[1]);
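/*
 * For nb == 16 the two 8-byte words are each byte-reversed and then
 * swapped, so a buffer holding 00 01 .. 0f ends up as 0f 0e .. 00
 * (a full 16-byte reversal); nb == 32 does the same over four words.
 */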
305 static nokprobe_inline int read_mem_aligned(unsigned long *dest,
306 unsigned long ea, int nb,
307 struct pt_regs *regs)
314 err = __get_user(x, (unsigned char __user *) ea);
317 err = __get_user(x, (unsigned short __user *) ea);
320 err = __get_user(x, (unsigned int __user *) ea);
324 err = __get_user(x, (unsigned long __user *) ea);
336 * Copy from userspace to a buffer, using the largest possible
337 * aligned accesses, up to sizeof(long).
339 static nokprobe_inline int copy_mem_in(u8 *dest, unsigned long ea, int nb,
340 struct pt_regs *regs)
345 for (; nb > 0; nb -= c) {
351 err = __get_user(*dest, (unsigned char __user *) ea);
354 err = __get_user(*(u16 *)dest,
355 (unsigned short __user *) ea);
358 err = __get_user(*(u32 *)dest,
359 (unsigned int __user *) ea);
363 err = __get_user(*(unsigned long *)dest,
364 (unsigned long __user *) ea);
378 static nokprobe_inline int read_mem_unaligned(unsigned long *dest,
379 unsigned long ea, int nb,
380 struct pt_regs *regs)
384 u8 b[sizeof(unsigned long)];
390 i = IS_BE ? sizeof(unsigned long) - nb : 0;
391 err = copy_mem_in(&u.b[i], ea, nb, regs);
398 * Read memory at address ea for nb bytes, return 0 for success
399 * or -EFAULT if an error occurred. N.B. nb must be 1, 2, 4 or 8.
400 * If nb < sizeof(long), the result is right-justified on BE systems.
402 static int read_mem(unsigned long *dest, unsigned long ea, int nb,
403 struct pt_regs *regs)
405 if (!address_ok(regs, ea, nb))
407 if ((ea & (nb - 1)) == 0)
408 return read_mem_aligned(dest, ea, nb, regs);
409 return read_mem_unaligned(dest, ea, nb, regs);
411 NOKPROBE_SYMBOL(read_mem);
413 static nokprobe_inline int write_mem_aligned(unsigned long val,
414 unsigned long ea, int nb,
415 struct pt_regs *regs)
421 err = __put_user(val, (unsigned char __user *) ea);
424 err = __put_user(val, (unsigned short __user *) ea);
427 err = __put_user(val, (unsigned int __user *) ea);
431 err = __put_user(val, (unsigned long __user *) ea);
441 * Copy from a buffer to userspace, using the largest possible
442 * aligned accesses, up to sizeof(long).
444 static nokprobe_inline int copy_mem_out(u8 *dest, unsigned long ea, int nb,
445 struct pt_regs *regs)
450 for (; nb > 0; nb -= c) {
456 err = __put_user(*dest, (unsigned char __user *) ea);
459 err = __put_user(*(u16 *)dest,
460 (unsigned short __user *) ea);
463 err = __put_user(*(u32 *)dest,
464 (unsigned int __user *) ea);
468 err = __put_user(*(unsigned long *)dest,
469 (unsigned long __user *) ea);
483 static nokprobe_inline int write_mem_unaligned(unsigned long val,
484 unsigned long ea, int nb,
485 struct pt_regs *regs)
489 u8 b[sizeof(unsigned long)];
494 i = IS_BE ? sizeof(unsigned long) - nb : 0;
495 return copy_mem_out(&u.b[i], ea, nb, regs);
499 * Write memory at address ea for nb bytes, return 0 for success
500 * or -EFAULT if an error occurred. N.B. nb must be 1, 2, 4 or 8.
502 static int write_mem(unsigned long val, unsigned long ea, int nb,
503 struct pt_regs *regs)
505 if (!address_ok(regs, ea, nb))
507 if ((ea & (nb - 1)) == 0)
508 return write_mem_aligned(val, ea, nb, regs);
509 return write_mem_unaligned(val, ea, nb, regs);
511 NOKPROBE_SYMBOL(write_mem);
513 #ifdef CONFIG_PPC_FPU
515 * These access either the real FP register or the image in the
516 * thread_struct, depending on regs->msr & MSR_FP.
518 static int do_fp_load(struct instruction_op *op, unsigned long ea,
519 struct pt_regs *regs, bool cross_endian)
528 u8 b[2 * sizeof(double)];
531 nb = GETSIZE(op->type);
532 if (!address_ok(regs, ea, nb))
535 err = copy_mem_in(u.b, ea, nb, regs);
538 if (unlikely(cross_endian)) {
539 do_byte_reverse(u.b, min(nb, 8));
541 do_byte_reverse(&u.b[8], 8);
545 if (op->type & FPCONV)
546 conv_sp_to_dp(&u.f, &u.d[0]);
547 else if (op->type & SIGNEXT)
552 if (regs->msr & MSR_FP)
553 put_fpr(rn, &u.d[0]);
555 current->thread.TS_FPR(rn) = u.l[0];
559 if (regs->msr & MSR_FP)
560 put_fpr(rn, &u.d[1]);
562 current->thread.TS_FPR(rn) = u.l[1];
567 NOKPROBE_SYMBOL(do_fp_load);
569 static int do_fp_store(struct instruction_op *op, unsigned long ea,
570 struct pt_regs *regs, bool cross_endian)
578 u8 b[2 * sizeof(double)];
581 nb = GETSIZE(op->type);
582 if (!address_ok(regs, ea, nb))
586 if (regs->msr & MSR_FP)
587 get_fpr(rn, &u.d[0]);
589 u.l[0] = current->thread.TS_FPR(rn);
591 if (op->type & FPCONV)
592 conv_dp_to_sp(&u.d[0], &u.f);
598 if (regs->msr & MSR_FP)
599 get_fpr(rn, &u.d[1]);
601 u.l[1] = current->thread.TS_FPR(rn);
604 if (unlikely(cross_endian)) {
605 do_byte_reverse(u.b, min(nb, 8));
607 do_byte_reverse(&u.b[8], 8);
609 return copy_mem_out(u.b, ea, nb, regs);
611 NOKPROBE_SYMBOL(do_fp_store);
614 #ifdef CONFIG_ALTIVEC
615 /* For Altivec/VMX, no need to worry about alignment */
616 static nokprobe_inline int do_vec_load(int rn, unsigned long ea,
617 int size, struct pt_regs *regs,
623 u8 b[sizeof(__vector128)];
626 if (!address_ok(regs, ea & ~0xfUL, 16))
628 /* align to multiple of size */
630 err = copy_mem_in(&u.b[ea & 0xf], ea, size, regs);
633 if (unlikely(cross_endian))
634 do_byte_reverse(&u.b[ea & 0xf], size);
636 if (regs->msr & MSR_VEC)
639 current->thread.vr_state.vr[rn] = u.v;
644 static nokprobe_inline int do_vec_store(int rn, unsigned long ea,
645 int size, struct pt_regs *regs,
650 u8 b[sizeof(__vector128)];
653 if (!address_ok(regs, ea & ~0xfUL, 16))
655 /* align to multiple of size */
659 if (regs->msr & MSR_VEC)
662 u.v = current->thread.vr_state.vr[rn];
664 if (unlikely(cross_endian))
665 do_byte_reverse(&u.b[ea & 0xf], size);
666 return copy_mem_out(&u.b[ea & 0xf], ea, size, regs);
668 #endif /* CONFIG_ALTIVEC */
671 static nokprobe_inline int emulate_lq(struct pt_regs *regs, unsigned long ea,
672 int reg, bool cross_endian)
676 if (!address_ok(regs, ea, 16))
678 /* if aligned, should be atomic */
679 if ((ea & 0xf) == 0) {
680 err = do_lq(ea, &regs->gpr[reg]);
682 err = read_mem(&regs->gpr[reg + IS_LE], ea, 8, regs);
684 err = read_mem(&regs->gpr[reg + IS_BE], ea + 8, 8, regs);
686 if (!err && unlikely(cross_endian))
687 do_byte_reverse(&regs->gpr[reg], 16);
691 static nokprobe_inline int emulate_stq(struct pt_regs *regs, unsigned long ea,
692 int reg, bool cross_endian)
695 unsigned long vals[2];
697 if (!address_ok(regs, ea, 16))
699 vals[0] = regs->gpr[reg];
700 vals[1] = regs->gpr[reg + 1];
701 if (unlikely(cross_endian))
702 do_byte_reverse(vals, 16);
704 /* if aligned, should be atomic */
706 return do_stq(ea, vals[0], vals[1]);
708 err = write_mem(vals[IS_LE], ea, 8, regs);
710 err = write_mem(vals[IS_BE], ea + 8, 8, regs);
713 #endif /* __powerpc64__ */
716 void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
717 const void *mem, bool rev)
721 const unsigned int *wp;
722 const unsigned short *hp;
723 const unsigned char *bp;
725 size = GETSIZE(op->type);
726 reg->d[0] = reg->d[1] = 0;
728 switch (op->element_size) {
732 /* whole vector; lxv[x] or lxvl[l] */
735 memcpy(reg, mem, size);
736 if (IS_LE && (op->vsx_flags & VSX_LDLEFT))
739 do_byte_reverse(reg, size);
742 /* scalar loads, lxvd2x, lxvdsx */
743 read_size = (size >= 8) ? 8 : size;
744 i = IS_LE ? 8 : 8 - read_size;
745 memcpy(&reg->b[i], mem, read_size);
747 do_byte_reverse(&reg->b[i], 8);
749 if (op->type & SIGNEXT) {
750 /* size == 4 is the only case here */
751 reg->d[IS_LE] = (signed int) reg->d[IS_LE];
752 } else if (op->vsx_flags & VSX_FPCONV) {
754 conv_sp_to_dp(&reg->fp[1 + IS_LE],
760 unsigned long v = *(unsigned long *)(mem + 8);
761 reg->d[IS_BE] = !rev ? v : byterev_8(v);
762 } else if (op->vsx_flags & VSX_SPLAT)
763 reg->d[IS_BE] = reg->d[IS_LE];
769 for (j = 0; j < size / 4; ++j) {
770 i = IS_LE ? 3 - j : j;
771 reg->w[i] = !rev ? *wp++ : byterev_4(*wp++);
773 if (op->vsx_flags & VSX_SPLAT) {
774 u32 val = reg->w[IS_LE ? 3 : 0];
776 i = IS_LE ? 3 - j : j;
784 for (j = 0; j < size / 2; ++j) {
785 i = IS_LE ? 7 - j : j;
786 reg->h[i] = !rev ? *hp++ : byterev_2(*hp++);
792 for (j = 0; j < size; ++j) {
793 i = IS_LE ? 15 - j : j;
799 EXPORT_SYMBOL_GPL(emulate_vsx_load);
800 NOKPROBE_SYMBOL(emulate_vsx_load);
802 void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg,
805 int size, write_size;
812 size = GETSIZE(op->type);
814 switch (op->element_size) {
820 /* reverse 32 bytes */
821 buf.d[0] = byterev_8(reg->d[3]);
822 buf.d[1] = byterev_8(reg->d[2]);
823 buf.d[2] = byterev_8(reg->d[1]);
824 buf.d[3] = byterev_8(reg->d[0]);
827 memcpy(mem, reg, size);
830 /* stxv, stxvx, stxvl, stxvll */
833 if (IS_LE && (op->vsx_flags & VSX_LDLEFT))
836 /* reverse 16 bytes */
837 buf.d[0] = byterev_8(reg->d[1]);
838 buf.d[1] = byterev_8(reg->d[0]);
841 memcpy(mem, reg, size);
844 /* scalar stores, stxvd2x */
845 write_size = (size >= 8) ? 8 : size;
846 i = IS_LE ? 8 : 8 - write_size;
847 if (size < 8 && op->vsx_flags & VSX_FPCONV) {
848 buf.d[0] = buf.d[1] = 0;
850 conv_dp_to_sp(&reg->dp[IS_LE], &buf.fp[1 + IS_LE]);
854 memcpy(mem, &reg->b[i], write_size);
856 memcpy(mem + 8, &reg->d[IS_BE], 8);
858 do_byte_reverse(mem, write_size);
860 do_byte_reverse(mem + 8, 8);
866 for (j = 0; j < size / 4; ++j) {
867 i = IS_LE ? 3 - j : j;
868 *wp++ = !rev ? reg->w[i] : byterev_4(reg->w[i]);
874 for (j = 0; j < size / 2; ++j) {
875 i = IS_LE ? 7 - j : j;
876 *hp++ = !rev ? reg->h[i] : byterev_2(reg->h[i]);
882 for (j = 0; j < size; ++j) {
883 i = IS_LE ? 15 - j : j;
889 EXPORT_SYMBOL_GPL(emulate_vsx_store);
890 NOKPROBE_SYMBOL(emulate_vsx_store);
892 static nokprobe_inline int do_vsx_load(struct instruction_op *op,
893 unsigned long ea, struct pt_regs *regs,
897 int i, j, nr_vsx_regs;
899 union vsx_reg buf[2];
900 int size = GETSIZE(op->type);
902 if (!address_ok(regs, ea, size) || copy_mem_in(mem, ea, size, regs))
905 nr_vsx_regs = size / sizeof(__vector128);
906 emulate_vsx_load(op, buf, mem, cross_endian);
909 /* FP regs + extensions */
910 if (regs->msr & MSR_FP) {
911 for (i = 0; i < nr_vsx_regs; i++) {
912 j = IS_LE ? nr_vsx_regs - i - 1 : i;
913 load_vsrn(reg + i, &buf[j].v);
916 for (i = 0; i < nr_vsx_regs; i++) {
917 j = IS_LE ? nr_vsx_regs - i - 1 : i;
918 current->thread.fp_state.fpr[reg + i][0] = buf[j].d[0];
919 current->thread.fp_state.fpr[reg + i][1] = buf[j].d[1];
923 if (regs->msr & MSR_VEC) {
924 for (i = 0; i < nr_vsx_regs; i++) {
925 j = IS_LE ? nr_vsx_regs - i - 1 : i;
926 load_vsrn(reg + i, &buf[j].v);
929 for (i = 0; i < nr_vsx_regs; i++) {
930 j = IS_LE ? nr_vsx_regs - i - 1 : i;
931 current->thread.vr_state.vr[reg - 32 + i] = buf[j].v;
939 static nokprobe_inline int do_vsx_store(struct instruction_op *op,
940 unsigned long ea, struct pt_regs *regs,
944 int i, j, nr_vsx_regs;
946 union vsx_reg buf[2];
947 int size = GETSIZE(op->type);
949 if (!address_ok(regs, ea, size))
952 nr_vsx_regs = size / sizeof(__vector128);
955 /* FP regs + extensions */
956 if (regs->msr & MSR_FP) {
957 for (i = 0; i < nr_vsx_regs; i++) {
958 j = IS_LE ? nr_vsx_regs - i - 1 : i;
959 store_vsrn(reg + i, &buf[j].v);
962 for (i = 0; i < nr_vsx_regs; i++) {
963 j = IS_LE ? nr_vsx_regs - i - 1 : i;
964 buf[j].d[0] = current->thread.fp_state.fpr[reg + i][0];
965 buf[j].d[1] = current->thread.fp_state.fpr[reg + i][1];
969 if (regs->msr & MSR_VEC) {
970 for (i = 0; i < nr_vsx_regs; i++) {
971 j = IS_LE ? nr_vsx_regs - i - 1 : i;
972 store_vsrn(reg + i, &buf[j].v);
975 for (i = 0; i < nr_vsx_regs; i++) {
976 j = IS_LE ? nr_vsx_regs - i - 1 : i;
977 buf[j].v = current->thread.vr_state.vr[reg - 32 + i];
982 emulate_vsx_store(op, buf, mem, cross_endian);
983 return copy_mem_out(mem, ea, size, regs);
985 #endif /* CONFIG_VSX */
987 int emulate_dcbz(unsigned long ea, struct pt_regs *regs)
990 unsigned long i, size;
993 size = ppc64_caches.l1d.block_size;
994 if (!(regs->msr & MSR_64BIT))
997 size = L1_CACHE_BYTES;
1000 if (!address_ok(regs, ea, size))
1002 for (i = 0; i < size; i += sizeof(long)) {
1003 err = __put_user(0, (unsigned long __user *) (ea + i));
1011 NOKPROBE_SYMBOL(emulate_dcbz);
1013 #define __put_user_asmx(x, addr, err, op, cr) \
1014 __asm__ __volatile__( \
1015 "1: " op " %2,0,%3\n" \
1018 ".section .fixup,\"ax\"\n" \
1023 : "=r" (err), "=r" (cr) \
1024 : "r" (x), "r" (addr), "i" (-EFAULT), "0" (err))
1026 #define __get_user_asmx(x, addr, err, op) \
1027 __asm__ __volatile__( \
1028 "1: "op" %1,0,%2\n" \
1030 ".section .fixup,\"ax\"\n" \
1035 : "=r" (err), "=r" (x) \
1036 : "r" (addr), "i" (-EFAULT), "0" (err))
1038 #define __cacheop_user_asmx(addr, err, op) \
1039 __asm__ __volatile__( \
1042 ".section .fixup,\"ax\"\n" \
1048 : "r" (addr), "i" (-EFAULT), "0" (err))
1050 static nokprobe_inline void set_cr0(const struct pt_regs *regs,
1051 struct instruction_op *op)
1056 op->ccval = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
1057 #ifdef __powerpc64__
1058 if (!(regs->msr & MSR_64BIT))
1062 op->ccval |= 0x80000000;
1064 op->ccval |= 0x40000000;
1066 op->ccval |= 0x20000000;
1069 static nokprobe_inline void set_ca32(struct instruction_op *op, bool val)
1071 if (cpu_has_feature(CPU_FTR_ARCH_300)) {
1073 op->xerval |= XER_CA32;
1075 op->xerval &= ~XER_CA32;
1079 static nokprobe_inline void add_with_carry(const struct pt_regs *regs,
1080 struct instruction_op *op, int rd,
1081 unsigned long val1, unsigned long val2,
1082 unsigned long carry_in)
1084 unsigned long val = val1 + val2;
1088 op->type = COMPUTE + SETREG + SETXER;
1091 #ifdef __powerpc64__
1092 if (!(regs->msr & MSR_64BIT)) {
1093 val = (unsigned int) val;
1094 val1 = (unsigned int) val1;
1097 op->xerval = regs->xer;
1098 if (val < val1 || (carry_in && val == val1))
1099 op->xerval |= XER_CA;
1101 op->xerval &= ~XER_CA;
1103 set_ca32(op, (unsigned int)val < (unsigned int)val1 ||
1104 (carry_in && (unsigned int)val == (unsigned int)val1));
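/*
 * Illustrative note on the carry test above: the addition is unsigned,
 * so a carry out shows up as wrap-around, e.g. ~0UL + 1 gives
 * val == 0 < val1 and sets XER_CA, while 1 + 1 leaves val >= val1 and
 * clears it. The (carry_in && val == val1) clause catches
 * val2 == ~0UL with a carry in, where the sum wraps back to val1.
 */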
1107 static nokprobe_inline void do_cmp_signed(const struct pt_regs *regs,
1108 struct instruction_op *op,
1109 long v1, long v2, int crfld)
1111 unsigned int crval, shift;
1113 op->type = COMPUTE + SETCC;
1114 crval = (regs->xer >> 31) & 1; /* get SO bit */
1121 shift = (7 - crfld) * 4;
1122 op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift);
1125 static nokprobe_inline void do_cmp_unsigned(const struct pt_regs *regs,
1126 struct instruction_op *op,
1128 unsigned long v2, int crfld)
1130 unsigned int crval, shift;
1132 op->type = COMPUTE + SETCC;
1133 crval = (regs->xer >> 31) & 1; /* get SO bit */
1140 shift = (7 - crfld) * 4;
1141 op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift);
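/*
 * Example of the CR-field placement used above: crfld == 0 gives
 * shift == 28, so the 4-bit comparison result lands in the top nibble
 * of CR, i.e. CR field 0; crfld == 7 gives shift == 0 (CR field 7).
 */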
1144 static nokprobe_inline void do_cmpb(const struct pt_regs *regs,
1145 struct instruction_op *op,
1146 unsigned long v1, unsigned long v2)
1148 unsigned long long out_val, mask;
1152 for (i = 0; i < 8; i++) {
1153 mask = 0xffUL << (i * 8);
1154 if ((v1 & mask) == (v2 & mask))
1161 * The size parameter is used to adjust the equivalent popcnt instruction.
1162 * popcntb = 8, popcntw = 32, popcntd = 64
1164 static nokprobe_inline void do_popcnt(const struct pt_regs *regs,
1165 struct instruction_op *op,
1166 unsigned long v1, int size)
1168 unsigned long long out = v1;
1170 out -= (out >> 1) & 0x5555555555555555ULL;
1171 out = (0x3333333333333333ULL & out) +
1172 (0x3333333333333333ULL & (out >> 2));
1173 out = (out + (out >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
1175 if (size == 8) { /* popcntb */
1181 if (size == 32) { /* popcntw */
1182 op->val = out & 0x0000003f0000003fULL;
1186 out = (out + (out >> 32)) & 0x7f;
1187 op->val = out; /* popcntd */
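/*
 * Worked example (illustration only): v1 == 0x0102030405060708 has
 * per-byte populations 1,1,2,1,2,2,3,1, so popcntb gives
 * 0x0101020102020301, popcntw gives 0x0000000500000008, and
 * popcntd gives 13.
 */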
1191 static nokprobe_inline void do_bpermd(const struct pt_regs *regs,
1192 struct instruction_op *op,
1193 unsigned long v1, unsigned long v2)
1195 unsigned char perm, idx;
1199 for (i = 0; i < 8; i++) {
1200 idx = (v1 >> (i * 8)) & 0xff;
1202 if (v2 & PPC_BIT(idx))
1207 #endif /* CONFIG_PPC64 */
1209 * The size parameter adjusts the equivalent prty instruction.
1210 * prtyw = 32, prtyd = 64
1212 static nokprobe_inline void do_prty(const struct pt_regs *regs,
1213 struct instruction_op *op,
1214 unsigned long v, int size)
1216 unsigned long long res = v ^ (v >> 8);
1219 if (size == 32) { /* prtyw */
1220 op->val = res & 0x0000000100000001ULL;
1225 op->val = res & 1; /* prtyd */
1228 static nokprobe_inline int trap_compare(long v1, long v2)
1238 if ((unsigned long)v1 < (unsigned long)v2)
1240 else if ((unsigned long)v1 > (unsigned long)v2)
1246 * Elements of 32-bit rotate and mask instructions.
1248 #define MASK32(mb, me) ((0xffffffffUL >> (mb)) + \
1249 ((signed long)-0x80000000L >> (me)) + ((me) >= (mb)))
1250 #ifdef __powerpc64__
1251 #define MASK64_L(mb) (~0UL >> (mb))
1252 #define MASK64_R(me) ((signed long)-0x8000000000000000L >> (me))
1253 #define MASK64(mb, me) (MASK64_L(mb) + MASK64_R(me) + ((me) >= (mb)))
1254 #define DATA32(x) (((x) & 0xffffffffUL) | (((x) & 0xffffffffUL) << 32))
1256 #define DATA32(x) (x)
1258 #define ROTATE(x, n) ((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x))
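/*
 * Worked examples of the mask/rotate helpers (IBM bit numbering, bit 0
 * being the most significant; illustration only):
 *   MASK32(24, 31)   == 0x000000ff
 *   MASK64_L(48)     == 0x000000000000ffffUL
 *   MASK64_R(15)     == 0xffff000000000000UL
 *   ROTATE(0x1UL, 4) == 0x10 on a 64-bit build
 */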
1261 * Decode an instruction, and return information about it in *op
1262 * without changing *regs.
1263 * Integer arithmetic and logical instructions, branches, and barrier
1264 * instructions can be emulated just using the information in *op.
1266 * Return value is 1 if the instruction can be emulated just by
1267 * updating *regs with the information in *op, -1 if we need the
1268 * GPRs but *regs doesn't contain the full register set, or 0 otherwise.
1271 int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
1272 struct ppc_inst instr)
1275 unsigned int suffixopcode, prefixtype, prefix_r;
1277 unsigned int opcode, ra, rb, rc, rd, spr, u;
1278 unsigned long int imm;
1279 unsigned long int val, val2;
1280 unsigned int mb, me, sh;
1281 unsigned int word, suffix;
1284 word = ppc_inst_val(instr);
1285 suffix = ppc_inst_suffix(instr);
1289 opcode = ppc_inst_primary_opcode(instr);
1293 imm = (signed short)(word & 0xfffc);
1294 if ((word & 2) == 0)
1296 op->val = truncate_if_32bit(regs->msr, imm);
1299 if (branch_taken(word, regs, op))
1300 op->type |= BRTAKEN;
1304 if ((word & 0xfe2) == 2)
1306 else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
1307 (word & 0xfe3) == 1) { /* scv */
1308 op->type = SYSCALL_VECTORED_0;
1309 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1310 goto unknown_opcode;
1316 op->type = BRANCH | BRTAKEN;
1317 imm = word & 0x03fffffc;
1318 if (imm & 0x02000000)
1320 if ((word & 2) == 0)
1322 op->val = truncate_if_32bit(regs->msr, imm);
1327 switch ((word >> 1) & 0x3ff) {
1329 op->type = COMPUTE + SETCC;
1330 rd = 7 - ((word >> 23) & 0x7);
1331 ra = 7 - ((word >> 18) & 0x7);
1334 val = (regs->ccr >> ra) & 0xf;
1335 op->ccval = (regs->ccr & ~(0xfUL << rd)) | (val << rd);
1339 case 528: /* bcctr */
1341 imm = (word & 0x400)? regs->ctr: regs->link;
1342 op->val = truncate_if_32bit(regs->msr, imm);
1345 if (branch_taken(word, regs, op))
1346 op->type |= BRTAKEN;
1349 case 18: /* rfid, scary */
1350 if (regs->msr & MSR_PR)
1355 case 150: /* isync */
1356 op->type = BARRIER | BARRIER_ISYNC;
1359 case 33: /* crnor */
1360 case 129: /* crandc */
1361 case 193: /* crxor */
1362 case 225: /* crnand */
1363 case 257: /* crand */
1364 case 289: /* creqv */
1365 case 417: /* crorc */
1366 case 449: /* cror */
1367 op->type = COMPUTE + SETCC;
1368 ra = (word >> 16) & 0x1f;
1369 rb = (word >> 11) & 0x1f;
1370 rd = (word >> 21) & 0x1f;
1371 ra = (regs->ccr >> (31 - ra)) & 1;
1372 rb = (regs->ccr >> (31 - rb)) & 1;
1373 val = (word >> (6 + ra * 2 + rb)) & 1;
1374 op->ccval = (regs->ccr & ~(1UL << (31 - rd))) |
1380 switch ((word >> 1) & 0x3ff) {
1381 case 598: /* sync */
1382 op->type = BARRIER + BARRIER_SYNC;
1383 #ifdef __powerpc64__
1384 switch ((word >> 21) & 3) {
1385 case 1: /* lwsync */
1386 op->type = BARRIER + BARRIER_LWSYNC;
1388 case 2: /* ptesync */
1389 op->type = BARRIER + BARRIER_PTESYNC;
1395 case 854: /* eieio */
1396 op->type = BARRIER + BARRIER_EIEIO;
1402 /* Following cases refer to regs->gpr[], so we need all regs */
1403 if (!FULL_REGS(regs))
1406 rd = (word >> 21) & 0x1f;
1407 ra = (word >> 16) & 0x1f;
1408 rb = (word >> 11) & 0x1f;
1409 rc = (word >> 6) & 0x1f;
1412 #ifdef __powerpc64__
1414 if (!cpu_has_feature(CPU_FTR_ARCH_31))
1415 goto unknown_opcode;
1417 prefix_r = GET_PREFIX_R(word);
1418 ra = GET_PREFIX_RA(suffix);
1419 rd = (suffix >> 21) & 0x1f;
1421 op->val = regs->gpr[rd];
1422 suffixopcode = get_op(suffix);
1423 prefixtype = (word >> 24) & 0x3;
1424 switch (prefixtype) {
1428 switch (suffixopcode) {
1429 case 14: /* paddi */
1430 op->type = COMPUTE | PREFIXED;
1431 op->val = mlsd_8lsd_ea(word, suffix, regs);
1437 if (rd & trap_compare(regs->gpr[ra], (short) word))
1442 if (rd & trap_compare((int)regs->gpr[ra], (short) word))
1446 #ifdef __powerpc64__
1449 * There are very many instructions with this primary opcode
1450 * introduced in the ISA as early as v2.03. However, the ones
1451 * we currently emulate were all introduced with ISA 3.0
1453 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1454 goto unknown_opcode;
1456 switch (word & 0x3f) {
1457 case 48: /* maddhd */
1458 asm volatile(PPC_MADDHD(%0, %1, %2, %3) :
1459 "=r" (op->val) : "r" (regs->gpr[ra]),
1460 "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
1463 case 49: /* maddhdu */
1464 asm volatile(PPC_MADDHDU(%0, %1, %2, %3) :
1465 "=r" (op->val) : "r" (regs->gpr[ra]),
1466 "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
1469 case 51: /* maddld */
1470 asm volatile(PPC_MADDLD(%0, %1, %2, %3) :
1471 "=r" (op->val) : "r" (regs->gpr[ra]),
1472 "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
1477 * There are other instructions from ISA 3.0 with the same
1478 * primary opcode which do not have emulation support yet.
1480 goto unknown_opcode;
1484 op->val = regs->gpr[ra] * (short) word;
1487 case 8: /* subfic */
1489 add_with_carry(regs, op, rd, ~regs->gpr[ra], imm, 1);
1492 case 10: /* cmpli */
1493 imm = (unsigned short) word;
1494 val = regs->gpr[ra];
1495 #ifdef __powerpc64__
1497 val = (unsigned int) val;
1499 do_cmp_unsigned(regs, op, val, imm, rd >> 2);
1504 val = regs->gpr[ra];
1505 #ifdef __powerpc64__
1509 do_cmp_signed(regs, op, val, imm, rd >> 2);
1512 case 12: /* addic */
1514 add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
1517 case 13: /* addic. */
1519 add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
1526 imm += regs->gpr[ra];
1530 case 15: /* addis */
1531 imm = ((short) word) << 16;
1533 imm += regs->gpr[ra];
1538 if (((word >> 1) & 0x1f) == 2) {
1540 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1541 goto unknown_opcode;
1542 imm = (short) (word & 0xffc1); /* d0 + d2 fields */
1543 imm |= (word >> 15) & 0x3e; /* d1 field */
1544 op->val = regs->nip + (imm << 16) + 4;
1550 case 20: /* rlwimi */
1551 mb = (word >> 6) & 0x1f;
1552 me = (word >> 1) & 0x1f;
1553 val = DATA32(regs->gpr[rd]);
1554 imm = MASK32(mb, me);
1555 op->val = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm);
1558 case 21: /* rlwinm */
1559 mb = (word >> 6) & 0x1f;
1560 me = (word >> 1) & 0x1f;
1561 val = DATA32(regs->gpr[rd]);
1562 op->val = ROTATE(val, rb) & MASK32(mb, me);
1565 case 23: /* rlwnm */
1566 mb = (word >> 6) & 0x1f;
1567 me = (word >> 1) & 0x1f;
1568 rb = regs->gpr[rb] & 0x1f;
1569 val = DATA32(regs->gpr[rd]);
1570 op->val = ROTATE(val, rb) & MASK32(mb, me);
1574 op->val = regs->gpr[rd] | (unsigned short) word;
1575 goto logical_done_nocc;
1578 imm = (unsigned short) word;
1579 op->val = regs->gpr[rd] | (imm << 16);
1580 goto logical_done_nocc;
1583 op->val = regs->gpr[rd] ^ (unsigned short) word;
1584 goto logical_done_nocc;
1586 case 27: /* xoris */
1587 imm = (unsigned short) word;
1588 op->val = regs->gpr[rd] ^ (imm << 16);
1589 goto logical_done_nocc;
1591 case 28: /* andi. */
1592 op->val = regs->gpr[rd] & (unsigned short) word;
1594 goto logical_done_nocc;
1596 case 29: /* andis. */
1597 imm = (unsigned short) word;
1598 op->val = regs->gpr[rd] & (imm << 16);
1600 goto logical_done_nocc;
1602 #ifdef __powerpc64__
1604 mb = ((word >> 6) & 0x1f) | (word & 0x20);
1605 val = regs->gpr[rd];
1606 if ((word & 0x10) == 0) {
1607 sh = rb | ((word & 2) << 4);
1608 val = ROTATE(val, sh);
1609 switch ((word >> 2) & 3) {
1610 case 0: /* rldicl */
1611 val &= MASK64_L(mb);
1613 case 1: /* rldicr */
1614 val &= MASK64_R(mb);
1617 val &= MASK64(mb, 63 - sh);
1619 case 3: /* rldimi */
1620 imm = MASK64(mb, 63 - sh);
1621 val = (regs->gpr[ra] & ~imm) |
1627 sh = regs->gpr[rb] & 0x3f;
1628 val = ROTATE(val, sh);
1629 switch ((word >> 1) & 7) {
1631 op->val = val & MASK64_L(mb);
1634 op->val = val & MASK64_R(mb);
1639 op->type = UNKNOWN; /* illegal instruction */
1643 /* isel occupies 32 minor opcodes */
1644 if (((word >> 1) & 0x1f) == 15) {
1645 mb = (word >> 6) & 0x1f; /* bc field */
1646 val = (regs->ccr >> (31 - mb)) & 1;
1647 val2 = (ra) ? regs->gpr[ra] : 0;
1649 op->val = (val) ? val2 : regs->gpr[rb];
1653 switch ((word >> 1) & 0x3ff) {
1656 (rd & trap_compare((int)regs->gpr[ra],
1657 (int)regs->gpr[rb])))
1660 #ifdef __powerpc64__
1662 if (rd & trap_compare(regs->gpr[ra], regs->gpr[rb]))
1666 case 83: /* mfmsr */
1667 if (regs->msr & MSR_PR)
1672 case 146: /* mtmsr */
1673 if (regs->msr & MSR_PR)
1677 op->val = 0xffffffff & ~(MSR_ME | MSR_LE);
1680 case 178: /* mtmsrd */
1681 if (regs->msr & MSR_PR)
1685 /* only MSR_EE and MSR_RI get changed if bit 15 set */
1686 /* mtmsrd doesn't change MSR_HV, MSR_ME or MSR_LE */
1687 imm = (word & 0x10000)? 0x8002: 0xefffffffffffeffeUL;
1694 if ((word >> 20) & 1) {
1696 for (sh = 0; sh < 8; ++sh) {
1697 if (word & (0x80000 >> sh))
1702 op->val = regs->ccr & imm;
1705 case 144: /* mtcrf */
1706 op->type = COMPUTE + SETCC;
1708 val = regs->gpr[rd];
1709 op->ccval = regs->ccr;
1710 for (sh = 0; sh < 8; ++sh) {
1711 if (word & (0x80000 >> sh))
1712 op->ccval = (op->ccval & ~imm) |
1718 case 339: /* mfspr */
1719 spr = ((word >> 16) & 0x1f) | ((word >> 6) & 0x3e0);
1723 if (spr == SPRN_XER || spr == SPRN_LR ||
1728 case 467: /* mtspr */
1729 spr = ((word >> 16) & 0x1f) | ((word >> 6) & 0x3e0);
1731 op->val = regs->gpr[rd];
1733 if (spr == SPRN_XER || spr == SPRN_LR ||
1739 * Compare instructions
1742 val = regs->gpr[ra];
1743 val2 = regs->gpr[rb];
1744 #ifdef __powerpc64__
1745 if ((rd & 1) == 0) {
1746 /* word (32-bit) compare */
1751 do_cmp_signed(regs, op, val, val2, rd >> 2);
1755 val = regs->gpr[ra];
1756 val2 = regs->gpr[rb];
1757 #ifdef __powerpc64__
1758 if ((rd & 1) == 0) {
1759 /* word (32-bit) compare */
1760 val = (unsigned int) val;
1761 val2 = (unsigned int) val2;
1764 do_cmp_unsigned(regs, op, val, val2, rd >> 2);
1767 case 508: /* cmpb */
1768 do_cmpb(regs, op, regs->gpr[rd], regs->gpr[rb]);
1769 goto logical_done_nocc;
1772 * Arithmetic instructions
1775 add_with_carry(regs, op, rd, ~regs->gpr[ra],
1778 #ifdef __powerpc64__
1779 case 9: /* mulhdu */
1780 asm("mulhdu %0,%1,%2" : "=r" (op->val) :
1781 "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1785 add_with_carry(regs, op, rd, regs->gpr[ra],
1789 case 11: /* mulhwu */
1790 asm("mulhwu %0,%1,%2" : "=r" (op->val) :
1791 "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1795 op->val = regs->gpr[rb] - regs->gpr[ra];
1797 #ifdef __powerpc64__
1798 case 73: /* mulhd */
1799 asm("mulhd %0,%1,%2" : "=r" (op->val) :
1800 "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1803 case 75: /* mulhw */
1804 asm("mulhw %0,%1,%2" : "=r" (op->val) :
1805 "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1809 op->val = -regs->gpr[ra];
1812 case 136: /* subfe */
1813 add_with_carry(regs, op, rd, ~regs->gpr[ra],
1814 regs->gpr[rb], regs->xer & XER_CA);
1817 case 138: /* adde */
1818 add_with_carry(regs, op, rd, regs->gpr[ra],
1819 regs->gpr[rb], regs->xer & XER_CA);
1822 case 200: /* subfze */
1823 add_with_carry(regs, op, rd, ~regs->gpr[ra], 0L,
1824 regs->xer & XER_CA);
1827 case 202: /* addze */
1828 add_with_carry(regs, op, rd, regs->gpr[ra], 0L,
1829 regs->xer & XER_CA);
1832 case 232: /* subfme */
1833 add_with_carry(regs, op, rd, ~regs->gpr[ra], -1L,
1834 regs->xer & XER_CA);
1836 #ifdef __powerpc64__
1837 case 233: /* mulld */
1838 op->val = regs->gpr[ra] * regs->gpr[rb];
1841 case 234: /* addme */
1842 add_with_carry(regs, op, rd, regs->gpr[ra], -1L,
1843 regs->xer & XER_CA);
1846 case 235: /* mullw */
1847 op->val = (long)(int) regs->gpr[ra] *
1848 (int) regs->gpr[rb];
1851 #ifdef __powerpc64__
1852 case 265: /* modud */
1853 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1854 goto unknown_opcode;
1855 op->val = regs->gpr[ra] % regs->gpr[rb];
1859 op->val = regs->gpr[ra] + regs->gpr[rb];
1862 case 267: /* moduw */
1863 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1864 goto unknown_opcode;
1865 op->val = (unsigned int) regs->gpr[ra] %
1866 (unsigned int) regs->gpr[rb];
1868 #ifdef __powerpc64__
1869 case 457: /* divdu */
1870 op->val = regs->gpr[ra] / regs->gpr[rb];
1873 case 459: /* divwu */
1874 op->val = (unsigned int) regs->gpr[ra] /
1875 (unsigned int) regs->gpr[rb];
1877 #ifdef __powerpc64__
1878 case 489: /* divd */
1879 op->val = (long int) regs->gpr[ra] /
1880 (long int) regs->gpr[rb];
1883 case 491: /* divw */
1884 op->val = (int) regs->gpr[ra] /
1885 (int) regs->gpr[rb];
1887 #ifdef __powerpc64__
1888 case 425: /* divde[.] */
1889 asm volatile(PPC_DIVDE(%0, %1, %2) :
1890 "=r" (op->val) : "r" (regs->gpr[ra]),
1891 "r" (regs->gpr[rb]));
1893 case 393: /* divdeu[.] */
1894 asm volatile(PPC_DIVDEU(%0, %1, %2) :
1895 "=r" (op->val) : "r" (regs->gpr[ra]),
1896 "r" (regs->gpr[rb]));
1899 case 755: /* darn */
1900 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1901 goto unknown_opcode;
1904 /* 32-bit conditioned */
1905 asm volatile(PPC_DARN(%0, 0) : "=r" (op->val));
1909 /* 64-bit conditioned */
1910 asm volatile(PPC_DARN(%0, 1) : "=r" (op->val));
1915 asm volatile(PPC_DARN(%0, 2) : "=r" (op->val));
1920 #ifdef __powerpc64__
1921 case 777: /* modsd */
1922 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1923 goto unknown_opcode;
1924 op->val = (long int) regs->gpr[ra] %
1925 (long int) regs->gpr[rb];
1928 case 779: /* modsw */
1929 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1930 goto unknown_opcode;
1931 op->val = (int) regs->gpr[ra] %
1932 (int) regs->gpr[rb];
1937 * Logical instructions
1939 case 26: /* cntlzw */
1940 val = (unsigned int) regs->gpr[rd];
1941 op->val = ( val ? __builtin_clz(val) : 32 );
1943 #ifdef __powerpc64__
1944 case 58: /* cntlzd */
1945 val = regs->gpr[rd];
1946 op->val = ( val ? __builtin_clzl(val) : 64 );
1950 op->val = regs->gpr[rd] & regs->gpr[rb];
1954 op->val = regs->gpr[rd] & ~regs->gpr[rb];
1957 case 122: /* popcntb */
1958 do_popcnt(regs, op, regs->gpr[rd], 8);
1959 goto logical_done_nocc;
1962 op->val = ~(regs->gpr[rd] | regs->gpr[rb]);
1965 case 154: /* prtyw */
1966 do_prty(regs, op, regs->gpr[rd], 32);
1967 goto logical_done_nocc;
1969 case 186: /* prtyd */
1970 do_prty(regs, op, regs->gpr[rd], 64);
1971 goto logical_done_nocc;
1973 case 252: /* bpermd */
1974 do_bpermd(regs, op, regs->gpr[rd], regs->gpr[rb]);
1975 goto logical_done_nocc;
1978 op->val = ~(regs->gpr[rd] ^ regs->gpr[rb]);
1982 op->val = regs->gpr[rd] ^ regs->gpr[rb];
1985 case 378: /* popcntw */
1986 do_popcnt(regs, op, regs->gpr[rd], 32);
1987 goto logical_done_nocc;
1990 op->val = regs->gpr[rd] | ~regs->gpr[rb];
1994 op->val = regs->gpr[rd] | regs->gpr[rb];
1997 case 476: /* nand */
1998 op->val = ~(regs->gpr[rd] & regs->gpr[rb]);
2001 case 506: /* popcntd */
2002 do_popcnt(regs, op, regs->gpr[rd], 64);
2003 goto logical_done_nocc;
2005 case 538: /* cnttzw */
2006 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2007 goto unknown_opcode;
2008 val = (unsigned int) regs->gpr[rd];
2009 op->val = (val ? __builtin_ctz(val) : 32);
2011 #ifdef __powerpc64__
2012 case 570: /* cnttzd */
2013 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2014 goto unknown_opcode;
2015 val = regs->gpr[rd];
2016 op->val = (val ? __builtin_ctzl(val) : 64);
2019 case 922: /* extsh */
2020 op->val = (signed short) regs->gpr[rd];
2023 case 954: /* extsb */
2024 op->val = (signed char) regs->gpr[rd];
2026 #ifdef __powerpc64__
2027 case 986: /* extsw */
2028 op->val = (signed int) regs->gpr[rd];
2033 * Shift instructions
2036 sh = regs->gpr[rb] & 0x3f;
2038 op->val = (regs->gpr[rd] << sh) & 0xffffffffUL;
2044 sh = regs->gpr[rb] & 0x3f;
2046 op->val = (regs->gpr[rd] & 0xffffffffUL) >> sh;
2051 case 792: /* sraw */
2052 op->type = COMPUTE + SETREG + SETXER;
2053 sh = regs->gpr[rb] & 0x3f;
2054 ival = (signed int) regs->gpr[rd];
2055 op->val = ival >> (sh < 32 ? sh : 31);
2056 op->xerval = regs->xer;
2057 if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0))
2058 op->xerval |= XER_CA;
2060 op->xerval &= ~XER_CA;
2061 set_ca32(op, op->xerval & XER_CA);
2064 case 824: /* srawi */
2065 op->type = COMPUTE + SETREG + SETXER;
2067 ival = (signed int) regs->gpr[rd];
2068 op->val = ival >> sh;
2069 op->xerval = regs->xer;
2070 if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
2071 op->xerval |= XER_CA;
2073 op->xerval &= ~XER_CA;
2074 set_ca32(op, op->xerval & XER_CA);
2077 #ifdef __powerpc64__
2079 sh = regs->gpr[rb] & 0x7f;
2081 op->val = regs->gpr[rd] << sh;
2087 sh = regs->gpr[rb] & 0x7f;
2089 op->val = regs->gpr[rd] >> sh;
2094 case 794: /* srad */
2095 op->type = COMPUTE + SETREG + SETXER;
2096 sh = regs->gpr[rb] & 0x7f;
2097 ival = (signed long int) regs->gpr[rd];
2098 op->val = ival >> (sh < 64 ? sh : 63);
2099 op->xerval = regs->xer;
2100 if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0))
2101 op->xerval |= XER_CA;
2103 op->xerval &= ~XER_CA;
2104 set_ca32(op, op->xerval & XER_CA);
2107 case 826: /* sradi with sh_5 = 0 */
2108 case 827: /* sradi with sh_5 = 1 */
2109 op->type = COMPUTE + SETREG + SETXER;
2110 sh = rb | ((word & 2) << 4);
2111 ival = (signed long int) regs->gpr[rd];
2112 op->val = ival >> sh;
2113 op->xerval = regs->xer;
2114 if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
2115 op->xerval |= XER_CA;
2117 op->xerval &= ~XER_CA;
2118 set_ca32(op, op->xerval & XER_CA);
2121 case 890: /* extswsli with sh_5 = 0 */
2122 case 891: /* extswsli with sh_5 = 1 */
2123 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2124 goto unknown_opcode;
2125 op->type = COMPUTE + SETREG;
2126 sh = rb | ((word & 2) << 4);
2127 val = (signed int) regs->gpr[rd];
2129 op->val = ROTATE(val, sh) & MASK64(0, 63 - sh);
2134 #endif /* __powerpc64__ */
2137 * Cache instructions
2139 case 54: /* dcbst */
2140 op->type = MKOP(CACHEOP, DCBST, 0);
2141 op->ea = xform_ea(word, regs);
2145 op->type = MKOP(CACHEOP, DCBF, 0);
2146 op->ea = xform_ea(word, regs);
2149 case 246: /* dcbtst */
2150 op->type = MKOP(CACHEOP, DCBTST, 0);
2151 op->ea = xform_ea(word, regs);
2155 case 278: /* dcbt */
2156 op->type = MKOP(CACHEOP, DCBT, 0);
2157 op->ea = xform_ea(word, regs);
2161 case 982: /* icbi */
2162 op->type = MKOP(CACHEOP, ICBI, 0);
2163 op->ea = xform_ea(word, regs);
2166 case 1014: /* dcbz */
2167 op->type = MKOP(CACHEOP, DCBZ, 0);
2168 op->ea = xform_ea(word, regs);
2178 op->update_reg = ra;
2180 op->val = regs->gpr[rd];
2181 u = (word >> 20) & UPDATE;
2187 op->ea = xform_ea(word, regs);
2188 switch ((word >> 1) & 0x3ff) {
2189 case 20: /* lwarx */
2190 op->type = MKOP(LARX, 0, 4);
2193 case 150: /* stwcx. */
2194 op->type = MKOP(STCX, 0, 4);
2197 #ifdef __powerpc64__
2198 case 84: /* ldarx */
2199 op->type = MKOP(LARX, 0, 8);
2202 case 214: /* stdcx. */
2203 op->type = MKOP(STCX, 0, 8);
2206 case 52: /* lbarx */
2207 op->type = MKOP(LARX, 0, 1);
2210 case 694: /* stbcx. */
2211 op->type = MKOP(STCX, 0, 1);
2214 case 116: /* lharx */
2215 op->type = MKOP(LARX, 0, 2);
2218 case 726: /* sthcx. */
2219 op->type = MKOP(STCX, 0, 2);
2222 case 276: /* lqarx */
2223 if (!((rd & 1) || rd == ra || rd == rb))
2224 op->type = MKOP(LARX, 0, 16);
2227 case 182: /* stqcx. */
2229 op->type = MKOP(STCX, 0, 16);
2234 case 55: /* lwzux */
2235 op->type = MKOP(LOAD, u, 4);
2239 case 119: /* lbzux */
2240 op->type = MKOP(LOAD, u, 1);
2243 #ifdef CONFIG_ALTIVEC
2245 * Note: for the load/store vector element instructions,
2246 * bits of the EA say which field of the VMX register to use.
2249 op->type = MKOP(LOAD_VMX, 0, 1);
2250 op->element_size = 1;
2253 case 39: /* lvehx */
2254 op->type = MKOP(LOAD_VMX, 0, 2);
2255 op->element_size = 2;
2258 case 71: /* lvewx */
2259 op->type = MKOP(LOAD_VMX, 0, 4);
2260 op->element_size = 4;
2264 case 359: /* lvxl */
2265 op->type = MKOP(LOAD_VMX, 0, 16);
2266 op->element_size = 16;
2269 case 135: /* stvebx */
2270 op->type = MKOP(STORE_VMX, 0, 1);
2271 op->element_size = 1;
2274 case 167: /* stvehx */
2275 op->type = MKOP(STORE_VMX, 0, 2);
2276 op->element_size = 2;
2279 case 199: /* stvewx */
2280 op->type = MKOP(STORE_VMX, 0, 4);
2281 op->element_size = 4;
2284 case 231: /* stvx */
2285 case 487: /* stvxl */
2286 op->type = MKOP(STORE_VMX, 0, 16);
2288 #endif /* CONFIG_ALTIVEC */
2290 #ifdef __powerpc64__
2293 op->type = MKOP(LOAD, u, 8);
2296 case 149: /* stdx */
2297 case 181: /* stdux */
2298 op->type = MKOP(STORE, u, 8);
2302 case 151: /* stwx */
2303 case 183: /* stwux */
2304 op->type = MKOP(STORE, u, 4);
2307 case 215: /* stbx */
2308 case 247: /* stbux */
2309 op->type = MKOP(STORE, u, 1);
2312 case 279: /* lhzx */
2313 case 311: /* lhzux */
2314 op->type = MKOP(LOAD, u, 2);
2317 #ifdef __powerpc64__
2318 case 341: /* lwax */
2319 case 373: /* lwaux */
2320 op->type = MKOP(LOAD, SIGNEXT | u, 4);
2324 case 343: /* lhax */
2325 case 375: /* lhaux */
2326 op->type = MKOP(LOAD, SIGNEXT | u, 2);
2329 case 407: /* sthx */
2330 case 439: /* sthux */
2331 op->type = MKOP(STORE, u, 2);
2334 #ifdef __powerpc64__
2335 case 532: /* ldbrx */
2336 op->type = MKOP(LOAD, BYTEREV, 8);
2340 case 533: /* lswx */
2341 op->type = MKOP(LOAD_MULTI, 0, regs->xer & 0x7f);
2344 case 534: /* lwbrx */
2345 op->type = MKOP(LOAD, BYTEREV, 4);
2348 case 597: /* lswi */
2350 rb = 32; /* # bytes to load */
2351 op->type = MKOP(LOAD_MULTI, 0, rb);
2352 op->ea = ra ? regs->gpr[ra] : 0;
2355 #ifdef CONFIG_PPC_FPU
2356 case 535: /* lfsx */
2357 case 567: /* lfsux */
2358 op->type = MKOP(LOAD_FP, u | FPCONV, 4);
2361 case 599: /* lfdx */
2362 case 631: /* lfdux */
2363 op->type = MKOP(LOAD_FP, u, 8);
2366 case 663: /* stfsx */
2367 case 695: /* stfsux */
2368 op->type = MKOP(STORE_FP, u | FPCONV, 4);
2371 case 727: /* stfdx */
2372 case 759: /* stfdux */
2373 op->type = MKOP(STORE_FP, u, 8);
2376 #ifdef __powerpc64__
2377 case 791: /* lfdpx */
2378 op->type = MKOP(LOAD_FP, 0, 16);
2381 case 855: /* lfiwax */
2382 op->type = MKOP(LOAD_FP, SIGNEXT, 4);
2385 case 887: /* lfiwzx */
2386 op->type = MKOP(LOAD_FP, 0, 4);
2389 case 919: /* stfdpx */
2390 op->type = MKOP(STORE_FP, 0, 16);
2393 case 983: /* stfiwx */
2394 op->type = MKOP(STORE_FP, 0, 4);
2396 #endif /* __powerpc64__ */
2397 #endif /* CONFIG_PPC_FPU */
2399 #ifdef __powerpc64__
2400 case 660: /* stdbrx */
2401 op->type = MKOP(STORE, BYTEREV, 8);
2402 op->val = byterev_8(regs->gpr[rd]);
2406 case 661: /* stswx */
2407 op->type = MKOP(STORE_MULTI, 0, regs->xer & 0x7f);
2410 case 662: /* stwbrx */
2411 op->type = MKOP(STORE, BYTEREV, 4);
2412 op->val = byterev_4(regs->gpr[rd]);
2415 case 725: /* stswi */
2417 rb = 32; /* # bytes to store */
2418 op->type = MKOP(STORE_MULTI, 0, rb);
2419 op->ea = ra ? regs->gpr[ra] : 0;
2422 case 790: /* lhbrx */
2423 op->type = MKOP(LOAD, BYTEREV, 2);
2426 case 918: /* sthbrx */
2427 op->type = MKOP(STORE, BYTEREV, 2);
2428 op->val = byterev_2(regs->gpr[rd]);
2432 case 12: /* lxsiwzx */
2433 op->reg = rd | ((word & 1) << 5);
2434 op->type = MKOP(LOAD_VSX, 0, 4);
2435 op->element_size = 8;
2438 case 76: /* lxsiwax */
2439 op->reg = rd | ((word & 1) << 5);
2440 op->type = MKOP(LOAD_VSX, SIGNEXT, 4);
2441 op->element_size = 8;
2444 case 140: /* stxsiwx */
2445 op->reg = rd | ((word & 1) << 5);
2446 op->type = MKOP(STORE_VSX, 0, 4);
2447 op->element_size = 8;
2450 case 268: /* lxvx */
2451 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2452 goto unknown_opcode;
2453 op->reg = rd | ((word & 1) << 5);
2454 op->type = MKOP(LOAD_VSX, 0, 16);
2455 op->element_size = 16;
2456 op->vsx_flags = VSX_CHECK_VEC;
2459 case 269: /* lxvl */
2460 case 301: { /* lxvll */
2462 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2463 goto unknown_opcode;
2464 op->reg = rd | ((word & 1) << 5);
2465 op->ea = ra ? regs->gpr[ra] : 0;
2466 nb = regs->gpr[rb] & 0xff;
2469 op->type = MKOP(LOAD_VSX, 0, nb);
2470 op->element_size = 16;
2471 op->vsx_flags = ((word & 0x20) ? VSX_LDLEFT : 0) |
2475 case 332: /* lxvdsx */
2476 op->reg = rd | ((word & 1) << 5);
2477 op->type = MKOP(LOAD_VSX, 0, 8);
2478 op->element_size = 8;
2479 op->vsx_flags = VSX_SPLAT;
2482 case 333: /* lxvpx */
2483 if (!cpu_has_feature(CPU_FTR_ARCH_31))
2484 goto unknown_opcode;
2485 op->reg = VSX_REGISTER_XTP(rd);
2486 op->type = MKOP(LOAD_VSX, 0, 32);
2487 op->element_size = 32;
2490 case 364: /* lxvwsx */
2491 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2492 goto unknown_opcode;
2493 op->reg = rd | ((word & 1) << 5);
2494 op->type = MKOP(LOAD_VSX, 0, 4);
2495 op->element_size = 4;
2496 op->vsx_flags = VSX_SPLAT | VSX_CHECK_VEC;
2499 case 396: /* stxvx */
2500 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2501 goto unknown_opcode;
2502 op->reg = rd | ((word & 1) << 5);
2503 op->type = MKOP(STORE_VSX, 0, 16);
2504 op->element_size = 16;
2505 op->vsx_flags = VSX_CHECK_VEC;
2508 case 397: /* stxvl */
2509 case 429: { /* stxvll */
2511 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2512 goto unknown_opcode;
2513 op->reg = rd | ((word & 1) << 5);
2514 op->ea = ra ? regs->gpr[ra] : 0;
2515 nb = regs->gpr[rb] & 0xff;
2518 op->type = MKOP(STORE_VSX, 0, nb);
2519 op->element_size = 16;
2520 op->vsx_flags = ((word & 0x20) ? VSX_LDLEFT : 0) |
2524 case 461: /* stxvpx */
2525 if (!cpu_has_feature(CPU_FTR_ARCH_31))
2526 goto unknown_opcode;
2527 op->reg = VSX_REGISTER_XTP(rd);
2528 op->type = MKOP(STORE_VSX, 0, 32);
2529 op->element_size = 32;
2531 case 524: /* lxsspx */
2532 op->reg = rd | ((word & 1) << 5);
2533 op->type = MKOP(LOAD_VSX, 0, 4);
2534 op->element_size = 8;
2535 op->vsx_flags = VSX_FPCONV;
2538 case 588: /* lxsdx */
2539 op->reg = rd | ((word & 1) << 5);
2540 op->type = MKOP(LOAD_VSX, 0, 8);
2541 op->element_size = 8;
2544 case 652: /* stxsspx */
2545 op->reg = rd | ((word & 1) << 5);
2546 op->type = MKOP(STORE_VSX, 0, 4);
2547 op->element_size = 8;
2548 op->vsx_flags = VSX_FPCONV;
2551 case 716: /* stxsdx */
2552 op->reg = rd | ((word & 1) << 5);
2553 op->type = MKOP(STORE_VSX, 0, 8);
2554 op->element_size = 8;
2557 case 780: /* lxvw4x */
2558 op->reg = rd | ((word & 1) << 5);
2559 op->type = MKOP(LOAD_VSX, 0, 16);
2560 op->element_size = 4;
2563 case 781: /* lxsibzx */
2564 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2565 goto unknown_opcode;
2566 op->reg = rd | ((word & 1) << 5);
2567 op->type = MKOP(LOAD_VSX, 0, 1);
2568 op->element_size = 8;
2569 op->vsx_flags = VSX_CHECK_VEC;
2572 case 812: /* lxvh8x */
2573 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2574 goto unknown_opcode;
2575 op->reg = rd | ((word & 1) << 5);
2576 op->type = MKOP(LOAD_VSX, 0, 16);
2577 op->element_size = 2;
2578 op->vsx_flags = VSX_CHECK_VEC;
2581 case 813: /* lxsihzx */
2582 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2583 goto unknown_opcode;
2584 op->reg = rd | ((word & 1) << 5);
2585 op->type = MKOP(LOAD_VSX, 0, 2);
2586 op->element_size = 8;
2587 op->vsx_flags = VSX_CHECK_VEC;
2590 case 844: /* lxvd2x */
2591 op->reg = rd | ((word & 1) << 5);
2592 op->type = MKOP(LOAD_VSX, 0, 16);
2593 op->element_size = 8;
2596 case 876: /* lxvb16x */
2597 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2598 goto unknown_opcode;
2599 op->reg = rd | ((word & 1) << 5);
2600 op->type = MKOP(LOAD_VSX, 0, 16);
2601 op->element_size = 1;
2602 op->vsx_flags = VSX_CHECK_VEC;
2605 case 908: /* stxvw4x */
2606 op->reg = rd | ((word & 1) << 5);
2607 op->type = MKOP(STORE_VSX, 0, 16);
2608 op->element_size = 4;
2611 case 909: /* stxsibx */
2612 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2613 goto unknown_opcode;
2614 op->reg = rd | ((word & 1) << 5);
2615 op->type = MKOP(STORE_VSX, 0, 1);
2616 op->element_size = 8;
2617 op->vsx_flags = VSX_CHECK_VEC;
2620 case 940: /* stxvh8x */
2621 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2622 goto unknown_opcode;
2623 op->reg = rd | ((word & 1) << 5);
2624 op->type = MKOP(STORE_VSX, 0, 16);
2625 op->element_size = 2;
2626 op->vsx_flags = VSX_CHECK_VEC;
2629 case 941: /* stxsihx */
2630 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2631 goto unknown_opcode;
2632 op->reg = rd | ((word & 1) << 5);
2633 op->type = MKOP(STORE_VSX, 0, 2);
2634 op->element_size = 8;
2635 op->vsx_flags = VSX_CHECK_VEC;
2638 case 972: /* stxvd2x */
2639 op->reg = rd | ((word & 1) << 5);
2640 op->type = MKOP(STORE_VSX, 0, 16);
2641 op->element_size = 8;
2644 case 1004: /* stxvb16x */
2645 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2646 goto unknown_opcode;
2647 op->reg = rd | ((word & 1) << 5);
2648 op->type = MKOP(STORE_VSX, 0, 16);
2649 op->element_size = 1;
2650 op->vsx_flags = VSX_CHECK_VEC;
2653 #endif /* CONFIG_VSX */
2659 op->type = MKOP(LOAD, u, 4);
2660 op->ea = dform_ea(word, regs);
2665 op->type = MKOP(LOAD, u, 1);
2666 op->ea = dform_ea(word, regs);
2671 op->type = MKOP(STORE, u, 4);
2672 op->ea = dform_ea(word, regs);
2677 op->type = MKOP(STORE, u, 1);
2678 op->ea = dform_ea(word, regs);
2683 op->type = MKOP(LOAD, u, 2);
2684 op->ea = dform_ea(word, regs);
2689 op->type = MKOP(LOAD, SIGNEXT | u, 2);
2690 op->ea = dform_ea(word, regs);
2695 op->type = MKOP(STORE, u, 2);
2696 op->ea = dform_ea(word, regs);
2701 break; /* invalid form, ra in range to load */
2702 op->type = MKOP(LOAD_MULTI, 0, 4 * (32 - rd));
2703 op->ea = dform_ea(word, regs);
2707 op->type = MKOP(STORE_MULTI, 0, 4 * (32 - rd));
2708 op->ea = dform_ea(word, regs);
2711 #ifdef CONFIG_PPC_FPU
2714 op->type = MKOP(LOAD_FP, u | FPCONV, 4);
2715 op->ea = dform_ea(word, regs);
2720 op->type = MKOP(LOAD_FP, u, 8);
2721 op->ea = dform_ea(word, regs);
2725 case 53: /* stfsu */
2726 op->type = MKOP(STORE_FP, u | FPCONV, 4);
2727 op->ea = dform_ea(word, regs);
2731 case 55: /* stfdu */
2732 op->type = MKOP(STORE_FP, u, 8);
2733 op->ea = dform_ea(word, regs);
2737 #ifdef __powerpc64__
2739 if (!((rd & 1) || (rd == ra)))
2740 op->type = MKOP(LOAD, 0, 16);
2741 op->ea = dqform_ea(word, regs);
2746 case 57: /* lfdp, lxsd, lxssp */
2747 op->ea = dsform_ea(word, regs);
2751 break; /* reg must be even */
2752 op->type = MKOP(LOAD_FP, 0, 16);
2755 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2756 goto unknown_opcode;
2758 op->type = MKOP(LOAD_VSX, 0, 8);
2759 op->element_size = 8;
2760 op->vsx_flags = VSX_CHECK_VEC;
2763 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2764 goto unknown_opcode;
2766 op->type = MKOP(LOAD_VSX, 0, 4);
2767 op->element_size = 8;
2768 op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2772 #endif /* CONFIG_VSX */
2774 #ifdef __powerpc64__
2775 case 58: /* ld[u], lwa */
2776 op->ea = dsform_ea(word, regs);
2779 op->type = MKOP(LOAD, 0, 8);
2782 op->type = MKOP(LOAD, UPDATE, 8);
2785 op->type = MKOP(LOAD, SIGNEXT, 4);
2793 if (!cpu_has_feature(CPU_FTR_ARCH_31))
2794 goto unknown_opcode;
2795 op->ea = dqform_ea(word, regs);
2796 op->reg = VSX_REGISTER_XTP(rd);
2797 op->element_size = 32;
2798 switch (word & 0xf) {
2800 op->type = MKOP(LOAD_VSX, 0, 32);
2803 op->type = MKOP(STORE_VSX, 0, 32);
2808 case 61: /* stfdp, lxv, stxsd, stxssp, stxv */
2810 case 0: /* stfdp with LSB of DS field = 0 */
2811 case 4: /* stfdp with LSB of DS field = 1 */
2812 op->ea = dsform_ea(word, regs);
2813 op->type = MKOP(STORE_FP, 0, 16);
2817 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2818 goto unknown_opcode;
2819 op->ea = dqform_ea(word, regs);
2822 op->type = MKOP(LOAD_VSX, 0, 16);
2823 op->element_size = 16;
2824 op->vsx_flags = VSX_CHECK_VEC;
2827 case 2: /* stxsd with LSB of DS field = 0 */
2828 case 6: /* stxsd with LSB of DS field = 1 */
2829 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2830 goto unknown_opcode;
2831 op->ea = dsform_ea(word, regs);
2833 op->type = MKOP(STORE_VSX, 0, 8);
2834 op->element_size = 8;
2835 op->vsx_flags = VSX_CHECK_VEC;
2838 case 3: /* stxssp with LSB of DS field = 0 */
2839 case 7: /* stxssp with LSB of DS field = 1 */
2840 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2841 goto unknown_opcode;
2842 op->ea = dsform_ea(word, regs);
2844 op->type = MKOP(STORE_VSX, 0, 4);
2845 op->element_size = 8;
2846 op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2850 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2851 goto unknown_opcode;
2852 op->ea = dqform_ea(word, regs);
2855 op->type = MKOP(STORE_VSX, 0, 16);
2856 op->element_size = 16;
2857 op->vsx_flags = VSX_CHECK_VEC;
2861 #endif /* CONFIG_VSX */
2863 #ifdef __powerpc64__
2864 case 62: /* std[u] */
2865 op->ea = dsform_ea(word, regs);
2868 op->type = MKOP(STORE, 0, 8);
2871 op->type = MKOP(STORE, UPDATE, 8);
2875 op->type = MKOP(STORE, 0, 16);
2879 case 1: /* Prefixed instructions */
2880 if (!cpu_has_feature(CPU_FTR_ARCH_31))
2881 goto unknown_opcode;
2883 prefix_r = GET_PREFIX_R(word);
2884 ra = GET_PREFIX_RA(suffix);
2885 op->update_reg = ra;
2886 rd = (suffix >> 21) & 0x1f;
2888 op->val = regs->gpr[rd];
2890 suffixopcode = get_op(suffix);
2891 prefixtype = (word >> 24) & 0x3;
2892 switch (prefixtype) {
2893 case 0: /* Type 00 Eight-Byte Load/Store */
2896 op->ea = mlsd_8lsd_ea(word, suffix, regs);
2897 switch (suffixopcode) {
2899 op->type = MKOP(LOAD, PREFIXED | SIGNEXT, 4);
2902 case 42: /* plxsd */
2904 op->type = MKOP(LOAD_VSX, PREFIXED, 8);
2905 op->element_size = 8;
2906 op->vsx_flags = VSX_CHECK_VEC;
2908 case 43: /* plxssp */
2910 op->type = MKOP(LOAD_VSX, PREFIXED, 4);
2911 op->element_size = 8;
2912 op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2914 case 46: /* pstxsd */
2916 op->type = MKOP(STORE_VSX, PREFIXED, 8);
2917 op->element_size = 8;
2918 op->vsx_flags = VSX_CHECK_VEC;
2920 case 47: /* pstxssp */
2922 op->type = MKOP(STORE_VSX, PREFIXED, 4);
2923 op->element_size = 8;
2924 op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2926 case 51: /* plxv1 */
2929 case 50: /* plxv0 */
2930 op->type = MKOP(LOAD_VSX, PREFIXED, 16);
2931 op->element_size = 16;
2932 op->vsx_flags = VSX_CHECK_VEC;
2934 case 55: /* pstxv1 */
2937 case 54: /* pstxv0 */
2938 op->type = MKOP(STORE_VSX, PREFIXED, 16);
2939 op->element_size = 16;
2940 op->vsx_flags = VSX_CHECK_VEC;
2942 #endif /* CONFIG_VSX */
2944 op->type = MKOP(LOAD, PREFIXED, 16);
2947 op->type = MKOP(LOAD, PREFIXED, 8);
2950 case 58: /* plxvp */
2951 op->reg = VSX_REGISTER_XTP(rd);
2952 op->type = MKOP(LOAD_VSX, PREFIXED, 32);
2953 op->element_size = 32;
2955 #endif /* CONFIG_VSX */
2957 op->type = MKOP(STORE, PREFIXED, 16);
2960 op->type = MKOP(STORE, PREFIXED, 8);
2963 case 62: /* pstxvp */
2964 op->reg = VSX_REGISTER_XTP(rd);
2965 op->type = MKOP(STORE_VSX, PREFIXED, 32);
2966 op->element_size = 32;
2968 #endif /* CONFIG_VSX */
2971 case 1: /* Type 01 Eight-Byte Register-to-Register */
2973 case 2: /* Type 10 Modified Load/Store */
2976 op->ea = mlsd_8lsd_ea(word, suffix, regs);
2977 switch (suffixopcode) {
2979 op->type = MKOP(LOAD, PREFIXED, 4);
2982 op->type = MKOP(LOAD, PREFIXED, 1);
2985 op->type = MKOP(STORE, PREFIXED, 4);
2988 op->type = MKOP(STORE, PREFIXED, 1);
2991 op->type = MKOP(LOAD, PREFIXED, 2);
2994 op->type = MKOP(LOAD, PREFIXED | SIGNEXT, 2);
2997 op->type = MKOP(STORE, PREFIXED, 2);
3000 op->type = MKOP(LOAD_FP, PREFIXED | FPCONV, 4);
3003 op->type = MKOP(LOAD_FP, PREFIXED, 8);
3005 case 52: /* pstfs */
3006 op->type = MKOP(STORE_FP, PREFIXED | FPCONV, 4);
3008 case 54: /* pstfd */
3009 op->type = MKOP(STORE_FP, PREFIXED, 8);
3013 case 3: /* Type 11 Modified Register-to-Register */
3016 #endif /* __powerpc64__ */
3021 if ((GETTYPE(op->type) == LOAD_VSX ||
3022 GETTYPE(op->type) == STORE_VSX) &&
3023 !cpu_has_feature(CPU_FTR_VSX)) {
3026 #endif /* CONFIG_VSX */
3051 op->type = INTERRUPT | 0x700;
3052 op->val = SRR1_PROGPRIV;
3056 op->type = INTERRUPT | 0x700;
3057 op->val = SRR1_PROGTRAP;
3060 EXPORT_SYMBOL_GPL(analyse_instr);
3061 NOKPROBE_SYMBOL(analyse_instr);
3064 * For PPC32 we always use stwu with r1 to change the stack pointer.
3065 * This emulated store could therefore corrupt the exception frame, so we
3066 * provide an exception frame trampoline, which is pushed below the
3067 * kprobed function's stack. Here we only update gpr[1] and do not
3068 * emulate the real store operation; the real store is done safely in
3069 * the exception return code, which checks this flag.
3071 static nokprobe_inline int handle_stack_update(unsigned long ea, struct pt_regs *regs)
3075 * Check whether the new frame (plus an exception frame) would run past
3077 if (ea - STACK_INT_FRAME_SIZE <= current->thread.ksp_limit) {
3078 printk(KERN_CRIT "Can't kprobe this since kernel stack would overflow.\n");
3081 #endif /* CONFIG_PPC32 */
3083 * Check whether the flag is already set, since that would mean
3084 * we are about to lose the previously recorded value.
3086 WARN_ON(test_thread_flag(TIF_EMULATE_STACK_STORE));
3087 set_thread_flag(TIF_EMULATE_STACK_STORE);
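/*
 * Illustrative example (hypothetical operands): for a kprobed function
 * whose prologue executes "stwu r1,-16(r1)", the emulation only sets
 * TIF_EMULATE_STACK_STORE here and lets the normal UPDATE write-back
 * place the new stack pointer in gpr[1]; the old r1 value is stored into
 * the new frame later, from the exception return path, where it cannot
 * clobber the exception frame that currently sits there.
 */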
3091 static nokprobe_inline void do_signext(unsigned long *valp, int size)
3095 *valp = (signed short) *valp;
3098 *valp = (signed int) *valp;
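/*
 * Worked example: with size == 2 a loaded halfword of 0x8000 becomes
 * 0xffffffffffff8000 on a 64-bit kernel (sign bit propagated), while
 * 0x7fff is left unchanged; sizes that already fill the register need
 * no fixup.
 */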
3103 static nokprobe_inline void do_byterev(unsigned long *valp, int size)
3107 *valp = byterev_2(*valp);
3110 *valp = byterev_4(*valp);
3112 #ifdef __powerpc64__
3114 *valp = byterev_8(*valp);
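/*
 * Worked example: with size == 4 the value 0x11223344 becomes
 * 0x44332211.  This helper serves both the byte-reversing instructions
 * (lwbrx/stwbrx and friends) and cross-endian emulation further down.
 */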
3121 * Emulate an instruction that can be executed just by updating register values.
3124 void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op)
3126 unsigned long next_pc;
3128 next_pc = truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op->type));
3129 switch (GETTYPE(op->type)) {
3131 if (op->type & SETREG)
3132 regs->gpr[op->reg] = op->val;
3133 if (op->type & SETCC)
3134 regs->ccr = op->ccval;
3135 if (op->type & SETXER)
3136 regs->xer = op->xerval;
3140 if (op->type & SETLK)
3141 regs->link = next_pc;
3142 if (op->type & BRTAKEN)
3144 if (op->type & DECCTR)
3149 switch (op->type & BARRIER_MASK) {
3159 case BARRIER_LWSYNC:
3160 asm volatile("lwsync" : : : "memory");
3162 case BARRIER_PTESYNC:
3163 asm volatile("ptesync" : : : "memory");
3171 regs->gpr[op->reg] = regs->xer & 0xffffffffUL;
3174 regs->gpr[op->reg] = regs->link;
3177 regs->gpr[op->reg] = regs->ctr;
3187 regs->xer = op->val & 0xffffffffUL;
3190 regs->link = op->val;
3193 regs->ctr = op->val;
3203 regs->nip = next_pc;
3205 NOKPROBE_SYMBOL(emulate_update_regs);
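/*
 * Usage sketch (assumption about a typical caller): after analyse_instr()
 * has decoded, say, an mflr, the op describes a pure register update, so
 * the caller only needs emulate_update_regs(), which copies regs->link
 * into the destination GPR and advances regs->nip past the instruction;
 * nothing outside of pt_regs is touched.
 */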
3208 * Emulate a previously-analysed load or store instruction.
3209 * Return values are:
3210 * 0 = instruction emulated successfully
3211 * -EFAULT = address out of range or access faulted (regs->dar
3212 * contains the faulting address)
3213 * -EACCES = misaligned access, instruction requires alignment
3214 * -EINVAL = unknown operation in *op
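/*
 * Typical call pattern (sketch, mirroring emulate_step() below):
 *
 *	if (analyse_instr(&op, regs, instr) == 0 &&
 *	    OP_IS_LOAD_STORE(GETTYPE(op.type)))
 *		err = emulate_loadstore(regs, &op);
 *
 * i.e. the op is always produced by analyse_instr() first, and -EFAULT
 * reports the faulting address through regs->dar as described above.
 */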
3216 int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op)
3218 int err, size, type;
3226 size = GETSIZE(op->type);
3227 type = GETTYPE(op->type);
3228 cross_endian = (regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE);
3229 ea = truncate_if_32bit(regs->msr, op->ea);
3233 if (ea & (size - 1))
3234 return -EACCES; /* can't handle misaligned */
3235 if (!address_ok(regs, ea, size))
3240 #ifdef __powerpc64__
3242 __get_user_asmx(val, ea, err, "lbarx");
3245 __get_user_asmx(val, ea, err, "lharx");
3249 __get_user_asmx(val, ea, err, "lwarx");
3251 #ifdef __powerpc64__
3253 __get_user_asmx(val, ea, err, "ldarx");
3256 err = do_lqarx(ea, &regs->gpr[op->reg]);
3267 regs->gpr[op->reg] = val;
3271 if (ea & (size - 1))
3272 return -EACCES; /* can't handle misaligned */
3273 if (!address_ok(regs, ea, size))
3277 #ifdef __powerpc64__
3279 __put_user_asmx(op->val, ea, err, "stbcx.", cr);
3282 __put_user_asmx(op->val, ea, err, "sthcx.", cr);
3286 __put_user_asmx(op->val, ea, err, "stwcx.", cr);
3288 #ifdef __powerpc64__
3290 __put_user_asmx(op->val, ea, err, "stdcx.", cr);
3293 err = do_stqcx(ea, regs->gpr[op->reg],
3294 regs->gpr[op->reg + 1], &cr);
3301 regs->ccr = (regs->ccr & 0x0fffffff) |
3303 ((regs->xer >> 3) & 0x10000000);
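/*
 * Worked example for the CR0 update above: the 0x0fffffff mask preserves
 * CR fields 1-7, the stcx. success/failure result itself comes from the
 * cr value collected by the inline asm above, and the XER
 * summary-overflow bit (0x80000000 in the XER image) is shifted right by
 * 3 so that it lands in the SO bit of CR0 (0x10000000).
 */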
3309 #ifdef __powerpc64__
3311 err = emulate_lq(regs, ea, op->reg, cross_endian);
3315 err = read_mem(&regs->gpr[op->reg], ea, size, regs);
3317 if (op->type & SIGNEXT)
3318 do_signext(&regs->gpr[op->reg], size);
3319 if ((op->type & BYTEREV) == (cross_endian ? 0 : BYTEREV))
3320 do_byterev(&regs->gpr[op->reg], size);
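/*
 * Example of the test above (illustrative): for a plain lwz, BYTEREV is
 * clear, so the value is reversed only when the emulated context runs
 * with the opposite endianness to the kernel (cross_endian); for lwbrx,
 * BYTEREV is set and the reversal happens only in the same-endian case.
 * The two conditions are mutually exclusive, so a value is never
 * reversed twice.
 */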
3324 #ifdef CONFIG_PPC_FPU
3327 * If the instruction is in userspace, we can emulate it even
3328 * if the FP/vector state is not live, because we have the state
3329 * stored in the thread_struct. If the instruction is in
3330 * the kernel, we must not touch the state in the thread_struct.
3332 if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP))
3334 err = do_fp_load(op, ea, regs, cross_endian);
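/*
 * Example (illustrative): a userspace lfd can be emulated here even with
 * MSR_FP off, because the user's FP image is kept in current->thread;
 * for a kernel-mode lfd with MSR_FP off, the thread_struct still holds
 * the interrupted user context's registers and must be left alone, as
 * the comment above explains.
 */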
3337 #ifdef CONFIG_ALTIVEC
3339 if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC))
3341 err = do_vec_load(op->reg, ea, size, regs, cross_endian);
3346 unsigned long msrbit = MSR_VSX;
3349 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
3350 * when the target of the instruction is a vector register.
3352 if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC))
3354 if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit))
3356 err = do_vsx_load(op, ea, regs, cross_endian);
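/*
 * Example (illustrative): an lxv whose target is VSR 34 overlays vector
 * register VR2, which is why instructions flagged VSX_CHECK_VEC switch
 * the test above from MSR_VSX to MSR_VEC once op->reg >= 32; the lower
 * thirty-two VSRs overlay the FP registers and keep the MSR_VSX check.
 */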
3361 if (!address_ok(regs, ea, size))
3364 for (i = 0; i < size; i += 4) {
3365 unsigned int v32 = 0;
3370 err = copy_mem_in((u8 *) &v32, ea, nb, regs);
3373 if (unlikely(cross_endian))
3374 v32 = byterev_4(v32);
3375 regs->gpr[rd] = v32;
3377 /* reg number wraps from 31 to 0 for lsw[ix] */
3378 rd = (rd + 1) & 0x1f;
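/*
 * Worked example (hypothetical operands): "lswi r30,r4,9" fills r30 and
 * r31 with the first eight bytes and then wraps to r0 for the ninth;
 * because v32 starts at 0 above, the unused bytes of that final register
 * end up cleared.
 */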
3383 #ifdef __powerpc64__
3385 err = emulate_stq(regs, ea, op->reg, cross_endian);
3389 if ((op->type & UPDATE) && size == sizeof(long) &&
3390 op->reg == 1 && op->update_reg == 1 &&
3391 !(regs->msr & MSR_PR) &&
3392 ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) {
3393 err = handle_stack_update(ea, regs);
3396 if (unlikely(cross_endian))
3397 do_byterev(&op->val, size);
3398 err = write_mem(op->val, ea, size, regs);
3401 #ifdef CONFIG_PPC_FPU
3403 if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP))
3405 err = do_fp_store(op, ea, regs, cross_endian);
3408 #ifdef CONFIG_ALTIVEC
3410 if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC))
3412 err = do_vec_store(op->reg, ea, size, regs, cross_endian);
3417 unsigned long msrbit = MSR_VSX;
3420 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
3421 * when the target of the instruction is a vector register.
3423 if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC))
3425 if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit))
3427 err = do_vsx_store(op, ea, regs, cross_endian);
3432 if (!address_ok(regs, ea, size))
3435 for (i = 0; i < size; i += 4) {
3436 unsigned int v32 = regs->gpr[rd];
3441 if (unlikely(cross_endian))
3442 v32 = byterev_4(v32);
3443 err = copy_mem_out((u8 *) &v32, ea, nb, regs);
3447 /* reg number wraps from 31 to 0 for stsw[ix] */
3448 rd = (rd + 1) & 0x1f;
3459 if (op->type & UPDATE)
3460 regs->gpr[op->update_reg] = op->ea;
3464 NOKPROBE_SYMBOL(emulate_loadstore);
3467 * Emulate instructions that cause a transfer of control,
3468 * loads and stores, and a few other instructions.
3469 * Returns 1 if the step was emulated, 0 if not,
3470 * or -1 if the instruction is one that should not be stepped,
3471 * such as an rfid, or a mtmsrd that would clear MSR_RI.
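/*
 * Usage sketch (assumption about a typical caller, e.g. the kprobes
 * single-step path): if emulate_step() returns 1 the register state
 * already reflects the completed instruction and no hardware single-step
 * is required; 0 means the caller must execute the instruction by some
 * other means, and -1 means it must not be stepped at all.
 */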
3473 int emulate_step(struct pt_regs *regs, struct ppc_inst instr)
3475 struct instruction_op op;
3480 r = analyse_instr(&op, regs, instr);
3484 emulate_update_regs(regs, &op);
3489 type = GETTYPE(op.type);
3491 if (OP_IS_LOAD_STORE(type)) {
3492 err = emulate_loadstore(regs, &op);
3500 ea = truncate_if_32bit(regs->msr, op.ea);
3501 if (!address_ok(regs, ea, 8))
3503 switch (op.type & CACHEOP_MASK) {
3505 __cacheop_user_asmx(ea, err, "dcbst");
3508 __cacheop_user_asmx(ea, err, "dcbf");
3512 prefetchw((void *) ea);
3516 prefetch((void *) ea);
3519 __cacheop_user_asmx(ea, err, "icbi");
3522 err = emulate_dcbz(ea, regs);
3532 regs->gpr[op.reg] = regs->msr & MSR_MASK;
3536 val = regs->gpr[op.reg];
3537 if ((val & MSR_RI) == 0)
3538 /* can't step mtmsr[d] that would clear MSR_RI */
3540 /* here op.val is the mask of bits to change */
3541 regs->msr = (regs->msr & ~op.val) | (val & op.val);
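/*
 * Worked example: for mtmsrd with L = 1 the analysis phase sets op.val
 * to MSR_EE | MSR_RI (per the ISA, that form may change only those two
 * bits), so the statement above replaces exactly the EE and RI bits of
 * the MSR with the corresponding bits of the source GPR and leaves the
 * rest untouched.
 */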
3545 case SYSCALL: /* sc */
3547 * N.B. this uses knowledge about how the syscall
3548 * entry code works. If that is changed, this will
3549 * need to be changed also.
3551 if (IS_ENABLED(CONFIG_PPC_FAST_ENDIAN_SWITCH) &&
3552 cpu_has_feature(CPU_FTR_REAL_LE) &&
3553 regs->gpr[0] == 0x1ebe) {
3554 regs->msr ^= MSR_LE;
3557 regs->gpr[9] = regs->gpr[13];
3558 regs->gpr[10] = MSR_KERNEL;
3559 regs->gpr[11] = regs->nip + 4;
3560 regs->gpr[12] = regs->msr & MSR_MASK;
3561 regs->gpr[13] = (unsigned long) get_paca();
3562 regs->nip = (unsigned long) &system_call_common;
3563 regs->msr = MSR_KERNEL;
3566 #ifdef CONFIG_PPC_BOOK3S_64
3567 case SYSCALL_VECTORED_0: /* scv 0 */
3568 regs->gpr[9] = regs->gpr[13];
3569 regs->gpr[10] = MSR_KERNEL;
3570 regs->gpr[11] = regs->nip + 4;
3571 regs->gpr[12] = regs->msr & MSR_MASK;
3572 regs->gpr[13] = (unsigned long) get_paca();
3573 regs->nip = (unsigned long) &system_call_vectored_emulate;
3574 regs->msr = MSR_KERNEL;
3585 regs->nip = truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op.type));
3588 NOKPROBE_SYMBOL(emulate_step);