1 // SPDX-License-Identifier: GPL-2.0-or-later
5 * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
7 #include <linux/kernel.h>
8 #include <linux/kprobes.h>
9 #include <linux/ptrace.h>
10 #include <linux/prefetch.h>
11 #include <asm/sstep.h>
12 #include <asm/processor.h>
13 #include <linux/uaccess.h>
14 #include <asm/cpu_has_feature.h>
15 #include <asm/cputable.h>
16 #include <asm/disassemble.h>
18 extern char system_call_common[];
19 extern char system_call_vectored_emulate[];
22 /* Bits in SRR1 that are copied from MSR */
23 #define MSR_MASK 0xffffffff87c0ffffUL
25 #define MSR_MASK 0x87c0ffff
29 #define XER_SO 0x80000000U
30 #define XER_OV 0x40000000U
31 #define XER_CA 0x20000000U
32 #define XER_OV32 0x00080000U
33 #define XER_CA32 0x00040000U
36 #define VSX_REGISTER_XTP(rd) ((((rd) & 1) << 5) | ((rd) & 0xfe))
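/*
 * Illustrative arithmetic: VSX_REGISTER_XTP() moves the low bit of the
 * paired-register field up to bit 5 and clears it in the low half, so
 * VSX_REGISTER_XTP(2) == 2 and VSX_REGISTER_XTP(3) == 34.
 */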
41 * Functions in ldstfp.S
43 extern void get_fpr(int rn, double *p);
44 extern void put_fpr(int rn, const double *p);
45 extern void get_vr(int rn, __vector128 *p);
46 extern void put_vr(int rn, __vector128 *p);
47 extern void load_vsrn(int vsr, const void *p);
48 extern void store_vsrn(int vsr, void *p);
49 extern void conv_sp_to_dp(const float *sp, double *dp);
50 extern void conv_dp_to_sp(const double *dp, float *sp);
57 extern int do_lq(unsigned long ea, unsigned long *regs);
58 extern int do_stq(unsigned long ea, unsigned long val0, unsigned long val1);
59 extern int do_lqarx(unsigned long ea, unsigned long *regs);
60 extern int do_stqcx(unsigned long ea, unsigned long val0, unsigned long val1,
64 #ifdef __LITTLE_ENDIAN__
73 * Emulate the truncation of 64-bit values in 32-bit mode.
75 static nokprobe_inline unsigned long truncate_if_32bit(unsigned long msr,
79 if ((msr & MSR_64BIT) == 0)
80 val &= 0xffffffffUL;
82 return val;
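/*
 * Illustrative example: with MSR_64BIT clear,
 * truncate_if_32bit(msr, 0x123456789UL) returns 0x23456789UL; with
 * MSR_64BIT set the value comes back unchanged.
 */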
86 * Determine whether a conditional branch instruction would branch.
88 static nokprobe_inline int branch_taken(unsigned int instr,
89 const struct pt_regs *regs,
90 struct instruction_op *op)
92 unsigned int bo = (instr >> 21) & 0x1f;
96 /* decrement counter */
98 if (((bo >> 1) & 1) ^ (regs->ctr == 1))
101 if ((bo & 0x10) == 0) {
102 /* check bit from CR */
103 bi = (instr >> 16) & 0x1f;
104 if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))
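/*
 * Illustrative BO values (bit 0 is the least significant bit, matching
 * the shifts above): BO = 20 (0b10100) branches unconditionally;
 * BO = 12 (0b01100) branches when the CR bit selected by BI is 1;
 * BO values with bit 2 clear also decrement CTR and test it via bit 1.
 */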
110 static nokprobe_inline long address_ok(struct pt_regs *regs,
111 unsigned long ea, int nb)
113 if (!user_mode(regs))
115 if (__access_ok(ea, nb))
117 if (__access_ok(ea, 1))
118 /* Access overlaps the end of the user region */
119 regs->dar = TASK_SIZE_MAX - 1;
126 * Calculate effective address for a D-form instruction
128 static nokprobe_inline unsigned long dform_ea(unsigned int instr,
129 const struct pt_regs *regs)
134 ra = (instr >> 16) & 0x1f;
135 ea = (signed short) instr; /* sign-extend */
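/*
 * Illustrative example: for "lwz r3,-8(r4)" the 16-bit displacement is
 * sign-extended to -8, and since RA is non-zero the base register is
 * added, giving EA = GPR[4] - 8; an RA of 0 contributes 0, not GPR[0].
 */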
144 * Calculate effective address for a DS-form instruction
146 static nokprobe_inline unsigned long dsform_ea(unsigned int instr,
147 const struct pt_regs *regs)
152 ra = (instr >> 16) & 0x1f;
153 ea = (signed short) (instr & ~3); /* sign-extend */
161 * Calculate effective address for a DQ-form instruction
163 static nokprobe_inline unsigned long dqform_ea(unsigned int instr,
164 const struct pt_regs *regs)
169 ra = (instr >> 16) & 0x1f;
170 ea = (signed short) (instr & ~0xf); /* sign-extend */
176 #endif /* __powerpc64__ */
179 * Calculate effective address for an X-form instruction
181 static nokprobe_inline unsigned long xform_ea(unsigned int instr,
182 const struct pt_regs *regs)
187 ra = (instr >> 16) & 0x1f;
188 rb = (instr >> 11) & 0x1f;
197 * Calculate effective address for a MLS:D-form / 8LS:D-form
198 * prefixed instruction
200 static nokprobe_inline unsigned long mlsd_8lsd_ea(unsigned int instr,
202 const struct pt_regs *regs)
206 unsigned long ea, d0, d1, d;
208 prefix_r = GET_PREFIX_R(instr);
209 ra = GET_PREFIX_RA(suffix);
211 d0 = instr & 0x3ffff;
212 d1 = suffix & 0xffff;
213 d = (d0 << 16) | d1;
216 * sign extend a 34 bit number
218 dd = (unsigned int)(d >> 2);
219 ea = (signed int)dd;
220 ea = (ea << 2) | (d & 0x3);
224 else if (!prefix_r && !ra)
225 ; /* Leave ea as is */
230 * (prefix_r && ra) is an invalid form. Should already be
231 * checked for by caller!
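/*
 * Illustrative example: a prefixed load with d0 = 0x00001 and d1 = 0x0004
 * forms the 34-bit displacement 0x10004, which after sign extension is
 * added to GPR[RA], or to the NIP when R = 1 with RA = 0.
 */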
238 * Return the largest power of 2, not greater than sizeof(unsigned long),
239 * such that x is a multiple of it.
241 static nokprobe_inline unsigned long max_align(unsigned long x)
243 x |= sizeof(unsigned long);
244 return x & -x; /* isolates rightmost bit */
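/*
 * Illustrative values on a 64-bit build: max_align(6) == 2,
 * max_align(0) == 8 and max_align(48) == 8 (capped at sizeof(long)).
 */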
247 static nokprobe_inline unsigned long byterev_2(unsigned long x)
249 return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
252 static nokprobe_inline unsigned long byterev_4(unsigned long x)
254 return ((x >> 24) & 0xff) | ((x >> 8) & 0xff00) |
255 ((x & 0xff00) << 8) | ((x & 0xff) << 24);
259 static nokprobe_inline unsigned long byterev_8(unsigned long x)
261 return (byterev_4(x) << 32) | byterev_4(x >> 32);
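/*
 * Illustrative values: byterev_2(0x1234) == 0x3412,
 * byterev_4(0x12345678) == 0x78563412 and
 * byterev_8(0x0102030405060708) == 0x0807060504030201.
 */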
265 static nokprobe_inline void do_byte_reverse(void *ptr, int nb)
269 *(u16 *)ptr = byterev_2(*(u16 *)ptr);
272 *(u32 *)ptr = byterev_4(*(u32 *)ptr);
276 *(unsigned long *)ptr = byterev_8(*(unsigned long *)ptr);
279 unsigned long *up = (unsigned long *)ptr;
281 tmp = byterev_8(up[0]);
282 up[0] = byterev_8(up[1]);
287 unsigned long *up = (unsigned long *)ptr;
290 tmp = byterev_8(up[0]);
291 up[0] = byterev_8(up[3]);
293 tmp = byterev_8(up[2]);
294 up[2] = byterev_8(up[1]);
305 static nokprobe_inline int read_mem_aligned(unsigned long *dest,
306 unsigned long ea, int nb,
307 struct pt_regs *regs)
314 err = __get_user(x, (unsigned char __user *) ea);
317 err = __get_user(x, (unsigned short __user *) ea);
320 err = __get_user(x, (unsigned int __user *) ea);
324 err = __get_user(x, (unsigned long __user *) ea);
336 * Copy from userspace to a buffer, using the largest possible
337 * aligned accesses, up to sizeof(long).
339 static nokprobe_inline int copy_mem_in(u8 *dest, unsigned long ea, int nb,
340 struct pt_regs *regs)
345 for (; nb > 0; nb -= c) {
351 err = __get_user(*dest, (unsigned char __user *) ea);
354 err = __get_user(*(u16 *)dest,
355 (unsigned short __user *) ea);
358 err = __get_user(*(u32 *)dest,
359 (unsigned int __user *) ea);
363 err = __get_user(*(unsigned long *)dest,
364 (unsigned long __user *) ea);
378 static nokprobe_inline int read_mem_unaligned(unsigned long *dest,
379 unsigned long ea, int nb,
380 struct pt_regs *regs)
384 u8 b[sizeof(unsigned long)];
390 i = IS_BE ? sizeof(unsigned long) - nb : 0;
391 err = copy_mem_in(&u.b[i], ea, nb, regs);
398 * Read memory at address ea for nb bytes, return 0 for success
399 * or -EFAULT if an error occurred. N.B. nb must be 1, 2, 4 or 8.
400 * If nb < sizeof(long), the result is right-justified on BE systems.
402 static int read_mem(unsigned long *dest, unsigned long ea, int nb,
403 struct pt_regs *regs)
405 if (!address_ok(regs, ea, nb))
407 if ((ea & (nb - 1)) == 0)
408 return read_mem_aligned(dest, ea, nb, regs);
409 return read_mem_unaligned(dest, ea, nb, regs);
411 NOKPROBE_SYMBOL(read_mem);
413 static nokprobe_inline int write_mem_aligned(unsigned long val,
414 unsigned long ea, int nb,
415 struct pt_regs *regs)
421 err = __put_user(val, (unsigned char __user *) ea);
424 err = __put_user(val, (unsigned short __user *) ea);
427 err = __put_user(val, (unsigned int __user *) ea);
431 err = __put_user(val, (unsigned long __user *) ea);
441 * Copy from a buffer to userspace, using the largest possible
442 * aligned accesses, up to sizeof(long).
444 static nokprobe_inline int copy_mem_out(u8 *dest, unsigned long ea, int nb,
445 struct pt_regs *regs)
450 for (; nb > 0; nb -= c) {
456 err = __put_user(*dest, (unsigned char __user *) ea);
459 err = __put_user(*(u16 *)dest,
460 (unsigned short __user *) ea);
463 err = __put_user(*(u32 *)dest,
464 (unsigned int __user *) ea);
468 err = __put_user(*(unsigned long *)dest,
469 (unsigned long __user *) ea);
483 static nokprobe_inline int write_mem_unaligned(unsigned long val,
484 unsigned long ea, int nb,
485 struct pt_regs *regs)
489 u8 b[sizeof(unsigned long)];
494 i = IS_BE ? sizeof(unsigned long) - nb : 0;
495 return copy_mem_out(&u.b[i], ea, nb, regs);
499 * Write memory at address ea for nb bytes, return 0 for success
500 * or -EFAULT if an error occurred. N.B. nb must be 1, 2, 4 or 8.
502 static int write_mem(unsigned long val, unsigned long ea, int nb,
503 struct pt_regs *regs)
505 if (!address_ok(regs, ea, nb))
507 if ((ea & (nb - 1)) == 0)
508 return write_mem_aligned(val, ea, nb, regs);
509 return write_mem_unaligned(val, ea, nb, regs);
511 NOKPROBE_SYMBOL(write_mem);
513 #ifdef CONFIG_PPC_FPU
515 * These access either the real FP register or the image in the
516 * thread_struct, depending on regs->msr & MSR_FP.
518 static int do_fp_load(struct instruction_op *op, unsigned long ea,
519 struct pt_regs *regs, bool cross_endian)
528 u8 b[2 * sizeof(double)];
531 nb = GETSIZE(op->type);
532 if (!address_ok(regs, ea, nb))
535 err = copy_mem_in(u.b, ea, nb, regs);
538 if (unlikely(cross_endian)) {
539 do_byte_reverse(u.b, min(nb, 8));
541 do_byte_reverse(&u.b[8], 8);
545 if (op->type & FPCONV)
546 conv_sp_to_dp(&u.f, &u.d[0]);
547 else if (op->type & SIGNEXT)
552 if (regs->msr & MSR_FP)
553 put_fpr(rn, &u.d[0]);
555 current->thread.TS_FPR(rn) = u.l[0];
559 if (regs->msr & MSR_FP)
560 put_fpr(rn, &u.d[1]);
562 current->thread.TS_FPR(rn) = u.l[1];
567 NOKPROBE_SYMBOL(do_fp_load);
569 static int do_fp_store(struct instruction_op *op, unsigned long ea,
570 struct pt_regs *regs, bool cross_endian)
578 u8 b[2 * sizeof(double)];
581 nb = GETSIZE(op->type);
582 if (!address_ok(regs, ea, nb))
586 if (regs->msr & MSR_FP)
587 get_fpr(rn, &u.d[0]);
589 u.l[0] = current->thread.TS_FPR(rn);
591 if (op->type & FPCONV)
592 conv_dp_to_sp(&u.d[0], &u.f);
598 if (regs->msr & MSR_FP)
599 get_fpr(rn, &u.d[1]);
601 u.l[1] = current->thread.TS_FPR(rn);
604 if (unlikely(cross_endian)) {
605 do_byte_reverse(u.b, min(nb, 8));
607 do_byte_reverse(&u.b[8], 8);
609 return copy_mem_out(u.b, ea, nb, regs);
611 NOKPROBE_SYMBOL(do_fp_store);
614 #ifdef CONFIG_ALTIVEC
615 /* For Altivec/VMX, no need to worry about alignment */
616 static nokprobe_inline int do_vec_load(int rn, unsigned long ea,
617 int size, struct pt_regs *regs,
623 u8 b[sizeof(__vector128)];
626 if (!address_ok(regs, ea & ~0xfUL, 16))
628 /* align to multiple of size */
630 err = copy_mem_in(&u.b[ea & 0xf], ea, size, regs);
633 if (unlikely(cross_endian))
634 do_byte_reverse(&u.b[ea & 0xf], size);
636 if (regs->msr & MSR_VEC)
639 current->thread.vr_state.vr[rn] = u.v;
644 static nokprobe_inline int do_vec_store(int rn, unsigned long ea,
645 int size, struct pt_regs *regs,
650 u8 b[sizeof(__vector128)];
653 if (!address_ok(regs, ea & ~0xfUL, 16))
655 /* align to multiple of size */
659 if (regs->msr & MSR_VEC)
662 u.v = current->thread.vr_state.vr[rn];
664 if (unlikely(cross_endian))
665 do_byte_reverse(&u.b[ea & 0xf], size);
666 return copy_mem_out(&u.b[ea & 0xf], ea, size, regs);
668 #endif /* CONFIG_ALTIVEC */
671 static nokprobe_inline int emulate_lq(struct pt_regs *regs, unsigned long ea,
672 int reg, bool cross_endian)
676 if (!address_ok(regs, ea, 16))
678 /* if aligned, should be atomic */
679 if ((ea & 0xf) == 0) {
680 err = do_lq(ea, &regs->gpr[reg]);
682 err = read_mem(&regs->gpr[reg + IS_LE], ea, 8, regs);
684 err = read_mem(&regs->gpr[reg + IS_BE], ea + 8, 8, regs);
686 if (!err && unlikely(cross_endian))
687 do_byte_reverse(&regs->gpr[reg], 16);
691 static nokprobe_inline int emulate_stq(struct pt_regs *regs, unsigned long ea,
692 int reg, bool cross_endian)
695 unsigned long vals[2];
697 if (!address_ok(regs, ea, 16))
699 vals[0] = regs->gpr[reg];
700 vals[1] = regs->gpr[reg + 1];
701 if (unlikely(cross_endian))
702 do_byte_reverse(vals, 16);
704 /* if aligned, should be atomic */
706 return do_stq(ea, vals[0], vals[1]);
708 err = write_mem(vals[IS_LE], ea, 8, regs);
710 err = write_mem(vals[IS_BE], ea + 8, 8, regs);
713 #endif /* __powerpc64__ */
716 void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
717 const void *mem, bool rev)
721 const unsigned int *wp;
722 const unsigned short *hp;
723 const unsigned char *bp;
725 size = GETSIZE(op->type);
726 reg->d[0] = reg->d[1] = 0;
728 switch (op->element_size) {
732 /* whole vector; lxv[x] or lxvl[l] */
735 memcpy(reg, mem, size);
736 if (IS_LE && (op->vsx_flags & VSX_LDLEFT))
739 do_byte_reverse(reg, size);
742 /* scalar loads, lxvd2x, lxvdsx */
743 read_size = (size >= 8) ? 8 : size;
744 i = IS_LE ? 8 : 8 - read_size;
745 memcpy(&reg->b[i], mem, read_size);
747 do_byte_reverse(&reg->b[i], 8);
749 if (op->type & SIGNEXT) {
750 /* size == 4 is the only case here */
751 reg->d[IS_LE] = (signed int) reg->d[IS_LE];
752 } else if (op->vsx_flags & VSX_FPCONV) {
754 conv_sp_to_dp(&reg->fp[1 + IS_LE],
755 &reg->dp[IS_LE]);
760 unsigned long v = *(unsigned long *)(mem + 8);
761 reg->d[IS_BE] = !rev ? v : byterev_8(v);
762 } else if (op->vsx_flags & VSX_SPLAT)
763 reg->d[IS_BE] = reg->d[IS_LE];
769 for (j = 0; j < size / 4; ++j) {
770 i = IS_LE ? 3 - j : j;
771 reg->w[i] = !rev ? *wp++ : byterev_4(*wp++);
773 if (op->vsx_flags & VSX_SPLAT) {
774 u32 val = reg->w[IS_LE ? 3 : 0];
776 i = IS_LE ? 3 - j : j;
784 for (j = 0; j < size / 2; ++j) {
785 i = IS_LE ? 7 - j : j;
786 reg->h[i] = !rev ? *hp++ : byterev_2(*hp++);
792 for (j = 0; j < size; ++j) {
793 i = IS_LE ? 15 - j : j;
799 EXPORT_SYMBOL_GPL(emulate_vsx_load);
800 NOKPROBE_SYMBOL(emulate_vsx_load);
802 void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg,
805 int size, write_size;
812 size = GETSIZE(op->type);
814 switch (op->element_size) {
820 /* reverse 32 bytes */
821 union vsx_reg buf32[2];
822 buf32[0].d[0] = byterev_8(reg[1].d[1]);
823 buf32[0].d[1] = byterev_8(reg[1].d[0]);
824 buf32[1].d[0] = byterev_8(reg[0].d[1]);
825 buf32[1].d[1] = byterev_8(reg[0].d[0]);
826 memcpy(mem, buf32, size);
828 memcpy(mem, reg, size);
832 /* stxv, stxvx, stxvl, stxvll */
835 if (IS_LE && (op->vsx_flags & VSX_LDLEFT))
838 /* reverse 16 bytes */
839 buf.d[0] = byterev_8(reg->d[1]);
840 buf.d[1] = byterev_8(reg->d[0]);
843 memcpy(mem, reg, size);
846 /* scalar stores, stxvd2x */
847 write_size = (size >= 8) ? 8 : size;
848 i = IS_LE ? 8 : 8 - write_size;
849 if (size < 8 && op->vsx_flags & VSX_FPCONV) {
850 buf.d[0] = buf.d[1] = 0;
852 conv_dp_to_sp(&reg->dp[IS_LE], &buf.fp[1 + IS_LE]);
856 memcpy(mem, &reg->b[i], write_size);
858 memcpy(mem + 8, &reg->d[IS_BE], 8);
860 do_byte_reverse(mem, write_size);
862 do_byte_reverse(mem + 8, 8);
868 for (j = 0; j < size / 4; ++j) {
869 i = IS_LE ? 3 - j : j;
870 *wp++ = !rev ? reg->w[i] : byterev_4(reg->w[i]);
876 for (j = 0; j < size / 2; ++j) {
877 i = IS_LE ? 7 - j : j;
878 *hp++ = !rev ? reg->h[i] : byterev_2(reg->h[i]);
884 for (j = 0; j < size; ++j) {
885 i = IS_LE ? 15 - j : j;
891 EXPORT_SYMBOL_GPL(emulate_vsx_store);
892 NOKPROBE_SYMBOL(emulate_vsx_store);
894 static nokprobe_inline int do_vsx_load(struct instruction_op *op,
895 unsigned long ea, struct pt_regs *regs,
899 int i, j, nr_vsx_regs;
901 union vsx_reg buf[2];
902 int size = GETSIZE(op->type);
904 if (!address_ok(regs, ea, size) || copy_mem_in(mem, ea, size, regs))
907 nr_vsx_regs = max(1ul, size / sizeof(__vector128));
908 emulate_vsx_load(op, buf, mem, cross_endian);
911 /* FP regs + extensions */
912 if (regs->msr & MSR_FP) {
913 for (i = 0; i < nr_vsx_regs; i++) {
914 j = IS_LE ? nr_vsx_regs - i - 1 : i;
915 load_vsrn(reg + i, &buf[j].v);
918 for (i = 0; i < nr_vsx_regs; i++) {
919 j = IS_LE ? nr_vsx_regs - i - 1 : i;
920 current->thread.fp_state.fpr[reg + i][0] = buf[j].d[0];
921 current->thread.fp_state.fpr[reg + i][1] = buf[j].d[1];
925 if (regs->msr & MSR_VEC) {
926 for (i = 0; i < nr_vsx_regs; i++) {
927 j = IS_LE ? nr_vsx_regs - i - 1 : i;
928 load_vsrn(reg + i, &buf[j].v);
931 for (i = 0; i < nr_vsx_regs; i++) {
932 j = IS_LE ? nr_vsx_regs - i - 1 : i;
933 current->thread.vr_state.vr[reg - 32 + i] = buf[j].v;
941 static nokprobe_inline int do_vsx_store(struct instruction_op *op,
942 unsigned long ea, struct pt_regs *regs,
946 int i, j, nr_vsx_regs;
948 union vsx_reg buf[2];
949 int size = GETSIZE(op->type);
951 if (!address_ok(regs, ea, size))
954 nr_vsx_regs = max(1ul, size / sizeof(__vector128));
957 /* FP regs + extensions */
958 if (regs->msr & MSR_FP) {
959 for (i = 0; i < nr_vsx_regs; i++) {
960 j = IS_LE ? nr_vsx_regs - i - 1 : i;
961 store_vsrn(reg + i, &buf[j].v);
964 for (i = 0; i < nr_vsx_regs; i++) {
965 j = IS_LE ? nr_vsx_regs - i - 1 : i;
966 buf[j].d[0] = current->thread.fp_state.fpr[reg + i][0];
967 buf[j].d[1] = current->thread.fp_state.fpr[reg + i][1];
971 if (regs->msr & MSR_VEC) {
972 for (i = 0; i < nr_vsx_regs; i++) {
973 j = IS_LE ? nr_vsx_regs - i - 1 : i;
974 store_vsrn(reg + i, &buf[j].v);
977 for (i = 0; i < nr_vsx_regs; i++) {
978 j = IS_LE ? nr_vsx_regs - i - 1 : i;
979 buf[j].v = current->thread.vr_state.vr[reg - 32 + i];
984 emulate_vsx_store(op, buf, mem, cross_endian);
985 return copy_mem_out(mem, ea, size, regs);
987 #endif /* CONFIG_VSX */
989 int emulate_dcbz(unsigned long ea, struct pt_regs *regs)
992 unsigned long i, size;
995 size = ppc64_caches.l1d.block_size;
996 if (!(regs->msr & MSR_64BIT))
999 size = L1_CACHE_BYTES;
1002 if (!address_ok(regs, ea, size))
1004 for (i = 0; i < size; i += sizeof(long)) {
1005 err = __put_user(0, (unsigned long __user *) (ea + i));
1013 NOKPROBE_SYMBOL(emulate_dcbz);
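/*
 * Illustrative behaviour: on a CPU with 128-byte L1 data-cache blocks,
 * emulate_dcbz() clears the whole 128-byte block containing ea, one
 * long at a time, via __put_user().
 */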
1015 #define __put_user_asmx(x, addr, err, op, cr) \
1016 __asm__ __volatile__( \
1017 "1: " op " %2,0,%3\n" \
1020 ".section .fixup,\"ax\"\n" \
1025 : "=r" (err), "=r" (cr) \
1026 : "r" (x), "r" (addr), "i" (-EFAULT), "0" (err))
1028 #define __get_user_asmx(x, addr, err, op) \
1029 __asm__ __volatile__( \
1030 "1: "op" %1,0,%2\n" \
1032 ".section .fixup,\"ax\"\n" \
1037 : "=r" (err), "=r" (x) \
1038 : "r" (addr), "i" (-EFAULT), "0" (err))
1040 #define __cacheop_user_asmx(addr, err, op) \
1041 __asm__ __volatile__( \
1044 ".section .fixup,\"ax\"\n" \
1050 : "r" (addr), "i" (-EFAULT), "0" (err))
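/*
 * Illustrative use of the helpers above: __get_user_asmx(val, ea, err, "lwarx")
 * executes lwarx on the user address in ea, placing the result in val and
 * leaving err at 0, or setting err to -EFAULT via the fixup entry if the
 * access faults.
 */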
1052 static nokprobe_inline void set_cr0(const struct pt_regs *regs,
1053 struct instruction_op *op)
1058 op->ccval = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
1059 #ifdef __powerpc64__
1060 if (!(regs->msr & MSR_64BIT))
1064 op->ccval |= 0x80000000;
1066 op->ccval |= 0x40000000;
1068 op->ccval |= 0x20000000;
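/*
 * Illustrative example: a result of -5 with XER[SO] set yields
 * CR0 = 0b1001 (LT | SO), while a result of 0 with XER[SO] clear
 * yields CR0 = 0b0010 (EQ).
 */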
1071 static nokprobe_inline void set_ca32(struct instruction_op *op, bool val)
1073 if (cpu_has_feature(CPU_FTR_ARCH_300)) {
1075 op->xerval |= XER_CA32;
1077 op->xerval &= ~XER_CA32;
1081 static nokprobe_inline void add_with_carry(const struct pt_regs *regs,
1082 struct instruction_op *op, int rd,
1083 unsigned long val1, unsigned long val2,
1084 unsigned long carry_in)
1086 unsigned long val = val1 + val2;
1090 op->type = COMPUTE + SETREG + SETXER;
1093 #ifdef __powerpc64__
1094 if (!(regs->msr & MSR_64BIT)) {
1095 val = (unsigned int) val;
1096 val1 = (unsigned int) val1;
1099 op->xerval = regs->xer;
1100 if (val < val1 || (carry_in && val == val1))
1101 op->xerval |= XER_CA;
1103 op->xerval &= ~XER_CA;
1105 set_ca32(op, (unsigned int)val < (unsigned int)val1 ||
1106 (carry_in && (unsigned int)val == (unsigned int)val1));
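/*
 * Illustrative example: in 32-bit mode, val1 = 0xffffffff plus val2 = 1
 * with no carry in wraps to 0, so val < val1 and both XER[CA] and
 * (on ISA 3.0) XER[CA32] get set.
 */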
1109 static nokprobe_inline void do_cmp_signed(const struct pt_regs *regs,
1110 struct instruction_op *op,
1111 long v1, long v2, int crfld)
1113 unsigned int crval, shift;
1115 op->type = COMPUTE + SETCC;
1116 crval = (regs->xer >> 31) & 1; /* get SO bit */
1123 shift = (7 - crfld) * 4;
1124 op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift);
1127 static nokprobe_inline void do_cmp_unsigned(const struct pt_regs *regs,
1128 struct instruction_op *op,
1130 unsigned long v2, int crfld)
1132 unsigned int crval, shift;
1134 op->type = COMPUTE + SETCC;
1135 crval = (regs->xer >> 31) & 1; /* get SO bit */
1142 shift = (7 - crfld) * 4;
1143 op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift);
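/*
 * Illustrative example: cmpld cr2,rA,rB with rA < rB and XER[SO] clear
 * produces crval = 0b1000 (LT); shift = (7 - 2) * 4 = 20, so the four
 * bits of the CR2 field are replaced with that value.
 */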
1146 static nokprobe_inline void do_cmpb(const struct pt_regs *regs,
1147 struct instruction_op *op,
1148 unsigned long v1, unsigned long v2)
1150 unsigned long long out_val, mask;
1154 for (i = 0; i < 8; i++) {
1155 mask = 0xffUL << (i * 8);
1156 if ((v1 & mask) == (v2 & mask))
1163 * The size parameter is used to adjust the equivalent popcnt instruction.
1164 * popcntb = 8, popcntw = 32, popcntd = 64
1166 static nokprobe_inline void do_popcnt(const struct pt_regs *regs,
1167 struct instruction_op *op,
1168 unsigned long v1, int size)
1170 unsigned long long out = v1;
1172 out -= (out >> 1) & 0x5555555555555555ULL;
1173 out = (0x3333333333333333ULL & out) +
1174 (0x3333333333333333ULL & (out >> 2));
1175 out = (out + (out >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
1177 if (size == 8) { /* popcntb */
1183 if (size == 32) { /* popcntw */
1184 op->val = out & 0x0000003f0000003fULL;
1188 out = (out + (out >> 32)) & 0x7f;
1189 op->val = out; /* popcntd */
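/*
 * Illustrative example: for popcntb with v1 = 0xff, the bit-sliced
 * reduction above leaves 8 in the low byte of op->val; each byte of the
 * result holds the population count of the corresponding source byte.
 */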
1193 static nokprobe_inline void do_bpermd(const struct pt_regs *regs,
1194 struct instruction_op *op,
1195 unsigned long v1, unsigned long v2)
1197 unsigned char perm, idx;
1201 for (i = 0; i < 8; i++) {
1202 idx = (v1 >> (i * 8)) & 0xff;
1204 if (v2 & PPC_BIT(idx))
1209 #endif /* CONFIG_PPC64 */
1211 * The size parameter adjusts the equivalent prty instruction.
1212 * prtyw = 32, prtyd = 64
1214 static nokprobe_inline void do_prty(const struct pt_regs *regs,
1215 struct instruction_op *op,
1216 unsigned long v, int size)
1218 unsigned long long res = v ^ (v >> 8);
1221 if (size == 32) { /* prtyw */
1222 op->val = res & 0x0000000100000001ULL;
1227 op->val = res & 1; /* prtyd */
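/*
 * Illustrative example: prtyd of 0x0101 (two bytes whose low bit is set)
 * gives 0, while prtyd of 0x01 gives 1; the XOR cascade above folds
 * together the least-significant bit of every byte.
 */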
1230 static nokprobe_inline int trap_compare(long v1, long v2)
1240 if ((unsigned long)v1 < (unsigned long)v2)
1242 else if ((unsigned long)v1 > (unsigned long)v2)
1248 * Elements of 32-bit rotate and mask instructions.
1250 #define MASK32(mb, me) ((0xffffffffUL >> (mb)) + \
1251 ((signed long)-0x80000000L >> (me)) + ((me) >= (mb)))
1252 #ifdef __powerpc64__
1253 #define MASK64_L(mb) (~0UL >> (mb))
1254 #define MASK64_R(me) ((signed long)-0x8000000000000000L >> (me))
1255 #define MASK64(mb, me) (MASK64_L(mb) + MASK64_R(me) + ((me) >= (mb)))
1256 #define DATA32(x) (((x) & 0xffffffffUL) | (((x) & 0xffffffffUL) << 32))
1258 #define DATA32(x) (x)
1260 #define ROTATE(x, n) ((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x))
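/*
 * Illustrative values: MASK32(16, 23) == 0x0000ff00 (ones in IBM bits
 * 16-23), and ROTATE(0x1UL, 4) == 0x10; the n == 0 special case in
 * ROTATE avoids an undefined shift by the full word width.
 */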
1263 * Decode an instruction, and return information about it in *op
1264 * without changing *regs.
1265 * Integer arithmetic and logical instructions, branches, and barrier
1266 * instructions can be emulated just using the information in *op.
1268 * Return value is 1 if the instruction can be emulated just by
1269 * updating *regs with the information in *op, -1 if we need the
1270 * GPRs but *regs doesn't contain the full register set, or 0
1271 * otherwise.
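/*
 * Illustrative caller sketch, using only helpers defined in this file;
 * a typical emulation path looks like:
 *
 *	struct instruction_op op;
 *	int r = analyse_instr(&op, regs, instr);
 *
 *	if (r > 0)
 *		emulate_update_regs(regs, &op);
 *	else if (r == 0 && OP_IS_LOAD_STORE(op.type))
 *		err = emulate_loadstore(regs, &op);
 */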
1273 int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
1274 struct ppc_inst instr)
1277 unsigned int suffixopcode, prefixtype, prefix_r;
1279 unsigned int opcode, ra, rb, rc, rd, spr, u;
1280 unsigned long int imm;
1281 unsigned long int val, val2;
1282 unsigned int mb, me, sh;
1283 unsigned int word, suffix;
1286 word = ppc_inst_val(instr);
1287 suffix = ppc_inst_suffix(instr);
1291 opcode = ppc_inst_primary_opcode(instr);
1295 imm = (signed short)(word & 0xfffc);
1296 if ((word & 2) == 0)
1298 op->val = truncate_if_32bit(regs->msr, imm);
1301 if (branch_taken(word, regs, op))
1302 op->type |= BRTAKEN;
1306 if ((word & 0xfe2) == 2)
1308 else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
1309 (word & 0xfe3) == 1) { /* scv */
1310 op->type = SYSCALL_VECTORED_0;
1311 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1312 goto unknown_opcode;
1318 op->type = BRANCH | BRTAKEN;
1319 imm = word & 0x03fffffc;
1320 if (imm & 0x02000000)
1322 if ((word & 2) == 0)
1324 op->val = truncate_if_32bit(regs->msr, imm);
1329 switch ((word >> 1) & 0x3ff) {
1331 op->type = COMPUTE + SETCC;
1332 rd = 7 - ((word >> 23) & 0x7);
1333 ra = 7 - ((word >> 18) & 0x7);
1336 val = (regs->ccr >> ra) & 0xf;
1337 op->ccval = (regs->ccr & ~(0xfUL << rd)) | (val << rd);
1341 case 528: /* bcctr */
1343 imm = (word & 0x400)? regs->ctr: regs->link;
1344 op->val = truncate_if_32bit(regs->msr, imm);
1347 if (branch_taken(word, regs, op))
1348 op->type |= BRTAKEN;
1351 case 18: /* rfid, scary */
1352 if (regs->msr & MSR_PR)
1357 case 150: /* isync */
1358 op->type = BARRIER | BARRIER_ISYNC;
1361 case 33: /* crnor */
1362 case 129: /* crandc */
1363 case 193: /* crxor */
1364 case 225: /* crnand */
1365 case 257: /* crand */
1366 case 289: /* creqv */
1367 case 417: /* crorc */
1368 case 449: /* cror */
1369 op->type = COMPUTE + SETCC;
1370 ra = (word >> 16) & 0x1f;
1371 rb = (word >> 11) & 0x1f;
1372 rd = (word >> 21) & 0x1f;
1373 ra = (regs->ccr >> (31 - ra)) & 1;
1374 rb = (regs->ccr >> (31 - rb)) & 1;
1375 val = (word >> (6 + ra * 2 + rb)) & 1;
1376 op->ccval = (regs->ccr & ~(1UL << (31 - rd))) |
1382 switch ((word >> 1) & 0x3ff) {
1383 case 598: /* sync */
1384 op->type = BARRIER + BARRIER_SYNC;
1385 #ifdef __powerpc64__
1386 switch ((word >> 21) & 3) {
1387 case 1: /* lwsync */
1388 op->type = BARRIER + BARRIER_LWSYNC;
1390 case 2: /* ptesync */
1391 op->type = BARRIER + BARRIER_PTESYNC;
1397 case 854: /* eieio */
1398 op->type = BARRIER + BARRIER_EIEIO;
1404 rd = (word >> 21) & 0x1f;
1405 ra = (word >> 16) & 0x1f;
1406 rb = (word >> 11) & 0x1f;
1407 rc = (word >> 6) & 0x1f;
1410 #ifdef __powerpc64__
1412 if (!cpu_has_feature(CPU_FTR_ARCH_31))
1413 goto unknown_opcode;
1415 prefix_r = GET_PREFIX_R(word);
1416 ra = GET_PREFIX_RA(suffix);
1417 rd = (suffix >> 21) & 0x1f;
1419 op->val = regs->gpr[rd];
1420 suffixopcode = get_op(suffix);
1421 prefixtype = (word >> 24) & 0x3;
1422 switch (prefixtype) {
1426 switch (suffixopcode) {
1427 case 14: /* paddi */
1428 op->type = COMPUTE | PREFIXED;
1429 op->val = mlsd_8lsd_ea(word, suffix, regs);
1435 if (rd & trap_compare(regs->gpr[ra], (short) word))
1440 if (rd & trap_compare((int)regs->gpr[ra], (short) word))
1444 #ifdef __powerpc64__
1447 * There are very many instructions with this primary opcode
1448 * introduced in the ISA as early as v2.03. However, the ones
1449 * we currently emulate were all introduced with ISA 3.0
1451 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1452 goto unknown_opcode;
1454 switch (word & 0x3f) {
1455 case 48: /* maddhd */
1456 asm volatile(PPC_MADDHD(%0, %1, %2, %3) :
1457 "=r" (op->val) : "r" (regs->gpr[ra]),
1458 "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
1461 case 49: /* maddhdu */
1462 asm volatile(PPC_MADDHDU(%0, %1, %2, %3) :
1463 "=r" (op->val) : "r" (regs->gpr[ra]),
1464 "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
1467 case 51: /* maddld */
1468 asm volatile(PPC_MADDLD(%0, %1, %2, %3) :
1469 "=r" (op->val) : "r" (regs->gpr[ra]),
1470 "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
1475 * There are other instructions from ISA 3.0 with the same
1476 * primary opcode which do not have emulation support yet.
1478 goto unknown_opcode;
1482 op->val = regs->gpr[ra] * (short) word;
1485 case 8: /* subfic */
1487 add_with_carry(regs, op, rd, ~regs->gpr[ra], imm, 1);
1490 case 10: /* cmpli */
1491 imm = (unsigned short) word;
1492 val = regs->gpr[ra];
1493 #ifdef __powerpc64__
1495 val = (unsigned int) val;
1497 do_cmp_unsigned(regs, op, val, imm, rd >> 2);
1502 val = regs->gpr[ra];
1503 #ifdef __powerpc64__
1507 do_cmp_signed(regs, op, val, imm, rd >> 2);
1510 case 12: /* addic */
1512 add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
1515 case 13: /* addic. */
1517 add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
1524 imm += regs->gpr[ra];
1528 case 15: /* addis */
1529 imm = ((short) word) << 16;
1531 imm += regs->gpr[ra];
1536 if (((word >> 1) & 0x1f) == 2) {
1538 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1539 goto unknown_opcode;
1540 imm = (short) (word & 0xffc1); /* d0 + d2 fields */
1541 imm |= (word >> 15) & 0x3e; /* d1 field */
1542 op->val = regs->nip + (imm << 16) + 4;
1548 case 20: /* rlwimi */
1549 mb = (word >> 6) & 0x1f;
1550 me = (word >> 1) & 0x1f;
1551 val = DATA32(regs->gpr[rd]);
1552 imm = MASK32(mb, me);
1553 op->val = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm);
1556 case 21: /* rlwinm */
1557 mb = (word >> 6) & 0x1f;
1558 me = (word >> 1) & 0x1f;
1559 val = DATA32(regs->gpr[rd]);
1560 op->val = ROTATE(val, rb) & MASK32(mb, me);
1563 case 23: /* rlwnm */
1564 mb = (word >> 6) & 0x1f;
1565 me = (word >> 1) & 0x1f;
1566 rb = regs->gpr[rb] & 0x1f;
1567 val = DATA32(regs->gpr[rd]);
1568 op->val = ROTATE(val, rb) & MASK32(mb, me);
1572 op->val = regs->gpr[rd] | (unsigned short) word;
1573 goto logical_done_nocc;
1576 imm = (unsigned short) word;
1577 op->val = regs->gpr[rd] | (imm << 16);
1578 goto logical_done_nocc;
1581 op->val = regs->gpr[rd] ^ (unsigned short) word;
1582 goto logical_done_nocc;
1584 case 27: /* xoris */
1585 imm = (unsigned short) word;
1586 op->val = regs->gpr[rd] ^ (imm << 16);
1587 goto logical_done_nocc;
1589 case 28: /* andi. */
1590 op->val = regs->gpr[rd] & (unsigned short) word;
1592 goto logical_done_nocc;
1594 case 29: /* andis. */
1595 imm = (unsigned short) word;
1596 op->val = regs->gpr[rd] & (imm << 16);
1598 goto logical_done_nocc;
1600 #ifdef __powerpc64__
1602 mb = ((word >> 6) & 0x1f) | (word & 0x20);
1603 val = regs->gpr[rd];
1604 if ((word & 0x10) == 0) {
1605 sh = rb | ((word & 2) << 4);
1606 val = ROTATE(val, sh);
1607 switch ((word >> 2) & 3) {
1608 case 0: /* rldicl */
1609 val &= MASK64_L(mb);
1611 case 1: /* rldicr */
1612 val &= MASK64_R(mb);
1615 val &= MASK64(mb, 63 - sh);
1617 case 3: /* rldimi */
1618 imm = MASK64(mb, 63 - sh);
1619 val = (regs->gpr[ra] & ~imm) |
1625 sh = regs->gpr[rb] & 0x3f;
1626 val = ROTATE(val, sh);
1627 switch ((word >> 1) & 7) {
1629 op->val = val & MASK64_L(mb);
1632 op->val = val & MASK64_R(mb);
1637 op->type = UNKNOWN; /* illegal instruction */
1641 /* isel occupies 32 minor opcodes */
1642 if (((word >> 1) & 0x1f) == 15) {
1643 mb = (word >> 6) & 0x1f; /* bc field */
1644 val = (regs->ccr >> (31 - mb)) & 1;
1645 val2 = (ra) ? regs->gpr[ra] : 0;
1647 op->val = (val) ? val2 : regs->gpr[rb];
1651 switch ((word >> 1) & 0x3ff) {
1654 (rd & trap_compare((int)regs->gpr[ra],
1655 (int)regs->gpr[rb])))
1658 #ifdef __powerpc64__
1660 if (rd & trap_compare(regs->gpr[ra], regs->gpr[rb]))
1664 case 83: /* mfmsr */
1665 if (regs->msr & MSR_PR)
1670 case 146: /* mtmsr */
1671 if (regs->msr & MSR_PR)
1675 op->val = 0xffffffff & ~(MSR_ME | MSR_LE);
1678 case 178: /* mtmsrd */
1679 if (regs->msr & MSR_PR)
1683 /* only MSR_EE and MSR_RI get changed if bit 15 set */
1684 /* mtmsrd doesn't change MSR_HV, MSR_ME or MSR_LE */
1685 imm = (word & 0x10000)? 0x8002: 0xefffffffffffeffeUL;
1692 if ((word >> 20) & 1) {
1694 for (sh = 0; sh < 8; ++sh) {
1695 if (word & (0x80000 >> sh))
1700 op->val = regs->ccr & imm;
1703 case 128: /* setb */
1704 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1705 goto unknown_opcode;
1707 * 'ra' encodes the CR field number (bfa) in the top 3 bits.
1708 * Since each CR field is 4 bits,
1709 * we can simply mask off the bottom two bits (bfa * 4)
1710 * to yield the first bit in the CR field.
1713 /* 'val' stores bits of the CR field (bfa) */
1714 val = regs->ccr >> (CR0_SHIFT - ra);
1715 /* checks if the LT bit of CR field (bfa) is set */
1718 /* checks if the GT bit of CR field (bfa) is set */
1725 case 144: /* mtcrf */
1726 op->type = COMPUTE + SETCC;
1728 val = regs->gpr[rd];
1729 op->ccval = regs->ccr;
1730 for (sh = 0; sh < 8; ++sh) {
1731 if (word & (0x80000 >> sh))
1732 op->ccval = (op->ccval & ~imm) |
1738 case 339: /* mfspr */
1739 spr = ((word >> 16) & 0x1f) | ((word >> 6) & 0x3e0);
1743 if (spr == SPRN_XER || spr == SPRN_LR ||
1748 case 467: /* mtspr */
1749 spr = ((word >> 16) & 0x1f) | ((word >> 6) & 0x3e0);
1751 op->val = regs->gpr[rd];
1753 if (spr == SPRN_XER || spr == SPRN_LR ||
1759 * Compare instructions
1762 val = regs->gpr[ra];
1763 val2 = regs->gpr[rb];
1764 #ifdef __powerpc64__
1765 if ((rd & 1) == 0) {
1766 /* word (32-bit) compare */
1771 do_cmp_signed(regs, op, val, val2, rd >> 2);
1775 val = regs->gpr[ra];
1776 val2 = regs->gpr[rb];
1777 #ifdef __powerpc64__
1778 if ((rd & 1) == 0) {
1779 /* word (32-bit) compare */
1780 val = (unsigned int) val;
1781 val2 = (unsigned int) val2;
1784 do_cmp_unsigned(regs, op, val, val2, rd >> 2);
1787 case 508: /* cmpb */
1788 do_cmpb(regs, op, regs->gpr[rd], regs->gpr[rb]);
1789 goto logical_done_nocc;
1792 * Arithmetic instructions
1795 add_with_carry(regs, op, rd, ~regs->gpr[ra],
1798 #ifdef __powerpc64__
1799 case 9: /* mulhdu */
1800 asm("mulhdu %0,%1,%2" : "=r" (op->val) :
1801 "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1805 add_with_carry(regs, op, rd, regs->gpr[ra],
1809 case 11: /* mulhwu */
1810 asm("mulhwu %0,%1,%2" : "=r" (op->val) :
1811 "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1815 op->val = regs->gpr[rb] - regs->gpr[ra];
1817 #ifdef __powerpc64__
1818 case 73: /* mulhd */
1819 asm("mulhd %0,%1,%2" : "=r" (op->val) :
1820 "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1823 case 75: /* mulhw */
1824 asm("mulhw %0,%1,%2" : "=r" (op->val) :
1825 "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1829 op->val = -regs->gpr[ra];
1832 case 136: /* subfe */
1833 add_with_carry(regs, op, rd, ~regs->gpr[ra],
1834 regs->gpr[rb], regs->xer & XER_CA);
1837 case 138: /* adde */
1838 add_with_carry(regs, op, rd, regs->gpr[ra],
1839 regs->gpr[rb], regs->xer & XER_CA);
1842 case 200: /* subfze */
1843 add_with_carry(regs, op, rd, ~regs->gpr[ra], 0L,
1844 regs->xer & XER_CA);
1847 case 202: /* addze */
1848 add_with_carry(regs, op, rd, regs->gpr[ra], 0L,
1849 regs->xer & XER_CA);
1852 case 232: /* subfme */
1853 add_with_carry(regs, op, rd, ~regs->gpr[ra], -1L,
1854 regs->xer & XER_CA);
1856 #ifdef __powerpc64__
1857 case 233: /* mulld */
1858 op->val = regs->gpr[ra] * regs->gpr[rb];
1861 case 234: /* addme */
1862 add_with_carry(regs, op, rd, regs->gpr[ra], -1L,
1863 regs->xer & XER_CA);
1866 case 235: /* mullw */
1867 op->val = (long)(int) regs->gpr[ra] *
1868 (int) regs->gpr[rb];
1871 #ifdef __powerpc64__
1872 case 265: /* modud */
1873 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1874 goto unknown_opcode;
1875 op->val = regs->gpr[ra] % regs->gpr[rb];
1879 op->val = regs->gpr[ra] + regs->gpr[rb];
1882 case 267: /* moduw */
1883 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1884 goto unknown_opcode;
1885 op->val = (unsigned int) regs->gpr[ra] %
1886 (unsigned int) regs->gpr[rb];
1888 #ifdef __powerpc64__
1889 case 457: /* divdu */
1890 op->val = regs->gpr[ra] / regs->gpr[rb];
1893 case 459: /* divwu */
1894 op->val = (unsigned int) regs->gpr[ra] /
1895 (unsigned int) regs->gpr[rb];
1897 #ifdef __powerpc64__
1898 case 489: /* divd */
1899 op->val = (long int) regs->gpr[ra] /
1900 (long int) regs->gpr[rb];
1903 case 491: /* divw */
1904 op->val = (int) regs->gpr[ra] /
1905 (int) regs->gpr[rb];
1907 #ifdef __powerpc64__
1908 case 425: /* divde[.] */
1909 asm volatile(PPC_DIVDE(%0, %1, %2) :
1910 "=r" (op->val) : "r" (regs->gpr[ra]),
1911 "r" (regs->gpr[rb]));
1913 case 393: /* divdeu[.] */
1914 asm volatile(PPC_DIVDEU(%0, %1, %2) :
1915 "=r" (op->val) : "r" (regs->gpr[ra]),
1916 "r" (regs->gpr[rb]));
1919 case 755: /* darn */
1920 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1921 goto unknown_opcode;
1924 /* 32-bit conditioned */
1925 asm volatile(PPC_DARN(%0, 0) : "=r" (op->val));
1929 /* 64-bit conditioned */
1930 asm volatile(PPC_DARN(%0, 1) : "=r" (op->val));
1935 asm volatile(PPC_DARN(%0, 2) : "=r" (op->val));
1939 goto unknown_opcode;
1940 #ifdef __powerpc64__
1941 case 777: /* modsd */
1942 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1943 goto unknown_opcode;
1944 op->val = (long int) regs->gpr[ra] %
1945 (long int) regs->gpr[rb];
1948 case 779: /* modsw */
1949 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1950 goto unknown_opcode;
1951 op->val = (int) regs->gpr[ra] %
1952 (int) regs->gpr[rb];
1957 * Logical instructions
1959 case 26: /* cntlzw */
1960 val = (unsigned int) regs->gpr[rd];
1961 op->val = ( val ? __builtin_clz(val) : 32 );
1963 #ifdef __powerpc64__
1964 case 58: /* cntlzd */
1965 val = regs->gpr[rd];
1966 op->val = ( val ? __builtin_clzl(val) : 64 );
1970 op->val = regs->gpr[rd] & regs->gpr[rb];
1974 op->val = regs->gpr[rd] & ~regs->gpr[rb];
1977 case 122: /* popcntb */
1978 do_popcnt(regs, op, regs->gpr[rd], 8);
1979 goto logical_done_nocc;
1982 op->val = ~(regs->gpr[rd] | regs->gpr[rb]);
1985 case 154: /* prtyw */
1986 do_prty(regs, op, regs->gpr[rd], 32);
1987 goto logical_done_nocc;
1989 case 186: /* prtyd */
1990 do_prty(regs, op, regs->gpr[rd], 64);
1991 goto logical_done_nocc;
1993 case 252: /* bpermd */
1994 do_bpermd(regs, op, regs->gpr[rd], regs->gpr[rb]);
1995 goto logical_done_nocc;
1998 op->val = ~(regs->gpr[rd] ^ regs->gpr[rb]);
2002 op->val = regs->gpr[rd] ^ regs->gpr[rb];
2005 case 378: /* popcntw */
2006 do_popcnt(regs, op, regs->gpr[rd], 32);
2007 goto logical_done_nocc;
2010 op->val = regs->gpr[rd] | ~regs->gpr[rb];
2014 op->val = regs->gpr[rd] | regs->gpr[rb];
2017 case 476: /* nand */
2018 op->val = ~(regs->gpr[rd] & regs->gpr[rb]);
2021 case 506: /* popcntd */
2022 do_popcnt(regs, op, regs->gpr[rd], 64);
2023 goto logical_done_nocc;
2025 case 538: /* cnttzw */
2026 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2027 goto unknown_opcode;
2028 val = (unsigned int) regs->gpr[rd];
2029 op->val = (val ? __builtin_ctz(val) : 32);
2031 #ifdef __powerpc64__
2032 case 570: /* cnttzd */
2033 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2034 goto unknown_opcode;
2035 val = regs->gpr[rd];
2036 op->val = (val ? __builtin_ctzl(val) : 64);
2039 case 922: /* extsh */
2040 op->val = (signed short) regs->gpr[rd];
2043 case 954: /* extsb */
2044 op->val = (signed char) regs->gpr[rd];
2046 #ifdef __powerpc64__
2047 case 986: /* extsw */
2048 op->val = (signed int) regs->gpr[rd];
2053 * Shift instructions
2056 sh = regs->gpr[rb] & 0x3f;
2058 op->val = (regs->gpr[rd] << sh) & 0xffffffffUL;
2064 sh = regs->gpr[rb] & 0x3f;
2066 op->val = (regs->gpr[rd] & 0xffffffffUL) >> sh;
2071 case 792: /* sraw */
2072 op->type = COMPUTE + SETREG + SETXER;
2073 sh = regs->gpr[rb] & 0x3f;
2074 ival = (signed int) regs->gpr[rd];
2075 op->val = ival >> (sh < 32 ? sh : 31);
2076 op->xerval = regs->xer;
2077 if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0))
2078 op->xerval |= XER_CA;
2080 op->xerval &= ~XER_CA;
2081 set_ca32(op, op->xerval & XER_CA);
2084 case 824: /* srawi */
2085 op->type = COMPUTE + SETREG + SETXER;
2087 ival = (signed int) regs->gpr[rd];
2088 op->val = ival >> sh;
2089 op->xerval = regs->xer;
2090 if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
2091 op->xerval |= XER_CA;
2093 op->xerval &= ~XER_CA;
2094 set_ca32(op, op->xerval & XER_CA);
2097 #ifdef __powerpc64__
2099 sh = regs->gpr[rb] & 0x7f;
2101 op->val = regs->gpr[rd] << sh;
2107 sh = regs->gpr[rb] & 0x7f;
2109 op->val = regs->gpr[rd] >> sh;
2114 case 794: /* srad */
2115 op->type = COMPUTE + SETREG + SETXER;
2116 sh = regs->gpr[rb] & 0x7f;
2117 ival = (signed long int) regs->gpr[rd];
2118 op->val = ival >> (sh < 64 ? sh : 63);
2119 op->xerval = regs->xer;
2120 if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0))
2121 op->xerval |= XER_CA;
2123 op->xerval &= ~XER_CA;
2124 set_ca32(op, op->xerval & XER_CA);
2127 case 826: /* sradi with sh_5 = 0 */
2128 case 827: /* sradi with sh_5 = 1 */
2129 op->type = COMPUTE + SETREG + SETXER;
2130 sh = rb | ((word & 2) << 4);
2131 ival = (signed long int) regs->gpr[rd];
2132 op->val = ival >> sh;
2133 op->xerval = regs->xer;
2134 if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
2135 op->xerval |= XER_CA;
2137 op->xerval &= ~XER_CA;
2138 set_ca32(op, op->xerval & XER_CA);
2141 case 890: /* extswsli with sh_5 = 0 */
2142 case 891: /* extswsli with sh_5 = 1 */
2143 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2144 goto unknown_opcode;
2145 op->type = COMPUTE + SETREG;
2146 sh = rb | ((word & 2) << 4);
2147 val = (signed int) regs->gpr[rd];
2149 op->val = ROTATE(val, sh) & MASK64(0, 63 - sh);
2154 #endif /* __powerpc64__ */
2157 * Cache instructions
2159 case 54: /* dcbst */
2160 op->type = MKOP(CACHEOP, DCBST, 0);
2161 op->ea = xform_ea(word, regs);
2165 op->type = MKOP(CACHEOP, DCBF, 0);
2166 op->ea = xform_ea(word, regs);
2169 case 246: /* dcbtst */
2170 op->type = MKOP(CACHEOP, DCBTST, 0);
2171 op->ea = xform_ea(word, regs);
2175 case 278: /* dcbt */
2176 op->type = MKOP(CACHEOP, DCBTST, 0);
2177 op->ea = xform_ea(word, regs);
2181 case 982: /* icbi */
2182 op->type = MKOP(CACHEOP, ICBI, 0);
2183 op->ea = xform_ea(word, regs);
2186 case 1014: /* dcbz */
2187 op->type = MKOP(CACHEOP, DCBZ, 0);
2188 op->ea = xform_ea(word, regs);
2198 op->update_reg = ra;
2200 op->val = regs->gpr[rd];
2201 u = (word >> 20) & UPDATE;
2207 op->ea = xform_ea(word, regs);
2208 switch ((word >> 1) & 0x3ff) {
2209 case 20: /* lwarx */
2210 op->type = MKOP(LARX, 0, 4);
2213 case 150: /* stwcx. */
2214 op->type = MKOP(STCX, 0, 4);
2217 #ifdef __powerpc64__
2218 case 84: /* ldarx */
2219 op->type = MKOP(LARX, 0, 8);
2222 case 214: /* stdcx. */
2223 op->type = MKOP(STCX, 0, 8);
2226 case 52: /* lbarx */
2227 op->type = MKOP(LARX, 0, 1);
2230 case 694: /* stbcx. */
2231 op->type = MKOP(STCX, 0, 1);
2234 case 116: /* lharx */
2235 op->type = MKOP(LARX, 0, 2);
2238 case 726: /* sthcx. */
2239 op->type = MKOP(STCX, 0, 2);
2242 case 276: /* lqarx */
2243 if (!((rd & 1) || rd == ra || rd == rb))
2244 op->type = MKOP(LARX, 0, 16);
2247 case 182: /* stqcx. */
2249 op->type = MKOP(STCX, 0, 16);
2254 case 55: /* lwzux */
2255 op->type = MKOP(LOAD, u, 4);
2259 case 119: /* lbzux */
2260 op->type = MKOP(LOAD, u, 1);
2263 #ifdef CONFIG_ALTIVEC
2265 * Note: for the load/store vector element instructions,
2266 * bits of the EA say which field of the VMX register to use.
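/*
 * Illustrative example: a load-vector-element at EA = 0x1003 copies its
 * bytes into offset (EA & 0xf) = 3 of the 16-byte VMX register image in
 * do_vec_load() above.
 */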
2269 op->type = MKOP(LOAD_VMX, 0, 1);
2270 op->element_size = 1;
2273 case 39: /* lvehx */
2274 op->type = MKOP(LOAD_VMX, 0, 2);
2275 op->element_size = 2;
2278 case 71: /* lvewx */
2279 op->type = MKOP(LOAD_VMX, 0, 4);
2280 op->element_size = 4;
2284 case 359: /* lvxl */
2285 op->type = MKOP(LOAD_VMX, 0, 16);
2286 op->element_size = 16;
2289 case 135: /* stvebx */
2290 op->type = MKOP(STORE_VMX, 0, 1);
2291 op->element_size = 1;
2294 case 167: /* stvehx */
2295 op->type = MKOP(STORE_VMX, 0, 2);
2296 op->element_size = 2;
2299 case 199: /* stvewx */
2300 op->type = MKOP(STORE_VMX, 0, 4);
2301 op->element_size = 4;
2304 case 231: /* stvx */
2305 case 487: /* stvxl */
2306 op->type = MKOP(STORE_VMX, 0, 16);
2308 #endif /* CONFIG_ALTIVEC */
2310 #ifdef __powerpc64__
2313 op->type = MKOP(LOAD, u, 8);
2316 case 149: /* stdx */
2317 case 181: /* stdux */
2318 op->type = MKOP(STORE, u, 8);
2322 case 151: /* stwx */
2323 case 183: /* stwux */
2324 op->type = MKOP(STORE, u, 4);
2327 case 215: /* stbx */
2328 case 247: /* stbux */
2329 op->type = MKOP(STORE, u, 1);
2332 case 279: /* lhzx */
2333 case 311: /* lhzux */
2334 op->type = MKOP(LOAD, u, 2);
2337 #ifdef __powerpc64__
2338 case 341: /* lwax */
2339 case 373: /* lwaux */
2340 op->type = MKOP(LOAD, SIGNEXT | u, 4);
2344 case 343: /* lhax */
2345 case 375: /* lhaux */
2346 op->type = MKOP(LOAD, SIGNEXT | u, 2);
2349 case 407: /* sthx */
2350 case 439: /* sthux */
2351 op->type = MKOP(STORE, u, 2);
2354 #ifdef __powerpc64__
2355 case 532: /* ldbrx */
2356 op->type = MKOP(LOAD, BYTEREV, 8);
2360 case 533: /* lswx */
2361 op->type = MKOP(LOAD_MULTI, 0, regs->xer & 0x7f);
2364 case 534: /* lwbrx */
2365 op->type = MKOP(LOAD, BYTEREV, 4);
2368 case 597: /* lswi */
2370 rb = 32; /* # bytes to load */
2371 op->type = MKOP(LOAD_MULTI, 0, rb);
2372 op->ea = ra ? regs->gpr[ra] : 0;
2375 #ifdef CONFIG_PPC_FPU
2376 case 535: /* lfsx */
2377 case 567: /* lfsux */
2378 op->type = MKOP(LOAD_FP, u | FPCONV, 4);
2381 case 599: /* lfdx */
2382 case 631: /* lfdux */
2383 op->type = MKOP(LOAD_FP, u, 8);
2386 case 663: /* stfsx */
2387 case 695: /* stfsux */
2388 op->type = MKOP(STORE_FP, u | FPCONV, 4);
2391 case 727: /* stfdx */
2392 case 759: /* stfdux */
2393 op->type = MKOP(STORE_FP, u, 8);
2396 #ifdef __powerpc64__
2397 case 791: /* lfdpx */
2398 op->type = MKOP(LOAD_FP, 0, 16);
2401 case 855: /* lfiwax */
2402 op->type = MKOP(LOAD_FP, SIGNEXT, 4);
2405 case 887: /* lfiwzx */
2406 op->type = MKOP(LOAD_FP, 0, 4);
2409 case 919: /* stfdpx */
2410 op->type = MKOP(STORE_FP, 0, 16);
2413 case 983: /* stfiwx */
2414 op->type = MKOP(STORE_FP, 0, 4);
2416 #endif /* __powerpc64__ */
2417 #endif /* CONFIG_PPC_FPU */
2419 #ifdef __powerpc64__
2420 case 660: /* stdbrx */
2421 op->type = MKOP(STORE, BYTEREV, 8);
2422 op->val = byterev_8(regs->gpr[rd]);
2426 case 661: /* stswx */
2427 op->type = MKOP(STORE_MULTI, 0, regs->xer & 0x7f);
2430 case 662: /* stwbrx */
2431 op->type = MKOP(STORE, BYTEREV, 4);
2432 op->val = byterev_4(regs->gpr[rd]);
2435 case 725: /* stswi */
2437 rb = 32; /* # bytes to store */
2438 op->type = MKOP(STORE_MULTI, 0, rb);
2439 op->ea = ra ? regs->gpr[ra] : 0;
2442 case 790: /* lhbrx */
2443 op->type = MKOP(LOAD, BYTEREV, 2);
2446 case 918: /* sthbrx */
2447 op->type = MKOP(STORE, BYTEREV, 2);
2448 op->val = byterev_2(regs->gpr[rd]);
2452 case 12: /* lxsiwzx */
2453 op->reg = rd | ((word & 1) << 5);
2454 op->type = MKOP(LOAD_VSX, 0, 4);
2455 op->element_size = 8;
2458 case 76: /* lxsiwax */
2459 op->reg = rd | ((word & 1) << 5);
2460 op->type = MKOP(LOAD_VSX, SIGNEXT, 4);
2461 op->element_size = 8;
2464 case 140: /* stxsiwx */
2465 op->reg = rd | ((word & 1) << 5);
2466 op->type = MKOP(STORE_VSX, 0, 4);
2467 op->element_size = 8;
2470 case 268: /* lxvx */
2471 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2472 goto unknown_opcode;
2473 op->reg = rd | ((word & 1) << 5);
2474 op->type = MKOP(LOAD_VSX, 0, 16);
2475 op->element_size = 16;
2476 op->vsx_flags = VSX_CHECK_VEC;
2479 case 269: /* lxvl */
2480 case 301: { /* lxvll */
2482 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2483 goto unknown_opcode;
2484 op->reg = rd | ((word & 1) << 5);
2485 op->ea = ra ? regs->gpr[ra] : 0;
2486 nb = regs->gpr[rb] & 0xff;
2489 op->type = MKOP(LOAD_VSX, 0, nb);
2490 op->element_size = 16;
2491 op->vsx_flags = ((word & 0x20) ? VSX_LDLEFT : 0) |
2495 case 332: /* lxvdsx */
2496 op->reg = rd | ((word & 1) << 5);
2497 op->type = MKOP(LOAD_VSX, 0, 8);
2498 op->element_size = 8;
2499 op->vsx_flags = VSX_SPLAT;
2502 case 333: /* lxvpx */
2503 if (!cpu_has_feature(CPU_FTR_ARCH_31))
2504 goto unknown_opcode;
2505 op->reg = VSX_REGISTER_XTP(rd);
2506 op->type = MKOP(LOAD_VSX, 0, 32);
2507 op->element_size = 32;
2510 case 364: /* lxvwsx */
2511 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2512 goto unknown_opcode;
2513 op->reg = rd | ((word & 1) << 5);
2514 op->type = MKOP(LOAD_VSX, 0, 4);
2515 op->element_size = 4;
2516 op->vsx_flags = VSX_SPLAT | VSX_CHECK_VEC;
2519 case 396: /* stxvx */
2520 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2521 goto unknown_opcode;
2522 op->reg = rd | ((word & 1) << 5);
2523 op->type = MKOP(STORE_VSX, 0, 16);
2524 op->element_size = 16;
2525 op->vsx_flags = VSX_CHECK_VEC;
2528 case 397: /* stxvl */
2529 case 429: { /* stxvll */
2531 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2532 goto unknown_opcode;
2533 op->reg = rd | ((word & 1) << 5);
2534 op->ea = ra ? regs->gpr[ra] : 0;
2535 nb = regs->gpr[rb] & 0xff;
2538 op->type = MKOP(STORE_VSX, 0, nb);
2539 op->element_size = 16;
2540 op->vsx_flags = ((word & 0x20) ? VSX_LDLEFT : 0) |
2544 case 461: /* stxvpx */
2545 if (!cpu_has_feature(CPU_FTR_ARCH_31))
2546 goto unknown_opcode;
2547 op->reg = VSX_REGISTER_XTP(rd);
2548 op->type = MKOP(STORE_VSX, 0, 32);
2549 op->element_size = 32;
2551 case 524: /* lxsspx */
2552 op->reg = rd | ((word & 1) << 5);
2553 op->type = MKOP(LOAD_VSX, 0, 4);
2554 op->element_size = 8;
2555 op->vsx_flags = VSX_FPCONV;
2558 case 588: /* lxsdx */
2559 op->reg = rd | ((word & 1) << 5);
2560 op->type = MKOP(LOAD_VSX, 0, 8);
2561 op->element_size = 8;
2564 case 652: /* stxsspx */
2565 op->reg = rd | ((word & 1) << 5);
2566 op->type = MKOP(STORE_VSX, 0, 4);
2567 op->element_size = 8;
2568 op->vsx_flags = VSX_FPCONV;
2571 case 716: /* stxsdx */
2572 op->reg = rd | ((word & 1) << 5);
2573 op->type = MKOP(STORE_VSX, 0, 8);
2574 op->element_size = 8;
2577 case 780: /* lxvw4x */
2578 op->reg = rd | ((word & 1) << 5);
2579 op->type = MKOP(LOAD_VSX, 0, 16);
2580 op->element_size = 4;
2583 case 781: /* lxsibzx */
2584 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2585 goto unknown_opcode;
2586 op->reg = rd | ((word & 1) << 5);
2587 op->type = MKOP(LOAD_VSX, 0, 1);
2588 op->element_size = 8;
2589 op->vsx_flags = VSX_CHECK_VEC;
2592 case 812: /* lxvh8x */
2593 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2594 goto unknown_opcode;
2595 op->reg = rd | ((word & 1) << 5);
2596 op->type = MKOP(LOAD_VSX, 0, 16);
2597 op->element_size = 2;
2598 op->vsx_flags = VSX_CHECK_VEC;
2601 case 813: /* lxsihzx */
2602 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2603 goto unknown_opcode;
2604 op->reg = rd | ((word & 1) << 5);
2605 op->type = MKOP(LOAD_VSX, 0, 2);
2606 op->element_size = 8;
2607 op->vsx_flags = VSX_CHECK_VEC;
2610 case 844: /* lxvd2x */
2611 op->reg = rd | ((word & 1) << 5);
2612 op->type = MKOP(LOAD_VSX, 0, 16);
2613 op->element_size = 8;
2616 case 876: /* lxvb16x */
2617 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2618 goto unknown_opcode;
2619 op->reg = rd | ((word & 1) << 5);
2620 op->type = MKOP(LOAD_VSX, 0, 16);
2621 op->element_size = 1;
2622 op->vsx_flags = VSX_CHECK_VEC;
2625 case 908: /* stxvw4x */
2626 op->reg = rd | ((word & 1) << 5);
2627 op->type = MKOP(STORE_VSX, 0, 16);
2628 op->element_size = 4;
2631 case 909: /* stxsibx */
2632 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2633 goto unknown_opcode;
2634 op->reg = rd | ((word & 1) << 5);
2635 op->type = MKOP(STORE_VSX, 0, 1);
2636 op->element_size = 8;
2637 op->vsx_flags = VSX_CHECK_VEC;
2640 case 940: /* stxvh8x */
2641 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2642 goto unknown_opcode;
2643 op->reg = rd | ((word & 1) << 5);
2644 op->type = MKOP(STORE_VSX, 0, 16);
2645 op->element_size = 2;
2646 op->vsx_flags = VSX_CHECK_VEC;
2649 case 941: /* stxsihx */
2650 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2651 goto unknown_opcode;
2652 op->reg = rd | ((word & 1) << 5);
2653 op->type = MKOP(STORE_VSX, 0, 2);
2654 op->element_size = 8;
2655 op->vsx_flags = VSX_CHECK_VEC;
2658 case 972: /* stxvd2x */
2659 op->reg = rd | ((word & 1) << 5);
2660 op->type = MKOP(STORE_VSX, 0, 16);
2661 op->element_size = 8;
2664 case 1004: /* stxvb16x */
2665 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2666 goto unknown_opcode;
2667 op->reg = rd | ((word & 1) << 5);
2668 op->type = MKOP(STORE_VSX, 0, 16);
2669 op->element_size = 1;
2670 op->vsx_flags = VSX_CHECK_VEC;
2673 #endif /* CONFIG_VSX */
2679 op->type = MKOP(LOAD, u, 4);
2680 op->ea = dform_ea(word, regs);
2685 op->type = MKOP(LOAD, u, 1);
2686 op->ea = dform_ea(word, regs);
2691 op->type = MKOP(STORE, u, 4);
2692 op->ea = dform_ea(word, regs);
2697 op->type = MKOP(STORE, u, 1);
2698 op->ea = dform_ea(word, regs);
2703 op->type = MKOP(LOAD, u, 2);
2704 op->ea = dform_ea(word, regs);
2709 op->type = MKOP(LOAD, SIGNEXT | u, 2);
2710 op->ea = dform_ea(word, regs);
2715 op->type = MKOP(STORE, u, 2);
2716 op->ea = dform_ea(word, regs);
2721 break; /* invalid form, ra in range to load */
2722 op->type = MKOP(LOAD_MULTI, 0, 4 * (32 - rd));
2723 op->ea = dform_ea(word, regs);
2727 op->type = MKOP(STORE_MULTI, 0, 4 * (32 - rd));
2728 op->ea = dform_ea(word, regs);
2731 #ifdef CONFIG_PPC_FPU
2734 op->type = MKOP(LOAD_FP, u | FPCONV, 4);
2735 op->ea = dform_ea(word, regs);
2740 op->type = MKOP(LOAD_FP, u, 8);
2741 op->ea = dform_ea(word, regs);
2745 case 53: /* stfsu */
2746 op->type = MKOP(STORE_FP, u | FPCONV, 4);
2747 op->ea = dform_ea(word, regs);
2751 case 55: /* stfdu */
2752 op->type = MKOP(STORE_FP, u, 8);
2753 op->ea = dform_ea(word, regs);
2757 #ifdef __powerpc64__
2759 if (!((rd & 1) || (rd == ra)))
2760 op->type = MKOP(LOAD, 0, 16);
2761 op->ea = dqform_ea(word, regs);
2766 case 57: /* lfdp, lxsd, lxssp */
2767 op->ea = dsform_ea(word, regs);
2771 break; /* reg must be even */
2772 op->type = MKOP(LOAD_FP, 0, 16);
2775 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2776 goto unknown_opcode;
2778 op->type = MKOP(LOAD_VSX, 0, 8);
2779 op->element_size = 8;
2780 op->vsx_flags = VSX_CHECK_VEC;
2783 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2784 goto unknown_opcode;
2786 op->type = MKOP(LOAD_VSX, 0, 4);
2787 op->element_size = 8;
2788 op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2792 #endif /* CONFIG_VSX */
2794 #ifdef __powerpc64__
2795 case 58: /* ld[u], lwa */
2796 op->ea = dsform_ea(word, regs);
2799 op->type = MKOP(LOAD, 0, 8);
2802 op->type = MKOP(LOAD, UPDATE, 8);
2805 op->type = MKOP(LOAD, SIGNEXT, 4);
2813 if (!cpu_has_feature(CPU_FTR_ARCH_31))
2814 goto unknown_opcode;
2815 op->ea = dqform_ea(word, regs);
2816 op->reg = VSX_REGISTER_XTP(rd);
2817 op->element_size = 32;
2818 switch (word & 0xf) {
2820 op->type = MKOP(LOAD_VSX, 0, 32);
2823 op->type = MKOP(STORE_VSX, 0, 32);
2828 case 61: /* stfdp, lxv, stxsd, stxssp, stxv */
2830 case 0: /* stfdp with LSB of DS field = 0 */
2831 case 4: /* stfdp with LSB of DS field = 1 */
2832 op->ea = dsform_ea(word, regs);
2833 op->type = MKOP(STORE_FP, 0, 16);
2837 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2838 goto unknown_opcode;
2839 op->ea = dqform_ea(word, regs);
2842 op->type = MKOP(LOAD_VSX, 0, 16);
2843 op->element_size = 16;
2844 op->vsx_flags = VSX_CHECK_VEC;
2847 case 2: /* stxsd with LSB of DS field = 0 */
2848 case 6: /* stxsd with LSB of DS field = 1 */
2849 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2850 goto unknown_opcode;
2851 op->ea = dsform_ea(word, regs);
2853 op->type = MKOP(STORE_VSX, 0, 8);
2854 op->element_size = 8;
2855 op->vsx_flags = VSX_CHECK_VEC;
2858 case 3: /* stxssp with LSB of DS field = 0 */
2859 case 7: /* stxssp with LSB of DS field = 1 */
2860 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2861 goto unknown_opcode;
2862 op->ea = dsform_ea(word, regs);
2864 op->type = MKOP(STORE_VSX, 0, 4);
2865 op->element_size = 8;
2866 op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2870 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2871 goto unknown_opcode;
2872 op->ea = dqform_ea(word, regs);
2875 op->type = MKOP(STORE_VSX, 0, 16);
2876 op->element_size = 16;
2877 op->vsx_flags = VSX_CHECK_VEC;
2881 #endif /* CONFIG_VSX */
2883 #ifdef __powerpc64__
2884 case 62: /* std[u] */
2885 op->ea = dsform_ea(word, regs);
2888 op->type = MKOP(STORE, 0, 8);
2891 op->type = MKOP(STORE, UPDATE, 8);
2895 op->type = MKOP(STORE, 0, 16);
2899 case 1: /* Prefixed instructions */
2900 if (!cpu_has_feature(CPU_FTR_ARCH_31))
2901 goto unknown_opcode;
2903 prefix_r = GET_PREFIX_R(word);
2904 ra = GET_PREFIX_RA(suffix);
2905 op->update_reg = ra;
2906 rd = (suffix >> 21) & 0x1f;
2908 op->val = regs->gpr[rd];
2910 suffixopcode = get_op(suffix);
2911 prefixtype = (word >> 24) & 0x3;
2912 switch (prefixtype) {
2913 case 0: /* Type 00 Eight-Byte Load/Store */
2916 op->ea = mlsd_8lsd_ea(word, suffix, regs);
2917 switch (suffixopcode) {
2919 op->type = MKOP(LOAD, PREFIXED | SIGNEXT, 4);
2922 case 42: /* plxsd */
2924 op->type = MKOP(LOAD_VSX, PREFIXED, 8);
2925 op->element_size = 8;
2926 op->vsx_flags = VSX_CHECK_VEC;
2928 case 43: /* plxssp */
2930 op->type = MKOP(LOAD_VSX, PREFIXED, 4);
2931 op->element_size = 8;
2932 op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2934 case 46: /* pstxsd */
2936 op->type = MKOP(STORE_VSX, PREFIXED, 8);
2937 op->element_size = 8;
2938 op->vsx_flags = VSX_CHECK_VEC;
2940 case 47: /* pstxssp */
2942 op->type = MKOP(STORE_VSX, PREFIXED, 4);
2943 op->element_size = 8;
2944 op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2946 case 51: /* plxv1 */
2949 case 50: /* plxv0 */
2950 op->type = MKOP(LOAD_VSX, PREFIXED, 16);
2951 op->element_size = 16;
2952 op->vsx_flags = VSX_CHECK_VEC;
2954 case 55: /* pstxv1 */
2957 case 54: /* pstxv0 */
2958 op->type = MKOP(STORE_VSX, PREFIXED, 16);
2959 op->element_size = 16;
2960 op->vsx_flags = VSX_CHECK_VEC;
2962 #endif /* CONFIG_VSX */
2964 op->type = MKOP(LOAD, PREFIXED, 16);
2967 op->type = MKOP(LOAD, PREFIXED, 8);
2970 case 58: /* plxvp */
2971 op->reg = VSX_REGISTER_XTP(rd);
2972 op->type = MKOP(LOAD_VSX, PREFIXED, 32);
2973 op->element_size = 32;
2975 #endif /* CONFIG_VSX */
2977 op->type = MKOP(STORE, PREFIXED, 16);
2980 op->type = MKOP(STORE, PREFIXED, 8);
2983 case 62: /* pstxvp */
2984 op->reg = VSX_REGISTER_XTP(rd);
2985 op->type = MKOP(STORE_VSX, PREFIXED, 32);
2986 op->element_size = 32;
2988 #endif /* CONFIG_VSX */
2991 case 1: /* Type 01 Eight-Byte Register-to-Register */
2993 case 2: /* Type 10 Modified Load/Store */
2996 op->ea = mlsd_8lsd_ea(word, suffix, regs);
2997 switch (suffixopcode) {
2999 op->type = MKOP(LOAD, PREFIXED, 4);
3002 op->type = MKOP(LOAD, PREFIXED, 1);
3005 op->type = MKOP(STORE, PREFIXED, 4);
3008 op->type = MKOP(STORE, PREFIXED, 1);
3011 op->type = MKOP(LOAD, PREFIXED, 2);
3014 op->type = MKOP(LOAD, PREFIXED | SIGNEXT, 2);
3017 op->type = MKOP(STORE, PREFIXED, 2);
3020 op->type = MKOP(LOAD_FP, PREFIXED | FPCONV, 4);
3023 op->type = MKOP(LOAD_FP, PREFIXED, 8);
3025 case 52: /* pstfs */
3026 op->type = MKOP(STORE_FP, PREFIXED | FPCONV, 4);
3028 case 54: /* pstfd */
3029 op->type = MKOP(STORE_FP, PREFIXED, 8);
3033 case 3: /* Type 11 Modified Register-to-Register */
3036 #endif /* __powerpc64__ */
3040 if (OP_IS_LOAD_STORE(op->type) && (op->type & UPDATE)) {
3041 switch (GETTYPE(op->type)) {
3044 goto unknown_opcode;
3050 goto unknown_opcode;
3055 if ((GETTYPE(op->type) == LOAD_VSX ||
3056 GETTYPE(op->type) == STORE_VSX) &&
3057 !cpu_has_feature(CPU_FTR_VSX)) {
3060 #endif /* CONFIG_VSX */
3085 op->type = INTERRUPT | 0x700;
3086 op->val = SRR1_PROGPRIV;
3090 op->type = INTERRUPT | 0x700;
3091 op->val = SRR1_PROGTRAP;
3094 EXPORT_SYMBOL_GPL(analyse_instr);
3095 NOKPROBE_SYMBOL(analyse_instr);
3098 * On PPC32 the stack pointer is always changed with an stwu on r1, so this
3099 * emulated store could corrupt the exception frame. To cope, an exception
3100 * frame trampoline is provided, pushed below the kprobed function's stack.
3101 * Here we therefore only update gpr[1] and do not emulate the real store;
3102 * the real store is done safely in the exception return code, which checks
3103 * this flag.
3105 static nokprobe_inline int handle_stack_update(unsigned long ea, struct pt_regs *regs)
3108 * Check whether the flag is already set, since that would mean
3109 * we would lose the previously recorded value.
3111 WARN_ON(test_thread_flag(TIF_EMULATE_STACK_STORE));
3112 set_thread_flag(TIF_EMULATE_STACK_STORE);
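/*
 * Illustrative sketch, not part of the original file (the type and
 * function names below are hypothetical): a user-space model of the
 * "update gpr[1] now, defer the memory write" scheme described above.
 * The kernel performs the skipped store later, on exception return,
 * when it sees TIF_EMULATE_STACK_STORE set.
 */
#if 0	/* example only, never compiled */
#include <stdbool.h>
#include <stdint.h>

struct cpu_model {
	uint64_t gpr[32];
	bool	 pending_stack_store;	/* models TIF_EMULATE_STACK_STORE */
	uint64_t pending_store_val;	/* value the stwu would have written */
};

/* Emulate "stwu r1, D(r1)": update r1 immediately, defer the store. */
static void emulate_stwu_r1(struct cpu_model *cpu, int32_t d)
{
	cpu->pending_store_val = cpu->gpr[1];	/* stwu stores the old r1 */
	cpu->pending_stack_store = true;
	cpu->gpr[1] += d;			/* only the register is updated here */
}

/* Later, in a safe context, perform the store that was skipped. */
static void flush_pending_stack_store(struct cpu_model *cpu, uint8_t *mem)
{
	if (!cpu->pending_stack_store)
		return;
	*(uint32_t *)(mem + cpu->gpr[1]) = (uint32_t)cpu->pending_store_val;
	cpu->pending_stack_store = false;
}
#endif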
3116 static nokprobe_inline void do_signext(unsigned long *valp, int size)
3120 *valp = (signed short) *valp;
3123 *valp = (signed int) *valp;
3128 static nokprobe_inline void do_byterev(unsigned long *valp, int size)
3132 *valp = byterev_2(*valp);
3135 *valp = byterev_4(*valp);
3137 #ifdef __powerpc64__
3139 *valp = byterev_8(*valp);
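/*
 * Illustrative sketch, not part of the original file: portable
 * equivalents of the two helpers above, using standard casts for sign
 * extension and the GCC/Clang __builtin_bswap builtins for byte
 * reversal.
 */
#if 0	/* example only, never compiled */
#include <stdint.h>

static uint64_t signext(uint64_t val, int size)
{
	if (size == 2)
		return (uint64_t)(int64_t)(int16_t)val;
	if (size == 4)
		return (uint64_t)(int64_t)(int32_t)val;
	return val;
}

static uint64_t byterev(uint64_t val, int size)
{
	if (size == 2)
		return __builtin_bswap16((uint16_t)val);
	if (size == 4)
		return __builtin_bswap32((uint32_t)val);
	if (size == 8)
		return __builtin_bswap64(val);
	return val;
}
#endif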
3146  * Emulate an instruction that can be executed just by updating register values.
3149 void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op)
3151 unsigned long next_pc;
3153 next_pc = truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op->type));
3154 switch (GETTYPE(op->type)) {
3156 if (op->type & SETREG)
3157 regs->gpr[op->reg] = op->val;
3158 if (op->type & SETCC)
3159 regs->ccr = op->ccval;
3160 if (op->type & SETXER)
3161 regs->xer = op->xerval;
3165 if (op->type & SETLK)
3166 regs->link = next_pc;
3167 if (op->type & BRTAKEN)
3169 if (op->type & DECCTR)
3174 switch (op->type & BARRIER_MASK) {
3184 case BARRIER_LWSYNC:
3185 asm volatile("lwsync" : : : "memory");
3187 case BARRIER_PTESYNC:
3188 asm volatile("ptesync" : : : "memory");
3196 regs->gpr[op->reg] = regs->xer & 0xffffffffUL;
3199 regs->gpr[op->reg] = regs->link;
3202 regs->gpr[op->reg] = regs->ctr;
3212 regs->xer = op->val & 0xffffffffUL;
3215 regs->link = op->val;
3218 regs->ctr = op->val;
3228 regs_set_return_ip(regs, next_pc);
3230 NOKPROBE_SYMBOL(emulate_update_regs);
3233 * Emulate a previously-analysed load or store instruction.
3234 * Return values are:
3235 * 0 = instruction emulated successfully
3236 * -EFAULT = address out of range or access faulted (regs->dar
3237 * contains the faulting address)
3238 * -EACCES = misaligned access, instruction requires alignment
3239 * -EINVAL = unknown operation in *op
3241 int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op)
3243 int err, size, type;
3251 size = GETSIZE(op->type);
3252 type = GETTYPE(op->type);
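	/*
	 * cross_endian below is true when the endianness in effect for the
	 * emulated access (MSR_LE in the emulated context's MSR) differs
	 * from the endianness the kernel itself runs in, in which case the
	 * data loaded or stored has to be byte-reversed.
	 */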
3253 cross_endian = (regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE);
3254 ea = truncate_if_32bit(regs->msr, op->ea);
3258 if (ea & (size - 1))
3259 return -EACCES; /* can't handle misaligned */
3260 if (!address_ok(regs, ea, size))
3265 #ifdef __powerpc64__
3267 __get_user_asmx(val, ea, err, "lbarx");
3270 __get_user_asmx(val, ea, err, "lharx");
3274 __get_user_asmx(val, ea, err, "lwarx");
3276 #ifdef __powerpc64__
3278 __get_user_asmx(val, ea, err, "ldarx");
3281 err = do_lqarx(ea, &regs->gpr[op->reg]);
3292 regs->gpr[op->reg] = val;
3296 if (ea & (size - 1))
3297 return -EACCES; /* can't handle misaligned */
3298 if (!address_ok(regs, ea, size))
3302 #ifdef __powerpc64__
3304 __put_user_asmx(op->val, ea, err, "stbcx.", cr);
3307 __put_user_asmx(op->val, ea, err, "sthcx.", cr);
3311 __put_user_asmx(op->val, ea, err, "stwcx.", cr);
3313 #ifdef __powerpc64__
3315 __put_user_asmx(op->val, ea, err, "stdcx.", cr);
3318 err = do_stqcx(ea, regs->gpr[op->reg],
3319 regs->gpr[op->reg + 1], &cr);
3326 regs->ccr = (regs->ccr & 0x0fffffff) |
3327 (cr & 0xe0000000) |
3328 ((regs->xer >> 3) & 0x10000000);
3334 #ifdef __powerpc64__
3336 err = emulate_lq(regs, ea, op->reg, cross_endian);
3340 err = read_mem(&regs->gpr[op->reg], ea, size, regs);
3342 if (op->type & SIGNEXT)
3343 do_signext(&regs->gpr[op->reg], size);
3344 if ((op->type & BYTEREV) == (cross_endian ? 0 : BYTEREV))
3345 do_byterev(&regs->gpr[op->reg], size);
3349 #ifdef CONFIG_PPC_FPU
3352 * If the instruction is in userspace, we can emulate it even
3353 * if the FP state is not live, because we have the state
3354 * stored in the thread_struct. If the instruction is in
3355 * the kernel, we must not touch the state in the thread_struct.
3357 if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP))
3359 err = do_fp_load(op, ea, regs, cross_endian);
3362 #ifdef CONFIG_ALTIVEC
3364 if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC))
3366 err = do_vec_load(op->reg, ea, size, regs, cross_endian);
3371 unsigned long msrbit = MSR_VSX;
3374 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
3375 * when the target of the instruction is a vector register.
3377 if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC))
3379 if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit))
3381 err = do_vsx_load(op, ea, regs, cross_endian);
3386 if (!address_ok(regs, ea, size))
3389 for (i = 0; i < size; i += 4) {
3390 unsigned int v32 = 0;
3395 err = copy_mem_in((u8 *) &v32, ea, nb, regs);
3398 if (unlikely(cross_endian))
3399 v32 = byterev_4(v32);
3400 regs->gpr[rd] = v32;
3402 /* reg number wraps from 31 to 0 for lsw[ix] */
3403 rd = (rd + 1) & 0x1f;
3408 #ifdef __powerpc64__
3410 err = emulate_stq(regs, ea, op->reg, cross_endian);
3414 if ((op->type & UPDATE) && size == sizeof(long) &&
3415 op->reg == 1 && op->update_reg == 1 &&
3416 !(regs->msr & MSR_PR) &&
3417 ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) {
3418 err = handle_stack_update(ea, regs);
3421 if (unlikely(cross_endian))
3422 do_byterev(&op->val, size);
3423 err = write_mem(op->val, ea, size, regs);
3426 #ifdef CONFIG_PPC_FPU
3428 if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP))
3430 err = do_fp_store(op, ea, regs, cross_endian);
3433 #ifdef CONFIG_ALTIVEC
3435 if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC))
3437 err = do_vec_store(op->reg, ea, size, regs, cross_endian);
3442 unsigned long msrbit = MSR_VSX;
3445 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
3446 * when the target of the instruction is a vector register.
3448 if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC))
3450 if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit))
3452 err = do_vsx_store(op, ea, regs, cross_endian);
3457 if (!address_ok(regs, ea, size))
3460 for (i = 0; i < size; i += 4) {
3461 unsigned int v32 = regs->gpr[rd];
3466 if (unlikely(cross_endian))
3467 v32 = byterev_4(v32);
3468 err = copy_mem_out((u8 *) &v32, ea, nb, regs);
3472 /* reg number wraps from 31 to 0 for stsw[ix] */
3473 rd = (rd + 1) & 0x1f;
3484 if (op->type & UPDATE)
3485 regs->gpr[op->update_reg] = op->ea;
3489 NOKPROBE_SYMBOL(emulate_loadstore);
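/*
 * Illustrative sketch, not part of the original file (the function name
 * and calling context are hypothetical): a minimal caller, essentially a
 * reduced emulate_step(), showing how the return values documented above
 * are consumed.
 */
#if 0	/* example only, never compiled */
static int try_emulate_one_loadstore(struct pt_regs *regs, struct ppc_inst instr)
{
	struct instruction_op op;
	int r;

	r = analyse_instr(&op, regs, instr);
	if (r < 0)
		return r;		/* must not be emulated (e.g. rfid) */
	if (r > 0) {
		/* register-only instruction: apply results, NIP is advanced */
		emulate_update_regs(regs, &op);
		return 0;
	}

	if (!OP_IS_LOAD_STORE(GETTYPE(op.type)))
		return -EINVAL;		/* not a load or store */

	r = emulate_loadstore(regs, &op);
	if (r == 0)
		/* success: step NIP past the (possibly prefixed) instruction */
		regs_set_return_ip(regs, truncate_if_32bit(regs->msr,
				regs->nip + GETLENGTH(op.type)));
	return r;	/* 0, -EFAULT, -EACCES or -EINVAL as documented above */
}
#endif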
3492 * Emulate instructions that cause a transfer of control,
3493 * loads and stores, and a few other instructions.
3494 * Returns 1 if the step was emulated, 0 if not,
3495 * or -1 if the instruction is one that should not be stepped,
3496 * such as an rfid, or a mtmsrd that would clear MSR_RI.
3498 int emulate_step(struct pt_regs *regs, struct ppc_inst instr)
3500 struct instruction_op op;
3505 r = analyse_instr(&op, regs, instr);
3509 emulate_update_regs(regs, &op);
3514 type = GETTYPE(op.type);
3516 if (OP_IS_LOAD_STORE(type)) {
3517 err = emulate_loadstore(regs, &op);
3525 ea = truncate_if_32bit(regs->msr, op.ea);
3526 if (!address_ok(regs, ea, 8))
3528 switch (op.type & CACHEOP_MASK) {
3530 __cacheop_user_asmx(ea, err, "dcbst");
3533 __cacheop_user_asmx(ea, err, "dcbf");
3537 prefetchw((void *) ea);
3541 prefetch((void *) ea);
3544 __cacheop_user_asmx(ea, err, "icbi");
3547 err = emulate_dcbz(ea, regs);
3557 regs->gpr[op.reg] = regs->msr & MSR_MASK;
3561 val = regs->gpr[op.reg];
3562 if ((val & MSR_RI) == 0)
3563 /* can't step mtmsr[d] that would clear MSR_RI */
3565 /* here op.val is the mask of bits to change */
3566 regs_set_return_msr(regs, (regs->msr & ~op.val) | (val & op.val));
3570 case SYSCALL: /* sc */
3572 * N.B. this uses knowledge about how the syscall
3573 * entry code works. If that is changed, this will
3574 * need to be changed also.
3576 if (IS_ENABLED(CONFIG_PPC_FAST_ENDIAN_SWITCH) &&
3577 cpu_has_feature(CPU_FTR_REAL_LE) &&
3578 regs->gpr[0] == 0x1ebe) {
3579 regs_set_return_msr(regs, regs->msr ^ MSR_LE);
3582 regs->gpr[9] = regs->gpr[13];
3583 regs->gpr[10] = MSR_KERNEL;
3584 regs->gpr[11] = regs->nip + 4;
3585 regs->gpr[12] = regs->msr & MSR_MASK;
3586 regs->gpr[13] = (unsigned long) get_paca();
3587 regs_set_return_ip(regs, (unsigned long) &system_call_common);
3588 regs_set_return_msr(regs, MSR_KERNEL);
3591 #ifdef CONFIG_PPC_BOOK3S_64
3592 case SYSCALL_VECTORED_0: /* scv 0 */
3593 regs->gpr[9] = regs->gpr[13];
3594 regs->gpr[10] = MSR_KERNEL;
3595 regs->gpr[11] = regs->nip + 4;
3596 regs->gpr[12] = regs->msr & MSR_MASK;
3597 regs->gpr[13] = (unsigned long) get_paca();
3598 regs_set_return_ip(regs, (unsigned long) &system_call_vectored_emulate);
3599 regs_set_return_msr(regs, MSR_KERNEL);
3610 regs_set_return_ip(regs,
3611 truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op.type)));
3614 NOKPROBE_SYMBOL(emulate_step);
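/*
 * Illustrative sketch, not part of the original file (the function name
 * and calling context are hypothetical): how a probe or single-step
 * handler might consume emulate_step()'s 1 / 0 / -1 return convention,
 * falling back to hardware single-stepping when the instruction was not
 * emulated.
 */
#if 0	/* example only, never compiled */
static bool step_over_instruction(struct pt_regs *regs, struct ppc_inst instr)
{
	int ret = emulate_step(regs, instr);

	if (ret > 0)
		return true;	/* emulated: NIP already points past it */

	/*
	 * ret == 0: not emulated; ret < 0: must not be stepped this way
	 * (e.g. rfid, or a mtmsrd that would clear MSR_RI). Either way
	 * the caller has to arrange a real single step or skip the probe.
	 */
	return false;
}
#endif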