1 // SPDX-License-Identifier: GPL-2.0-or-later
5 * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
7 #include <linux/kernel.h>
8 #include <linux/kprobes.h>
9 #include <linux/ptrace.h>
10 #include <linux/prefetch.h>
11 #include <asm/sstep.h>
12 #include <asm/processor.h>
13 #include <linux/uaccess.h>
14 #include <asm/cpu_has_feature.h>
15 #include <asm/cputable.h>
16 #include <asm/disassemble.h>
18 extern char system_call_common[];
19 extern char system_call_vectored_emulate[];
22 /* Bits in SRR1 that are copied from MSR */
23 #define MSR_MASK 0xffffffff87c0ffffUL
25 #define MSR_MASK 0x87c0ffff
29 #define XER_SO 0x80000000U
30 #define XER_OV 0x40000000U
31 #define XER_CA 0x20000000U
32 #define XER_OV32 0x00080000U
33 #define XER_CA32 0x00040000U
37 * Functions in ldstfp.S
39 extern void get_fpr(int rn, double *p);
40 extern void put_fpr(int rn, const double *p);
41 extern void get_vr(int rn, __vector128 *p);
42 extern void put_vr(int rn, __vector128 *p);
43 extern void load_vsrn(int vsr, const void *p);
44 extern void store_vsrn(int vsr, void *p);
45 extern void conv_sp_to_dp(const float *sp, double *dp);
46 extern void conv_dp_to_sp(const double *dp, float *sp);
53 extern int do_lq(unsigned long ea, unsigned long *regs);
54 extern int do_stq(unsigned long ea, unsigned long val0, unsigned long val1);
55 extern int do_lqarx(unsigned long ea, unsigned long *regs);
56 extern int do_stqcx(unsigned long ea, unsigned long val0, unsigned long val1,
60 #ifdef __LITTLE_ENDIAN__
69 * Emulate the truncation of 64-bit values in 32-bit mode.
71 static nokprobe_inline unsigned long truncate_if_32bit(unsigned long msr,
75 if ((msr & MSR_64BIT) == 0)
82 * Determine whether a conditional branch instruction would branch.
84 static nokprobe_inline int branch_taken(unsigned int instr,
85 const struct pt_regs *regs,
86 struct instruction_op *op)
88 unsigned int bo = (instr >> 21) & 0x1f;
92 /* decrement counter */
94 if (((bo >> 1) & 1) ^ (regs->ctr == 1))
97 if ((bo & 0x10) == 0) {
98 /* check bit from CR */
99 bi = (instr >> 16) & 0x1f;
100 if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))
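/*
 * Illustrative note: with this decoding, BO = 0b01100 (used by e.g.
 * blt and beq) has bit 0x04 set, so the CTR is left alone, and bit
 * 0x10 clear, so the CR bit selected by BI is compared with bit 0x08
 * of BO (1 here, meaning "branch if the condition bit is set").
 */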
106 static nokprobe_inline long address_ok(struct pt_regs *regs,
107 unsigned long ea, int nb)
109 if (!user_mode(regs))
111 if (access_ok((void __user *)ea, nb))
113 if (access_ok((void __user *)ea, 1))
114 /* Access overlaps the end of the user region */
115 regs->dar = TASK_SIZE_MAX - 1;
122 * Calculate effective address for a D-form instruction
124 static nokprobe_inline unsigned long dform_ea(unsigned int instr,
125 const struct pt_regs *regs)
130 ra = (instr >> 16) & 0x1f;
131 ea = (signed short) instr; /* sign-extend */
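/*
 * Example: "lwz r3,-8(r4)" encodes D = 0xfff8 in the low 16 bits,
 * which sign-extends to -8 and is added to GPR 4 (or to 0 when RA
 * is 0) to form the effective address.
 */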
140 * Calculate effective address for a DS-form instruction
142 static nokprobe_inline unsigned long dsform_ea(unsigned int instr,
143 const struct pt_regs *regs)
148 ra = (instr >> 16) & 0x1f;
149 ea = (signed short) (instr & ~3); /* sign-extend */
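/*
 * The two low bits are masked off before sign extension because in
 * DS form they belong to the extended opcode (for example they
 * distinguish ld, ldu and lwa under primary opcode 58), not to the
 * displacement.
 */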
157 * Calculate effective address for a DQ-form instruction
159 static nokprobe_inline unsigned long dqform_ea(unsigned int instr,
160 const struct pt_regs *regs)
165 ra = (instr >> 16) & 0x1f;
166 ea = (signed short) (instr & ~0xf); /* sign-extend */
172 #endif /* __powerpc64__ */
175 * Calculate effective address for an X-form instruction
177 static nokprobe_inline unsigned long xform_ea(unsigned int instr,
178 const struct pt_regs *regs)
183 ra = (instr >> 16) & 0x1f;
184 rb = (instr >> 11) & 0x1f;
193 * Calculate effective address for a MLS:D-form / 8LS:D-form
194 * prefixed instruction
196 static nokprobe_inline unsigned long mlsd_8lsd_ea(unsigned int instr,
198 const struct pt_regs *regs)
202 unsigned long ea, d0, d1, d;
204 prefix_r = GET_PREFIX_R(instr);
205 ra = GET_PREFIX_RA(suffix);
207 d0 = instr & 0x3ffff;
208 d1 = suffix & 0xffff;
212 * sign extend a 34 bit number
214 dd = (unsigned int)(d >> 2);
216 ea = (ea << 2) | (d & 0x3);
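/*
 * dd holds the upper 32 bits of the 34-bit displacement d; sign
 * extending dd and then re-attaching the two low-order bits of d,
 * as done here, sign extends the full 34-bit value.
 */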
220 else if (!prefix_r && !ra)
221 ; /* Leave ea as is */
226 * (prefix_r && ra) is an invalid form. Should already be
227 * checked for by caller!
234 * Return the largest power of 2, not greater than sizeof(unsigned long),
235 * such that x is a multiple of it.
237 static nokprobe_inline unsigned long max_align(unsigned long x)
239 x |= sizeof(unsigned long);
240 return x & -x; /* isolates rightmost bit */
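/*
 * Examples: max_align(6) = 2, max_align(12) = 4, and max_align(0) or
 * max_align(16) give sizeof(unsigned long), since OR-ing in
 * sizeof(unsigned long) caps the result at the word size.
 */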
243 static nokprobe_inline unsigned long byterev_2(unsigned long x)
245 return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
248 static nokprobe_inline unsigned long byterev_4(unsigned long x)
250 return ((x >> 24) & 0xff) | ((x >> 8) & 0xff00) |
251 ((x & 0xff00) << 8) | ((x & 0xff) << 24);
255 static nokprobe_inline unsigned long byterev_8(unsigned long x)
257 return (byterev_4(x) << 32) | byterev_4(x >> 32);
261 static nokprobe_inline void do_byte_reverse(void *ptr, int nb)
265 *(u16 *)ptr = byterev_2(*(u16 *)ptr);
268 *(u32 *)ptr = byterev_4(*(u32 *)ptr);
272 *(unsigned long *)ptr = byterev_8(*(unsigned long *)ptr);
275 unsigned long *up = (unsigned long *)ptr;
277 tmp = byterev_8(up[0]);
278 up[0] = byterev_8(up[1]);
288 static nokprobe_inline int read_mem_aligned(unsigned long *dest,
289 unsigned long ea, int nb,
290 struct pt_regs *regs)
297 err = __get_user(x, (unsigned char __user *) ea);
300 err = __get_user(x, (unsigned short __user *) ea);
303 err = __get_user(x, (unsigned int __user *) ea);
307 err = __get_user(x, (unsigned long __user *) ea);
319 * Copy from userspace to a buffer, using the largest possible
320 * aligned accesses, up to sizeof(long).
322 static nokprobe_inline int copy_mem_in(u8 *dest, unsigned long ea, int nb,
323 struct pt_regs *regs)
328 for (; nb > 0; nb -= c) {
334 err = __get_user(*dest, (unsigned char __user *) ea);
337 err = __get_user(*(u16 *)dest,
338 (unsigned short __user *) ea);
341 err = __get_user(*(u32 *)dest,
342 (unsigned int __user *) ea);
346 err = __get_user(*(unsigned long *)dest,
347 (unsigned long __user *) ea);
361 static nokprobe_inline int read_mem_unaligned(unsigned long *dest,
362 unsigned long ea, int nb,
363 struct pt_regs *regs)
367 u8 b[sizeof(unsigned long)];
373 i = IS_BE ? sizeof(unsigned long) - nb : 0;
374 err = copy_mem_in(&u.b[i], ea, nb, regs);
381 * Read memory at address ea for nb bytes, return 0 for success
382 * or -EFAULT if an error occurred. N.B. nb must be 1, 2, 4 or 8.
383 * If nb < sizeof(long), the result is right-justified on BE systems.
385 static int read_mem(unsigned long *dest, unsigned long ea, int nb,
386 struct pt_regs *regs)
388 if (!address_ok(regs, ea, nb))
390 if ((ea & (nb - 1)) == 0)
391 return read_mem_aligned(dest, ea, nb, regs);
392 return read_mem_unaligned(dest, ea, nb, regs);
394 NOKPROBE_SYMBOL(read_mem);
396 static nokprobe_inline int write_mem_aligned(unsigned long val,
397 unsigned long ea, int nb,
398 struct pt_regs *regs)
404 err = __put_user(val, (unsigned char __user *) ea);
407 err = __put_user(val, (unsigned short __user *) ea);
410 err = __put_user(val, (unsigned int __user *) ea);
414 err = __put_user(val, (unsigned long __user *) ea);
424 * Copy from a buffer to userspace, using the largest possible
425 * aligned accesses, up to sizeof(long).
427 static nokprobe_inline int copy_mem_out(u8 *dest, unsigned long ea, int nb,
428 struct pt_regs *regs)
433 for (; nb > 0; nb -= c) {
439 err = __put_user(*dest, (unsigned char __user *) ea);
442 err = __put_user(*(u16 *)dest,
443 (unsigned short __user *) ea);
446 err = __put_user(*(u32 *)dest,
447 (unsigned int __user *) ea);
451 err = __put_user(*(unsigned long *)dest,
452 (unsigned long __user *) ea);
466 static nokprobe_inline int write_mem_unaligned(unsigned long val,
467 unsigned long ea, int nb,
468 struct pt_regs *regs)
472 u8 b[sizeof(unsigned long)];
477 i = IS_BE ? sizeof(unsigned long) - nb : 0;
478 return copy_mem_out(&u.b[i], ea, nb, regs);
482 * Write memory at address ea for nb bytes, return 0 for success
483 * or -EFAULT if an error occurred. N.B. nb must be 1, 2, 4 or 8.
485 static int write_mem(unsigned long val, unsigned long ea, int nb,
486 struct pt_regs *regs)
488 if (!address_ok(regs, ea, nb))
490 if ((ea & (nb - 1)) == 0)
491 return write_mem_aligned(val, ea, nb, regs);
492 return write_mem_unaligned(val, ea, nb, regs);
494 NOKPROBE_SYMBOL(write_mem);
496 #ifdef CONFIG_PPC_FPU
498 * These access either the real FP register or the image in the
499 * thread_struct, depending on regs->msr & MSR_FP.
501 static int do_fp_load(struct instruction_op *op, unsigned long ea,
502 struct pt_regs *regs, bool cross_endian)
511 u8 b[2 * sizeof(double)];
514 nb = GETSIZE(op->type);
517 if (!address_ok(regs, ea, nb))
520 err = copy_mem_in(u.b, ea, nb, regs);
523 if (unlikely(cross_endian)) {
524 do_byte_reverse(u.b, min(nb, 8));
526 do_byte_reverse(&u.b[8], 8);
530 if (op->type & FPCONV)
531 conv_sp_to_dp(&u.f, &u.d[0]);
532 else if (op->type & SIGNEXT)
537 if (regs->msr & MSR_FP)
538 put_fpr(rn, &u.d[0]);
540 current->thread.TS_FPR(rn) = u.l[0];
544 if (regs->msr & MSR_FP)
545 put_fpr(rn, &u.d[1]);
547 current->thread.TS_FPR(rn) = u.l[1];
552 NOKPROBE_SYMBOL(do_fp_load);
554 static int do_fp_store(struct instruction_op *op, unsigned long ea,
555 struct pt_regs *regs, bool cross_endian)
563 u8 b[2 * sizeof(double)];
566 nb = GETSIZE(op->type);
569 if (!address_ok(regs, ea, nb))
573 if (regs->msr & MSR_FP)
574 get_fpr(rn, &u.d[0]);
576 u.l[0] = current->thread.TS_FPR(rn);
578 if (op->type & FPCONV)
579 conv_dp_to_sp(&u.d[0], &u.f);
585 if (regs->msr & MSR_FP)
586 get_fpr(rn, &u.d[1]);
588 u.l[1] = current->thread.TS_FPR(rn);
591 if (unlikely(cross_endian)) {
592 do_byte_reverse(u.b, min(nb, 8));
594 do_byte_reverse(&u.b[8], 8);
596 return copy_mem_out(u.b, ea, nb, regs);
598 NOKPROBE_SYMBOL(do_fp_store);
601 #ifdef CONFIG_ALTIVEC
602 /* For Altivec/VMX, no need to worry about alignment */
603 static nokprobe_inline int do_vec_load(int rn, unsigned long ea,
604 int size, struct pt_regs *regs,
610 u8 b[sizeof(__vector128)];
613 if (size > sizeof(u))
616 if (!address_ok(regs, ea & ~0xfUL, 16))
618 /* align to multiple of size */
620 err = copy_mem_in(&u.b[ea & 0xf], ea, size, regs);
623 if (unlikely(cross_endian))
624 do_byte_reverse(&u.b[ea & 0xf], size);
626 if (regs->msr & MSR_VEC)
629 current->thread.vr_state.vr[rn] = u.v;
634 static nokprobe_inline int do_vec_store(int rn, unsigned long ea,
635 int size, struct pt_regs *regs,
640 u8 b[sizeof(__vector128)];
643 if (size > sizeof(u))
646 if (!address_ok(regs, ea & ~0xfUL, 16))
648 /* align to multiple of size */
652 if (regs->msr & MSR_VEC)
655 u.v = current->thread.vr_state.vr[rn];
657 if (unlikely(cross_endian))
658 do_byte_reverse(&u.b[ea & 0xf], size);
659 return copy_mem_out(&u.b[ea & 0xf], ea, size, regs);
661 #endif /* CONFIG_ALTIVEC */
664 static nokprobe_inline int emulate_lq(struct pt_regs *regs, unsigned long ea,
665 int reg, bool cross_endian)
669 if (!address_ok(regs, ea, 16))
671 /* if aligned, should be atomic */
672 if ((ea & 0xf) == 0) {
673 err = do_lq(ea, &regs->gpr[reg]);
675 err = read_mem(&regs->gpr[reg + IS_LE], ea, 8, regs);
677 err = read_mem(&regs->gpr[reg + IS_BE], ea + 8, 8, regs);
679 if (!err && unlikely(cross_endian))
680 do_byte_reverse(&regs->gpr[reg], 16);
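/*
 * The IS_LE/IS_BE indexing in the unaligned path keeps lq's register
 * convention: gpr[reg] ends up with the high-order doubleword of the
 * quadword and gpr[reg + 1] with the low-order one, whichever memory
 * doubleword that corresponds to on the current endianness.
 */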
684 static nokprobe_inline int emulate_stq(struct pt_regs *regs, unsigned long ea,
685 int reg, bool cross_endian)
688 unsigned long vals[2];
690 if (!address_ok(regs, ea, 16))
692 vals[0] = regs->gpr[reg];
693 vals[1] = regs->gpr[reg + 1];
694 if (unlikely(cross_endian))
695 do_byte_reverse(vals, 16);
697 /* if aligned, should be atomic */
699 return do_stq(ea, vals[0], vals[1]);
701 err = write_mem(vals[IS_LE], ea, 8, regs);
703 err = write_mem(vals[IS_BE], ea + 8, 8, regs);
706 #endif /* __powerpc64__ */
709 void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
710 const void *mem, bool rev)
714 const unsigned int *wp;
715 const unsigned short *hp;
716 const unsigned char *bp;
718 size = GETSIZE(op->type);
719 reg->d[0] = reg->d[1] = 0;
721 switch (op->element_size) {
723 /* whole vector; lxv[x] or lxvl[l] */
726 memcpy(reg, mem, size);
727 if (IS_LE && (op->vsx_flags & VSX_LDLEFT))
730 do_byte_reverse(reg, 16);
733 /* scalar loads, lxvd2x, lxvdsx */
734 read_size = (size >= 8) ? 8 : size;
735 i = IS_LE ? 8 : 8 - read_size;
736 memcpy(&reg->b[i], mem, read_size);
738 do_byte_reverse(&reg->b[i], 8);
740 if (op->type & SIGNEXT) {
741 /* size == 4 is the only case here */
742 reg->d[IS_LE] = (signed int) reg->d[IS_LE];
743 } else if (op->vsx_flags & VSX_FPCONV) {
745 conv_sp_to_dp(&reg->fp[1 + IS_LE],
751 unsigned long v = *(unsigned long *)(mem + 8);
752 reg->d[IS_BE] = !rev ? v : byterev_8(v);
753 } else if (op->vsx_flags & VSX_SPLAT)
754 reg->d[IS_BE] = reg->d[IS_LE];
760 for (j = 0; j < size / 4; ++j) {
761 i = IS_LE ? 3 - j : j;
762 reg->w[i] = !rev ? *wp++ : byterev_4(*wp++);
764 if (op->vsx_flags & VSX_SPLAT) {
765 u32 val = reg->w[IS_LE ? 3 : 0];
767 i = IS_LE ? 3 - j : j;
775 for (j = 0; j < size / 2; ++j) {
776 i = IS_LE ? 7 - j : j;
777 reg->h[i] = !rev ? *hp++ : byterev_2(*hp++);
783 for (j = 0; j < size; ++j) {
784 i = IS_LE ? 15 - j : j;
790 EXPORT_SYMBOL_GPL(emulate_vsx_load);
791 NOKPROBE_SYMBOL(emulate_vsx_load);
793 void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg,
796 int size, write_size;
803 size = GETSIZE(op->type);
805 switch (op->element_size) {
807 /* stxv, stxvx, stxvl, stxvll */
810 if (IS_LE && (op->vsx_flags & VSX_LDLEFT))
813 /* reverse 16 bytes */
814 buf.d[0] = byterev_8(reg->d[1]);
815 buf.d[1] = byterev_8(reg->d[0]);
818 memcpy(mem, reg, size);
821 /* scalar stores, stxvd2x */
822 write_size = (size >= 8) ? 8 : size;
823 i = IS_LE ? 8 : 8 - write_size;
824 if (size < 8 && op->vsx_flags & VSX_FPCONV) {
825 buf.d[0] = buf.d[1] = 0;
827 conv_dp_to_sp(&reg->dp[IS_LE], &buf.fp[1 + IS_LE]);
831 memcpy(mem, &reg->b[i], write_size);
833 memcpy(mem + 8, &reg->d[IS_BE], 8);
835 do_byte_reverse(mem, write_size);
837 do_byte_reverse(mem + 8, 8);
843 for (j = 0; j < size / 4; ++j) {
844 i = IS_LE ? 3 - j : j;
845 *wp++ = !rev ? reg->w[i] : byterev_4(reg->w[i]);
851 for (j = 0; j < size / 2; ++j) {
852 i = IS_LE ? 7 - j : j;
853 *hp++ = !rev ? reg->h[i] : byterev_2(reg->h[i]);
859 for (j = 0; j < size; ++j) {
860 i = IS_LE ? 15 - j : j;
866 EXPORT_SYMBOL_GPL(emulate_vsx_store);
867 NOKPROBE_SYMBOL(emulate_vsx_store);
869 static nokprobe_inline int do_vsx_load(struct instruction_op *op,
870 unsigned long ea, struct pt_regs *regs,
876 int size = GETSIZE(op->type);
878 if (!address_ok(regs, ea, size) || copy_mem_in(mem, ea, size, regs))
881 emulate_vsx_load(op, &buf, mem, cross_endian);
884 /* FP regs + extensions */
885 if (regs->msr & MSR_FP) {
886 load_vsrn(reg, &buf);
888 current->thread.fp_state.fpr[reg][0] = buf.d[0];
889 current->thread.fp_state.fpr[reg][1] = buf.d[1];
892 if (regs->msr & MSR_VEC)
893 load_vsrn(reg, &buf);
895 current->thread.vr_state.vr[reg - 32] = buf.v;
901 static nokprobe_inline int do_vsx_store(struct instruction_op *op,
902 unsigned long ea, struct pt_regs *regs,
908 int size = GETSIZE(op->type);
910 if (!address_ok(regs, ea, size))
915 /* FP regs + extensions */
916 if (regs->msr & MSR_FP) {
917 store_vsrn(reg, &buf);
919 buf.d[0] = current->thread.fp_state.fpr[reg][0];
920 buf.d[1] = current->thread.fp_state.fpr[reg][1];
923 if (regs->msr & MSR_VEC)
924 store_vsrn(reg, &buf);
926 buf.v = current->thread.vr_state.vr[reg - 32];
929 emulate_vsx_store(op, &buf, mem, cross_endian);
930 return copy_mem_out(mem, ea, size, regs);
932 #endif /* CONFIG_VSX */
934 int emulate_dcbz(unsigned long ea, struct pt_regs *regs)
937 unsigned long i, size;
940 size = ppc64_caches.l1d.block_size;
941 if (!(regs->msr & MSR_64BIT))
944 size = L1_CACHE_BYTES;
947 if (!address_ok(regs, ea, size))
949 for (i = 0; i < size; i += sizeof(long)) {
950 err = __put_user(0, (unsigned long __user *) (ea + i));
958 NOKPROBE_SYMBOL(emulate_dcbz);
960 #define __put_user_asmx(x, addr, err, op, cr) \
961 __asm__ __volatile__( \
963 ".machine power8\n" \
964 "1: " op " %2,0,%3\n" \
968 ".section .fixup,\"ax\"\n" \
973 : "=r" (err), "=r" (cr) \
974 : "r" (x), "r" (addr), "i" (-EFAULT), "0" (err))
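/*
 * These macros wrap a single load-reserve or store-conditional
 * instruction (passed in as "op") with a fixup entry, so a fault is
 * reported as -EFAULT in err instead of oopsing; the store variant
 * also hands back CR0 in "cr" so the caller can tell whether the
 * store-conditional succeeded.
 */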
976 #define __get_user_asmx(x, addr, err, op) \
977 __asm__ __volatile__( \
979 ".machine power8\n" \
980 "1: "op" %1,0,%2\n" \
983 ".section .fixup,\"ax\"\n" \
988 : "=r" (err), "=r" (x) \
989 : "r" (addr), "i" (-EFAULT), "0" (err))
991 #define __cacheop_user_asmx(addr, err, op) \
992 __asm__ __volatile__( \
995 ".section .fixup,\"ax\"\n" \
1001 : "r" (addr), "i" (-EFAULT), "0" (err))
1003 static nokprobe_inline void set_cr0(const struct pt_regs *regs,
1004 struct instruction_op *op)
1009 op->ccval = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
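/*
 * CR field 0 layout in ccval: 0x80000000 = LT, 0x40000000 = GT,
 * 0x20000000 = EQ, 0x10000000 = SO; the SO bit is copied from
 * XER[SO] (bit 0x80000000), hence the shift right by 3.
 */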
1010 #ifdef __powerpc64__
1011 if (!(regs->msr & MSR_64BIT))
1015 op->ccval |= 0x80000000;
1017 op->ccval |= 0x40000000;
1019 op->ccval |= 0x20000000;
1022 static nokprobe_inline void set_ca32(struct instruction_op *op, bool val)
1024 if (cpu_has_feature(CPU_FTR_ARCH_300)) {
1026 op->xerval |= XER_CA32;
1028 op->xerval &= ~XER_CA32;
1032 static nokprobe_inline void add_with_carry(const struct pt_regs *regs,
1033 struct instruction_op *op, int rd,
1034 unsigned long val1, unsigned long val2,
1035 unsigned long carry_in)
1037 unsigned long val = val1 + val2;
1041 op->type = COMPUTE + SETREG + SETXER;
1044 #ifdef __powerpc64__
1045 if (!(regs->msr & MSR_64BIT)) {
1046 val = (unsigned int) val;
1047 val1 = (unsigned int) val1;
1050 op->xerval = regs->xer;
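/*
 * Unsigned overflow out of the addition (the carry) happened exactly
 * when the result is smaller than one addend, or equal to it when a
 * carry came in; the same test on the low 32 bits gives CA32.
 */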
1051 if (val < val1 || (carry_in && val == val1))
1052 op->xerval |= XER_CA;
1054 op->xerval &= ~XER_CA;
1056 set_ca32(op, (unsigned int)val < (unsigned int)val1 ||
1057 (carry_in && (unsigned int)val == (unsigned int)val1));
1060 static nokprobe_inline void do_cmp_signed(const struct pt_regs *regs,
1061 struct instruction_op *op,
1062 long v1, long v2, int crfld)
1064 unsigned int crval, shift;
1066 op->type = COMPUTE + SETCC;
1067 crval = (regs->xer >> 31) & 1; /* get SO bit */
1074 shift = (7 - crfld) * 4;
1075 op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift);
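/*
 * CR field crfld occupies bits (7 - crfld) * 4 onwards of the CR
 * image, with LT = 8, GT = 4, EQ = 2 and SO = 1 within the nibble;
 * the mask-and-or above splices the new nibble into regs->ccr.
 */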
1078 static nokprobe_inline void do_cmp_unsigned(const struct pt_regs *regs,
1079 struct instruction_op *op,
1081 unsigned long v2, int crfld)
1083 unsigned int crval, shift;
1085 op->type = COMPUTE + SETCC;
1086 crval = (regs->xer >> 31) & 1; /* get SO bit */
1093 shift = (7 - crfld) * 4;
1094 op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift);
1097 static nokprobe_inline void do_cmpb(const struct pt_regs *regs,
1098 struct instruction_op *op,
1099 unsigned long v1, unsigned long v2)
1101 unsigned long long out_val, mask;
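/*
 * cmpb sets each byte of the result to 0xff where the corresponding
 * bytes of v1 and v2 are equal, and to 0x00 where they differ.
 */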
1105 for (i = 0; i < 8; i++) {
1106 mask = 0xffUL << (i * 8);
1107 if ((v1 & mask) == (v2 & mask))
1114 * The size parameter selects which popcnt variant is being emulated:
1115 * popcntb = 8, popcntw = 32, popcntd = 64.
1117 static nokprobe_inline void do_popcnt(const struct pt_regs *regs,
1118 struct instruction_op *op,
1119 unsigned long v1, int size)
1121 unsigned long long out = v1;
1123 out -= (out >> 1) & 0x5555555555555555ULL;
1124 out = (0x3333333333333333ULL & out) +
1125 (0x3333333333333333ULL & (out >> 2));
1126 out = (out + (out >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
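/*
 * Classic SWAR population count: after the three steps above, each
 * byte of "out" holds the number of 1 bits in the corresponding byte
 * of v1, which is already the popcntb result.
 */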
1128 if (size == 8) { /* popcntb */
1134 if (size == 32) { /* popcntw */
1135 op->val = out & 0x0000003f0000003fULL;
1139 out = (out + (out >> 32)) & 0x7f;
1140 op->val = out; /* popcntd */
1144 static nokprobe_inline void do_bpermd(const struct pt_regs *regs,
1145 struct instruction_op *op,
1146 unsigned long v1, unsigned long v2)
1148 unsigned char perm, idx;
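/*
 * bpermd gathers eight bits: byte i of v1 (counting from the least
 * significant byte) supplies a bit index in big-endian numbering
 * (matching PPC_BIT), and the selected bit of v2, or 0 for indexes
 * of 64 and above, becomes bit i of the result.
 */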
1152 for (i = 0; i < 8; i++) {
1153 idx = (v1 >> (i * 8)) & 0xff;
1155 if (v2 & PPC_BIT(idx))
1160 #endif /* CONFIG_PPC64 */
1162 * The size parameter selects which prty variant is being emulated:
1163 * prtyw = 32, prtyd = 64.
1165 static nokprobe_inline void do_prty(const struct pt_regs *regs,
1166 struct instruction_op *op,
1167 unsigned long v, int size)
1169 unsigned long long res = v ^ (v >> 8);
1172 if (size == 32) { /* prtyw */
1173 op->val = res & 0x0000000100000001ULL;
1178 op->val = res & 1; /* prtyd */
1181 static nokprobe_inline int trap_compare(long v1, long v2)
1191 if ((unsigned long)v1 < (unsigned long)v2)
1193 else if ((unsigned long)v1 > (unsigned long)v2)
1199 * Elements of 32-bit rotate and mask instructions.
1201 #define MASK32(mb, me) ((0xffffffffUL >> (mb)) + \
1202 ((signed long)-0x80000000L >> (me)) + ((me) >= (mb)))
1203 #ifdef __powerpc64__
1204 #define MASK64_L(mb) (~0UL >> (mb))
1205 #define MASK64_R(me) ((signed long)-0x8000000000000000L >> (me))
1206 #define MASK64(mb, me) (MASK64_L(mb) + MASK64_R(me) + ((me) >= (mb)))
1207 #define DATA32(x) (((x) & 0xffffffffUL) | (((x) & 0xffffffffUL) << 32))
1209 #define DATA32(x) (x)
1211 #define ROTATE(x, n) ((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x))
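/*
 * These masks use IBM bit numbering (bit 0 is the most significant),
 * e.g. MASK32(1, 30) = 0x7ffffffe, and wrap around when me < mb.
 * ROTATE() special-cases n == 0 so it never shifts by the full word
 * width, which would be undefined behaviour in C.
 */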
1214 * Decode an instruction, and return information about it in *op
1215 * without changing *regs.
1216 * Integer arithmetic and logical instructions, branches, and barrier
1217 * instructions can be emulated just using the information in *op.
1219 * Return value is 1 if the instruction can be emulated just by
1220 * updating *regs with the information in *op, -1 if we need the
1221 * GPRs but *regs doesn't contain the full register set, or 0 otherwise.
1224 int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
1225 struct ppc_inst instr)
1228 unsigned int suffixopcode, prefixtype, prefix_r;
1230 unsigned int opcode, ra, rb, rc, rd, spr, u;
1231 unsigned long int imm;
1232 unsigned long int val, val2;
1233 unsigned int mb, me, sh;
1234 unsigned int word, suffix;
1237 word = ppc_inst_val(instr);
1238 suffix = ppc_inst_suffix(instr);
1242 opcode = ppc_inst_primary_opcode(instr);
1246 imm = (signed short)(word & 0xfffc);
1247 if ((word & 2) == 0)
1249 op->val = truncate_if_32bit(regs->msr, imm);
1252 if (branch_taken(word, regs, op))
1253 op->type |= BRTAKEN;
1257 if ((word & 0xfe2) == 2)
1259 else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
1260 (word & 0xfe3) == 1) { /* scv */
1261 op->type = SYSCALL_VECTORED_0;
1262 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1263 goto unknown_opcode;
1269 op->type = BRANCH | BRTAKEN;
1270 imm = word & 0x03fffffc;
1271 if (imm & 0x02000000)
1273 if ((word & 2) == 0)
1275 op->val = truncate_if_32bit(regs->msr, imm);
1280 switch ((word >> 1) & 0x3ff) {
1282 op->type = COMPUTE + SETCC;
1283 rd = 7 - ((word >> 23) & 0x7);
1284 ra = 7 - ((word >> 18) & 0x7);
1287 val = (regs->ccr >> ra) & 0xf;
1288 op->ccval = (regs->ccr & ~(0xfUL << rd)) | (val << rd);
1292 case 528: /* bcctr */
1294 imm = (word & 0x400)? regs->ctr: regs->link;
1295 op->val = truncate_if_32bit(regs->msr, imm);
1298 if (branch_taken(word, regs, op))
1299 op->type |= BRTAKEN;
1302 case 18: /* rfid, scary */
1303 if (regs->msr & MSR_PR)
1308 case 150: /* isync */
1309 op->type = BARRIER | BARRIER_ISYNC;
1312 case 33: /* crnor */
1313 case 129: /* crandc */
1314 case 193: /* crxor */
1315 case 225: /* crnand */
1316 case 257: /* crand */
1317 case 289: /* creqv */
1318 case 417: /* crorc */
1319 case 449: /* cror */
1320 op->type = COMPUTE + SETCC;
1321 ra = (word >> 16) & 0x1f;
1322 rb = (word >> 11) & 0x1f;
1323 rd = (word >> 21) & 0x1f;
1324 ra = (regs->ccr >> (31 - ra)) & 1;
1325 rb = (regs->ccr >> (31 - rb)) & 1;
1326 val = (word >> (6 + ra * 2 + rb)) & 1;
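/*
 * The CR-logical minor opcodes are chosen so that bits 6 to 9 of the
 * instruction word form the operation's truth table; indexing that
 * table with (2 * ra_bit + rb_bit) yields the result bit directly.
 */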
1327 op->ccval = (regs->ccr & ~(1UL << (31 - rd))) |
1333 switch ((word >> 1) & 0x3ff) {
1334 case 598: /* sync */
1335 op->type = BARRIER + BARRIER_SYNC;
1336 #ifdef __powerpc64__
1337 switch ((word >> 21) & 3) {
1338 case 1: /* lwsync */
1339 op->type = BARRIER + BARRIER_LWSYNC;
1341 case 2: /* ptesync */
1342 op->type = BARRIER + BARRIER_PTESYNC;
1348 case 854: /* eieio */
1349 op->type = BARRIER + BARRIER_EIEIO;
1355 /* Following cases refer to regs->gpr[], so we need all regs */
1356 if (!FULL_REGS(regs))
1359 rd = (word >> 21) & 0x1f;
1360 ra = (word >> 16) & 0x1f;
1361 rb = (word >> 11) & 0x1f;
1362 rc = (word >> 6) & 0x1f;
1365 #ifdef __powerpc64__
1367 if (!cpu_has_feature(CPU_FTR_ARCH_31))
1368 goto unknown_opcode;
1370 prefix_r = GET_PREFIX_R(word);
1371 ra = GET_PREFIX_RA(suffix);
1372 rd = (suffix >> 21) & 0x1f;
1374 op->val = regs->gpr[rd];
1375 suffixopcode = get_op(suffix);
1376 prefixtype = (word >> 24) & 0x3;
1377 switch (prefixtype) {
1381 switch (suffixopcode) {
1382 case 14: /* paddi */
1383 op->type = COMPUTE | PREFIXED;
1384 op->val = mlsd_8lsd_ea(word, suffix, regs);
1390 if (rd & trap_compare(regs->gpr[ra], (short) word))
1395 if (rd & trap_compare((int)regs->gpr[ra], (short) word))
1399 #ifdef __powerpc64__
1402 * There are very many instructions with this primary opcode
1403 * introduced in the ISA as early as v2.03. However, the ones
1404 * we currently emulate were all introduced with ISA 3.0
1406 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1407 goto unknown_opcode;
1409 switch (word & 0x3f) {
1410 case 48: /* maddhd */
1411 asm volatile(PPC_MADDHD(%0, %1, %2, %3) :
1412 "=r" (op->val) : "r" (regs->gpr[ra]),
1413 "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
1416 case 49: /* maddhdu */
1417 asm volatile(PPC_MADDHDU(%0, %1, %2, %3) :
1418 "=r" (op->val) : "r" (regs->gpr[ra]),
1419 "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
1422 case 51: /* maddld */
1423 asm volatile(PPC_MADDLD(%0, %1, %2, %3) :
1424 "=r" (op->val) : "r" (regs->gpr[ra]),
1425 "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
1430 * There are other instructions from ISA 3.0 with the same
1431 * primary opcode which do not have emulation support yet.
1433 goto unknown_opcode;
1437 op->val = regs->gpr[ra] * (short) word;
1440 case 8: /* subfic */
1442 add_with_carry(regs, op, rd, ~regs->gpr[ra], imm, 1);
1445 case 10: /* cmpli */
1446 imm = (unsigned short) word;
1447 val = regs->gpr[ra];
1448 #ifdef __powerpc64__
1450 val = (unsigned int) val;
1452 do_cmp_unsigned(regs, op, val, imm, rd >> 2);
1457 val = regs->gpr[ra];
1458 #ifdef __powerpc64__
1462 do_cmp_signed(regs, op, val, imm, rd >> 2);
1465 case 12: /* addic */
1467 add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
1470 case 13: /* addic. */
1472 add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
1479 imm += regs->gpr[ra];
1483 case 15: /* addis */
1484 imm = ((short) word) << 16;
1486 imm += regs->gpr[ra];
1491 if (((word >> 1) & 0x1f) == 2) {
1493 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1494 goto unknown_opcode;
1495 imm = (short) (word & 0xffc1); /* d0 + d2 fields */
1496 imm |= (word >> 15) & 0x3e; /* d1 field */
1497 op->val = regs->nip + (imm << 16) + 4;
1503 case 20: /* rlwimi */
1504 mb = (word >> 6) & 0x1f;
1505 me = (word >> 1) & 0x1f;
1506 val = DATA32(regs->gpr[rd]);
1507 imm = MASK32(mb, me);
1508 op->val = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm);
1511 case 21: /* rlwinm */
1512 mb = (word >> 6) & 0x1f;
1513 me = (word >> 1) & 0x1f;
1514 val = DATA32(regs->gpr[rd]);
1515 op->val = ROTATE(val, rb) & MASK32(mb, me);
1518 case 23: /* rlwnm */
1519 mb = (word >> 6) & 0x1f;
1520 me = (word >> 1) & 0x1f;
1521 rb = regs->gpr[rb] & 0x1f;
1522 val = DATA32(regs->gpr[rd]);
1523 op->val = ROTATE(val, rb) & MASK32(mb, me);
1527 op->val = regs->gpr[rd] | (unsigned short) word;
1528 goto logical_done_nocc;
1531 imm = (unsigned short) word;
1532 op->val = regs->gpr[rd] | (imm << 16);
1533 goto logical_done_nocc;
1536 op->val = regs->gpr[rd] ^ (unsigned short) word;
1537 goto logical_done_nocc;
1539 case 27: /* xoris */
1540 imm = (unsigned short) word;
1541 op->val = regs->gpr[rd] ^ (imm << 16);
1542 goto logical_done_nocc;
1544 case 28: /* andi. */
1545 op->val = regs->gpr[rd] & (unsigned short) word;
1547 goto logical_done_nocc;
1549 case 29: /* andis. */
1550 imm = (unsigned short) word;
1551 op->val = regs->gpr[rd] & (imm << 16);
1553 goto logical_done_nocc;
1555 #ifdef __powerpc64__
1557 mb = ((word >> 6) & 0x1f) | (word & 0x20);
1558 val = regs->gpr[rd];
1559 if ((word & 0x10) == 0) {
1560 sh = rb | ((word & 2) << 4);
1561 val = ROTATE(val, sh);
1562 switch ((word >> 2) & 3) {
1563 case 0: /* rldicl */
1564 val &= MASK64_L(mb);
1566 case 1: /* rldicr */
1567 val &= MASK64_R(mb);
1570 val &= MASK64(mb, 63 - sh);
1572 case 3: /* rldimi */
1573 imm = MASK64(mb, 63 - sh);
1574 val = (regs->gpr[ra] & ~imm) |
1580 sh = regs->gpr[rb] & 0x3f;
1581 val = ROTATE(val, sh);
1582 switch ((word >> 1) & 7) {
1584 op->val = val & MASK64_L(mb);
1587 op->val = val & MASK64_R(mb);
1592 op->type = UNKNOWN; /* illegal instruction */
1596 /* isel occupies 32 minor opcodes */
1597 if (((word >> 1) & 0x1f) == 15) {
1598 mb = (word >> 6) & 0x1f; /* bc field */
1599 val = (regs->ccr >> (31 - mb)) & 1;
1600 val2 = (ra) ? regs->gpr[ra] : 0;
1602 op->val = (val) ? val2 : regs->gpr[rb];
1606 switch ((word >> 1) & 0x3ff) {
1609 (rd & trap_compare((int)regs->gpr[ra],
1610 (int)regs->gpr[rb])))
1613 #ifdef __powerpc64__
1615 if (rd & trap_compare(regs->gpr[ra], regs->gpr[rb]))
1619 case 83: /* mfmsr */
1620 if (regs->msr & MSR_PR)
1625 case 146: /* mtmsr */
1626 if (regs->msr & MSR_PR)
1630 op->val = 0xffffffff & ~(MSR_ME | MSR_LE);
1633 case 178: /* mtmsrd */
1634 if (regs->msr & MSR_PR)
1638 /* only MSR_EE and MSR_RI get changed if bit 15 set */
1639 /* mtmsrd doesn't change MSR_HV, MSR_ME or MSR_LE */
1640 imm = (word & 0x10000)? 0x8002: 0xefffffffffffeffeUL;
1647 if ((word >> 20) & 1) {
1649 for (sh = 0; sh < 8; ++sh) {
1650 if (word & (0x80000 >> sh))
1655 op->val = regs->ccr & imm;
1658 case 144: /* mtcrf */
1659 op->type = COMPUTE + SETCC;
1661 val = regs->gpr[rd];
1662 op->ccval = regs->ccr;
1663 for (sh = 0; sh < 8; ++sh) {
1664 if (word & (0x80000 >> sh))
1665 op->ccval = (op->ccval & ~imm) |
1671 case 339: /* mfspr */
1672 spr = ((word >> 16) & 0x1f) | ((word >> 6) & 0x3e0);
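/*
 * The SPR number is encoded with its two 5-bit halves swapped in the
 * instruction, so they are reassembled here (and again for mtspr
 * below).
 */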
1676 if (spr == SPRN_XER || spr == SPRN_LR ||
1681 case 467: /* mtspr */
1682 spr = ((word >> 16) & 0x1f) | ((word >> 6) & 0x3e0);
1684 op->val = regs->gpr[rd];
1686 if (spr == SPRN_XER || spr == SPRN_LR ||
1692 * Compare instructions
1695 val = regs->gpr[ra];
1696 val2 = regs->gpr[rb];
1697 #ifdef __powerpc64__
1698 if ((rd & 1) == 0) {
1699 /* word (32-bit) compare */
1704 do_cmp_signed(regs, op, val, val2, rd >> 2);
1708 val = regs->gpr[ra];
1709 val2 = regs->gpr[rb];
1710 #ifdef __powerpc64__
1711 if ((rd & 1) == 0) {
1712 /* word (32-bit) compare */
1713 val = (unsigned int) val;
1714 val2 = (unsigned int) val2;
1717 do_cmp_unsigned(regs, op, val, val2, rd >> 2);
1720 case 508: /* cmpb */
1721 do_cmpb(regs, op, regs->gpr[rd], regs->gpr[rb]);
1722 goto logical_done_nocc;
1725 * Arithmetic instructions
1728 add_with_carry(regs, op, rd, ~regs->gpr[ra],
1731 #ifdef __powerpc64__
1732 case 9: /* mulhdu */
1733 asm("mulhdu %0,%1,%2" : "=r" (op->val) :
1734 "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1738 add_with_carry(regs, op, rd, regs->gpr[ra],
1742 case 11: /* mulhwu */
1743 asm("mulhwu %0,%1,%2" : "=r" (op->val) :
1744 "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1748 op->val = regs->gpr[rb] - regs->gpr[ra];
1750 #ifdef __powerpc64__
1751 case 73: /* mulhd */
1752 asm("mulhd %0,%1,%2" : "=r" (op->val) :
1753 "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1756 case 75: /* mulhw */
1757 asm("mulhw %0,%1,%2" : "=r" (op->val) :
1758 "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1762 op->val = -regs->gpr[ra];
1765 case 136: /* subfe */
1766 add_with_carry(regs, op, rd, ~regs->gpr[ra],
1767 regs->gpr[rb], regs->xer & XER_CA);
1770 case 138: /* adde */
1771 add_with_carry(regs, op, rd, regs->gpr[ra],
1772 regs->gpr[rb], regs->xer & XER_CA);
1775 case 200: /* subfze */
1776 add_with_carry(regs, op, rd, ~regs->gpr[ra], 0L,
1777 regs->xer & XER_CA);
1780 case 202: /* addze */
1781 add_with_carry(regs, op, rd, regs->gpr[ra], 0L,
1782 regs->xer & XER_CA);
1785 case 232: /* subfme */
1786 add_with_carry(regs, op, rd, ~regs->gpr[ra], -1L,
1787 regs->xer & XER_CA);
1789 #ifdef __powerpc64__
1790 case 233: /* mulld */
1791 op->val = regs->gpr[ra] * regs->gpr[rb];
1794 case 234: /* addme */
1795 add_with_carry(regs, op, rd, regs->gpr[ra], -1L,
1796 regs->xer & XER_CA);
1799 case 235: /* mullw */
1800 op->val = (long)(int) regs->gpr[ra] *
1801 (int) regs->gpr[rb];
1804 #ifdef __powerpc64__
1805 case 265: /* modud */
1806 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1807 goto unknown_opcode;
1808 op->val = regs->gpr[ra] % regs->gpr[rb];
1812 op->val = regs->gpr[ra] + regs->gpr[rb];
1815 case 267: /* moduw */
1816 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1817 goto unknown_opcode;
1818 op->val = (unsigned int) regs->gpr[ra] %
1819 (unsigned int) regs->gpr[rb];
1821 #ifdef __powerpc64__
1822 case 457: /* divdu */
1823 op->val = regs->gpr[ra] / regs->gpr[rb];
1826 case 459: /* divwu */
1827 op->val = (unsigned int) regs->gpr[ra] /
1828 (unsigned int) regs->gpr[rb];
1830 #ifdef __powerpc64__
1831 case 489: /* divd */
1832 op->val = (long int) regs->gpr[ra] /
1833 (long int) regs->gpr[rb];
1836 case 491: /* divw */
1837 op->val = (int) regs->gpr[ra] /
1838 (int) regs->gpr[rb];
1840 #ifdef __powerpc64__
1841 case 425: /* divde[.] */
1842 asm volatile(PPC_DIVDE(%0, %1, %2) :
1843 "=r" (op->val) : "r" (regs->gpr[ra]),
1844 "r" (regs->gpr[rb]));
1846 case 393: /* divdeu[.] */
1847 asm volatile(PPC_DIVDEU(%0, %1, %2) :
1848 "=r" (op->val) : "r" (regs->gpr[ra]),
1849 "r" (regs->gpr[rb]));
1852 case 755: /* darn */
1853 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1854 goto unknown_opcode;
1857 /* 32-bit conditioned */
1858 asm volatile(PPC_DARN(%0, 0) : "=r" (op->val));
1862 /* 64-bit conditioned */
1863 asm volatile(PPC_DARN(%0, 1) : "=r" (op->val));
1868 asm volatile(PPC_DARN(%0, 2) : "=r" (op->val));
1872 goto unknown_opcode;
1873 #ifdef __powerpc64__
1874 case 777: /* modsd */
1875 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1876 goto unknown_opcode;
1877 op->val = (long int) regs->gpr[ra] %
1878 (long int) regs->gpr[rb];
1881 case 779: /* modsw */
1882 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1883 goto unknown_opcode;
1884 op->val = (int) regs->gpr[ra] %
1885 (int) regs->gpr[rb];
1890 * Logical instructions
1892 case 26: /* cntlzw */
1893 val = (unsigned int) regs->gpr[rd];
1894 op->val = ( val ? __builtin_clz(val) : 32 );
1896 #ifdef __powerpc64__
1897 case 58: /* cntlzd */
1898 val = regs->gpr[rd];
1899 op->val = ( val ? __builtin_clzl(val) : 64 );
1903 op->val = regs->gpr[rd] & regs->gpr[rb];
1907 op->val = regs->gpr[rd] & ~regs->gpr[rb];
1910 case 122: /* popcntb */
1911 do_popcnt(regs, op, regs->gpr[rd], 8);
1912 goto logical_done_nocc;
1915 op->val = ~(regs->gpr[rd] | regs->gpr[rb]);
1918 case 154: /* prtyw */
1919 do_prty(regs, op, regs->gpr[rd], 32);
1920 goto logical_done_nocc;
1922 case 186: /* prtyd */
1923 do_prty(regs, op, regs->gpr[rd], 64);
1924 goto logical_done_nocc;
1926 case 252: /* bpermd */
1927 do_bpermd(regs, op, regs->gpr[rd], regs->gpr[rb]);
1928 goto logical_done_nocc;
1931 op->val = ~(regs->gpr[rd] ^ regs->gpr[rb]);
1935 op->val = regs->gpr[rd] ^ regs->gpr[rb];
1938 case 378: /* popcntw */
1939 do_popcnt(regs, op, regs->gpr[rd], 32);
1940 goto logical_done_nocc;
1943 op->val = regs->gpr[rd] | ~regs->gpr[rb];
1947 op->val = regs->gpr[rd] | regs->gpr[rb];
1950 case 476: /* nand */
1951 op->val = ~(regs->gpr[rd] & regs->gpr[rb]);
1954 case 506: /* popcntd */
1955 do_popcnt(regs, op, regs->gpr[rd], 64);
1956 goto logical_done_nocc;
1958 case 538: /* cnttzw */
1959 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1960 goto unknown_opcode;
1961 val = (unsigned int) regs->gpr[rd];
1962 op->val = (val ? __builtin_ctz(val) : 32);
1964 #ifdef __powerpc64__
1965 case 570: /* cnttzd */
1966 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1967 goto unknown_opcode;
1968 val = regs->gpr[rd];
1969 op->val = (val ? __builtin_ctzl(val) : 64);
1972 case 922: /* extsh */
1973 op->val = (signed short) regs->gpr[rd];
1976 case 954: /* extsb */
1977 op->val = (signed char) regs->gpr[rd];
1979 #ifdef __powerpc64__
1980 case 986: /* extsw */
1981 op->val = (signed int) regs->gpr[rd];
1986 * Shift instructions
1989 sh = regs->gpr[rb] & 0x3f;
1991 op->val = (regs->gpr[rd] << sh) & 0xffffffffUL;
1997 sh = regs->gpr[rb] & 0x3f;
1999 op->val = (regs->gpr[rd] & 0xffffffffUL) >> sh;
2004 case 792: /* sraw */
2005 op->type = COMPUTE + SETREG + SETXER;
2006 sh = regs->gpr[rb] & 0x3f;
2007 ival = (signed int) regs->gpr[rd];
2008 op->val = ival >> (sh < 32 ? sh : 31);
2009 op->xerval = regs->xer;
2010 if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0))
2011 op->xerval |= XER_CA;
2013 op->xerval &= ~XER_CA;
2014 set_ca32(op, op->xerval & XER_CA);
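/*
 * As specified for algebraic shifts, CA is set only when a negative
 * value has 1 bits shifted out; this is what lets an sraw/addze pair
 * implement division by a power of 2 that rounds toward zero.
 */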
2017 case 824: /* srawi */
2018 op->type = COMPUTE + SETREG + SETXER;
2020 ival = (signed int) regs->gpr[rd];
2021 op->val = ival >> sh;
2022 op->xerval = regs->xer;
2023 if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
2024 op->xerval |= XER_CA;
2026 op->xerval &= ~XER_CA;
2027 set_ca32(op, op->xerval & XER_CA);
2030 #ifdef __powerpc64__
2032 sh = regs->gpr[rb] & 0x7f;
2034 op->val = regs->gpr[rd] << sh;
2040 sh = regs->gpr[rb] & 0x7f;
2042 op->val = regs->gpr[rd] >> sh;
2047 case 794: /* srad */
2048 op->type = COMPUTE + SETREG + SETXER;
2049 sh = regs->gpr[rb] & 0x7f;
2050 ival = (signed long int) regs->gpr[rd];
2051 op->val = ival >> (sh < 64 ? sh : 63);
2052 op->xerval = regs->xer;
2053 if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0))
2054 op->xerval |= XER_CA;
2056 op->xerval &= ~XER_CA;
2057 set_ca32(op, op->xerval & XER_CA);
2060 case 826: /* sradi with sh_5 = 0 */
2061 case 827: /* sradi with sh_5 = 1 */
2062 op->type = COMPUTE + SETREG + SETXER;
2063 sh = rb | ((word & 2) << 4);
2064 ival = (signed long int) regs->gpr[rd];
2065 op->val = ival >> sh;
2066 op->xerval = regs->xer;
2067 if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
2068 op->xerval |= XER_CA;
2070 op->xerval &= ~XER_CA;
2071 set_ca32(op, op->xerval & XER_CA);
2074 case 890: /* extswsli with sh_5 = 0 */
2075 case 891: /* extswsli with sh_5 = 1 */
2076 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2077 goto unknown_opcode;
2078 op->type = COMPUTE + SETREG;
2079 sh = rb | ((word & 2) << 4);
2080 val = (signed int) regs->gpr[rd];
2082 op->val = ROTATE(val, sh) & MASK64(0, 63 - sh);
2087 #endif /* __powerpc64__ */
2090 * Cache instructions
2092 case 54: /* dcbst */
2093 op->type = MKOP(CACHEOP, DCBST, 0);
2094 op->ea = xform_ea(word, regs);
2098 op->type = MKOP(CACHEOP, DCBF, 0);
2099 op->ea = xform_ea(word, regs);
2102 case 246: /* dcbtst */
2103 op->type = MKOP(CACHEOP, DCBTST, 0);
2104 op->ea = xform_ea(word, regs);
2108 case 278: /* dcbt */
2109 op->type = MKOP(CACHEOP, DCBTST, 0);
2110 op->ea = xform_ea(word, regs);
2114 case 982: /* icbi */
2115 op->type = MKOP(CACHEOP, ICBI, 0);
2116 op->ea = xform_ea(word, regs);
2119 case 1014: /* dcbz */
2120 op->type = MKOP(CACHEOP, DCBZ, 0);
2121 op->ea = xform_ea(word, regs);
2131 op->update_reg = ra;
2133 op->val = regs->gpr[rd];
2134 u = (word >> 20) & UPDATE;
2140 op->ea = xform_ea(word, regs);
2141 switch ((word >> 1) & 0x3ff) {
2142 case 20: /* lwarx */
2143 op->type = MKOP(LARX, 0, 4);
2146 case 150: /* stwcx. */
2147 op->type = MKOP(STCX, 0, 4);
2150 #ifdef __powerpc64__
2151 case 84: /* ldarx */
2152 op->type = MKOP(LARX, 0, 8);
2155 case 214: /* stdcx. */
2156 op->type = MKOP(STCX, 0, 8);
2159 case 52: /* lbarx */
2160 op->type = MKOP(LARX, 0, 1);
2163 case 694: /* stbcx. */
2164 op->type = MKOP(STCX, 0, 1);
2167 case 116: /* lharx */
2168 op->type = MKOP(LARX, 0, 2);
2171 case 726: /* sthcx. */
2172 op->type = MKOP(STCX, 0, 2);
2175 case 276: /* lqarx */
2176 if (!((rd & 1) || rd == ra || rd == rb))
2177 op->type = MKOP(LARX, 0, 16);
2180 case 182: /* stqcx. */
2182 op->type = MKOP(STCX, 0, 16);
2187 case 55: /* lwzux */
2188 op->type = MKOP(LOAD, u, 4);
2192 case 119: /* lbzux */
2193 op->type = MKOP(LOAD, u, 1);
2196 #ifdef CONFIG_ALTIVEC
2198 * Note: for the load/store vector element instructions,
2199 * bits of the EA say which field of the VMX register to use.
2202 op->type = MKOP(LOAD_VMX, 0, 1);
2203 op->element_size = 1;
2206 case 39: /* lvehx */
2207 op->type = MKOP(LOAD_VMX, 0, 2);
2208 op->element_size = 2;
2211 case 71: /* lvewx */
2212 op->type = MKOP(LOAD_VMX, 0, 4);
2213 op->element_size = 4;
2217 case 359: /* lvxl */
2218 op->type = MKOP(LOAD_VMX, 0, 16);
2219 op->element_size = 16;
2222 case 135: /* stvebx */
2223 op->type = MKOP(STORE_VMX, 0, 1);
2224 op->element_size = 1;
2227 case 167: /* stvehx */
2228 op->type = MKOP(STORE_VMX, 0, 2);
2229 op->element_size = 2;
2232 case 199: /* stvewx */
2233 op->type = MKOP(STORE_VMX, 0, 4);
2234 op->element_size = 4;
2237 case 231: /* stvx */
2238 case 487: /* stvxl */
2239 op->type = MKOP(STORE_VMX, 0, 16);
2241 #endif /* CONFIG_ALTIVEC */
2243 #ifdef __powerpc64__
2246 op->type = MKOP(LOAD, u, 8);
2249 case 149: /* stdx */
2250 case 181: /* stdux */
2251 op->type = MKOP(STORE, u, 8);
2255 case 151: /* stwx */
2256 case 183: /* stwux */
2257 op->type = MKOP(STORE, u, 4);
2260 case 215: /* stbx */
2261 case 247: /* stbux */
2262 op->type = MKOP(STORE, u, 1);
2265 case 279: /* lhzx */
2266 case 311: /* lhzux */
2267 op->type = MKOP(LOAD, u, 2);
2270 #ifdef __powerpc64__
2271 case 341: /* lwax */
2272 case 373: /* lwaux */
2273 op->type = MKOP(LOAD, SIGNEXT | u, 4);
2277 case 343: /* lhax */
2278 case 375: /* lhaux */
2279 op->type = MKOP(LOAD, SIGNEXT | u, 2);
2282 case 407: /* sthx */
2283 case 439: /* sthux */
2284 op->type = MKOP(STORE, u, 2);
2287 #ifdef __powerpc64__
2288 case 532: /* ldbrx */
2289 op->type = MKOP(LOAD, BYTEREV, 8);
2293 case 533: /* lswx */
2294 op->type = MKOP(LOAD_MULTI, 0, regs->xer & 0x7f);
2297 case 534: /* lwbrx */
2298 op->type = MKOP(LOAD, BYTEREV, 4);
2301 case 597: /* lswi */
2303 rb = 32; /* # bytes to load */
2304 op->type = MKOP(LOAD_MULTI, 0, rb);
2305 op->ea = ra ? regs->gpr[ra] : 0;
2308 #ifdef CONFIG_PPC_FPU
2309 case 535: /* lfsx */
2310 case 567: /* lfsux */
2311 op->type = MKOP(LOAD_FP, u | FPCONV, 4);
2314 case 599: /* lfdx */
2315 case 631: /* lfdux */
2316 op->type = MKOP(LOAD_FP, u, 8);
2319 case 663: /* stfsx */
2320 case 695: /* stfsux */
2321 op->type = MKOP(STORE_FP, u | FPCONV, 4);
2324 case 727: /* stfdx */
2325 case 759: /* stfdux */
2326 op->type = MKOP(STORE_FP, u, 8);
2329 #ifdef __powerpc64__
2330 case 791: /* lfdpx */
2331 op->type = MKOP(LOAD_FP, 0, 16);
2334 case 855: /* lfiwax */
2335 op->type = MKOP(LOAD_FP, SIGNEXT, 4);
2338 case 887: /* lfiwzx */
2339 op->type = MKOP(LOAD_FP, 0, 4);
2342 case 919: /* stfdpx */
2343 op->type = MKOP(STORE_FP, 0, 16);
2346 case 983: /* stfiwx */
2347 op->type = MKOP(STORE_FP, 0, 4);
2349 #endif /* __powerpc64__ */
2350 #endif /* CONFIG_PPC_FPU */
2352 #ifdef __powerpc64__
2353 case 660: /* stdbrx */
2354 op->type = MKOP(STORE, BYTEREV, 8);
2355 op->val = byterev_8(regs->gpr[rd]);
2359 case 661: /* stswx */
2360 op->type = MKOP(STORE_MULTI, 0, regs->xer & 0x7f);
2363 case 662: /* stwbrx */
2364 op->type = MKOP(STORE, BYTEREV, 4);
2365 op->val = byterev_4(regs->gpr[rd]);
2368 case 725: /* stswi */
2370 rb = 32; /* # bytes to store */
2371 op->type = MKOP(STORE_MULTI, 0, rb);
2372 op->ea = ra ? regs->gpr[ra] : 0;
2375 case 790: /* lhbrx */
2376 op->type = MKOP(LOAD, BYTEREV, 2);
2379 case 918: /* sthbrx */
2380 op->type = MKOP(STORE, BYTEREV, 2);
2381 op->val = byterev_2(regs->gpr[rd]);
2385 case 12: /* lxsiwzx */
2386 op->reg = rd | ((word & 1) << 5);
2387 op->type = MKOP(LOAD_VSX, 0, 4);
2388 op->element_size = 8;
2391 case 76: /* lxsiwax */
2392 op->reg = rd | ((word & 1) << 5);
2393 op->type = MKOP(LOAD_VSX, SIGNEXT, 4);
2394 op->element_size = 8;
2397 case 140: /* stxsiwx */
2398 op->reg = rd | ((word & 1) << 5);
2399 op->type = MKOP(STORE_VSX, 0, 4);
2400 op->element_size = 8;
2403 case 268: /* lxvx */
2404 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2405 goto unknown_opcode;
2406 op->reg = rd | ((word & 1) << 5);
2407 op->type = MKOP(LOAD_VSX, 0, 16);
2408 op->element_size = 16;
2409 op->vsx_flags = VSX_CHECK_VEC;
2412 case 269: /* lxvl */
2413 case 301: { /* lxvll */
2415 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2416 goto unknown_opcode;
2417 op->reg = rd | ((word & 1) << 5);
2418 op->ea = ra ? regs->gpr[ra] : 0;
2419 nb = regs->gpr[rb] & 0xff;
2422 op->type = MKOP(LOAD_VSX, 0, nb);
2423 op->element_size = 16;
2424 op->vsx_flags = ((word & 0x20) ? VSX_LDLEFT : 0) |
2428 case 332: /* lxvdsx */
2429 op->reg = rd | ((word & 1) << 5);
2430 op->type = MKOP(LOAD_VSX, 0, 8);
2431 op->element_size = 8;
2432 op->vsx_flags = VSX_SPLAT;
2435 case 364: /* lxvwsx */
2436 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2437 goto unknown_opcode;
2438 op->reg = rd | ((word & 1) << 5);
2439 op->type = MKOP(LOAD_VSX, 0, 4);
2440 op->element_size = 4;
2441 op->vsx_flags = VSX_SPLAT | VSX_CHECK_VEC;
2444 case 396: /* stxvx */
2445 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2446 goto unknown_opcode;
2447 op->reg = rd | ((word & 1) << 5);
2448 op->type = MKOP(STORE_VSX, 0, 16);
2449 op->element_size = 16;
2450 op->vsx_flags = VSX_CHECK_VEC;
2453 case 397: /* stxvl */
2454 case 429: { /* stxvll */
2456 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2457 goto unknown_opcode;
2458 op->reg = rd | ((word & 1) << 5);
2459 op->ea = ra ? regs->gpr[ra] : 0;
2460 nb = regs->gpr[rb] & 0xff;
2463 op->type = MKOP(STORE_VSX, 0, nb);
2464 op->element_size = 16;
2465 op->vsx_flags = ((word & 0x20) ? VSX_LDLEFT : 0) |
2469 case 524: /* lxsspx */
2470 op->reg = rd | ((word & 1) << 5);
2471 op->type = MKOP(LOAD_VSX, 0, 4);
2472 op->element_size = 8;
2473 op->vsx_flags = VSX_FPCONV;
2476 case 588: /* lxsdx */
2477 op->reg = rd | ((word & 1) << 5);
2478 op->type = MKOP(LOAD_VSX, 0, 8);
2479 op->element_size = 8;
2482 case 652: /* stxsspx */
2483 op->reg = rd | ((word & 1) << 5);
2484 op->type = MKOP(STORE_VSX, 0, 4);
2485 op->element_size = 8;
2486 op->vsx_flags = VSX_FPCONV;
2489 case 716: /* stxsdx */
2490 op->reg = rd | ((word & 1) << 5);
2491 op->type = MKOP(STORE_VSX, 0, 8);
2492 op->element_size = 8;
2495 case 780: /* lxvw4x */
2496 op->reg = rd | ((word & 1) << 5);
2497 op->type = MKOP(LOAD_VSX, 0, 16);
2498 op->element_size = 4;
2501 case 781: /* lxsibzx */
2502 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2503 goto unknown_opcode;
2504 op->reg = rd | ((word & 1) << 5);
2505 op->type = MKOP(LOAD_VSX, 0, 1);
2506 op->element_size = 8;
2507 op->vsx_flags = VSX_CHECK_VEC;
2510 case 812: /* lxvh8x */
2511 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2512 goto unknown_opcode;
2513 op->reg = rd | ((word & 1) << 5);
2514 op->type = MKOP(LOAD_VSX, 0, 16);
2515 op->element_size = 2;
2516 op->vsx_flags = VSX_CHECK_VEC;
2519 case 813: /* lxsihzx */
2520 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2521 goto unknown_opcode;
2522 op->reg = rd | ((word & 1) << 5);
2523 op->type = MKOP(LOAD_VSX, 0, 2);
2524 op->element_size = 8;
2525 op->vsx_flags = VSX_CHECK_VEC;
2528 case 844: /* lxvd2x */
2529 op->reg = rd | ((word & 1) << 5);
2530 op->type = MKOP(LOAD_VSX, 0, 16);
2531 op->element_size = 8;
2534 case 876: /* lxvb16x */
2535 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2536 goto unknown_opcode;
2537 op->reg = rd | ((word & 1) << 5);
2538 op->type = MKOP(LOAD_VSX, 0, 16);
2539 op->element_size = 1;
2540 op->vsx_flags = VSX_CHECK_VEC;
2543 case 908: /* stxvw4x */
2544 op->reg = rd | ((word & 1) << 5);
2545 op->type = MKOP(STORE_VSX, 0, 16);
2546 op->element_size = 4;
2549 case 909: /* stxsibx */
2550 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2551 goto unknown_opcode;
2552 op->reg = rd | ((word & 1) << 5);
2553 op->type = MKOP(STORE_VSX, 0, 1);
2554 op->element_size = 8;
2555 op->vsx_flags = VSX_CHECK_VEC;
2558 case 940: /* stxvh8x */
2559 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2560 goto unknown_opcode;
2561 op->reg = rd | ((word & 1) << 5);
2562 op->type = MKOP(STORE_VSX, 0, 16);
2563 op->element_size = 2;
2564 op->vsx_flags = VSX_CHECK_VEC;
2567 case 941: /* stxsihx */
2568 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2569 goto unknown_opcode;
2570 op->reg = rd | ((word & 1) << 5);
2571 op->type = MKOP(STORE_VSX, 0, 2);
2572 op->element_size = 8;
2573 op->vsx_flags = VSX_CHECK_VEC;
2576 case 972: /* stxvd2x */
2577 op->reg = rd | ((word & 1) << 5);
2578 op->type = MKOP(STORE_VSX, 0, 16);
2579 op->element_size = 8;
2582 case 1004: /* stxvb16x */
2583 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2584 goto unknown_opcode;
2585 op->reg = rd | ((word & 1) << 5);
2586 op->type = MKOP(STORE_VSX, 0, 16);
2587 op->element_size = 1;
2588 op->vsx_flags = VSX_CHECK_VEC;
2591 #endif /* CONFIG_VSX */
2597 op->type = MKOP(LOAD, u, 4);
2598 op->ea = dform_ea(word, regs);
2603 op->type = MKOP(LOAD, u, 1);
2604 op->ea = dform_ea(word, regs);
2609 op->type = MKOP(STORE, u, 4);
2610 op->ea = dform_ea(word, regs);
2615 op->type = MKOP(STORE, u, 1);
2616 op->ea = dform_ea(word, regs);
2621 op->type = MKOP(LOAD, u, 2);
2622 op->ea = dform_ea(word, regs);
2627 op->type = MKOP(LOAD, SIGNEXT | u, 2);
2628 op->ea = dform_ea(word, regs);
2633 op->type = MKOP(STORE, u, 2);
2634 op->ea = dform_ea(word, regs);
2639 break; /* invalid form, ra in range to load */
2640 op->type = MKOP(LOAD_MULTI, 0, 4 * (32 - rd));
2641 op->ea = dform_ea(word, regs);
2645 op->type = MKOP(STORE_MULTI, 0, 4 * (32 - rd));
2646 op->ea = dform_ea(word, regs);
2649 #ifdef CONFIG_PPC_FPU
2652 op->type = MKOP(LOAD_FP, u | FPCONV, 4);
2653 op->ea = dform_ea(word, regs);
2658 op->type = MKOP(LOAD_FP, u, 8);
2659 op->ea = dform_ea(word, regs);
2663 case 53: /* stfsu */
2664 op->type = MKOP(STORE_FP, u | FPCONV, 4);
2665 op->ea = dform_ea(word, regs);
2669 case 55: /* stfdu */
2670 op->type = MKOP(STORE_FP, u, 8);
2671 op->ea = dform_ea(word, regs);
2675 #ifdef __powerpc64__
2677 if (!((rd & 1) || (rd == ra)))
2678 op->type = MKOP(LOAD, 0, 16);
2679 op->ea = dqform_ea(word, regs);
2684 case 57: /* lfdp, lxsd, lxssp */
2685 op->ea = dsform_ea(word, regs);
2689 break; /* reg must be even */
2690 op->type = MKOP(LOAD_FP, 0, 16);
2693 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2694 goto unknown_opcode;
2696 op->type = MKOP(LOAD_VSX, 0, 8);
2697 op->element_size = 8;
2698 op->vsx_flags = VSX_CHECK_VEC;
2701 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2702 goto unknown_opcode;
2704 op->type = MKOP(LOAD_VSX, 0, 4);
2705 op->element_size = 8;
2706 op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2710 #endif /* CONFIG_VSX */
2712 #ifdef __powerpc64__
2713 case 58: /* ld[u], lwa */
2714 op->ea = dsform_ea(word, regs);
2717 op->type = MKOP(LOAD, 0, 8);
2720 op->type = MKOP(LOAD, UPDATE, 8);
2723 op->type = MKOP(LOAD, SIGNEXT, 4);
2730 case 61: /* stfdp, lxv, stxsd, stxssp, stxv */
2732 case 0: /* stfdp with LSB of DS field = 0 */
2733 case 4: /* stfdp with LSB of DS field = 1 */
2734 op->ea = dsform_ea(word, regs);
2735 op->type = MKOP(STORE_FP, 0, 16);
2739 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2740 goto unknown_opcode;
2741 op->ea = dqform_ea(word, regs);
2744 op->type = MKOP(LOAD_VSX, 0, 16);
2745 op->element_size = 16;
2746 op->vsx_flags = VSX_CHECK_VEC;
2749 case 2: /* stxsd with LSB of DS field = 0 */
2750 case 6: /* stxsd with LSB of DS field = 1 */
2751 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2752 goto unknown_opcode;
2753 op->ea = dsform_ea(word, regs);
2755 op->type = MKOP(STORE_VSX, 0, 8);
2756 op->element_size = 8;
2757 op->vsx_flags = VSX_CHECK_VEC;
2760 case 3: /* stxssp with LSB of DS field = 0 */
2761 case 7: /* stxssp with LSB of DS field = 1 */
2762 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2763 goto unknown_opcode;
2764 op->ea = dsform_ea(word, regs);
2766 op->type = MKOP(STORE_VSX, 0, 4);
2767 op->element_size = 8;
2768 op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2772 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2773 goto unknown_opcode;
2774 op->ea = dqform_ea(word, regs);
2777 op->type = MKOP(STORE_VSX, 0, 16);
2778 op->element_size = 16;
2779 op->vsx_flags = VSX_CHECK_VEC;
2783 #endif /* CONFIG_VSX */
2785 #ifdef __powerpc64__
2786 case 62: /* std[u] */
2787 op->ea = dsform_ea(word, regs);
2790 op->type = MKOP(STORE, 0, 8);
2793 op->type = MKOP(STORE, UPDATE, 8);
2797 op->type = MKOP(STORE, 0, 16);
2801 case 1: /* Prefixed instructions */
2802 if (!cpu_has_feature(CPU_FTR_ARCH_31))
2803 goto unknown_opcode;
2805 prefix_r = GET_PREFIX_R(word);
2806 ra = GET_PREFIX_RA(suffix);
2807 op->update_reg = ra;
2808 rd = (suffix >> 21) & 0x1f;
2810 op->val = regs->gpr[rd];
2812 suffixopcode = get_op(suffix);
2813 prefixtype = (word >> 24) & 0x3;
2814 switch (prefixtype) {
2815 case 0: /* Type 00 Eight-Byte Load/Store */
2818 op->ea = mlsd_8lsd_ea(word, suffix, regs);
2819 switch (suffixopcode) {
2821 op->type = MKOP(LOAD, PREFIXED | SIGNEXT, 4);
2824 case 42: /* plxsd */
2826 op->type = MKOP(LOAD_VSX, PREFIXED, 8);
2827 op->element_size = 8;
2828 op->vsx_flags = VSX_CHECK_VEC;
2830 case 43: /* plxssp */
2832 op->type = MKOP(LOAD_VSX, PREFIXED, 4);
2833 op->element_size = 8;
2834 op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2836 case 46: /* pstxsd */
2838 op->type = MKOP(STORE_VSX, PREFIXED, 8);
2839 op->element_size = 8;
2840 op->vsx_flags = VSX_CHECK_VEC;
2842 case 47: /* pstxssp */
2844 op->type = MKOP(STORE_VSX, PREFIXED, 4);
2845 op->element_size = 8;
2846 op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2848 case 51: /* plxv1 */
2851 case 50: /* plxv0 */
2852 op->type = MKOP(LOAD_VSX, PREFIXED, 16);
2853 op->element_size = 16;
2854 op->vsx_flags = VSX_CHECK_VEC;
2856 case 55: /* pstxv1 */
2859 case 54: /* pstxv0 */
2860 op->type = MKOP(STORE_VSX, PREFIXED, 16);
2861 op->element_size = 16;
2862 op->vsx_flags = VSX_CHECK_VEC;
2864 #endif /* CONFIG_VSX */
2866 op->type = MKOP(LOAD, PREFIXED, 16);
2869 op->type = MKOP(LOAD, PREFIXED, 8);
2872 op->type = MKOP(STORE, PREFIXED, 16);
2875 op->type = MKOP(STORE, PREFIXED, 8);
2879 case 1: /* Type 01 Eight-Byte Register-to-Register */
2881 case 2: /* Type 10 Modified Load/Store */
2884 op->ea = mlsd_8lsd_ea(word, suffix, regs);
2885 switch (suffixopcode) {
2887 op->type = MKOP(LOAD, PREFIXED, 4);
2890 op->type = MKOP(LOAD, PREFIXED, 1);
2893 op->type = MKOP(STORE, PREFIXED, 4);
2896 op->type = MKOP(STORE, PREFIXED, 1);
2899 op->type = MKOP(LOAD, PREFIXED, 2);
2902 op->type = MKOP(LOAD, PREFIXED | SIGNEXT, 2);
2905 op->type = MKOP(STORE, PREFIXED, 2);
2908 op->type = MKOP(LOAD_FP, PREFIXED | FPCONV, 4);
2911 op->type = MKOP(LOAD_FP, PREFIXED, 8);
2913 case 52: /* pstfs */
2914 op->type = MKOP(STORE_FP, PREFIXED | FPCONV, 4);
2916 case 54: /* pstfd */
2917 op->type = MKOP(STORE_FP, PREFIXED, 8);
2921 case 3: /* Type 11 Modified Register-to-Register */
2924 #endif /* __powerpc64__ */
2928 if (OP_IS_LOAD_STORE(op->type) && (op->type & UPDATE)) {
2929 switch (GETTYPE(op->type)) {
2932 goto unknown_opcode;
2938 goto unknown_opcode;
2943 if ((GETTYPE(op->type) == LOAD_VSX ||
2944 GETTYPE(op->type) == STORE_VSX) &&
2945 !cpu_has_feature(CPU_FTR_VSX)) {
2948 #endif /* CONFIG_VSX */
2973 op->type = INTERRUPT | 0x700;
2974 op->val = SRR1_PROGPRIV;
2978 op->type = INTERRUPT | 0x700;
2979 op->val = SRR1_PROGTRAP;
2982 EXPORT_SYMBOL_GPL(analyse_instr);
2983 NOKPROBE_SYMBOL(analyse_instr);
2986 * For PPC32 we always use stwu with r1 to change the stack pointer,
2987 * so this emulated store could corrupt the exception frame. We
2988 * therefore provide an exception frame trampoline, pushed below the
2989 * kprobed function's stack. Here we only update gpr[1] and do not
2990 * emulate the actual store; the real store is performed safely in
2991 * the exception return code, which checks this flag.
2993 static nokprobe_inline int handle_stack_update(unsigned long ea, struct pt_regs *regs)
2997 * Check whether this store would overflow the kernel stack.
2999 if (ea - STACK_INT_FRAME_SIZE <= current->thread.ksp_limit) {
3000 printk(KERN_CRIT "Can't kprobe this since kernel stack would overflow.\n");
3003 #endif /* CONFIG_PPC32 */
3005 * Check whether the flag is already set, since that would mean
3006 * we'd lose the previous value.
3008 WARN_ON(test_thread_flag(TIF_EMULATE_STACK_STORE));
3009 set_thread_flag(TIF_EMULATE_STACK_STORE);
3013 static nokprobe_inline void do_signext(unsigned long *valp, int size)
3017 *valp = (signed short) *valp;
3020 *valp = (signed int) *valp;
3025 static nokprobe_inline void do_byterev(unsigned long *valp, int size)
3029 *valp = byterev_2(*valp);
3032 *valp = byterev_4(*valp);
3034 #ifdef __powerpc64__
3036 *valp = byterev_8(*valp);
3043 * Emulate an instruction that can be executed just by updating *regs.
3046 void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op)
3048 unsigned long next_pc;
3050 next_pc = truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op->type));
3051 switch (GETTYPE(op->type)) {
3053 if (op->type & SETREG)
3054 regs->gpr[op->reg] = op->val;
3055 if (op->type & SETCC)
3056 regs->ccr = op->ccval;
3057 if (op->type & SETXER)
3058 regs->xer = op->xerval;
3062 if (op->type & SETLK)
3063 regs->link = next_pc;
3064 if (op->type & BRTAKEN)
3066 if (op->type & DECCTR)
3071 switch (op->type & BARRIER_MASK) {
3082 case BARRIER_LWSYNC:
3083 asm volatile("lwsync" : : : "memory");
3085 case BARRIER_PTESYNC:
3086 asm volatile("ptesync" : : : "memory");
3095 regs->gpr[op->reg] = regs->xer & 0xffffffffUL;
3098 regs->gpr[op->reg] = regs->link;
3101 regs->gpr[op->reg] = regs->ctr;
3111 regs->xer = op->val & 0xffffffffUL;
3114 regs->link = op->val;
3117 regs->ctr = op->val;
3127 regs->nip = next_pc;
3129 NOKPROBE_SYMBOL(emulate_update_regs);
3132 * Emulate a previously-analysed load or store instruction.
3133 * Return values are:
3134 * 0 = instruction emulated successfully
3135 * -EFAULT = address out of range or access faulted (regs->dar
3136 * contains the faulting address)
3137 * -EACCES = misaligned access, instruction requires alignment
3138 * -EINVAL = unknown operation in *op
3140 int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op)
3142 int err, size, type;
3150 size = GETSIZE(op->type);
3151 type = GETTYPE(op->type);
3152 cross_endian = (regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE);
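/*
 * The access has to be byte-swapped whenever the endianness of the
 * context being emulated (MSR_LE in regs->msr) differs from the
 * endianness the kernel itself runs in.
 */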
3153 ea = truncate_if_32bit(regs->msr, op->ea);
3157 if (ea & (size - 1))
3158 return -EACCES; /* can't handle misaligned */
3159 if (!address_ok(regs, ea, size))
3164 #ifdef __powerpc64__
3166 __get_user_asmx(val, ea, err, "lbarx");
3169 __get_user_asmx(val, ea, err, "lharx");
3173 __get_user_asmx(val, ea, err, "lwarx");
3175 #ifdef __powerpc64__
3177 __get_user_asmx(val, ea, err, "ldarx");
3180 err = do_lqarx(ea, &regs->gpr[op->reg]);
3191 regs->gpr[op->reg] = val;
3195 if (ea & (size - 1))
3196 return -EACCES; /* can't handle misaligned */
3197 if (!address_ok(regs, ea, size))
3201 #ifdef __powerpc64__
3203 __put_user_asmx(op->val, ea, err, "stbcx.", cr);
3206 __put_user_asmx(op->val, ea, err, "sthcx.", cr);
3210 __put_user_asmx(op->val, ea, err, "stwcx.", cr);
3212 #ifdef __powerpc64__
3214 __put_user_asmx(op->val, ea, err, "stdcx.", cr);
3217 err = do_stqcx(ea, regs->gpr[op->reg],
3218 regs->gpr[op->reg + 1], &cr);
3225 regs->ccr = (regs->ccr & 0x0fffffff) |
3227 ((regs->xer >> 3) & 0x10000000);
3233 #ifdef __powerpc64__
3235 err = emulate_lq(regs, ea, op->reg, cross_endian);
3239 err = read_mem(&regs->gpr[op->reg], ea, size, regs);
3241 if (op->type & SIGNEXT)
3242 do_signext(&regs->gpr[op->reg], size);
3243 if ((op->type & BYTEREV) == (cross_endian ? 0 : BYTEREV))
3244 do_byterev(&regs->gpr[op->reg], size);
3248 #ifdef CONFIG_PPC_FPU
3251 * If the instruction is in userspace, we can emulate it even
3252 * if the FP/VMX/VSX state is not live, because we have the state
3253 * stored in the thread_struct. If the instruction is in
3254 * the kernel, we must not touch the state in the thread_struct.
3256 if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP))
3258 err = do_fp_load(op, ea, regs, cross_endian);
3261 #ifdef CONFIG_ALTIVEC
3263 if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC))
3265 err = do_vec_load(op->reg, ea, size, regs, cross_endian);
3270 unsigned long msrbit = MSR_VSX;
3273 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
3274 * when the target of the instruction is a vector register.
3276 if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC))
3278 if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit))
3280 err = do_vsx_load(op, ea, regs, cross_endian);
3285 if (!address_ok(regs, ea, size))
3288 for (i = 0; i < size; i += 4) {
3289 unsigned int v32 = 0;
3294 err = copy_mem_in((u8 *) &v32, ea, nb, regs);
3297 if (unlikely(cross_endian))
3298 v32 = byterev_4(v32);
3299 regs->gpr[rd] = v32;
3301 /* reg number wraps from 31 to 0 for lsw[ix] */
3302 rd = (rd + 1) & 0x1f;
3307 #ifdef __powerpc64__
3309 err = emulate_stq(regs, ea, op->reg, cross_endian);
3313 if ((op->type & UPDATE) && size == sizeof(long) &&
3314 op->reg == 1 && op->update_reg == 1 &&
3315 !(regs->msr & MSR_PR) &&
3316 ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) {
3317 err = handle_stack_update(ea, regs);
3320 if (unlikely(cross_endian))
3321 do_byterev(&op->val, size);
3322 err = write_mem(op->val, ea, size, regs);
3325 #ifdef CONFIG_PPC_FPU
3327 if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP))
3329 err = do_fp_store(op, ea, regs, cross_endian);
3332 #ifdef CONFIG_ALTIVEC
3334 if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC))
3336 err = do_vec_store(op->reg, ea, size, regs, cross_endian);
3341 unsigned long msrbit = MSR_VSX;
3344 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
3345 * when the target of the instruction is a vector register.
3347 if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC))
3349 if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit))
3351 err = do_vsx_store(op, ea, regs, cross_endian);
3356 if (!address_ok(regs, ea, size))
3359 for (i = 0; i < size; i += 4) {
3360 unsigned int v32 = regs->gpr[rd];
3365 if (unlikely(cross_endian))
3366 v32 = byterev_4(v32);
3367 err = copy_mem_out((u8 *) &v32, ea, nb, regs);
3371 /* reg number wraps from 31 to 0 for stsw[ix] */
3372 rd = (rd + 1) & 0x1f;
3383 if (op->type & UPDATE)
3384 regs->gpr[op->update_reg] = op->ea;
3388 NOKPROBE_SYMBOL(emulate_loadstore);
3391 * Emulate instructions that cause a transfer of control,
3392 * loads and stores, and a few other instructions.
3393 * Returns 1 if the step was emulated, 0 if not,
3394 * or -1 if the instruction is one that should not be stepped,
3395 * such as an rfid, or a mtmsrd that would clear MSR_RI.
3397 int emulate_step(struct pt_regs *regs, struct ppc_inst instr)
3399 struct instruction_op op;
3404 r = analyse_instr(&op, regs, instr);
3408 emulate_update_regs(regs, &op);
3413 type = GETTYPE(op.type);
3415 if (OP_IS_LOAD_STORE(type)) {
3416 err = emulate_loadstore(regs, &op);
3424 ea = truncate_if_32bit(regs->msr, op.ea);
3425 if (!address_ok(regs, ea, 8))
3427 switch (op.type & CACHEOP_MASK) {
3429 __cacheop_user_asmx(ea, err, "dcbst");
3432 __cacheop_user_asmx(ea, err, "dcbf");
3436 prefetchw((void *) ea);
3440 prefetch((void *) ea);
3443 __cacheop_user_asmx(ea, err, "icbi");
3446 err = emulate_dcbz(ea, regs);
3456 regs->gpr[op.reg] = regs->msr & MSR_MASK;
3460 val = regs->gpr[op.reg];
3461 if ((val & MSR_RI) == 0)
3462 /* can't step mtmsr[d] that would clear MSR_RI */
3464 /* here op.val is the mask of bits to change */
3465 regs->msr = (regs->msr & ~op.val) | (val & op.val);
3469 case SYSCALL: /* sc */
3471 * N.B. this uses knowledge about how the syscall
3472 * entry code works. If that is changed, this will
3473 * need to be changed also.
3475 if (IS_ENABLED(CONFIG_PPC_FAST_ENDIAN_SWITCH) &&
3476 cpu_has_feature(CPU_FTR_REAL_LE) &&
3477 regs->gpr[0] == 0x1ebe) {
3478 regs->msr ^= MSR_LE;
3481 regs->gpr[9] = regs->gpr[13];
3482 regs->gpr[10] = MSR_KERNEL;
3483 regs->gpr[11] = regs->nip + 4;
3484 regs->gpr[12] = regs->msr & MSR_MASK;
3485 regs->gpr[13] = (unsigned long) get_paca();
3486 regs->nip = (unsigned long) &system_call_common;
3487 regs->msr = MSR_KERNEL;
3490 #ifdef CONFIG_PPC_BOOK3S_64
3491 case SYSCALL_VECTORED_0: /* scv 0 */
3492 regs->gpr[9] = regs->gpr[13];
3493 regs->gpr[10] = MSR_KERNEL;
3494 regs->gpr[11] = regs->nip + 4;
3495 regs->gpr[12] = regs->msr & MSR_MASK;
3496 regs->gpr[13] = (unsigned long) get_paca();
3497 regs->nip = (unsigned long) &system_call_vectored_emulate;
3498 regs->msr = MSR_KERNEL;
3509 regs->nip = truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op.type));
3512 NOKPROBE_SYMBOL(emulate_step);