1 // SPDX-License-Identifier: GPL-2.0-or-later
5 * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
7 #include <linux/kernel.h>
8 #include <linux/kprobes.h>
9 #include <linux/ptrace.h>
10 #include <linux/prefetch.h>
11 #include <asm/sstep.h>
12 #include <asm/processor.h>
13 #include <linux/uaccess.h>
14 #include <asm/cpu_has_feature.h>
15 #include <asm/cputable.h>
16 #include <asm/disassemble.h>
18 extern char system_call_common[];
19 extern char system_call_vectored_emulate[];
22 /* Bits in SRR1 that are copied from MSR */
23 #define MSR_MASK 0xffffffff87c0ffffUL
25 #define MSR_MASK 0x87c0ffff
29 #define XER_SO 0x80000000U
30 #define XER_OV 0x40000000U
31 #define XER_CA 0x20000000U
32 #define XER_OV32 0x00080000U
33 #define XER_CA32 0x00040000U
36 #define VSX_REGISTER_XTP(rd) ((((rd) & 1) << 5) | ((rd) & 0xfe))
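/*
 * For illustration: the low bit of the 5-bit rd field becomes bit 5 of
 * the result, so VSX_REGISTER_XTP(10) == 10 and
 * VSX_REGISTER_XTP(11) == ((1 << 5) | 10) == 42.
 */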
41 * Functions in ldstfp.S
43 extern void get_fpr(int rn, double *p);
44 extern void put_fpr(int rn, const double *p);
45 extern void get_vr(int rn, __vector128 *p);
46 extern void put_vr(int rn, __vector128 *p);
47 extern void load_vsrn(int vsr, const void *p);
48 extern void store_vsrn(int vsr, void *p);
49 extern void conv_sp_to_dp(const float *sp, double *dp);
50 extern void conv_dp_to_sp(const double *dp, float *sp);
57 extern int do_lq(unsigned long ea, unsigned long *regs);
58 extern int do_stq(unsigned long ea, unsigned long val0, unsigned long val1);
59 extern int do_lqarx(unsigned long ea, unsigned long *regs);
60 extern int do_stqcx(unsigned long ea, unsigned long val0, unsigned long val1,
64 #ifdef __LITTLE_ENDIAN__
73 * Emulate the truncation of 64 bit values in 32-bit mode.
75 static nokprobe_inline unsigned long truncate_if_32bit(unsigned long msr,
79 if ((msr & MSR_64BIT) == 0)
86 * Determine whether a conditional branch instruction would branch.
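* For illustration, a few common BO encodings as handled below:
*   BO = 0b10100 (branch always)        - neither CTR nor CR is tested;
*   BO = 0b10000 (bdnz)                 - CTR is decremented and tested;
*   BO = 0b01100 (branch if CR bit set) - only the CR bit selected by BI is tested.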
88 static nokprobe_inline int branch_taken(unsigned int instr,
89 const struct pt_regs *regs,
90 struct instruction_op *op)
92 unsigned int bo = (instr >> 21) & 0x1f;
96 /* decrement counter */
98 if (((bo >> 1) & 1) ^ (regs->ctr == 1))
101 if ((bo & 0x10) == 0) {
102 /* check bit from CR */
103 bi = (instr >> 16) & 0x1f;
104 if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))
110 static nokprobe_inline long address_ok(struct pt_regs *regs,
111 unsigned long ea, int nb)
113 if (!user_mode(regs))
115 if (access_ok((void __user *)ea, nb))
117 if (access_ok((void __user *)ea, 1))
118 /* Access overlaps the end of the user region */
119 regs->dar = TASK_SIZE_MAX - 1;
126 * Calculate effective address for a D-form instruction
128 static nokprobe_inline unsigned long dform_ea(unsigned int instr,
129 const struct pt_regs *regs)
134 ra = (instr >> 16) & 0x1f;
135 ea = (signed short) instr; /* sign-extend */
144 * Calculate effective address for a DS-form instruction
146 static nokprobe_inline unsigned long dsform_ea(unsigned int instr,
147 const struct pt_regs *regs)
152 ra = (instr >> 16) & 0x1f;
153 ea = (signed short) (instr & ~3); /* sign-extend */
161 * Calculate effective address for a DQ-form instruction
163 static nokprobe_inline unsigned long dqform_ea(unsigned int instr,
164 const struct pt_regs *regs)
169 ra = (instr >> 16) & 0x1f;
170 ea = (signed short) (instr & ~0xf); /* sign-extend */
176 #endif /* __powerpc64__ */
179 * Calculate effective address for an X-form instruction
181 static nokprobe_inline unsigned long xform_ea(unsigned int instr,
182 const struct pt_regs *regs)
187 ra = (instr >> 16) & 0x1f;
188 rb = (instr >> 11) & 0x1f;
197 * Calculate effective address for a MLS:D-form / 8LS:D-form
198 * prefixed instruction
200 static nokprobe_inline unsigned long mlsd_8lsd_ea(unsigned int instr,
202 const struct pt_regs *regs)
206 unsigned long ea, d0, d1, d;
208 prefix_r = GET_PREFIX_R(instr);
209 ra = GET_PREFIX_RA(suffix);
211 d0 = instr & 0x3ffff;
212 d1 = suffix & 0xffff;
216 * sign extend a 34 bit number
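* (illustration: the 34-bit value 0x3fffffffc, i.e. -4, becomes
*  0xfffffffffffffffc after sign extension)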
218 dd = (unsigned int)(d >> 2);
220 ea = (ea << 2) | (d & 0x3);
224 else if (!prefix_r && !ra)
225 ; /* Leave ea as is */
230 * (prefix_r && ra) is an invalid form. Should already be
231 * checked for by caller!
238 * Return the largest power of 2, not greater than sizeof(unsigned long),
239 * such that x is a multiple of it.
241 static nokprobe_inline unsigned long max_align(unsigned long x)
243 x |= sizeof(unsigned long);
244 return x & -x; /* isolates rightmost bit */
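/*
 * For illustration, with sizeof(unsigned long) == 8:
 * max_align(0) == 8, max_align(12) == 4, max_align(6) == 2, max_align(7) == 1.
 */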
247 static nokprobe_inline unsigned long byterev_2(unsigned long x)
249 return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
252 static nokprobe_inline unsigned long byterev_4(unsigned long x)
254 return ((x >> 24) & 0xff) | ((x >> 8) & 0xff00) |
255 ((x & 0xff00) << 8) | ((x & 0xff) << 24);
259 static nokprobe_inline unsigned long byterev_8(unsigned long x)
261 return (byterev_4(x) << 32) | byterev_4(x >> 32);
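/*
 * For illustration: byterev_2(0x1234) == 0x3412 and
 * byterev_4(0x12345678) == 0x78563412.
 */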
265 static nokprobe_inline void do_byte_reverse(void *ptr, int nb)
269 *(u16 *)ptr = byterev_2(*(u16 *)ptr);
272 *(u32 *)ptr = byterev_4(*(u32 *)ptr);
276 *(unsigned long *)ptr = byterev_8(*(unsigned long *)ptr);
279 unsigned long *up = (unsigned long *)ptr;
281 tmp = byterev_8(up[0]);
282 up[0] = byterev_8(up[1]);
287 unsigned long *up = (unsigned long *)ptr;
290 tmp = byterev_8(up[0]);
291 up[0] = byterev_8(up[3]);
293 tmp = byterev_8(up[2]);
294 up[2] = byterev_8(up[1]);
305 static nokprobe_inline int read_mem_aligned(unsigned long *dest,
306 unsigned long ea, int nb,
307 struct pt_regs *regs)
314 err = __get_user(x, (unsigned char __user *) ea);
317 err = __get_user(x, (unsigned short __user *) ea);
320 err = __get_user(x, (unsigned int __user *) ea);
324 err = __get_user(x, (unsigned long __user *) ea);
336 * Copy from userspace to a buffer, using the largest possible
337 * aligned accesses, up to sizeof(long).
339 static nokprobe_inline int copy_mem_in(u8 *dest, unsigned long ea, int nb,
340 struct pt_regs *regs)
345 for (; nb > 0; nb -= c) {
351 err = __get_user(*dest, (unsigned char __user *) ea);
354 err = __get_user(*(u16 *)dest,
355 (unsigned short __user *) ea);
358 err = __get_user(*(u32 *)dest,
359 (unsigned int __user *) ea);
363 err = __get_user(*(unsigned long *)dest,
364 (unsigned long __user *) ea);
378 static nokprobe_inline int read_mem_unaligned(unsigned long *dest,
379 unsigned long ea, int nb,
380 struct pt_regs *regs)
384 u8 b[sizeof(unsigned long)];
390 i = IS_BE ? sizeof(unsigned long) - nb : 0;
391 err = copy_mem_in(&u.b[i], ea, nb, regs);
398 * Read memory at address ea for nb bytes, return 0 for success
399 * or -EFAULT if an error occurred. N.B. nb must be 1, 2, 4 or 8.
400 * If nb < sizeof(long), the result is right-justified on BE systems.
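* (e.g. a 2-byte read of the bytes {0x12, 0x34} on a big-endian system
*  yields *dest == 0x1234, i.e. in the low-order bits of the long)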
402 static int read_mem(unsigned long *dest, unsigned long ea, int nb,
403 struct pt_regs *regs)
405 if (!address_ok(regs, ea, nb))
407 if ((ea & (nb - 1)) == 0)
408 return read_mem_aligned(dest, ea, nb, regs);
409 return read_mem_unaligned(dest, ea, nb, regs);
411 NOKPROBE_SYMBOL(read_mem);
413 static nokprobe_inline int write_mem_aligned(unsigned long val,
414 unsigned long ea, int nb,
415 struct pt_regs *regs)
421 err = __put_user(val, (unsigned char __user *) ea);
424 err = __put_user(val, (unsigned short __user *) ea);
427 err = __put_user(val, (unsigned int __user *) ea);
431 err = __put_user(val, (unsigned long __user *) ea);
441 * Copy from a buffer to userspace, using the largest possible
442 * aligned accesses, up to sizeof(long).
444 static nokprobe_inline int copy_mem_out(u8 *dest, unsigned long ea, int nb,
445 struct pt_regs *regs)
450 for (; nb > 0; nb -= c) {
456 err = __put_user(*dest, (unsigned char __user *) ea);
459 err = __put_user(*(u16 *)dest,
460 (unsigned short __user *) ea);
463 err = __put_user(*(u32 *)dest,
464 (unsigned int __user *) ea);
468 err = __put_user(*(unsigned long *)dest,
469 (unsigned long __user *) ea);
483 static nokprobe_inline int write_mem_unaligned(unsigned long val,
484 unsigned long ea, int nb,
485 struct pt_regs *regs)
489 u8 b[sizeof(unsigned long)];
494 i = IS_BE ? sizeof(unsigned long) - nb : 0;
495 return copy_mem_out(&u.b[i], ea, nb, regs);
499 * Write memory at address ea for nb bytes, return 0 for success
500 * or -EFAULT if an error occurred. N.B. nb must be 1, 2, 4 or 8.
502 static int write_mem(unsigned long val, unsigned long ea, int nb,
503 struct pt_regs *regs)
505 if (!address_ok(regs, ea, nb))
507 if ((ea & (nb - 1)) == 0)
508 return write_mem_aligned(val, ea, nb, regs);
509 return write_mem_unaligned(val, ea, nb, regs);
511 NOKPROBE_SYMBOL(write_mem);
513 #ifdef CONFIG_PPC_FPU
515 * These access either the real FP register or the image in the
516 * thread_struct, depending on regs->msr & MSR_FP.
518 static int do_fp_load(struct instruction_op *op, unsigned long ea,
519 struct pt_regs *regs, bool cross_endian)
528 u8 b[2 * sizeof(double)];
531 nb = GETSIZE(op->type);
532 if (!address_ok(regs, ea, nb))
535 err = copy_mem_in(u.b, ea, nb, regs);
538 if (unlikely(cross_endian)) {
539 do_byte_reverse(u.b, min(nb, 8));
541 do_byte_reverse(&u.b[8], 8);
545 if (op->type & FPCONV)
546 conv_sp_to_dp(&u.f, &u.d[0]);
547 else if (op->type & SIGNEXT)
552 if (regs->msr & MSR_FP)
553 put_fpr(rn, &u.d[0]);
555 current->thread.TS_FPR(rn) = u.l[0];
559 if (regs->msr & MSR_FP)
560 put_fpr(rn, &u.d[1]);
562 current->thread.TS_FPR(rn) = u.l[1];
567 NOKPROBE_SYMBOL(do_fp_load);
569 static int do_fp_store(struct instruction_op *op, unsigned long ea,
570 struct pt_regs *regs, bool cross_endian)
578 u8 b[2 * sizeof(double)];
581 nb = GETSIZE(op->type);
582 if (!address_ok(regs, ea, nb))
586 if (regs->msr & MSR_FP)
587 get_fpr(rn, &u.d[0]);
589 u.l[0] = current->thread.TS_FPR(rn);
591 if (op->type & FPCONV)
592 conv_dp_to_sp(&u.d[0], &u.f);
598 if (regs->msr & MSR_FP)
599 get_fpr(rn, &u.d[1]);
601 u.l[1] = current->thread.TS_FPR(rn);
604 if (unlikely(cross_endian)) {
605 do_byte_reverse(u.b, min(nb, 8));
607 do_byte_reverse(&u.b[8], 8);
609 return copy_mem_out(u.b, ea, nb, regs);
611 NOKPROBE_SYMBOL(do_fp_store);
614 #ifdef CONFIG_ALTIVEC
615 /* For Altivec/VMX, no need to worry about alignment */
616 static nokprobe_inline int do_vec_load(int rn, unsigned long ea,
617 int size, struct pt_regs *regs,
623 u8 b[sizeof(__vector128)];
626 if (!address_ok(regs, ea & ~0xfUL, 16))
628 /* align to multiple of size */
630 err = copy_mem_in(&u.b[ea & 0xf], ea, size, regs);
633 if (unlikely(cross_endian))
634 do_byte_reverse(&u.b[ea & 0xf], size);
636 if (regs->msr & MSR_VEC)
639 current->thread.vr_state.vr[rn] = u.v;
644 static nokprobe_inline int do_vec_store(int rn, unsigned long ea,
645 int size, struct pt_regs *regs,
650 u8 b[sizeof(__vector128)];
653 if (!address_ok(regs, ea & ~0xfUL, 16))
655 /* align to multiple of size */
659 if (regs->msr & MSR_VEC)
662 u.v = current->thread.vr_state.vr[rn];
664 if (unlikely(cross_endian))
665 do_byte_reverse(&u.b[ea & 0xf], size);
666 return copy_mem_out(&u.b[ea & 0xf], ea, size, regs);
668 #endif /* CONFIG_ALTIVEC */
671 static nokprobe_inline int emulate_lq(struct pt_regs *regs, unsigned long ea,
672 int reg, bool cross_endian)
676 if (!address_ok(regs, ea, 16))
678 /* if aligned, should be atomic */
679 if ((ea & 0xf) == 0) {
680 err = do_lq(ea, &regs->gpr[reg]);
682 err = read_mem(&regs->gpr[reg + IS_LE], ea, 8, regs);
684 err = read_mem(&regs->gpr[reg + IS_BE], ea + 8, 8, regs);
686 if (!err && unlikely(cross_endian))
687 do_byte_reverse(&regs->gpr[reg], 16);
691 static nokprobe_inline int emulate_stq(struct pt_regs *regs, unsigned long ea,
692 int reg, bool cross_endian)
695 unsigned long vals[2];
697 if (!address_ok(regs, ea, 16))
699 vals[0] = regs->gpr[reg];
700 vals[1] = regs->gpr[reg + 1];
701 if (unlikely(cross_endian))
702 do_byte_reverse(vals, 16);
704 /* if aligned, should be atomic */
706 return do_stq(ea, vals[0], vals[1]);
708 err = write_mem(vals[IS_LE], ea, 8, regs);
710 err = write_mem(vals[IS_BE], ea + 8, 8, regs);
713 #endif /* __powerpc64__ */
716 void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
717 const void *mem, bool rev)
721 const unsigned int *wp;
722 const unsigned short *hp;
723 const unsigned char *bp;
725 size = GETSIZE(op->type);
726 reg->d[0] = reg->d[1] = 0;
728 switch (op->element_size) {
732 /* whole vector; lxv[x] or lxvl[l] */
735 memcpy(reg, mem, size);
736 if (IS_LE && (op->vsx_flags & VSX_LDLEFT))
739 do_byte_reverse(reg, size);
742 /* scalar loads, lxvd2x, lxvdsx */
743 read_size = (size >= 8) ? 8 : size;
744 i = IS_LE ? 8 : 8 - read_size;
745 memcpy(&reg->b[i], mem, read_size);
747 do_byte_reverse(&reg->b[i], 8);
749 if (op->type & SIGNEXT) {
750 /* size == 4 is the only case here */
751 reg->d[IS_LE] = (signed int) reg->d[IS_LE];
752 } else if (op->vsx_flags & VSX_FPCONV) {
754 conv_sp_to_dp(&reg->fp[1 + IS_LE],
760 unsigned long v = *(unsigned long *)(mem + 8);
761 reg->d[IS_BE] = !rev ? v : byterev_8(v);
762 } else if (op->vsx_flags & VSX_SPLAT)
763 reg->d[IS_BE] = reg->d[IS_LE];
769 for (j = 0; j < size / 4; ++j) {
770 i = IS_LE ? 3 - j : j;
771 reg->w[i] = !rev ? *wp++ : byterev_4(*wp++);
773 if (op->vsx_flags & VSX_SPLAT) {
774 u32 val = reg->w[IS_LE ? 3 : 0];
776 i = IS_LE ? 3 - j : j;
784 for (j = 0; j < size / 2; ++j) {
785 i = IS_LE ? 7 - j : j;
786 reg->h[i] = !rev ? *hp++ : byterev_2(*hp++);
792 for (j = 0; j < size; ++j) {
793 i = IS_LE ? 15 - j : j;
799 EXPORT_SYMBOL_GPL(emulate_vsx_load);
800 NOKPROBE_SYMBOL(emulate_vsx_load);
802 void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg,
805 int size, write_size;
812 size = GETSIZE(op->type);
814 switch (op->element_size) {
820 /* reverse 32 bytes */
821 union vsx_reg buf32[2];
822 buf32[0].d[0] = byterev_8(reg[1].d[1]);
823 buf32[0].d[1] = byterev_8(reg[1].d[0]);
824 buf32[1].d[0] = byterev_8(reg[0].d[1]);
825 buf32[1].d[1] = byterev_8(reg[0].d[0]);
826 memcpy(mem, buf32, size);
828 memcpy(mem, reg, size);
832 /* stxv, stxvx, stxvl, stxvll */
835 if (IS_LE && (op->vsx_flags & VSX_LDLEFT))
838 /* reverse 16 bytes */
839 buf.d[0] = byterev_8(reg->d[1]);
840 buf.d[1] = byterev_8(reg->d[0]);
843 memcpy(mem, reg, size);
846 /* scalar stores, stxvd2x */
847 write_size = (size >= 8) ? 8 : size;
848 i = IS_LE ? 8 : 8 - write_size;
849 if (size < 8 && op->vsx_flags & VSX_FPCONV) {
850 buf.d[0] = buf.d[1] = 0;
852 conv_dp_to_sp(&reg->dp[IS_LE], &buf.fp[1 + IS_LE]);
856 memcpy(mem, &reg->b[i], write_size);
858 memcpy(mem + 8, &reg->d[IS_BE], 8);
860 do_byte_reverse(mem, write_size);
862 do_byte_reverse(mem + 8, 8);
868 for (j = 0; j < size / 4; ++j) {
869 i = IS_LE ? 3 - j : j;
870 *wp++ = !rev ? reg->w[i] : byterev_4(reg->w[i]);
876 for (j = 0; j < size / 2; ++j) {
877 i = IS_LE ? 7 - j : j;
878 *hp++ = !rev ? reg->h[i] : byterev_2(reg->h[i]);
884 for (j = 0; j < size; ++j) {
885 i = IS_LE ? 15 - j : j;
891 EXPORT_SYMBOL_GPL(emulate_vsx_store);
892 NOKPROBE_SYMBOL(emulate_vsx_store);
894 static nokprobe_inline int do_vsx_load(struct instruction_op *op,
895 unsigned long ea, struct pt_regs *regs,
899 int i, j, nr_vsx_regs;
901 union vsx_reg buf[2];
902 int size = GETSIZE(op->type);
904 if (!address_ok(regs, ea, size) || copy_mem_in(mem, ea, size, regs))
907 nr_vsx_regs = max(1ul, size / sizeof(__vector128));
908 emulate_vsx_load(op, buf, mem, cross_endian);
911 /* FP regs + extensions */
912 if (regs->msr & MSR_FP) {
913 for (i = 0; i < nr_vsx_regs; i++) {
914 j = IS_LE ? nr_vsx_regs - i - 1 : i;
915 load_vsrn(reg + i, &buf[j].v);
918 for (i = 0; i < nr_vsx_regs; i++) {
919 j = IS_LE ? nr_vsx_regs - i - 1 : i;
920 current->thread.fp_state.fpr[reg + i][0] = buf[j].d[0];
921 current->thread.fp_state.fpr[reg + i][1] = buf[j].d[1];
925 if (regs->msr & MSR_VEC) {
926 for (i = 0; i < nr_vsx_regs; i++) {
927 j = IS_LE ? nr_vsx_regs - i - 1 : i;
928 load_vsrn(reg + i, &buf[j].v);
931 for (i = 0; i < nr_vsx_regs; i++) {
932 j = IS_LE ? nr_vsx_regs - i - 1 : i;
933 current->thread.vr_state.vr[reg - 32 + i] = buf[j].v;
941 static nokprobe_inline int do_vsx_store(struct instruction_op *op,
942 unsigned long ea, struct pt_regs *regs,
946 int i, j, nr_vsx_regs;
948 union vsx_reg buf[2];
949 int size = GETSIZE(op->type);
951 if (!address_ok(regs, ea, size))
954 nr_vsx_regs = max(1ul, size / sizeof(__vector128));
957 /* FP regs + extensions */
958 if (regs->msr & MSR_FP) {
959 for (i = 0; i < nr_vsx_regs; i++) {
960 j = IS_LE ? nr_vsx_regs - i - 1 : i;
961 store_vsrn(reg + i, &buf[j].v);
964 for (i = 0; i < nr_vsx_regs; i++) {
965 j = IS_LE ? nr_vsx_regs - i - 1 : i;
966 buf[j].d[0] = current->thread.fp_state.fpr[reg + i][0];
967 buf[j].d[1] = current->thread.fp_state.fpr[reg + i][1];
971 if (regs->msr & MSR_VEC) {
972 for (i = 0; i < nr_vsx_regs; i++) {
973 j = IS_LE ? nr_vsx_regs - i - 1 : i;
974 store_vsrn(reg + i, &buf[j].v);
977 for (i = 0; i < nr_vsx_regs; i++) {
978 j = IS_LE ? nr_vsx_regs - i - 1 : i;
979 buf[j].v = current->thread.vr_state.vr[reg - 32 + i];
984 emulate_vsx_store(op, buf, mem, cross_endian);
985 return copy_mem_out(mem, ea, size, regs);
987 #endif /* CONFIG_VSX */
989 int emulate_dcbz(unsigned long ea, struct pt_regs *regs)
992 unsigned long i, size;
995 size = ppc64_caches.l1d.block_size;
996 if (!(regs->msr & MSR_64BIT))
999 size = L1_CACHE_BYTES;
1002 if (!address_ok(regs, ea, size))
1004 for (i = 0; i < size; i += sizeof(long)) {
1005 err = __put_user(0, (unsigned long __user *) (ea + i));
1013 NOKPROBE_SYMBOL(emulate_dcbz);
1015 #define __put_user_asmx(x, addr, err, op, cr) \
1016 __asm__ __volatile__( \
1018 ".machine power8\n" \
1019 "1: " op " %2,0,%3\n" \
1023 ".section .fixup,\"ax\"\n" \
1028 : "=r" (err), "=r" (cr) \
1029 : "r" (x), "r" (addr), "i" (-EFAULT), "0" (err))
1031 #define __get_user_asmx(x, addr, err, op) \
1032 __asm__ __volatile__( \
1034 ".machine power8\n" \
1035 "1: "op" %1,0,%2\n" \
1038 ".section .fixup,\"ax\"\n" \
1043 : "=r" (err), "=r" (x) \
1044 : "r" (addr), "i" (-EFAULT), "0" (err))
1046 #define __cacheop_user_asmx(addr, err, op) \
1047 __asm__ __volatile__( \
1050 ".section .fixup,\"ax\"\n" \
1056 : "r" (addr), "i" (-EFAULT), "0" (err))
1058 static nokprobe_inline void set_cr0(const struct pt_regs *regs,
1059 struct instruction_op *op)
1064 op->ccval = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
1065 #ifdef __powerpc64__
1066 if (!(regs->msr & MSR_64BIT))
1070 op->ccval |= 0x80000000;
1072 op->ccval |= 0x40000000;
1074 op->ccval |= 0x20000000;
1077 static nokprobe_inline void set_ca32(struct instruction_op *op, bool val)
1079 if (cpu_has_feature(CPU_FTR_ARCH_300)) {
1081 op->xerval |= XER_CA32;
1083 op->xerval &= ~XER_CA32;
1087 static nokprobe_inline void add_with_carry(const struct pt_regs *regs,
1088 struct instruction_op *op, int rd,
1089 unsigned long val1, unsigned long val2,
1090 unsigned long carry_in)
1092 unsigned long val = val1 + val2;
1096 op->type = COMPUTE + SETREG + SETXER;
1099 #ifdef __powerpc64__
1100 if (!(regs->msr & MSR_64BIT)) {
1101 val = (unsigned int) val;
1102 val1 = (unsigned int) val1;
1105 op->xerval = regs->xer;
1106 if (val < val1 || (carry_in && val == val1))
1107 op->xerval |= XER_CA;
1109 op->xerval &= ~XER_CA;
1111 set_ca32(op, (unsigned int)val < (unsigned int)val1 ||
1112 (carry_in && (unsigned int)val == (unsigned int)val1));
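/*
 * For illustration: with val1 == ~0UL, val2 == 1 and no carry in,
 * the sum wraps to 0, so val < val1 and XER[CA] gets set above.
 */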
1115 static nokprobe_inline void do_cmp_signed(const struct pt_regs *regs,
1116 struct instruction_op *op,
1117 long v1, long v2, int crfld)
1119 unsigned int crval, shift;
1121 op->type = COMPUTE + SETCC;
1122 crval = (regs->xer >> 31) & 1; /* get SO bit */
1129 shift = (7 - crfld) * 4;
1130 op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift);
1133 static nokprobe_inline void do_cmp_unsigned(const struct pt_regs *regs,
1134 struct instruction_op *op,
1136 unsigned long v2, int crfld)
1138 unsigned int crval, shift;
1140 op->type = COMPUTE + SETCC;
1141 crval = (regs->xer >> 31) & 1; /* get SO bit */
1148 shift = (7 - crfld) * 4;
1149 op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift);
1152 static nokprobe_inline void do_cmpb(const struct pt_regs *regs,
1153 struct instruction_op *op,
1154 unsigned long v1, unsigned long v2)
1156 unsigned long long out_val, mask;
1160 for (i = 0; i < 8; i++) {
1161 mask = 0xffUL << (i * 8);
1162 if ((v1 & mask) == (v2 & mask))
1169 * The size parameter is used to adjust the equivalent popcnt instruction.
1170 * popcntb = 8, popcntw = 32, popcntd = 64
1172 static nokprobe_inline void do_popcnt(const struct pt_regs *regs,
1173 struct instruction_op *op,
1174 unsigned long v1, int size)
1176 unsigned long long out = v1;
1178 out -= (out >> 1) & 0x5555555555555555ULL;
1179 out = (0x3333333333333333ULL & out) +
1180 (0x3333333333333333ULL & (out >> 2));
1181 out = (out + (out >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
1183 if (size == 8) { /* popcntb */
1189 if (size == 32) { /* popcntw */
1190 op->val = out & 0x0000003f0000003fULL;
1194 out = (out + (out >> 32)) & 0x7f;
1195 op->val = out; /* popcntd */
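/*
 * For illustration, v1 == 0x0000ff01 has per-byte counts of 8 and 1:
 * popcntb -> 0x0801, popcntw -> 0x9, popcntd -> 0x9.
 */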
1199 static nokprobe_inline void do_bpermd(const struct pt_regs *regs,
1200 struct instruction_op *op,
1201 unsigned long v1, unsigned long v2)
1203 unsigned char perm, idx;
1207 for (i = 0; i < 8; i++) {
1208 idx = (v1 >> (i * 8)) & 0xff;
1210 if (v2 & PPC_BIT(idx))
1215 #endif /* CONFIG_PPC64 */
1217 * The size parameter adjusts the equivalent prty instruction.
1218 * prtyw = 32, prtyd = 64
1220 static nokprobe_inline void do_prty(const struct pt_regs *regs,
1221 struct instruction_op *op,
1222 unsigned long v, int size)
1224 unsigned long long res = v ^ (v >> 8);
1227 if (size == 32) { /* prtyw */
1228 op->val = res & 0x0000000100000001ULL;
1233 op->val = res & 1; /* prtyd */
1236 static nokprobe_inline int trap_compare(long v1, long v2)
1246 if ((unsigned long)v1 < (unsigned long)v2)
1248 else if ((unsigned long)v1 > (unsigned long)v2)
1254 * Elements of 32-bit rotate and mask instructions.
1256 #define MASK32(mb, me) ((0xffffffffUL >> (mb)) + \
1257 ((signed long)-0x80000000L >> (me)) + ((me) >= (mb)))
1258 #ifdef __powerpc64__
1259 #define MASK64_L(mb) (~0UL >> (mb))
1260 #define MASK64_R(me) ((signed long)-0x8000000000000000L >> (me))
1261 #define MASK64(mb, me) (MASK64_L(mb) + MASK64_R(me) + ((me) >= (mb)))
1262 #define DATA32(x) (((x) & 0xffffffffUL) | (((x) & 0xffffffffUL) << 32))
1264 #define DATA32(x) (x)
1266 #define ROTATE(x, n) ((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x))
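/*
 * For illustration (bits numbered IBM-style, 0 = most significant):
 * MASK32(16, 23) == 0xff00, MASK64_L(60) == 0xf, MASK64(60, 62) == 0xe,
 * and ROTATE(0x1UL, 4) == 0x10.
 */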
1269 * Decode an instruction, and return information about it in *op
1270 * without changing *regs.
1271 * Integer arithmetic and logical instructions, branches, and barrier
1272 * instructions can be emulated just using the information in *op.
1274 * Return value is 1 if the instruction can be emulated just by
1275 * updating *regs with the information in *op, -1 if we need the
1276 * GPRs but *regs doesn't contain the full register set, or 0
1279 int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
1280 struct ppc_inst instr)
1283 unsigned int suffixopcode, prefixtype, prefix_r;
1285 unsigned int opcode, ra, rb, rc, rd, spr, u;
1286 unsigned long int imm;
1287 unsigned long int val, val2;
1288 unsigned int mb, me, sh;
1289 unsigned int word, suffix;
1292 word = ppc_inst_val(instr);
1293 suffix = ppc_inst_suffix(instr);
1297 opcode = ppc_inst_primary_opcode(instr);
1301 imm = (signed short)(word & 0xfffc);
1302 if ((word & 2) == 0)
1304 op->val = truncate_if_32bit(regs->msr, imm);
1307 if (branch_taken(word, regs, op))
1308 op->type |= BRTAKEN;
1312 if ((word & 0xfe2) == 2)
1314 else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
1315 (word & 0xfe3) == 1) { /* scv */
1316 op->type = SYSCALL_VECTORED_0;
1317 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1318 goto unknown_opcode;
1324 op->type = BRANCH | BRTAKEN;
1325 imm = word & 0x03fffffc;
1326 if (imm & 0x02000000)
1328 if ((word & 2) == 0)
1330 op->val = truncate_if_32bit(regs->msr, imm);
1335 switch ((word >> 1) & 0x3ff) {
1337 op->type = COMPUTE + SETCC;
1338 rd = 7 - ((word >> 23) & 0x7);
1339 ra = 7 - ((word >> 18) & 0x7);
1342 val = (regs->ccr >> ra) & 0xf;
1343 op->ccval = (regs->ccr & ~(0xfUL << rd)) | (val << rd);
1347 case 528: /* bcctr */
1349 imm = (word & 0x400)? regs->ctr: regs->link;
1350 op->val = truncate_if_32bit(regs->msr, imm);
1353 if (branch_taken(word, regs, op))
1354 op->type |= BRTAKEN;
1357 case 18: /* rfid, scary */
1358 if (regs->msr & MSR_PR)
1363 case 150: /* isync */
1364 op->type = BARRIER | BARRIER_ISYNC;
1367 case 33: /* crnor */
1368 case 129: /* crandc */
1369 case 193: /* crxor */
1370 case 225: /* crnand */
1371 case 257: /* crand */
1372 case 289: /* creqv */
1373 case 417: /* crorc */
1374 case 449: /* cror */
1375 op->type = COMPUTE + SETCC;
1376 ra = (word >> 16) & 0x1f;
1377 rb = (word >> 11) & 0x1f;
1378 rd = (word >> 21) & 0x1f;
1379 ra = (regs->ccr >> (31 - ra)) & 1;
1380 rb = (regs->ccr >> (31 - rb)) & 1;
1381 val = (word >> (6 + ra * 2 + rb)) & 1;
1382 op->ccval = (regs->ccr & ~(1UL << (31 - rd))) |
1388 switch ((word >> 1) & 0x3ff) {
1389 case 598: /* sync */
1390 op->type = BARRIER + BARRIER_SYNC;
1391 #ifdef __powerpc64__
1392 switch ((word >> 21) & 3) {
1393 case 1: /* lwsync */
1394 op->type = BARRIER + BARRIER_LWSYNC;
1396 case 2: /* ptesync */
1397 op->type = BARRIER + BARRIER_PTESYNC;
1403 case 854: /* eieio */
1404 op->type = BARRIER + BARRIER_EIEIO;
1410 rd = (word >> 21) & 0x1f;
1411 ra = (word >> 16) & 0x1f;
1412 rb = (word >> 11) & 0x1f;
1413 rc = (word >> 6) & 0x1f;
1416 #ifdef __powerpc64__
1418 if (!cpu_has_feature(CPU_FTR_ARCH_31))
1419 goto unknown_opcode;
1421 prefix_r = GET_PREFIX_R(word);
1422 ra = GET_PREFIX_RA(suffix);
1423 rd = (suffix >> 21) & 0x1f;
1425 op->val = regs->gpr[rd];
1426 suffixopcode = get_op(suffix);
1427 prefixtype = (word >> 24) & 0x3;
1428 switch (prefixtype) {
1432 switch (suffixopcode) {
1433 case 14: /* paddi */
1434 op->type = COMPUTE | PREFIXED;
1435 op->val = mlsd_8lsd_ea(word, suffix, regs);
1441 if (rd & trap_compare(regs->gpr[ra], (short) word))
1446 if (rd & trap_compare((int)regs->gpr[ra], (short) word))
1450 #ifdef __powerpc64__
1453 * There are very many instructions with this primary opcode
1454 * introduced in the ISA as early as v2.03. However, the ones
1455 * we currently emulate were all introduced with ISA 3.0
1457 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1458 goto unknown_opcode;
1460 switch (word & 0x3f) {
1461 case 48: /* maddhd */
1462 asm volatile(PPC_MADDHD(%0, %1, %2, %3) :
1463 "=r" (op->val) : "r" (regs->gpr[ra]),
1464 "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
1467 case 49: /* maddhdu */
1468 asm volatile(PPC_MADDHDU(%0, %1, %2, %3) :
1469 "=r" (op->val) : "r" (regs->gpr[ra]),
1470 "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
1473 case 51: /* maddld */
1474 asm volatile(PPC_MADDLD(%0, %1, %2, %3) :
1475 "=r" (op->val) : "r" (regs->gpr[ra]),
1476 "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
1481 * There are other instructions from ISA 3.0 with the same
1482 * primary opcode which do not have emulation support yet.
1484 goto unknown_opcode;
1488 op->val = regs->gpr[ra] * (short) word;
1491 case 8: /* subfic */
1493 add_with_carry(regs, op, rd, ~regs->gpr[ra], imm, 1);
1496 case 10: /* cmpli */
1497 imm = (unsigned short) word;
1498 val = regs->gpr[ra];
1499 #ifdef __powerpc64__
1501 val = (unsigned int) val;
1503 do_cmp_unsigned(regs, op, val, imm, rd >> 2);
1508 val = regs->gpr[ra];
1509 #ifdef __powerpc64__
1513 do_cmp_signed(regs, op, val, imm, rd >> 2);
1516 case 12: /* addic */
1518 add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
1521 case 13: /* addic. */
1523 add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
1530 imm += regs->gpr[ra];
1534 case 15: /* addis */
1535 imm = ((short) word) << 16;
1537 imm += regs->gpr[ra];
1542 if (((word >> 1) & 0x1f) == 2) {
1544 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1545 goto unknown_opcode;
1546 imm = (short) (word & 0xffc1); /* d0 + d2 fields */
1547 imm |= (word >> 15) & 0x3e; /* d1 field */
1548 op->val = regs->nip + (imm << 16) + 4;
1554 case 20: /* rlwimi */
1555 mb = (word >> 6) & 0x1f;
1556 me = (word >> 1) & 0x1f;
1557 val = DATA32(regs->gpr[rd]);
1558 imm = MASK32(mb, me);
1559 op->val = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm);
1562 case 21: /* rlwinm */
1563 mb = (word >> 6) & 0x1f;
1564 me = (word >> 1) & 0x1f;
1565 val = DATA32(regs->gpr[rd]);
1566 op->val = ROTATE(val, rb) & MASK32(mb, me);
1569 case 23: /* rlwnm */
1570 mb = (word >> 6) & 0x1f;
1571 me = (word >> 1) & 0x1f;
1572 rb = regs->gpr[rb] & 0x1f;
1573 val = DATA32(regs->gpr[rd]);
1574 op->val = ROTATE(val, rb) & MASK32(mb, me);
1578 op->val = regs->gpr[rd] | (unsigned short) word;
1579 goto logical_done_nocc;
1582 imm = (unsigned short) word;
1583 op->val = regs->gpr[rd] | (imm << 16);
1584 goto logical_done_nocc;
1587 op->val = regs->gpr[rd] ^ (unsigned short) word;
1588 goto logical_done_nocc;
1590 case 27: /* xoris */
1591 imm = (unsigned short) word;
1592 op->val = regs->gpr[rd] ^ (imm << 16);
1593 goto logical_done_nocc;
1595 case 28: /* andi. */
1596 op->val = regs->gpr[rd] & (unsigned short) word;
1598 goto logical_done_nocc;
1600 case 29: /* andis. */
1601 imm = (unsigned short) word;
1602 op->val = regs->gpr[rd] & (imm << 16);
1604 goto logical_done_nocc;
1606 #ifdef __powerpc64__
1608 mb = ((word >> 6) & 0x1f) | (word & 0x20);
1609 val = regs->gpr[rd];
1610 if ((word & 0x10) == 0) {
1611 sh = rb | ((word & 2) << 4);
1612 val = ROTATE(val, sh);
1613 switch ((word >> 2) & 3) {
1614 case 0: /* rldicl */
1615 val &= MASK64_L(mb);
1617 case 1: /* rldicr */
1618 val &= MASK64_R(mb);
1621 val &= MASK64(mb, 63 - sh);
1623 case 3: /* rldimi */
1624 imm = MASK64(mb, 63 - sh);
1625 val = (regs->gpr[ra] & ~imm) |
1631 sh = regs->gpr[rb] & 0x3f;
1632 val = ROTATE(val, sh);
1633 switch ((word >> 1) & 7) {
1635 op->val = val & MASK64_L(mb);
1638 op->val = val & MASK64_R(mb);
1643 op->type = UNKNOWN; /* illegal instruction */
1647 /* isel occupies 32 minor opcodes */
1648 if (((word >> 1) & 0x1f) == 15) {
1649 mb = (word >> 6) & 0x1f; /* bc field */
1650 val = (regs->ccr >> (31 - mb)) & 1;
1651 val2 = (ra) ? regs->gpr[ra] : 0;
1653 op->val = (val) ? val2 : regs->gpr[rb];
1657 switch ((word >> 1) & 0x3ff) {
1660 (rd & trap_compare((int)regs->gpr[ra],
1661 (int)regs->gpr[rb])))
1664 #ifdef __powerpc64__
1666 if (rd & trap_compare(regs->gpr[ra], regs->gpr[rb]))
1670 case 83: /* mfmsr */
1671 if (regs->msr & MSR_PR)
1676 case 146: /* mtmsr */
1677 if (regs->msr & MSR_PR)
1681 op->val = 0xffffffff & ~(MSR_ME | MSR_LE);
1684 case 178: /* mtmsrd */
1685 if (regs->msr & MSR_PR)
1689 /* only MSR_EE and MSR_RI get changed if bit 15 set */
1690 /* mtmsrd doesn't change MSR_HV, MSR_ME or MSR_LE */
1691 imm = (word & 0x10000)? 0x8002: 0xefffffffffffeffeUL;
1698 if ((word >> 20) & 1) {
1700 for (sh = 0; sh < 8; ++sh) {
1701 if (word & (0x80000 >> sh))
1706 op->val = regs->ccr & imm;
1709 case 128: /* setb */
1710 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1711 goto unknown_opcode;
1713 * 'ra' encodes the CR field number (bfa) in the top 3 bits.
1714 * Since each CR field is 4 bits,
1715 * we can simply mask off the bottom two bits (bfa * 4)
1716 * to yield the first bit in the CR field.
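* For example, with bfa == 1 the masked value is 4, so CR field 1 ends
* up in the low four bits of (regs->ccr >> (CR0_SHIFT - 4)).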
1719 /* 'val' stores bits of the CR field (bfa) */
1720 val = regs->ccr >> (CR0_SHIFT - ra);
1721 /* checks if the LT bit of CR field (bfa) is set */
1724 /* checks if the GT bit of CR field (bfa) is set */
1731 case 144: /* mtcrf */
1732 op->type = COMPUTE + SETCC;
1734 val = regs->gpr[rd];
1735 op->ccval = regs->ccr;
1736 for (sh = 0; sh < 8; ++sh) {
1737 if (word & (0x80000 >> sh))
1738 op->ccval = (op->ccval & ~imm) |
1744 case 339: /* mfspr */
1745 spr = ((word >> 16) & 0x1f) | ((word >> 6) & 0x3e0);
1749 if (spr == SPRN_XER || spr == SPRN_LR ||
1754 case 467: /* mtspr */
1755 spr = ((word >> 16) & 0x1f) | ((word >> 6) & 0x3e0);
1757 op->val = regs->gpr[rd];
1759 if (spr == SPRN_XER || spr == SPRN_LR ||
1765 * Compare instructions
1768 val = regs->gpr[ra];
1769 val2 = regs->gpr[rb];
1770 #ifdef __powerpc64__
1771 if ((rd & 1) == 0) {
1772 /* word (32-bit) compare */
1777 do_cmp_signed(regs, op, val, val2, rd >> 2);
1781 val = regs->gpr[ra];
1782 val2 = regs->gpr[rb];
1783 #ifdef __powerpc64__
1784 if ((rd & 1) == 0) {
1785 /* word (32-bit) compare */
1786 val = (unsigned int) val;
1787 val2 = (unsigned int) val2;
1790 do_cmp_unsigned(regs, op, val, val2, rd >> 2);
1793 case 508: /* cmpb */
1794 do_cmpb(regs, op, regs->gpr[rd], regs->gpr[rb]);
1795 goto logical_done_nocc;
1798 * Arithmetic instructions
1801 add_with_carry(regs, op, rd, ~regs->gpr[ra],
1804 #ifdef __powerpc64__
1805 case 9: /* mulhdu */
1806 asm("mulhdu %0,%1,%2" : "=r" (op->val) :
1807 "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1811 add_with_carry(regs, op, rd, regs->gpr[ra],
1815 case 11: /* mulhwu */
1816 asm("mulhwu %0,%1,%2" : "=r" (op->val) :
1817 "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1821 op->val = regs->gpr[rb] - regs->gpr[ra];
1823 #ifdef __powerpc64__
1824 case 73: /* mulhd */
1825 asm("mulhd %0,%1,%2" : "=r" (op->val) :
1826 "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1829 case 75: /* mulhw */
1830 asm("mulhw %0,%1,%2" : "=r" (op->val) :
1831 "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1835 op->val = -regs->gpr[ra];
1838 case 136: /* subfe */
1839 add_with_carry(regs, op, rd, ~regs->gpr[ra],
1840 regs->gpr[rb], regs->xer & XER_CA);
1843 case 138: /* adde */
1844 add_with_carry(regs, op, rd, regs->gpr[ra],
1845 regs->gpr[rb], regs->xer & XER_CA);
1848 case 200: /* subfze */
1849 add_with_carry(regs, op, rd, ~regs->gpr[ra], 0L,
1850 regs->xer & XER_CA);
1853 case 202: /* addze */
1854 add_with_carry(regs, op, rd, regs->gpr[ra], 0L,
1855 regs->xer & XER_CA);
1858 case 232: /* subfme */
1859 add_with_carry(regs, op, rd, ~regs->gpr[ra], -1L,
1860 regs->xer & XER_CA);
1862 #ifdef __powerpc64__
1863 case 233: /* mulld */
1864 op->val = regs->gpr[ra] * regs->gpr[rb];
1867 case 234: /* addme */
1868 add_with_carry(regs, op, rd, regs->gpr[ra], -1L,
1869 regs->xer & XER_CA);
1872 case 235: /* mullw */
1873 op->val = (long)(int) regs->gpr[ra] *
1874 (int) regs->gpr[rb];
1877 #ifdef __powerpc64__
1878 case 265: /* modud */
1879 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1880 goto unknown_opcode;
1881 op->val = regs->gpr[ra] % regs->gpr[rb];
1885 op->val = regs->gpr[ra] + regs->gpr[rb];
1888 case 267: /* moduw */
1889 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1890 goto unknown_opcode;
1891 op->val = (unsigned int) regs->gpr[ra] %
1892 (unsigned int) regs->gpr[rb];
1894 #ifdef __powerpc64__
1895 case 457: /* divdu */
1896 op->val = regs->gpr[ra] / regs->gpr[rb];
1899 case 459: /* divwu */
1900 op->val = (unsigned int) regs->gpr[ra] /
1901 (unsigned int) regs->gpr[rb];
1903 #ifdef __powerpc64__
1904 case 489: /* divd */
1905 op->val = (long int) regs->gpr[ra] /
1906 (long int) regs->gpr[rb];
1909 case 491: /* divw */
1910 op->val = (int) regs->gpr[ra] /
1911 (int) regs->gpr[rb];
1913 #ifdef __powerpc64__
1914 case 425: /* divde[.] */
1915 asm volatile(PPC_DIVDE(%0, %1, %2) :
1916 "=r" (op->val) : "r" (regs->gpr[ra]),
1917 "r" (regs->gpr[rb]));
1919 case 393: /* divdeu[.] */
1920 asm volatile(PPC_DIVDEU(%0, %1, %2) :
1921 "=r" (op->val) : "r" (regs->gpr[ra]),
1922 "r" (regs->gpr[rb]));
1925 case 755: /* darn */
1926 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1927 goto unknown_opcode;
1930 /* 32-bit conditioned */
1931 asm volatile(PPC_DARN(%0, 0) : "=r" (op->val));
1935 /* 64-bit conditioned */
1936 asm volatile(PPC_DARN(%0, 1) : "=r" (op->val));
1941 asm volatile(PPC_DARN(%0, 2) : "=r" (op->val));
1945 goto unknown_opcode;
1946 #ifdef __powerpc64__
1947 case 777: /* modsd */
1948 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1949 goto unknown_opcode;
1950 op->val = (long int) regs->gpr[ra] %
1951 (long int) regs->gpr[rb];
1954 case 779: /* modsw */
1955 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1956 goto unknown_opcode;
1957 op->val = (int) regs->gpr[ra] %
1958 (int) regs->gpr[rb];
1963 * Logical instructions
1965 case 26: /* cntlzw */
1966 val = (unsigned int) regs->gpr[rd];
1967 op->val = ( val ? __builtin_clz(val) : 32 );
1969 #ifdef __powerpc64__
1970 case 58: /* cntlzd */
1971 val = regs->gpr[rd];
1972 op->val = ( val ? __builtin_clzl(val) : 64 );
1976 op->val = regs->gpr[rd] & regs->gpr[rb];
1980 op->val = regs->gpr[rd] & ~regs->gpr[rb];
1983 case 122: /* popcntb */
1984 do_popcnt(regs, op, regs->gpr[rd], 8);
1985 goto logical_done_nocc;
1988 op->val = ~(regs->gpr[rd] | regs->gpr[rb]);
1991 case 154: /* prtyw */
1992 do_prty(regs, op, regs->gpr[rd], 32);
1993 goto logical_done_nocc;
1995 case 186: /* prtyd */
1996 do_prty(regs, op, regs->gpr[rd], 64);
1997 goto logical_done_nocc;
1999 case 252: /* bpermd */
2000 do_bpermd(regs, op, regs->gpr[rd], regs->gpr[rb]);
2001 goto logical_done_nocc;
2004 op->val = ~(regs->gpr[rd] ^ regs->gpr[rb]);
2008 op->val = regs->gpr[rd] ^ regs->gpr[rb];
2011 case 378: /* popcntw */
2012 do_popcnt(regs, op, regs->gpr[rd], 32);
2013 goto logical_done_nocc;
2016 op->val = regs->gpr[rd] | ~regs->gpr[rb];
2020 op->val = regs->gpr[rd] | regs->gpr[rb];
2023 case 476: /* nand */
2024 op->val = ~(regs->gpr[rd] & regs->gpr[rb]);
2027 case 506: /* popcntd */
2028 do_popcnt(regs, op, regs->gpr[rd], 64);
2029 goto logical_done_nocc;
2031 case 538: /* cnttzw */
2032 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2033 goto unknown_opcode;
2034 val = (unsigned int) regs->gpr[rd];
2035 op->val = (val ? __builtin_ctz(val) : 32);
2037 #ifdef __powerpc64__
2038 case 570: /* cnttzd */
2039 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2040 goto unknown_opcode;
2041 val = regs->gpr[rd];
2042 op->val = (val ? __builtin_ctzl(val) : 64);
2045 case 922: /* extsh */
2046 op->val = (signed short) regs->gpr[rd];
2049 case 954: /* extsb */
2050 op->val = (signed char) regs->gpr[rd];
2052 #ifdef __powerpc64__
2053 case 986: /* extsw */
2054 op->val = (signed int) regs->gpr[rd];
2059 * Shift instructions
2062 sh = regs->gpr[rb] & 0x3f;
2064 op->val = (regs->gpr[rd] << sh) & 0xffffffffUL;
2070 sh = regs->gpr[rb] & 0x3f;
2072 op->val = (regs->gpr[rd] & 0xffffffffUL) >> sh;
2077 case 792: /* sraw */
2078 op->type = COMPUTE + SETREG + SETXER;
2079 sh = regs->gpr[rb] & 0x3f;
2080 ival = (signed int) regs->gpr[rd];
2081 op->val = ival >> (sh < 32 ? sh : 31);
2082 op->xerval = regs->xer;
2083 if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0))
2084 op->xerval |= XER_CA;
2086 op->xerval &= ~XER_CA;
2087 set_ca32(op, op->xerval & XER_CA);
2090 case 824: /* srawi */
2091 op->type = COMPUTE + SETREG + SETXER;
2093 ival = (signed int) regs->gpr[rd];
2094 op->val = ival >> sh;
2095 op->xerval = regs->xer;
2096 if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
2097 op->xerval |= XER_CA;
2099 op->xerval &= ~XER_CA;
2100 set_ca32(op, op->xerval & XER_CA);
2103 #ifdef __powerpc64__
2105 sh = regs->gpr[rb] & 0x7f;
2107 op->val = regs->gpr[rd] << sh;
2113 sh = regs->gpr[rb] & 0x7f;
2115 op->val = regs->gpr[rd] >> sh;
2120 case 794: /* srad */
2121 op->type = COMPUTE + SETREG + SETXER;
2122 sh = regs->gpr[rb] & 0x7f;
2123 ival = (signed long int) regs->gpr[rd];
2124 op->val = ival >> (sh < 64 ? sh : 63);
2125 op->xerval = regs->xer;
2126 if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0))
2127 op->xerval |= XER_CA;
2129 op->xerval &= ~XER_CA;
2130 set_ca32(op, op->xerval & XER_CA);
2133 case 826: /* sradi with sh_5 = 0 */
2134 case 827: /* sradi with sh_5 = 1 */
2135 op->type = COMPUTE + SETREG + SETXER;
2136 sh = rb | ((word & 2) << 4);
2137 ival = (signed long int) regs->gpr[rd];
2138 op->val = ival >> sh;
2139 op->xerval = regs->xer;
2140 if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
2141 op->xerval |= XER_CA;
2143 op->xerval &= ~XER_CA;
2144 set_ca32(op, op->xerval & XER_CA);
2147 case 890: /* extswsli with sh_5 = 0 */
2148 case 891: /* extswsli with sh_5 = 1 */
2149 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2150 goto unknown_opcode;
2151 op->type = COMPUTE + SETREG;
2152 sh = rb | ((word & 2) << 4);
2153 val = (signed int) regs->gpr[rd];
2155 op->val = ROTATE(val, sh) & MASK64(0, 63 - sh);
2160 #endif /* __powerpc64__ */
2163 * Cache instructions
2165 case 54: /* dcbst */
2166 op->type = MKOP(CACHEOP, DCBST, 0);
2167 op->ea = xform_ea(word, regs);
2171 op->type = MKOP(CACHEOP, DCBF, 0);
2172 op->ea = xform_ea(word, regs);
2175 case 246: /* dcbtst */
2176 op->type = MKOP(CACHEOP, DCBTST, 0);
2177 op->ea = xform_ea(word, regs);
2181 case 278: /* dcbt */
2182 op->type = MKOP(CACHEOP, DCBT, 0);
2183 op->ea = xform_ea(word, regs);
2187 case 982: /* icbi */
2188 op->type = MKOP(CACHEOP, ICBI, 0);
2189 op->ea = xform_ea(word, regs);
2192 case 1014: /* dcbz */
2193 op->type = MKOP(CACHEOP, DCBZ, 0);
2194 op->ea = xform_ea(word, regs);
2204 op->update_reg = ra;
2206 op->val = regs->gpr[rd];
2207 u = (word >> 20) & UPDATE;
2213 op->ea = xform_ea(word, regs);
2214 switch ((word >> 1) & 0x3ff) {
2215 case 20: /* lwarx */
2216 op->type = MKOP(LARX, 0, 4);
2219 case 150: /* stwcx. */
2220 op->type = MKOP(STCX, 0, 4);
2223 #ifdef __powerpc64__
2224 case 84: /* ldarx */
2225 op->type = MKOP(LARX, 0, 8);
2228 case 214: /* stdcx. */
2229 op->type = MKOP(STCX, 0, 8);
2232 case 52: /* lbarx */
2233 op->type = MKOP(LARX, 0, 1);
2236 case 694: /* stbcx. */
2237 op->type = MKOP(STCX, 0, 1);
2240 case 116: /* lharx */
2241 op->type = MKOP(LARX, 0, 2);
2244 case 726: /* sthcx. */
2245 op->type = MKOP(STCX, 0, 2);
2248 case 276: /* lqarx */
2249 if (!((rd & 1) || rd == ra || rd == rb))
2250 op->type = MKOP(LARX, 0, 16);
2253 case 182: /* stqcx. */
2255 op->type = MKOP(STCX, 0, 16);
2260 case 55: /* lwzux */
2261 op->type = MKOP(LOAD, u, 4);
2265 case 119: /* lbzux */
2266 op->type = MKOP(LOAD, u, 1);
2269 #ifdef CONFIG_ALTIVEC
2271 * Note: for the load/store vector element instructions,
2272 * bits of the EA say which field of the VMX register to use.
2275 op->type = MKOP(LOAD_VMX, 0, 1);
2276 op->element_size = 1;
2279 case 39: /* lvehx */
2280 op->type = MKOP(LOAD_VMX, 0, 2);
2281 op->element_size = 2;
2284 case 71: /* lvewx */
2285 op->type = MKOP(LOAD_VMX, 0, 4);
2286 op->element_size = 4;
2290 case 359: /* lvxl */
2291 op->type = MKOP(LOAD_VMX, 0, 16);
2292 op->element_size = 16;
2295 case 135: /* stvebx */
2296 op->type = MKOP(STORE_VMX, 0, 1);
2297 op->element_size = 1;
2300 case 167: /* stvehx */
2301 op->type = MKOP(STORE_VMX, 0, 2);
2302 op->element_size = 2;
2305 case 199: /* stvewx */
2306 op->type = MKOP(STORE_VMX, 0, 4);
2307 op->element_size = 4;
2310 case 231: /* stvx */
2311 case 487: /* stvxl */
2312 op->type = MKOP(STORE_VMX, 0, 16);
2314 #endif /* CONFIG_ALTIVEC */
2316 #ifdef __powerpc64__
2319 op->type = MKOP(LOAD, u, 8);
2322 case 149: /* stdx */
2323 case 181: /* stdux */
2324 op->type = MKOP(STORE, u, 8);
2328 case 151: /* stwx */
2329 case 183: /* stwux */
2330 op->type = MKOP(STORE, u, 4);
2333 case 215: /* stbx */
2334 case 247: /* stbux */
2335 op->type = MKOP(STORE, u, 1);
2338 case 279: /* lhzx */
2339 case 311: /* lhzux */
2340 op->type = MKOP(LOAD, u, 2);
2343 #ifdef __powerpc64__
2344 case 341: /* lwax */
2345 case 373: /* lwaux */
2346 op->type = MKOP(LOAD, SIGNEXT | u, 4);
2350 case 343: /* lhax */
2351 case 375: /* lhaux */
2352 op->type = MKOP(LOAD, SIGNEXT | u, 2);
2355 case 407: /* sthx */
2356 case 439: /* sthux */
2357 op->type = MKOP(STORE, u, 2);
2360 #ifdef __powerpc64__
2361 case 532: /* ldbrx */
2362 op->type = MKOP(LOAD, BYTEREV, 8);
2366 case 533: /* lswx */
2367 op->type = MKOP(LOAD_MULTI, 0, regs->xer & 0x7f);
2370 case 534: /* lwbrx */
2371 op->type = MKOP(LOAD, BYTEREV, 4);
2374 case 597: /* lswi */
2376 rb = 32; /* # bytes to load */
2377 op->type = MKOP(LOAD_MULTI, 0, rb);
2378 op->ea = ra ? regs->gpr[ra] : 0;
2381 #ifdef CONFIG_PPC_FPU
2382 case 535: /* lfsx */
2383 case 567: /* lfsux */
2384 op->type = MKOP(LOAD_FP, u | FPCONV, 4);
2387 case 599: /* lfdx */
2388 case 631: /* lfdux */
2389 op->type = MKOP(LOAD_FP, u, 8);
2392 case 663: /* stfsx */
2393 case 695: /* stfsux */
2394 op->type = MKOP(STORE_FP, u | FPCONV, 4);
2397 case 727: /* stfdx */
2398 case 759: /* stfdux */
2399 op->type = MKOP(STORE_FP, u, 8);
2402 #ifdef __powerpc64__
2403 case 791: /* lfdpx */
2404 op->type = MKOP(LOAD_FP, 0, 16);
2407 case 855: /* lfiwax */
2408 op->type = MKOP(LOAD_FP, SIGNEXT, 4);
2411 case 887: /* lfiwzx */
2412 op->type = MKOP(LOAD_FP, 0, 4);
2415 case 919: /* stfdpx */
2416 op->type = MKOP(STORE_FP, 0, 16);
2419 case 983: /* stfiwx */
2420 op->type = MKOP(STORE_FP, 0, 4);
2422 #endif /* __powerpc64__ */
2423 #endif /* CONFIG_PPC_FPU */
2425 #ifdef __powerpc64__
2426 case 660: /* stdbrx */
2427 op->type = MKOP(STORE, BYTEREV, 8);
2428 op->val = byterev_8(regs->gpr[rd]);
2432 case 661: /* stswx */
2433 op->type = MKOP(STORE_MULTI, 0, regs->xer & 0x7f);
2436 case 662: /* stwbrx */
2437 op->type = MKOP(STORE, BYTEREV, 4);
2438 op->val = byterev_4(regs->gpr[rd]);
2441 case 725: /* stswi */
2443 rb = 32; /* # bytes to store */
2444 op->type = MKOP(STORE_MULTI, 0, rb);
2445 op->ea = ra ? regs->gpr[ra] : 0;
2448 case 790: /* lhbrx */
2449 op->type = MKOP(LOAD, BYTEREV, 2);
2452 case 918: /* sthbrx */
2453 op->type = MKOP(STORE, BYTEREV, 2);
2454 op->val = byterev_2(regs->gpr[rd]);
2458 case 12: /* lxsiwzx */
2459 op->reg = rd | ((word & 1) << 5);
2460 op->type = MKOP(LOAD_VSX, 0, 4);
2461 op->element_size = 8;
2464 case 76: /* lxsiwax */
2465 op->reg = rd | ((word & 1) << 5);
2466 op->type = MKOP(LOAD_VSX, SIGNEXT, 4);
2467 op->element_size = 8;
2470 case 140: /* stxsiwx */
2471 op->reg = rd | ((word & 1) << 5);
2472 op->type = MKOP(STORE_VSX, 0, 4);
2473 op->element_size = 8;
2476 case 268: /* lxvx */
2477 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2478 goto unknown_opcode;
2479 op->reg = rd | ((word & 1) << 5);
2480 op->type = MKOP(LOAD_VSX, 0, 16);
2481 op->element_size = 16;
2482 op->vsx_flags = VSX_CHECK_VEC;
2485 case 269: /* lxvl */
2486 case 301: { /* lxvll */
2488 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2489 goto unknown_opcode;
2490 op->reg = rd | ((word & 1) << 5);
2491 op->ea = ra ? regs->gpr[ra] : 0;
2492 nb = regs->gpr[rb] & 0xff;
2495 op->type = MKOP(LOAD_VSX, 0, nb);
2496 op->element_size = 16;
2497 op->vsx_flags = ((word & 0x20) ? VSX_LDLEFT : 0) |
2501 case 332: /* lxvdsx */
2502 op->reg = rd | ((word & 1) << 5);
2503 op->type = MKOP(LOAD_VSX, 0, 8);
2504 op->element_size = 8;
2505 op->vsx_flags = VSX_SPLAT;
2508 case 333: /* lxvpx */
2509 if (!cpu_has_feature(CPU_FTR_ARCH_31))
2510 goto unknown_opcode;
2511 op->reg = VSX_REGISTER_XTP(rd);
2512 op->type = MKOP(LOAD_VSX, 0, 32);
2513 op->element_size = 32;
2516 case 364: /* lxvwsx */
2517 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2518 goto unknown_opcode;
2519 op->reg = rd | ((word & 1) << 5);
2520 op->type = MKOP(LOAD_VSX, 0, 4);
2521 op->element_size = 4;
2522 op->vsx_flags = VSX_SPLAT | VSX_CHECK_VEC;
2525 case 396: /* stxvx */
2526 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2527 goto unknown_opcode;
2528 op->reg = rd | ((word & 1) << 5);
2529 op->type = MKOP(STORE_VSX, 0, 16);
2530 op->element_size = 16;
2531 op->vsx_flags = VSX_CHECK_VEC;
2534 case 397: /* stxvl */
2535 case 429: { /* stxvll */
2537 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2538 goto unknown_opcode;
2539 op->reg = rd | ((word & 1) << 5);
2540 op->ea = ra ? regs->gpr[ra] : 0;
2541 nb = regs->gpr[rb] & 0xff;
2544 op->type = MKOP(STORE_VSX, 0, nb);
2545 op->element_size = 16;
2546 op->vsx_flags = ((word & 0x20) ? VSX_LDLEFT : 0) |
2550 case 461: /* stxvpx */
2551 if (!cpu_has_feature(CPU_FTR_ARCH_31))
2552 goto unknown_opcode;
2553 op->reg = VSX_REGISTER_XTP(rd);
2554 op->type = MKOP(STORE_VSX, 0, 32);
2555 op->element_size = 32;
2557 case 524: /* lxsspx */
2558 op->reg = rd | ((word & 1) << 5);
2559 op->type = MKOP(LOAD_VSX, 0, 4);
2560 op->element_size = 8;
2561 op->vsx_flags = VSX_FPCONV;
2564 case 588: /* lxsdx */
2565 op->reg = rd | ((word & 1) << 5);
2566 op->type = MKOP(LOAD_VSX, 0, 8);
2567 op->element_size = 8;
2570 case 652: /* stxsspx */
2571 op->reg = rd | ((word & 1) << 5);
2572 op->type = MKOP(STORE_VSX, 0, 4);
2573 op->element_size = 8;
2574 op->vsx_flags = VSX_FPCONV;
2577 case 716: /* stxsdx */
2578 op->reg = rd | ((word & 1) << 5);
2579 op->type = MKOP(STORE_VSX, 0, 8);
2580 op->element_size = 8;
2583 case 780: /* lxvw4x */
2584 op->reg = rd | ((word & 1) << 5);
2585 op->type = MKOP(LOAD_VSX, 0, 16);
2586 op->element_size = 4;
2589 case 781: /* lxsibzx */
2590 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2591 goto unknown_opcode;
2592 op->reg = rd | ((word & 1) << 5);
2593 op->type = MKOP(LOAD_VSX, 0, 1);
2594 op->element_size = 8;
2595 op->vsx_flags = VSX_CHECK_VEC;
2598 case 812: /* lxvh8x */
2599 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2600 goto unknown_opcode;
2601 op->reg = rd | ((word & 1) << 5);
2602 op->type = MKOP(LOAD_VSX, 0, 16);
2603 op->element_size = 2;
2604 op->vsx_flags = VSX_CHECK_VEC;
2607 case 813: /* lxsihzx */
2608 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2609 goto unknown_opcode;
2610 op->reg = rd | ((word & 1) << 5);
2611 op->type = MKOP(LOAD_VSX, 0, 2);
2612 op->element_size = 8;
2613 op->vsx_flags = VSX_CHECK_VEC;
2616 case 844: /* lxvd2x */
2617 op->reg = rd | ((word & 1) << 5);
2618 op->type = MKOP(LOAD_VSX, 0, 16);
2619 op->element_size = 8;
2622 case 876: /* lxvb16x */
2623 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2624 goto unknown_opcode;
2625 op->reg = rd | ((word & 1) << 5);
2626 op->type = MKOP(LOAD_VSX, 0, 16);
2627 op->element_size = 1;
2628 op->vsx_flags = VSX_CHECK_VEC;
2631 case 908: /* stxvw4x */
2632 op->reg = rd | ((word & 1) << 5);
2633 op->type = MKOP(STORE_VSX, 0, 16);
2634 op->element_size = 4;
2637 case 909: /* stxsibx */
2638 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2639 goto unknown_opcode;
2640 op->reg = rd | ((word & 1) << 5);
2641 op->type = MKOP(STORE_VSX, 0, 1);
2642 op->element_size = 8;
2643 op->vsx_flags = VSX_CHECK_VEC;
2646 case 940: /* stxvh8x */
2647 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2648 goto unknown_opcode;
2649 op->reg = rd | ((word & 1) << 5);
2650 op->type = MKOP(STORE_VSX, 0, 16);
2651 op->element_size = 2;
2652 op->vsx_flags = VSX_CHECK_VEC;
2655 case 941: /* stxsihx */
2656 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2657 goto unknown_opcode;
2658 op->reg = rd | ((word & 1) << 5);
2659 op->type = MKOP(STORE_VSX, 0, 2);
2660 op->element_size = 8;
2661 op->vsx_flags = VSX_CHECK_VEC;
2664 case 972: /* stxvd2x */
2665 op->reg = rd | ((word & 1) << 5);
2666 op->type = MKOP(STORE_VSX, 0, 16);
2667 op->element_size = 8;
2670 case 1004: /* stxvb16x */
2671 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2672 goto unknown_opcode;
2673 op->reg = rd | ((word & 1) << 5);
2674 op->type = MKOP(STORE_VSX, 0, 16);
2675 op->element_size = 1;
2676 op->vsx_flags = VSX_CHECK_VEC;
2679 #endif /* CONFIG_VSX */
2685 op->type = MKOP(LOAD, u, 4);
2686 op->ea = dform_ea(word, regs);
2691 op->type = MKOP(LOAD, u, 1);
2692 op->ea = dform_ea(word, regs);
2697 op->type = MKOP(STORE, u, 4);
2698 op->ea = dform_ea(word, regs);
2703 op->type = MKOP(STORE, u, 1);
2704 op->ea = dform_ea(word, regs);
2709 op->type = MKOP(LOAD, u, 2);
2710 op->ea = dform_ea(word, regs);
2715 op->type = MKOP(LOAD, SIGNEXT | u, 2);
2716 op->ea = dform_ea(word, regs);
2721 op->type = MKOP(STORE, u, 2);
2722 op->ea = dform_ea(word, regs);
2727 break; /* invalid form, ra in range to load */
2728 op->type = MKOP(LOAD_MULTI, 0, 4 * (32 - rd));
2729 op->ea = dform_ea(word, regs);
2733 op->type = MKOP(STORE_MULTI, 0, 4 * (32 - rd));
2734 op->ea = dform_ea(word, regs);
2737 #ifdef CONFIG_PPC_FPU
2740 op->type = MKOP(LOAD_FP, u | FPCONV, 4);
2741 op->ea = dform_ea(word, regs);
2746 op->type = MKOP(LOAD_FP, u, 8);
2747 op->ea = dform_ea(word, regs);
2751 case 53: /* stfsu */
2752 op->type = MKOP(STORE_FP, u | FPCONV, 4);
2753 op->ea = dform_ea(word, regs);
2757 case 55: /* stfdu */
2758 op->type = MKOP(STORE_FP, u, 8);
2759 op->ea = dform_ea(word, regs);
2763 #ifdef __powerpc64__
2765 if (!((rd & 1) || (rd == ra)))
2766 op->type = MKOP(LOAD, 0, 16);
2767 op->ea = dqform_ea(word, regs);
2772 case 57: /* lfdp, lxsd, lxssp */
2773 op->ea = dsform_ea(word, regs);
2777 break; /* reg must be even */
2778 op->type = MKOP(LOAD_FP, 0, 16);
2781 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2782 goto unknown_opcode;
2784 op->type = MKOP(LOAD_VSX, 0, 8);
2785 op->element_size = 8;
2786 op->vsx_flags = VSX_CHECK_VEC;
2789 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2790 goto unknown_opcode;
2792 op->type = MKOP(LOAD_VSX, 0, 4);
2793 op->element_size = 8;
2794 op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2798 #endif /* CONFIG_VSX */
2800 #ifdef __powerpc64__
2801 case 58: /* ld[u], lwa */
2802 op->ea = dsform_ea(word, regs);
2805 op->type = MKOP(LOAD, 0, 8);
2808 op->type = MKOP(LOAD, UPDATE, 8);
2811 op->type = MKOP(LOAD, SIGNEXT, 4);
2819 if (!cpu_has_feature(CPU_FTR_ARCH_31))
2820 goto unknown_opcode;
2821 op->ea = dqform_ea(word, regs);
2822 op->reg = VSX_REGISTER_XTP(rd);
2823 op->element_size = 32;
2824 switch (word & 0xf) {
2826 op->type = MKOP(LOAD_VSX, 0, 32);
2829 op->type = MKOP(STORE_VSX, 0, 32);
2834 case 61: /* stfdp, lxv, stxsd, stxssp, stxv */
2836 case 0: /* stfdp with LSB of DS field = 0 */
2837 case 4: /* stfdp with LSB of DS field = 1 */
2838 op->ea = dsform_ea(word, regs);
2839 op->type = MKOP(STORE_FP, 0, 16);
2843 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2844 goto unknown_opcode;
2845 op->ea = dqform_ea(word, regs);
2848 op->type = MKOP(LOAD_VSX, 0, 16);
2849 op->element_size = 16;
2850 op->vsx_flags = VSX_CHECK_VEC;
2853 case 2: /* stxsd with LSB of DS field = 0 */
2854 case 6: /* stxsd with LSB of DS field = 1 */
2855 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2856 goto unknown_opcode;
2857 op->ea = dsform_ea(word, regs);
2859 op->type = MKOP(STORE_VSX, 0, 8);
2860 op->element_size = 8;
2861 op->vsx_flags = VSX_CHECK_VEC;
2864 case 3: /* stxssp with LSB of DS field = 0 */
2865 case 7: /* stxssp with LSB of DS field = 1 */
2866 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2867 goto unknown_opcode;
2868 op->ea = dsform_ea(word, regs);
2870 op->type = MKOP(STORE_VSX, 0, 4);
2871 op->element_size = 8;
2872 op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2876 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2877 goto unknown_opcode;
2878 op->ea = dqform_ea(word, regs);
2881 op->type = MKOP(STORE_VSX, 0, 16);
2882 op->element_size = 16;
2883 op->vsx_flags = VSX_CHECK_VEC;
2887 #endif /* CONFIG_VSX */
2889 #ifdef __powerpc64__
2890 case 62: /* std[u] */
2891 op->ea = dsform_ea(word, regs);
2894 op->type = MKOP(STORE, 0, 8);
2897 op->type = MKOP(STORE, UPDATE, 8);
2901 op->type = MKOP(STORE, 0, 16);
2905 case 1: /* Prefixed instructions */
2906 if (!cpu_has_feature(CPU_FTR_ARCH_31))
2907 goto unknown_opcode;
2909 prefix_r = GET_PREFIX_R(word);
2910 ra = GET_PREFIX_RA(suffix);
2911 op->update_reg = ra;
2912 rd = (suffix >> 21) & 0x1f;
2914 op->val = regs->gpr[rd];
2916 suffixopcode = get_op(suffix);
2917 prefixtype = (word >> 24) & 0x3;
2918 switch (prefixtype) {
2919 case 0: /* Type 00 Eight-Byte Load/Store */
2922 op->ea = mlsd_8lsd_ea(word, suffix, regs);
2923 switch (suffixopcode) {
2925 op->type = MKOP(LOAD, PREFIXED | SIGNEXT, 4);
2928 case 42: /* plxsd */
2930 op->type = MKOP(LOAD_VSX, PREFIXED, 8);
2931 op->element_size = 8;
2932 op->vsx_flags = VSX_CHECK_VEC;
2934 case 43: /* plxssp */
2936 op->type = MKOP(LOAD_VSX, PREFIXED, 4);
2937 op->element_size = 8;
2938 op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2940 case 46: /* pstxsd */
2942 op->type = MKOP(STORE_VSX, PREFIXED, 8);
2943 op->element_size = 8;
2944 op->vsx_flags = VSX_CHECK_VEC;
2946 case 47: /* pstxssp */
2948 op->type = MKOP(STORE_VSX, PREFIXED, 4);
2949 op->element_size = 8;
2950 op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2952 case 51: /* plxv1 */
2955 case 50: /* plxv0 */
2956 op->type = MKOP(LOAD_VSX, PREFIXED, 16);
2957 op->element_size = 16;
2958 op->vsx_flags = VSX_CHECK_VEC;
2960 case 55: /* pstxv1 */
2963 case 54: /* pstxv0 */
2964 op->type = MKOP(STORE_VSX, PREFIXED, 16);
2965 op->element_size = 16;
2966 op->vsx_flags = VSX_CHECK_VEC;
2968 #endif /* CONFIG_VSX */
2970 op->type = MKOP(LOAD, PREFIXED, 16);
2973 op->type = MKOP(LOAD, PREFIXED, 8);
2976 case 58: /* plxvp */
2977 op->reg = VSX_REGISTER_XTP(rd);
2978 op->type = MKOP(LOAD_VSX, PREFIXED, 32);
2979 op->element_size = 32;
2981 #endif /* CONFIG_VSX */
2983 op->type = MKOP(STORE, PREFIXED, 16);
2986 op->type = MKOP(STORE, PREFIXED, 8);
2989 case 62: /* pstxvp */
2990 op->reg = VSX_REGISTER_XTP(rd);
2991 op->type = MKOP(STORE_VSX, PREFIXED, 32);
2992 op->element_size = 32;
2994 #endif /* CONFIG_VSX */
2997 case 1: /* Type 01 Eight-Byte Register-to-Register */
2999 case 2: /* Type 10 Modified Load/Store */
3002 op->ea = mlsd_8lsd_ea(word, suffix, regs);
3003 switch (suffixopcode) {
3005 op->type = MKOP(LOAD, PREFIXED, 4);
3008 op->type = MKOP(LOAD, PREFIXED, 1);
3011 op->type = MKOP(STORE, PREFIXED, 4);
3014 op->type = MKOP(STORE, PREFIXED, 1);
3017 op->type = MKOP(LOAD, PREFIXED, 2);
3020 op->type = MKOP(LOAD, PREFIXED | SIGNEXT, 2);
3023 op->type = MKOP(STORE, PREFIXED, 2);
3026 op->type = MKOP(LOAD_FP, PREFIXED | FPCONV, 4);
3029 op->type = MKOP(LOAD_FP, PREFIXED, 8);
3031 case 52: /* pstfs */
3032 op->type = MKOP(STORE_FP, PREFIXED | FPCONV, 4);
3034 case 54: /* pstfd */
3035 op->type = MKOP(STORE_FP, PREFIXED, 8);
3039 case 3: /* Type 11 Modified Register-to-Register */
3042 #endif /* __powerpc64__ */
3046 if (OP_IS_LOAD_STORE(op->type) && (op->type & UPDATE)) {
3047 switch (GETTYPE(op->type)) {
3050 goto unknown_opcode;
3056 goto unknown_opcode;
3061 if ((GETTYPE(op->type) == LOAD_VSX ||
3062 GETTYPE(op->type) == STORE_VSX) &&
3063 !cpu_has_feature(CPU_FTR_VSX)) {
3066 #endif /* CONFIG_VSX */
3091 op->type = INTERRUPT | 0x700;
3092 op->val = SRR1_PROGPRIV;
3096 op->type = INTERRUPT | 0x700;
3097 op->val = SRR1_PROGTRAP;
3100 EXPORT_SYMBOL_GPL(analyse_instr);
3101 NOKPROBE_SYMBOL(analyse_instr);
3104 * For PPC32 we always use stwu with r1 to change the stack pointer,
3105 * so this emulated store may corrupt the exception frame.  We therefore
3106 * provide an exception frame trampoline, which is pushed below the
3107 * kprobed function's stack.  Here we only update gpr[1] and do not
3108 * emulate the real store operation; the real store is done safely
3109 * in the exception return code, which checks this flag.
3111 static nokprobe_inline int handle_stack_update(unsigned long ea, struct pt_regs *regs)
3114 * Warn if the flag is already set, since in that case the
3115 * previously pending store value would be lost.
3117 WARN_ON(test_thread_flag(TIF_EMULATE_STACK_STORE));
3118 set_thread_flag(TIF_EMULATE_STACK_STORE);
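/*
 * Illustrative sketch (an assumption, not code from this file): the
 * exception-return path is expected to consume TIF_EMULATE_STACK_STORE
 * and perform the deferred store of the old r1 value at the new stack
 * pointer, roughly along these lines:
 *
 *	if (test_and_clear_thread_flag(TIF_EMULATE_STACK_STORE))
 *		*(unsigned long *)regs->gpr[1] = old_r1;
 *
 * The placement and the old_r1 variable above are illustrative only.
 */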
3122 static nokprobe_inline void do_signext(unsigned long *valp, int size)
3126 *valp = (signed short) *valp;
3129 *valp = (signed int) *valp;
3134 static nokprobe_inline void do_byterev(unsigned long *valp, int size)
3138 *valp = byterev_2(*valp);
3141 *valp = byterev_4(*valp);
3143 #ifdef __powerpc64__
3145 *valp = byterev_8(*valp);
3152 * Emulate an instruction that can be executed just by updating register values.
3155 void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op)
3157 unsigned long next_pc;
3159 next_pc = truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op->type));
3160 switch (GETTYPE(op->type)) {
3162 if (op->type & SETREG)
3163 regs->gpr[op->reg] = op->val;
3164 if (op->type & SETCC)
3165 regs->ccr = op->ccval;
3166 if (op->type & SETXER)
3167 regs->xer = op->xerval;
3171 if (op->type & SETLK)
3172 regs->link = next_pc;
3173 if (op->type & BRTAKEN)
3175 if (op->type & DECCTR)
3180 switch (op->type & BARRIER_MASK) {
3191 case BARRIER_LWSYNC:
3192 asm volatile("lwsync" : : : "memory");
3194 case BARRIER_PTESYNC:
3195 asm volatile("ptesync" : : : "memory");
3204 regs->gpr[op->reg] = regs->xer & 0xffffffffUL;
3207 regs->gpr[op->reg] = regs->link;
3210 regs->gpr[op->reg] = regs->ctr;
3220 regs->xer = op->val & 0xffffffffUL;
3223 regs->link = op->val;
3226 regs->ctr = op->val;
3236 regs_set_return_ip(regs, next_pc);
3238 NOKPROBE_SYMBOL(emulate_update_regs);
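/*
 * Illustrative usage sketch (an assumption, mirroring emulate_step()
 * below): a caller first analyses the instruction and, when
 * analyse_instr() reports that no memory access is needed, lets this
 * function finish the emulation:
 *
 *	struct instruction_op op;
 *
 *	if (analyse_instr(&op, regs, instr) > 0)
 *		emulate_update_regs(regs, &op);
 */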
3241 * Emulate a previously-analysed load or store instruction.
3242 * Return values are:
3243 * 0 = instruction emulated successfully
3244 * -EFAULT = address out of range or access faulted (regs->dar
3245 *            contains the faulting address)
3246 * -EACCES = misaligned access, instruction requires alignment
3247 * -EINVAL = unknown operation in *op
3249 int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op)
3251 int err, size, type;
3259 size = GETSIZE(op->type);
3260 type = GETTYPE(op->type);
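/*
 * cross_endian is true when the emulated context runs with the
 * opposite byte order to the kernel (its MSR_LE differs from
 * MSR_KERNEL's), in which case data moved by the emulated access
 * has to be byte-reversed below.
 */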
3261 cross_endian = (regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE);
3262 ea = truncate_if_32bit(regs->msr, op->ea);
3266 if (ea & (size - 1))
3267 return -EACCES; /* can't handle misaligned */
3268 if (!address_ok(regs, ea, size))
3273 #ifdef __powerpc64__
3275 __get_user_asmx(val, ea, err, "lbarx");
3278 __get_user_asmx(val, ea, err, "lharx");
3282 __get_user_asmx(val, ea, err, "lwarx");
3284 #ifdef __powerpc64__
3286 __get_user_asmx(val, ea, err, "ldarx");
3289 err = do_lqarx(ea, &regs->gpr[op->reg]);
3300 regs->gpr[op->reg] = val;
3304 if (ea & (size - 1))
3305 return -EACCES; /* can't handle misaligned */
3306 if (!address_ok(regs, ea, size))
3310 #ifdef __powerpc64__
3312 __put_user_asmx(op->val, ea, err, "stbcx.", cr);
3315 __put_user_asmx(op->val, ea, err, "sthcx.", cr);
3319 __put_user_asmx(op->val, ea, err, "stwcx.", cr);
3321 #ifdef __powerpc64__
3323 __put_user_asmx(op->val, ea, err, "stdcx.", cr);
3326 err = do_stqcx(ea, regs->gpr[op->reg],
3327 regs->gpr[op->reg + 1], &cr);
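/*
 * The stcx.-style primitives above return the resulting CR0 field in
 * "cr".  The update below keeps CR1-CR7 from regs->ccr, takes
 * LT/GT/EQ of CR0 from cr, and sets CR0[SO] from XER[SO] (shifting
 * the 0x80000000 XER bit down into the 0x10000000 CR0 position).
 */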
3334 regs->ccr = (regs->ccr & 0x0fffffff) | (cr & 0xe0000000) |
3336 ((regs->xer >> 3) & 0x10000000);
3342 #ifdef __powerpc64__
3344 err = emulate_lq(regs, ea, op->reg, cross_endian);
3348 err = read_mem(&regs->gpr[op->reg], ea, size, regs);
3350 if (op->type & SIGNEXT)
3351 do_signext(&regs->gpr[op->reg], size);
3352 if ((op->type & BYTEREV) == (cross_endian ? 0 : BYTEREV))
3353 do_byterev(&regs->gpr[op->reg], size);
3357 #ifdef CONFIG_PPC_FPU
3360 * If the instruction being emulated is in userspace, we can emulate
3361 * it even if the FP state is not live, because we have the state
3362 * stored in the thread_struct. If the instruction is in the kernel,
3363 * we must not touch the state in the thread_struct.
3365 if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP))
3367 err = do_fp_load(op, ea, regs, cross_endian);
3370 #ifdef CONFIG_ALTIVEC
3372 if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC))
3374 err = do_vec_load(op->reg, ea, size, regs, cross_endian);
3379 unsigned long msrbit = MSR_VSX;
3382 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
3383 * when the target of the instruction is a vector register.
3385 if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC))
3387 if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit))
3389 err = do_vsx_load(op, ea, regs, cross_endian);
3394 if (!address_ok(regs, ea, size))
3397 for (i = 0; i < size; i += 4) {
3398 unsigned int v32 = 0;
3403 err = copy_mem_in((u8 *) &v32, ea, nb, regs);
3406 if (unlikely(cross_endian))
3407 v32 = byterev_4(v32);
3408 regs->gpr[rd] = v32;
3410 /* reg number wraps from 31 to 0 for lsw[ix] */
3411 rd = (rd + 1) & 0x1f;
3416 #ifdef __powerpc64__
3418 err = emulate_stq(regs, ea, op->reg, cross_endian);
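/*
 * The check below catches a kernel-mode stwu/stdu that updates r1
 * within STACK_INT_FRAME_SIZE of the current stack pointer.  Such a
 * store is deferred via handle_stack_update() (see the comment above
 * that function) rather than performed here.
 */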
3422 if ((op->type & UPDATE) && size == sizeof(long) &&
3423 op->reg == 1 && op->update_reg == 1 &&
3424 !(regs->msr & MSR_PR) &&
3425 ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) {
3426 err = handle_stack_update(ea, regs);
3429 if (unlikely(cross_endian))
3430 do_byterev(&op->val, size);
3431 err = write_mem(op->val, ea, size, regs);
3434 #ifdef CONFIG_PPC_FPU
3436 if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP))
3438 err = do_fp_store(op, ea, regs, cross_endian);
3441 #ifdef CONFIG_ALTIVEC
3443 if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC))
3445 err = do_vec_store(op->reg, ea, size, regs, cross_endian);
3450 unsigned long msrbit = MSR_VSX;
3453 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
3454 * when the target of the instruction is a vector register.
3456 if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC))
3458 if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit))
3460 err = do_vsx_store(op, ea, regs, cross_endian);
3465 if (!address_ok(regs, ea, size))
3468 for (i = 0; i < size; i += 4) {
3469 unsigned int v32 = regs->gpr[rd];
3474 if (unlikely(cross_endian))
3475 v32 = byterev_4(v32);
3476 err = copy_mem_out((u8 *) &v32, ea, nb, regs);
3480 /* reg number wraps from 31 to 0 for stsw[ix] */
3481 rd = (rd + 1) & 0x1f;
3492 if (op->type & UPDATE)
3493 regs->gpr[op->update_reg] = op->ea;
3497 NOKPROBE_SYMBOL(emulate_loadstore);
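/*
 * Illustrative usage sketch (an assumption, not code from this file):
 * a caller that has already analysed a load/store op completes it
 * here and maps the return codes documented above onto its own
 * handling, for example:
 *
 *	int err = emulate_loadstore(regs, &op);
 *
 *	if (err == -EACCES)
 *		return 0;	(leave it to the alignment handler)
 *	if (err == -EFAULT)
 *		return 0;	(regs->dar holds the faulting address)
 *	return err ? err : 1;
 */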
3500 * Emulate instructions that cause a transfer of control,
3501 * loads and stores, and a few other instructions.
3502 * Returns 1 if the step was emulated, 0 if not,
3503 * or -1 if the instruction is one that should not be stepped,
3504 * such as an rfid, or a mtmsrd that would clear MSR_RI.
3506 int emulate_step(struct pt_regs *regs, struct ppc_inst instr)
3508 struct instruction_op op;
3513 r = analyse_instr(&op, regs, instr);
3517 emulate_update_regs(regs, &op);
3522 type = GETTYPE(op.type);
3524 if (OP_IS_LOAD_STORE(type)) {
3525 err = emulate_loadstore(regs, &op);
3533 ea = truncate_if_32bit(regs->msr, op.ea);
3534 if (!address_ok(regs, ea, 8))
3536 switch (op.type & CACHEOP_MASK) {
3538 __cacheop_user_asmx(ea, err, "dcbst");
3541 __cacheop_user_asmx(ea, err, "dcbf");
3545 prefetchw((void *) ea);
3549 prefetch((void *) ea);
3552 __cacheop_user_asmx(ea, err, "icbi");
3555 err = emulate_dcbz(ea, regs);
3565 regs->gpr[op.reg] = regs->msr & MSR_MASK;
3569 val = regs->gpr[op.reg];
3570 if ((val & MSR_RI) == 0)
3571 /* can't step mtmsr[d] that would clear MSR_RI */
3573 /* here op.val is the mask of bits to change */
3574 regs_set_return_msr(regs, (regs->msr & ~op.val) | (val & op.val));
3578 case SYSCALL: /* sc */
3580 * N.B. this uses knowledge about how the syscall
3581 * entry code works. If that is changed, this will
3582 * need to be changed also.
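/*
 * With CONFIG_PPC_FAST_ENDIAN_SWITCH and a CPU that supports true
 * little-endian, a "syscall" with gpr[0] == 0x1ebe is the light-weight
 * endian-switch request, which is emulated below by simply toggling
 * MSR_LE instead of entering the kernel's syscall path.
 */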
3584 if (IS_ENABLED(CONFIG_PPC_FAST_ENDIAN_SWITCH) &&
3585 cpu_has_feature(CPU_FTR_REAL_LE) &&
3586 regs->gpr[0] == 0x1ebe) {
3587 regs_set_return_msr(regs, regs->msr ^ MSR_LE);
3590 regs->gpr[9] = regs->gpr[13];
3591 regs->gpr[10] = MSR_KERNEL;
3592 regs->gpr[11] = regs->nip + 4;
3593 regs->gpr[12] = regs->msr & MSR_MASK;
3594 regs->gpr[13] = (unsigned long) get_paca();
3595 regs_set_return_ip(regs, (unsigned long) &system_call_common);
3596 regs_set_return_msr(regs, MSR_KERNEL);
3599 #ifdef CONFIG_PPC_BOOK3S_64
3600 case SYSCALL_VECTORED_0: /* scv 0 */
3601 regs->gpr[9] = regs->gpr[13];
3602 regs->gpr[10] = MSR_KERNEL;
3603 regs->gpr[11] = regs->nip + 4;
3604 regs->gpr[12] = regs->msr & MSR_MASK;
3605 regs->gpr[13] = (unsigned long) get_paca();
3606 regs_set_return_ip(regs, (unsigned long) &system_call_vectored_emulate);
3607 regs_set_return_msr(regs, MSR_KERNEL);
3618 regs_set_return_ip(regs,
3619 truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op.type)));
3622 NOKPROBE_SYMBOL(emulate_step);
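/*
 * Illustrative caller sketch (an assumption, not part of this file):
 * a probe handler that prefers emulation over a hardware single step
 * can try emulate_step() first and interpret its result as documented
 * above:
 *
 *	ret = emulate_step(regs, instr);
 *	if (ret > 0)
 *		return;		(emulated; regs already point past instr)
 *	if (ret < 0)
 *		return;		(must not be stepped, e.g. rfid)
 *	(otherwise fall back to a real single step)
 */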