4 * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
11 #include <linux/kernel.h>
12 #include <linux/kprobes.h>
13 #include <linux/ptrace.h>
14 #include <linux/prefetch.h>
15 #include <asm/sstep.h>
16 #include <asm/processor.h>
17 #include <linux/uaccess.h>
18 #include <asm/cpu_has_feature.h>
19 #include <asm/cputable.h>
21 extern char system_call_common[];
24 /* Bits in SRR1 that are copied from MSR */
25 #define MSR_MASK 0xffffffff87c0ffffUL
27 #define MSR_MASK 0x87c0ffff
31 #define XER_SO 0x80000000U
32 #define XER_OV 0x40000000U
33 #define XER_CA 0x20000000U
34 #define XER_OV32 0x00080000U
35 #define XER_CA32 0x00040000U
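/*
 * Worked example (illustrative, not from the original source): for
 * "addic rt,ra,-1" with ra = 1 the 64-bit result is 0 and wraps past
 * the first operand, so add_with_carry() below sets XER_CA (and
 * XER_CA32 on ISA 3.0 CPUs); with ra = 0 the result is ~0UL and no
 * carry bit is set.
 */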
39 * Functions in ldstfp.S
41 extern void get_fpr(int rn, double *p);
42 extern void put_fpr(int rn, const double *p);
43 extern void get_vr(int rn, __vector128 *p);
44 extern void put_vr(int rn, __vector128 *p);
45 extern void load_vsrn(int vsr, const void *p);
46 extern void store_vsrn(int vsr, void *p);
47 extern void conv_sp_to_dp(const float *sp, double *dp);
48 extern void conv_dp_to_sp(const double *dp, float *sp);
55 extern int do_lq(unsigned long ea, unsigned long *regs);
56 extern int do_stq(unsigned long ea, unsigned long val0, unsigned long val1);
57 extern int do_lqarx(unsigned long ea, unsigned long *regs);
58 extern int do_stqcx(unsigned long ea, unsigned long val0, unsigned long val1,
62 #ifdef __LITTLE_ENDIAN__
71 * Emulate the truncation of 64-bit values in 32-bit mode.
73 static nokprobe_inline unsigned long truncate_if_32bit(unsigned long msr,
77 if ((msr & MSR_64BIT) == 0)
84 * Determine whether a conditional branch instruction would branch.
86 static nokprobe_inline int branch_taken(unsigned int instr,
87 const struct pt_regs *regs,
88 struct instruction_op *op)
90 unsigned int bo = (instr >> 21) & 0x1f;
94 /* decrement counter */
96 if (((bo >> 1) & 1) ^ (regs->ctr == 1))
99 if ((bo & 0x10) == 0) {
100 /* check bit from CR */
101 bi = (instr >> 16) & 0x1f;
102 if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))
108 static nokprobe_inline long address_ok(struct pt_regs *regs,
109 unsigned long ea, int nb)
111 if (!user_mode(regs))
113 if (__access_ok(ea, nb, USER_DS))
115 if (__access_ok(ea, 1, USER_DS))
116 /* Access overlaps the end of the user region */
117 regs->dar = USER_DS.seg;
124 * Calculate effective address for a D-form instruction
126 static nokprobe_inline unsigned long dform_ea(unsigned int instr,
127 const struct pt_regs *regs)
132 ra = (instr >> 16) & 0x1f;
133 ea = (signed short) instr; /* sign-extend */
142 * Calculate effective address for a DS-form instruction
144 static nokprobe_inline unsigned long dsform_ea(unsigned int instr,
145 const struct pt_regs *regs)
150 ra = (instr >> 16) & 0x1f;
151 ea = (signed short) (instr & ~3); /* sign-extend */
159 * Calculate effective address for a DQ-form instruction
161 static nokprobe_inline unsigned long dqform_ea(unsigned int instr,
162 const struct pt_regs *regs)
167 ra = (instr >> 16) & 0x1f;
168 ea = (signed short) (instr & ~0xf); /* sign-extend */
174 #endif /* __powerpc64__ */
177 * Calculate effective address for an X-form instruction
179 static nokprobe_inline unsigned long xform_ea(unsigned int instr,
180 const struct pt_regs *regs)
185 ra = (instr >> 16) & 0x1f;
186 rb = (instr >> 11) & 0x1f;
195 * Return the largest power of 2, not greater than sizeof(unsigned long),
196 * such that x is a multiple of it.
198 static nokprobe_inline unsigned long max_align(unsigned long x)
200 x |= sizeof(unsigned long);
201 return x & -x; /* isolates rightmost bit */
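/*
 * Worked example (illustrative): on a 64-bit kernel, max_align(0x1006)
 * is 2, because 0x1006 | 8 = 0x100e and its lowest set bit is 2; the
 * copy_mem_in()/copy_mem_out() loops below would therefore start with
 * 2-byte accesses at such an address.
 */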
204 static nokprobe_inline unsigned long byterev_2(unsigned long x)
206 return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
209 static nokprobe_inline unsigned long byterev_4(unsigned long x)
211 return ((x >> 24) & 0xff) | ((x >> 8) & 0xff00) |
212 ((x & 0xff00) << 8) | ((x & 0xff) << 24);
216 static nokprobe_inline unsigned long byterev_8(unsigned long x)
218 return (byterev_4(x) << 32) | byterev_4(x >> 32);
222 static nokprobe_inline void do_byte_reverse(void *ptr, int nb)
226 *(u16 *)ptr = byterev_2(*(u16 *)ptr);
229 *(u32 *)ptr = byterev_4(*(u32 *)ptr);
233 *(unsigned long *)ptr = byterev_8(*(unsigned long *)ptr);
236 unsigned long *up = (unsigned long *)ptr;
238 tmp = byterev_8(up[0]);
239 up[0] = byterev_8(up[1]);
249 static nokprobe_inline int read_mem_aligned(unsigned long *dest,
250 unsigned long ea, int nb,
251 struct pt_regs *regs)
258 err = __get_user(x, (unsigned char __user *) ea);
261 err = __get_user(x, (unsigned short __user *) ea);
264 err = __get_user(x, (unsigned int __user *) ea);
268 err = __get_user(x, (unsigned long __user *) ea);
280 * Copy from userspace to a buffer, using the largest possible
281 * aligned accesses, up to sizeof(long).
283 static nokprobe_inline int copy_mem_in(u8 *dest, unsigned long ea, int nb,
284 struct pt_regs *regs)
289 for (; nb > 0; nb -= c) {
295 err = __get_user(*dest, (unsigned char __user *) ea);
298 err = __get_user(*(u16 *)dest,
299 (unsigned short __user *) ea);
302 err = __get_user(*(u32 *)dest,
303 (unsigned int __user *) ea);
307 err = __get_user(*(unsigned long *)dest,
308 (unsigned long __user *) ea);
322 static nokprobe_inline int read_mem_unaligned(unsigned long *dest,
323 unsigned long ea, int nb,
324 struct pt_regs *regs)
328 u8 b[sizeof(unsigned long)];
334 i = IS_BE ? sizeof(unsigned long) - nb : 0;
335 err = copy_mem_in(&u.b[i], ea, nb, regs);
342 * Read memory at address ea for nb bytes, return 0 for success
343 * or -EFAULT if an error occurred. N.B. nb must be 1, 2, 4 or 8.
344 * If nb < sizeof(long), the result is right-justified on BE systems.
346 static int read_mem(unsigned long *dest, unsigned long ea, int nb,
347 struct pt_regs *regs)
349 if (!address_ok(regs, ea, nb))
351 if ((ea & (nb - 1)) == 0)
352 return read_mem_aligned(dest, ea, nb, regs);
353 return read_mem_unaligned(dest, ea, nb, regs);
355 NOKPROBE_SYMBOL(read_mem);
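/*
 * Example (illustrative): read_mem(&val, 0x1000, 4, regs) is aligned
 * and becomes a single __get_user() of a u32, whereas the same read at
 * ea = 0x1001 takes the read_mem_unaligned() path, which assembles the
 * value from smaller aligned accesses.
 */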
357 static nokprobe_inline int write_mem_aligned(unsigned long val,
358 unsigned long ea, int nb,
359 struct pt_regs *regs)
365 err = __put_user(val, (unsigned char __user *) ea);
368 err = __put_user(val, (unsigned short __user *) ea);
371 err = __put_user(val, (unsigned int __user *) ea);
375 err = __put_user(val, (unsigned long __user *) ea);
385 * Copy from a buffer to userspace, using the largest possible
386 * aligned accesses, up to sizeof(long).
388 static nokprobe_inline int copy_mem_out(u8 *dest, unsigned long ea, int nb,
389 struct pt_regs *regs)
394 for (; nb > 0; nb -= c) {
400 err = __put_user(*dest, (unsigned char __user *) ea);
403 err = __put_user(*(u16 *)dest,
404 (unsigned short __user *) ea);
407 err = __put_user(*(u32 *)dest,
408 (unsigned int __user *) ea);
412 err = __put_user(*(unsigned long *)dest,
413 (unsigned long __user *) ea);
427 static nokprobe_inline int write_mem_unaligned(unsigned long val,
428 unsigned long ea, int nb,
429 struct pt_regs *regs)
433 u8 b[sizeof(unsigned long)];
438 i = IS_BE ? sizeof(unsigned long) - nb : 0;
439 return copy_mem_out(&u.b[i], ea, nb, regs);
443 * Write memory at address ea for nb bytes, return 0 for success
444 * or -EFAULT if an error occurred. N.B. nb must be 1, 2, 4 or 8.
446 static int write_mem(unsigned long val, unsigned long ea, int nb,
447 struct pt_regs *regs)
449 if (!address_ok(regs, ea, nb))
451 if ((ea & (nb - 1)) == 0)
452 return write_mem_aligned(val, ea, nb, regs);
453 return write_mem_unaligned(val, ea, nb, regs);
455 NOKPROBE_SYMBOL(write_mem);
457 #ifdef CONFIG_PPC_FPU
459 * These access either the real FP register or the image in the
460 * thread_struct, depending on regs->msr & MSR_FP.
462 static int do_fp_load(struct instruction_op *op, unsigned long ea,
463 struct pt_regs *regs, bool cross_endian)
472 u8 b[2 * sizeof(double)];
475 nb = GETSIZE(op->type);
476 if (!address_ok(regs, ea, nb))
479 err = copy_mem_in(u.b, ea, nb, regs);
482 if (unlikely(cross_endian)) {
483 do_byte_reverse(u.b, min(nb, 8));
485 do_byte_reverse(&u.b[8], 8);
489 if (op->type & FPCONV)
490 conv_sp_to_dp(&u.f, &u.d[0]);
491 else if (op->type & SIGNEXT)
496 if (regs->msr & MSR_FP)
497 put_fpr(rn, &u.d[0]);
499 current->thread.TS_FPR(rn) = u.l[0];
503 if (regs->msr & MSR_FP)
504 put_fpr(rn, &u.d[1]);
506 current->thread.TS_FPR(rn) = u.l[1];
511 NOKPROBE_SYMBOL(do_fp_load);
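/*
 * Illustrative flow for lfs (LOAD_FP with FPCONV, 4 bytes): the word is
 * read with copy_mem_in(), byte-reversed if cross_endian, widened to
 * double precision by conv_sp_to_dp(), and then written either to the
 * live FPR (if regs->msr has MSR_FP) or to the saved FP state in
 * current->thread.
 */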
513 static int do_fp_store(struct instruction_op *op, unsigned long ea,
514 struct pt_regs *regs, bool cross_endian)
522 u8 b[2 * sizeof(double)];
525 nb = GETSIZE(op->type);
526 if (!address_ok(regs, ea, nb))
530 if (regs->msr & MSR_FP)
531 get_fpr(rn, &u.d[0]);
533 u.l[0] = current->thread.TS_FPR(rn);
535 if (op->type & FPCONV)
536 conv_dp_to_sp(&u.d[0], &u.f);
542 if (regs->msr & MSR_FP)
543 get_fpr(rn, &u.d[1]);
545 u.l[1] = current->thread.TS_FPR(rn);
548 if (unlikely(cross_endian)) {
549 do_byte_reverse(u.b, min(nb, 8));
551 do_byte_reverse(&u.b[8], 8);
553 return copy_mem_out(u.b, ea, nb, regs);
555 NOKPROBE_SYMBOL(do_fp_store);
558 #ifdef CONFIG_ALTIVEC
559 /* For Altivec/VMX, no need to worry about alignment */
560 static nokprobe_inline int do_vec_load(int rn, unsigned long ea,
561 int size, struct pt_regs *regs,
567 u8 b[sizeof(__vector128)];
570 if (!address_ok(regs, ea & ~0xfUL, 16))
572 /* align to multiple of size */
574 err = copy_mem_in(&u.b[ea & 0xf], ea, size, regs);
577 if (unlikely(cross_endian))
578 do_byte_reverse(&u.b[ea & 0xf], size);
580 if (regs->msr & MSR_VEC)
583 current->thread.vr_state.vr[rn] = u.v;
588 static nokprobe_inline int do_vec_store(int rn, unsigned long ea,
589 int size, struct pt_regs *regs,
594 u8 b[sizeof(__vector128)];
597 if (!address_ok(regs, ea & ~0xfUL, 16))
599 /* align to multiple of size */
603 if (regs->msr & MSR_VEC)
606 u.v = current->thread.vr_state.vr[rn];
608 if (unlikely(cross_endian))
609 do_byte_reverse(&u.b[ea & 0xf], size);
610 return copy_mem_out(&u.b[ea & 0xf], ea, size, regs);
612 #endif /* CONFIG_ALTIVEC */
615 static nokprobe_inline int emulate_lq(struct pt_regs *regs, unsigned long ea,
616 int reg, bool cross_endian)
620 if (!address_ok(regs, ea, 16))
622 /* if aligned, should be atomic */
623 if ((ea & 0xf) == 0) {
624 err = do_lq(ea, &regs->gpr[reg]);
626 err = read_mem(&regs->gpr[reg + IS_LE], ea, 8, regs);
628 err = read_mem(&regs->gpr[reg + IS_BE], ea + 8, 8, regs);
630 if (!err && unlikely(cross_endian))
631 do_byte_reverse(&regs->gpr[reg], 16);
635 static nokprobe_inline int emulate_stq(struct pt_regs *regs, unsigned long ea,
636 int reg, bool cross_endian)
639 unsigned long vals[2];
641 if (!address_ok(regs, ea, 16))
643 vals[0] = regs->gpr[reg];
644 vals[1] = regs->gpr[reg + 1];
645 if (unlikely(cross_endian))
646 do_byte_reverse(vals, 16);
648 /* if aligned, should be atomic */
650 return do_stq(ea, vals[0], vals[1]);
652 err = write_mem(vals[IS_LE], ea, 8, regs);
654 err = write_mem(vals[IS_BE], ea + 8, 8, regs);
657 #endif /* __powerpc64__ */
660 void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
661 const void *mem, bool rev)
665 const unsigned int *wp;
666 const unsigned short *hp;
667 const unsigned char *bp;
669 size = GETSIZE(op->type);
670 reg->d[0] = reg->d[1] = 0;
672 switch (op->element_size) {
674 /* whole vector; lxv[x] or lxvl[l] */
677 memcpy(reg, mem, size);
678 if (IS_LE && (op->vsx_flags & VSX_LDLEFT))
681 do_byte_reverse(reg, 16);
684 /* scalar loads, lxvd2x, lxvdsx */
685 read_size = (size >= 8) ? 8 : size;
686 i = IS_LE ? 8 : 8 - read_size;
687 memcpy(&reg->b[i], mem, read_size);
689 do_byte_reverse(&reg->b[i], 8);
691 if (op->type & SIGNEXT) {
692 /* size == 4 is the only case here */
693 reg->d[IS_LE] = (signed int) reg->d[IS_LE];
694 } else if (op->vsx_flags & VSX_FPCONV) {
696 conv_sp_to_dp(&reg->fp[1 + IS_LE],
702 unsigned long v = *(unsigned long *)(mem + 8);
703 reg->d[IS_BE] = !rev ? v : byterev_8(v);
704 } else if (op->vsx_flags & VSX_SPLAT)
705 reg->d[IS_BE] = reg->d[IS_LE];
711 for (j = 0; j < size / 4; ++j) {
712 i = IS_LE ? 3 - j : j;
713 reg->w[i] = !rev ? *wp++ : byterev_4(*wp++);
715 if (op->vsx_flags & VSX_SPLAT) {
716 u32 val = reg->w[IS_LE ? 3 : 0];
718 i = IS_LE ? 3 - j : j;
726 for (j = 0; j < size / 2; ++j) {
727 i = IS_LE ? 7 - j : j;
728 reg->h[i] = !rev ? *hp++ : byterev_2(*hp++);
734 for (j = 0; j < size; ++j) {
735 i = IS_LE ? 15 - j : j;
741 EXPORT_SYMBOL_GPL(emulate_vsx_load);
742 NOKPROBE_SYMBOL(emulate_vsx_load);
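/*
 * Example (illustrative): lxvw4x has element_size 4, so on a
 * little-endian kernel the j-th word from memory is placed in
 * reg->w[3 - j]; when rev is set (cross-endian access) each word is
 * additionally passed through byterev_4().
 */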
744 void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg,
747 int size, write_size;
754 size = GETSIZE(op->type);
756 switch (op->element_size) {
758 /* stxv, stxvx, stxvl, stxvll */
761 if (IS_LE && (op->vsx_flags & VSX_LDLEFT))
764 /* reverse 16 bytes */
765 buf.d[0] = byterev_8(reg->d[1]);
766 buf.d[1] = byterev_8(reg->d[0]);
769 memcpy(mem, reg, size);
772 /* scalar stores, stxvd2x */
773 write_size = (size >= 8) ? 8 : size;
774 i = IS_LE ? 8 : 8 - write_size;
775 if (size < 8 && op->vsx_flags & VSX_FPCONV) {
776 buf.d[0] = buf.d[1] = 0;
778 conv_dp_to_sp(&reg->dp[IS_LE], &buf.fp[1 + IS_LE]);
782 memcpy(mem, &reg->b[i], write_size);
784 memcpy(mem + 8, &reg->d[IS_BE], 8);
786 do_byte_reverse(mem, write_size);
788 do_byte_reverse(mem + 8, 8);
794 for (j = 0; j < size / 4; ++j) {
795 i = IS_LE ? 3 - j : j;
796 *wp++ = !rev ? reg->w[i] : byterev_4(reg->w[i]);
802 for (j = 0; j < size / 2; ++j) {
803 i = IS_LE ? 7 - j : j;
804 *hp++ = !rev ? reg->h[i] : byterev_2(reg->h[i]);
810 for (j = 0; j < size; ++j) {
811 i = IS_LE ? 15 - j : j;
817 EXPORT_SYMBOL_GPL(emulate_vsx_store);
818 NOKPROBE_SYMBOL(emulate_vsx_store);
820 static nokprobe_inline int do_vsx_load(struct instruction_op *op,
821 unsigned long ea, struct pt_regs *regs,
827 int size = GETSIZE(op->type);
829 if (!address_ok(regs, ea, size) || copy_mem_in(mem, ea, size, regs))
832 emulate_vsx_load(op, &buf, mem, cross_endian);
835 /* FP regs + extensions */
836 if (regs->msr & MSR_FP) {
837 load_vsrn(reg, &buf);
839 current->thread.fp_state.fpr[reg][0] = buf.d[0];
840 current->thread.fp_state.fpr[reg][1] = buf.d[1];
843 if (regs->msr & MSR_VEC)
844 load_vsrn(reg, &buf);
846 current->thread.vr_state.vr[reg - 32] = buf.v;
852 static nokprobe_inline int do_vsx_store(struct instruction_op *op,
853 unsigned long ea, struct pt_regs *regs,
859 int size = GETSIZE(op->type);
861 if (!address_ok(regs, ea, size))
866 /* FP regs + extensions */
867 if (regs->msr & MSR_FP) {
868 store_vsrn(reg, &buf);
870 buf.d[0] = current->thread.fp_state.fpr[reg][0];
871 buf.d[1] = current->thread.fp_state.fpr[reg][1];
874 if (regs->msr & MSR_VEC)
875 store_vsrn(reg, &buf);
877 buf.v = current->thread.vr_state.vr[reg - 32];
880 emulate_vsx_store(op, &buf, mem, cross_endian);
881 return copy_mem_out(mem, ea, size, regs);
883 #endif /* CONFIG_VSX */
885 int emulate_dcbz(unsigned long ea, struct pt_regs *regs)
888 unsigned long i, size;
891 size = ppc64_caches.l1d.block_size;
892 if (!(regs->msr & MSR_64BIT))
895 size = L1_CACHE_BYTES;
898 if (!address_ok(regs, ea, size))
900 for (i = 0; i < size; i += sizeof(long)) {
901 err = __put_user(0, (unsigned long __user *) (ea + i));
909 NOKPROBE_SYMBOL(emulate_dcbz);
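/*
 * Example (illustrative): with a 128-byte L1 data-cache block, dcbz on
 * ea = 0x3050 zeroes the whole block containing ea, i.e. 128 bytes
 * starting at 0x3000, one __put_user() of zero per sizeof(long).
 */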
911 #define __put_user_asmx(x, addr, err, op, cr) \
912 __asm__ __volatile__( \
914 ".machine power8\n" \
915 "1: " op " %2,0,%3\n" \
919 ".section .fixup,\"ax\"\n" \
924 : "=r" (err), "=r" (cr) \
925 : "r" (x), "r" (addr), "i" (-EFAULT), "0" (err))
927 #define __get_user_asmx(x, addr, err, op) \
928 __asm__ __volatile__( \
930 ".machine power8\n" \
931 "1: "op" %1,0,%2\n" \
934 ".section .fixup,\"ax\"\n" \
939 : "=r" (err), "=r" (x) \
940 : "r" (addr), "i" (-EFAULT), "0" (err))
942 #define __cacheop_user_asmx(addr, err, op) \
943 __asm__ __volatile__( \
946 ".section .fixup,\"ax\"\n" \
952 : "r" (addr), "i" (-EFAULT), "0" (err))
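/*
 * These helpers execute the real larx/stcx./cache-management
 * instruction on the user address; a fault at label 1 is routed
 * through the .fixup section, which sets err to -EFAULT instead of
 * taking the access fault.
 */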
954 static nokprobe_inline void set_cr0(const struct pt_regs *regs,
955 struct instruction_op *op)
960 op->ccval = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
962 if (!(regs->msr & MSR_64BIT))
966 op->ccval |= 0x80000000;
968 op->ccval |= 0x40000000;
970 op->ccval |= 0x20000000;
973 static nokprobe_inline void set_ca32(struct instruction_op *op, bool val)
975 if (cpu_has_feature(CPU_FTR_ARCH_300)) {
977 op->xerval |= XER_CA32;
979 op->xerval &= ~XER_CA32;
983 static nokprobe_inline void add_with_carry(const struct pt_regs *regs,
984 struct instruction_op *op, int rd,
985 unsigned long val1, unsigned long val2,
986 unsigned long carry_in)
988 unsigned long val = val1 + val2;
992 op->type = COMPUTE + SETREG + SETXER;
996 if (!(regs->msr & MSR_64BIT)) {
997 val = (unsigned int) val;
998 val1 = (unsigned int) val1;
1001 op->xerval = regs->xer;
1002 if (val < val1 || (carry_in && val == val1))
1003 op->xerval |= XER_CA;
1005 op->xerval &= ~XER_CA;
1007 set_ca32(op, (unsigned int)val < (unsigned int)val1 ||
1008 (carry_in && (unsigned int)val == (unsigned int)val1));
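/*
 * Worked example (illustrative): "subfic rt,ra,0" with ra = 5 calls
 * add_with_carry(regs, op, rd, ~5, 0, 1); the sum is -5, which is not
 * below ~5, so XER_CA ends up clear, matching the hardware rule that
 * CA is the complement of the borrow for subtract-from operations.
 */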
1011 static nokprobe_inline void do_cmp_signed(const struct pt_regs *regs,
1012 struct instruction_op *op,
1013 long v1, long v2, int crfld)
1015 unsigned int crval, shift;
1017 op->type = COMPUTE + SETCC;
1018 crval = (regs->xer >> 31) & 1; /* get SO bit */
1025 shift = (7 - crfld) * 4;
1026 op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift);
1029 static nokprobe_inline void do_cmp_unsigned(const struct pt_regs *regs,
1030 struct instruction_op *op,
1032 unsigned long v2, int crfld)
1034 unsigned int crval, shift;
1036 op->type = COMPUTE + SETCC;
1037 crval = (regs->xer >> 31) & 1; /* get SO bit */
1044 shift = (7 - crfld) * 4;
1045 op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift);
1048 static nokprobe_inline void do_cmpb(const struct pt_regs *regs,
1049 struct instruction_op *op,
1050 unsigned long v1, unsigned long v2)
1052 unsigned long long out_val, mask;
1056 for (i = 0; i < 8; i++) {
1057 mask = 0xffUL << (i * 8);
1058 if ((v1 & mask) == (v2 & mask))
1065 * The size parameter is used to adjust the equivalent popcnt instruction.
1066 * popcntb = 8, popcntw = 32, popcntd = 64
1068 static nokprobe_inline void do_popcnt(const struct pt_regs *regs,
1069 struct instruction_op *op,
1070 unsigned long v1, int size)
1072 unsigned long long out = v1;
1074 out -= (out >> 1) & 0x5555555555555555ULL;
1075 out = (0x3333333333333333ULL & out) +
1076 (0x3333333333333333ULL & (out >> 2));
1077 out = (out + (out >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
1079 if (size == 8) { /* popcntb */
1085 if (size == 32) { /* popcntw */
1086 op->val = out & 0x0000003f0000003fULL;
1090 out = (out + (out >> 32)) & 0x7f;
1091 op->val = out; /* popcntd */
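/*
 * Worked example (illustrative): v1 = 0x0103 has per-byte population
 * counts of 2 and 1, so popcntb yields 0x0102, while popcntw and
 * popcntd both yield 3 for this value.
 */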
1095 static nokprobe_inline void do_bpermd(const struct pt_regs *regs,
1096 struct instruction_op *op,
1097 unsigned long v1, unsigned long v2)
1099 unsigned char perm, idx;
1103 for (i = 0; i < 8; i++) {
1104 idx = (v1 >> (i * 8)) & 0xff;
1106 if (v2 & PPC_BIT(idx))
1111 #endif /* CONFIG_PPC64 */
1113 * The size parameter adjusts the equivalent prty instruction.
1114 * prtyw = 32, prtyd = 64
1116 static nokprobe_inline void do_prty(const struct pt_regs *regs,
1117 struct instruction_op *op,
1118 unsigned long v, int size)
1120 unsigned long long res = v ^ (v >> 8);
1123 if (size == 32) { /* prtyw */
1124 op->val = res & 0x0000000100000001ULL;
1129 op->val = res & 1; /* prtyd */
1132 static nokprobe_inline int trap_compare(long v1, long v2)
1142 if ((unsigned long)v1 < (unsigned long)v2)
1144 else if ((unsigned long)v1 > (unsigned long)v2)
1150 * Elements of 32-bit rotate and mask instructions.
1152 #define MASK32(mb, me) ((0xffffffffUL >> (mb)) + \
1153 ((signed long)-0x80000000L >> (me)) + ((me) >= (mb)))
1154 #ifdef __powerpc64__
1155 #define MASK64_L(mb) (~0UL >> (mb))
1156 #define MASK64_R(me) ((signed long)-0x8000000000000000L >> (me))
1157 #define MASK64(mb, me) (MASK64_L(mb) + MASK64_R(me) + ((me) >= (mb)))
1158 #define DATA32(x) (((x) & 0xffffffffUL) | (((x) & 0xffffffffUL) << 32))
1160 #define DATA32(x) (x)
1162 #define ROTATE(x, n) ((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x))
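/*
 * Worked example (illustrative): rlwinm with SH = 8, MB = 16, ME = 23
 * uses MASK32(16, 23) == 0x0000ff00, so the original low byte of the
 * source register lands in bits 15:8 of op->val and everything else is
 * cleared.
 */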
1165 * Decode an instruction, and return information about it in *op
1166 * without changing *regs.
1167 * Integer arithmetic and logical instructions, branches, and barrier
1168 * instructions can be emulated just using the information in *op.
1170 * Return value is 1 if the instruction can be emulated just by
1171 * updating *regs with the information in *op, -1 if we need the
1172 * GPRs but *regs doesn't contain the full register set, or 0 otherwise.
1175 int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
1178 unsigned int opcode, ra, rb, rd, spr, u;
1179 unsigned long int imm;
1180 unsigned long int val, val2;
1181 unsigned int mb, me, sh;
1186 opcode = instr >> 26;
1190 imm = (signed short)(instr & 0xfffc);
1191 if ((instr & 2) == 0)
1193 op->val = truncate_if_32bit(regs->msr, imm);
1196 if (branch_taken(instr, regs, op))
1197 op->type |= BRTAKEN;
1201 if ((instr & 0xfe2) == 2)
1208 op->type = BRANCH | BRTAKEN;
1209 imm = instr & 0x03fffffc;
1210 if (imm & 0x02000000)
1212 if ((instr & 2) == 0)
1214 op->val = truncate_if_32bit(regs->msr, imm);
1219 switch ((instr >> 1) & 0x3ff) {
1221 op->type = COMPUTE + SETCC;
1222 rd = 7 - ((instr >> 23) & 0x7);
1223 ra = 7 - ((instr >> 18) & 0x7);
1226 val = (regs->ccr >> ra) & 0xf;
1227 op->ccval = (regs->ccr & ~(0xfUL << rd)) | (val << rd);
1231 case 528: /* bcctr */
1233 imm = (instr & 0x400)? regs->ctr: regs->link;
1234 op->val = truncate_if_32bit(regs->msr, imm);
1237 if (branch_taken(instr, regs, op))
1238 op->type |= BRTAKEN;
1241 case 18: /* rfid, scary */
1242 if (regs->msr & MSR_PR)
1247 case 150: /* isync */
1248 op->type = BARRIER | BARRIER_ISYNC;
1251 case 33: /* crnor */
1252 case 129: /* crandc */
1253 case 193: /* crxor */
1254 case 225: /* crnand */
1255 case 257: /* crand */
1256 case 289: /* creqv */
1257 case 417: /* crorc */
1258 case 449: /* cror */
1259 op->type = COMPUTE + SETCC;
1260 ra = (instr >> 16) & 0x1f;
1261 rb = (instr >> 11) & 0x1f;
1262 rd = (instr >> 21) & 0x1f;
1263 ra = (regs->ccr >> (31 - ra)) & 1;
1264 rb = (regs->ccr >> (31 - rb)) & 1;
1265 val = (instr >> (6 + ra * 2 + rb)) & 1;
1266 op->ccval = (regs->ccr & ~(1UL << (31 - rd))) |
1272 switch ((instr >> 1) & 0x3ff) {
1273 case 598: /* sync */
1274 op->type = BARRIER + BARRIER_SYNC;
1275 #ifdef __powerpc64__
1276 switch ((instr >> 21) & 3) {
1277 case 1: /* lwsync */
1278 op->type = BARRIER + BARRIER_LWSYNC;
1280 case 2: /* ptesync */
1281 op->type = BARRIER + BARRIER_PTESYNC;
1287 case 854: /* eieio */
1288 op->type = BARRIER + BARRIER_EIEIO;
1294 /* Following cases refer to regs->gpr[], so we need all regs */
1295 if (!FULL_REGS(regs))
1298 rd = (instr >> 21) & 0x1f;
1299 ra = (instr >> 16) & 0x1f;
1300 rb = (instr >> 11) & 0x1f;
1303 #ifdef __powerpc64__
1305 if (rd & trap_compare(regs->gpr[ra], (short) instr))
1310 if (rd & trap_compare((int)regs->gpr[ra], (short) instr))
1315 op->val = regs->gpr[ra] * (short) instr;
1318 case 8: /* subfic */
1319 imm = (short) instr;
1320 add_with_carry(regs, op, rd, ~regs->gpr[ra], imm, 1);
1323 case 10: /* cmpli */
1324 imm = (unsigned short) instr;
1325 val = regs->gpr[ra];
1326 #ifdef __powerpc64__
1328 val = (unsigned int) val;
1330 do_cmp_unsigned(regs, op, val, imm, rd >> 2);
1334 imm = (short) instr;
1335 val = regs->gpr[ra];
1336 #ifdef __powerpc64__
1340 do_cmp_signed(regs, op, val, imm, rd >> 2);
1343 case 12: /* addic */
1344 imm = (short) instr;
1345 add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
1348 case 13: /* addic. */
1349 imm = (short) instr;
1350 add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
1355 imm = (short) instr;
1357 imm += regs->gpr[ra];
1361 case 15: /* addis */
1362 imm = ((short) instr) << 16;
1364 imm += regs->gpr[ra];
1369 if (((instr >> 1) & 0x1f) == 2) {
1371 imm = (short) (instr & 0xffc1); /* d0 + d2 fields */
1372 imm |= (instr >> 15) & 0x3e; /* d1 field */
1373 op->val = regs->nip + (imm << 16) + 4;
1379 case 20: /* rlwimi */
1380 mb = (instr >> 6) & 0x1f;
1381 me = (instr >> 1) & 0x1f;
1382 val = DATA32(regs->gpr[rd]);
1383 imm = MASK32(mb, me);
1384 op->val = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm);
1387 case 21: /* rlwinm */
1388 mb = (instr >> 6) & 0x1f;
1389 me = (instr >> 1) & 0x1f;
1390 val = DATA32(regs->gpr[rd]);
1391 op->val = ROTATE(val, rb) & MASK32(mb, me);
1394 case 23: /* rlwnm */
1395 mb = (instr >> 6) & 0x1f;
1396 me = (instr >> 1) & 0x1f;
1397 rb = regs->gpr[rb] & 0x1f;
1398 val = DATA32(regs->gpr[rd]);
1399 op->val = ROTATE(val, rb) & MASK32(mb, me);
1403 op->val = regs->gpr[rd] | (unsigned short) instr;
1404 goto logical_done_nocc;
1407 imm = (unsigned short) instr;
1408 op->val = regs->gpr[rd] | (imm << 16);
1409 goto logical_done_nocc;
1412 op->val = regs->gpr[rd] ^ (unsigned short) instr;
1413 goto logical_done_nocc;
1415 case 27: /* xoris */
1416 imm = (unsigned short) instr;
1417 op->val = regs->gpr[rd] ^ (imm << 16);
1418 goto logical_done_nocc;
1420 case 28: /* andi. */
1421 op->val = regs->gpr[rd] & (unsigned short) instr;
1423 goto logical_done_nocc;
1425 case 29: /* andis. */
1426 imm = (unsigned short) instr;
1427 op->val = regs->gpr[rd] & (imm << 16);
1429 goto logical_done_nocc;
1431 #ifdef __powerpc64__
1433 mb = ((instr >> 6) & 0x1f) | (instr & 0x20);
1434 val = regs->gpr[rd];
1435 if ((instr & 0x10) == 0) {
1436 sh = rb | ((instr & 2) << 4);
1437 val = ROTATE(val, sh);
1438 switch ((instr >> 2) & 3) {
1439 case 0: /* rldicl */
1440 val &= MASK64_L(mb);
1442 case 1: /* rldicr */
1443 val &= MASK64_R(mb);
1446 val &= MASK64(mb, 63 - sh);
1448 case 3: /* rldimi */
1449 imm = MASK64(mb, 63 - sh);
1450 val = (regs->gpr[ra] & ~imm) |
1456 sh = regs->gpr[rb] & 0x3f;
1457 val = ROTATE(val, sh);
1458 switch ((instr >> 1) & 7) {
1460 op->val = val & MASK64_L(mb);
1463 op->val = val & MASK64_R(mb);
1468 op->type = UNKNOWN; /* illegal instruction */
1472 /* isel occupies 32 minor opcodes */
1473 if (((instr >> 1) & 0x1f) == 15) {
1474 mb = (instr >> 6) & 0x1f; /* bc field */
1475 val = (regs->ccr >> (31 - mb)) & 1;
1476 val2 = (ra) ? regs->gpr[ra] : 0;
1478 op->val = (val) ? val2 : regs->gpr[rb];
1482 switch ((instr >> 1) & 0x3ff) {
1485 (rd & trap_compare((int)regs->gpr[ra],
1486 (int)regs->gpr[rb])))
1489 #ifdef __powerpc64__
1491 if (rd & trap_compare(regs->gpr[ra], regs->gpr[rb]))
1495 case 83: /* mfmsr */
1496 if (regs->msr & MSR_PR)
1501 case 146: /* mtmsr */
1502 if (regs->msr & MSR_PR)
1506 op->val = 0xffffffff & ~(MSR_ME | MSR_LE);
1509 case 178: /* mtmsrd */
1510 if (regs->msr & MSR_PR)
1514 /* only MSR_EE and MSR_RI get changed if bit 15 set */
1515 /* mtmsrd doesn't change MSR_HV, MSR_ME or MSR_LE */
1516 imm = (instr & 0x10000)? 0x8002: 0xefffffffffffeffeUL;
1523 if ((instr >> 20) & 1) {
1525 for (sh = 0; sh < 8; ++sh) {
1526 if (instr & (0x80000 >> sh))
1531 op->val = regs->ccr & imm;
1534 case 144: /* mtcrf */
1535 op->type = COMPUTE + SETCC;
1537 val = regs->gpr[rd];
1538 op->ccval = regs->ccr;
1539 for (sh = 0; sh < 8; ++sh) {
1540 if (instr & (0x80000 >> sh))
1541 op->ccval = (op->ccval & ~imm) |
1547 case 339: /* mfspr */
1548 spr = ((instr >> 16) & 0x1f) | ((instr >> 6) & 0x3e0);
1552 if (spr == SPRN_XER || spr == SPRN_LR ||
1557 case 467: /* mtspr */
1558 spr = ((instr >> 16) & 0x1f) | ((instr >> 6) & 0x3e0);
1560 op->val = regs->gpr[rd];
1562 if (spr == SPRN_XER || spr == SPRN_LR ||
1568 * Compare instructions
1571 val = regs->gpr[ra];
1572 val2 = regs->gpr[rb];
1573 #ifdef __powerpc64__
1574 if ((rd & 1) == 0) {
1575 /* word (32-bit) compare */
1580 do_cmp_signed(regs, op, val, val2, rd >> 2);
1584 val = regs->gpr[ra];
1585 val2 = regs->gpr[rb];
1586 #ifdef __powerpc64__
1587 if ((rd & 1) == 0) {
1588 /* word (32-bit) compare */
1589 val = (unsigned int) val;
1590 val2 = (unsigned int) val2;
1593 do_cmp_unsigned(regs, op, val, val2, rd >> 2);
1596 case 508: /* cmpb */
1597 do_cmpb(regs, op, regs->gpr[rd], regs->gpr[rb]);
1598 goto logical_done_nocc;
1601 * Arithmetic instructions
1604 add_with_carry(regs, op, rd, ~regs->gpr[ra],
1607 #ifdef __powerpc64__
1608 case 9: /* mulhdu */
1609 asm("mulhdu %0,%1,%2" : "=r" (op->val) :
1610 "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1614 add_with_carry(regs, op, rd, regs->gpr[ra],
1618 case 11: /* mulhwu */
1619 asm("mulhwu %0,%1,%2" : "=r" (op->val) :
1620 "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1624 op->val = regs->gpr[rb] - regs->gpr[ra];
1626 #ifdef __powerpc64__
1627 case 73: /* mulhd */
1628 asm("mulhd %0,%1,%2" : "=r" (op->val) :
1629 "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1632 case 75: /* mulhw */
1633 asm("mulhw %0,%1,%2" : "=r" (op->val) :
1634 "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1638 op->val = -regs->gpr[ra];
1641 case 136: /* subfe */
1642 add_with_carry(regs, op, rd, ~regs->gpr[ra],
1643 regs->gpr[rb], regs->xer & XER_CA);
1646 case 138: /* adde */
1647 add_with_carry(regs, op, rd, regs->gpr[ra],
1648 regs->gpr[rb], regs->xer & XER_CA);
1651 case 200: /* subfze */
1652 add_with_carry(regs, op, rd, ~regs->gpr[ra], 0L,
1653 regs->xer & XER_CA);
1656 case 202: /* addze */
1657 add_with_carry(regs, op, rd, regs->gpr[ra], 0L,
1658 regs->xer & XER_CA);
1661 case 232: /* subfme */
1662 add_with_carry(regs, op, rd, ~regs->gpr[ra], -1L,
1663 regs->xer & XER_CA);
1665 #ifdef __powerpc64__
1666 case 233: /* mulld */
1667 op->val = regs->gpr[ra] * regs->gpr[rb];
1670 case 234: /* addme */
1671 add_with_carry(regs, op, rd, regs->gpr[ra], -1L,
1672 regs->xer & XER_CA);
1675 case 235: /* mullw */
1676 op->val = (long)(int) regs->gpr[ra] *
1677 (int) regs->gpr[rb];
1682 op->val = regs->gpr[ra] + regs->gpr[rb];
1684 #ifdef __powerpc64__
1685 case 457: /* divdu */
1686 op->val = regs->gpr[ra] / regs->gpr[rb];
1689 case 459: /* divwu */
1690 op->val = (unsigned int) regs->gpr[ra] /
1691 (unsigned int) regs->gpr[rb];
1693 #ifdef __powerpc64__
1694 case 489: /* divd */
1695 op->val = (long int) regs->gpr[ra] /
1696 (long int) regs->gpr[rb];
1699 case 491: /* divw */
1700 op->val = (int) regs->gpr[ra] /
1701 (int) regs->gpr[rb];
1706 * Logical instructions
1708 case 26: /* cntlzw */
1709 val = (unsigned int) regs->gpr[rd];
1710 op->val = ( val ? __builtin_clz(val) : 32 );
1712 #ifdef __powerpc64__
1713 case 58: /* cntlzd */
1714 val = regs->gpr[rd];
1715 op->val = ( val ? __builtin_clzl(val) : 64 );
1719 op->val = regs->gpr[rd] & regs->gpr[rb];
1723 op->val = regs->gpr[rd] & ~regs->gpr[rb];
1726 case 122: /* popcntb */
1727 do_popcnt(regs, op, regs->gpr[rd], 8);
1728 goto logical_done_nocc;
1731 op->val = ~(regs->gpr[rd] | regs->gpr[rb]);
1734 case 154: /* prtyw */
1735 do_prty(regs, op, regs->gpr[rd], 32);
1736 goto logical_done_nocc;
1738 case 186: /* prtyd */
1739 do_prty(regs, op, regs->gpr[rd], 64);
1740 goto logical_done_nocc;
1742 case 252: /* bpermd */
1743 do_bpermd(regs, op, regs->gpr[rd], regs->gpr[rb]);
1744 goto logical_done_nocc;
1747 op->val = ~(regs->gpr[rd] ^ regs->gpr[rb]);
1751 op->val = regs->gpr[rd] ^ regs->gpr[rb];
1754 case 378: /* popcntw */
1755 do_popcnt(regs, op, regs->gpr[rd], 32);
1756 goto logical_done_nocc;
1759 op->val = regs->gpr[rd] | ~regs->gpr[rb];
1763 op->val = regs->gpr[rd] | regs->gpr[rb];
1766 case 476: /* nand */
1767 op->val = ~(regs->gpr[rd] & regs->gpr[rb]);
1770 case 506: /* popcntd */
1771 do_popcnt(regs, op, regs->gpr[rd], 64);
1772 goto logical_done_nocc;
1774 case 922: /* extsh */
1775 op->val = (signed short) regs->gpr[rd];
1778 case 954: /* extsb */
1779 op->val = (signed char) regs->gpr[rd];
1781 #ifdef __powerpc64__
1782 case 986: /* extsw */
1783 op->val = (signed int) regs->gpr[rd];
1788 * Shift instructions
1791 sh = regs->gpr[rb] & 0x3f;
1793 op->val = (regs->gpr[rd] << sh) & 0xffffffffUL;
1799 sh = regs->gpr[rb] & 0x3f;
1801 op->val = (regs->gpr[rd] & 0xffffffffUL) >> sh;
1806 case 792: /* sraw */
1807 op->type = COMPUTE + SETREG + SETXER;
1808 sh = regs->gpr[rb] & 0x3f;
1809 ival = (signed int) regs->gpr[rd];
1810 op->val = ival >> (sh < 32 ? sh : 31);
1811 op->xerval = regs->xer;
1812 if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0))
1813 op->xerval |= XER_CA;
1815 op->xerval &= ~XER_CA;
1816 set_ca32(op, op->xerval & XER_CA);
1819 case 824: /* srawi */
1820 op->type = COMPUTE + SETREG + SETXER;
1822 ival = (signed int) regs->gpr[rd];
1823 op->val = ival >> sh;
1824 op->xerval = regs->xer;
1825 if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
1826 op->xerval |= XER_CA;
1828 op->xerval &= ~XER_CA;
1829 set_ca32(op, op->xerval & XER_CA);
1832 #ifdef __powerpc64__
1834 sh = regs->gpr[rb] & 0x7f;
1836 op->val = regs->gpr[rd] << sh;
1842 sh = regs->gpr[rb] & 0x7f;
1844 op->val = regs->gpr[rd] >> sh;
1849 case 794: /* srad */
1850 op->type = COMPUTE + SETREG + SETXER;
1851 sh = regs->gpr[rb] & 0x7f;
1852 ival = (signed long int) regs->gpr[rd];
1853 op->val = ival >> (sh < 64 ? sh : 63);
1854 op->xerval = regs->xer;
1855 if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0))
1856 op->xerval |= XER_CA;
1858 op->xerval &= ~XER_CA;
1859 set_ca32(op, op->xerval & XER_CA);
1862 case 826: /* sradi with sh_5 = 0 */
1863 case 827: /* sradi with sh_5 = 1 */
1864 op->type = COMPUTE + SETREG + SETXER;
1865 sh = rb | ((instr & 2) << 4);
1866 ival = (signed long int) regs->gpr[rd];
1867 op->val = ival >> sh;
1868 op->xerval = regs->xer;
1869 if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
1870 op->xerval |= XER_CA;
1872 op->xerval &= ~XER_CA;
1873 set_ca32(op, op->xerval & XER_CA);
1875 #endif /* __powerpc64__ */
1878 * Cache instructions
1880 case 54: /* dcbst */
1881 op->type = MKOP(CACHEOP, DCBST, 0);
1882 op->ea = xform_ea(instr, regs);
1886 op->type = MKOP(CACHEOP, DCBF, 0);
1887 op->ea = xform_ea(instr, regs);
1890 case 246: /* dcbtst */
1891 op->type = MKOP(CACHEOP, DCBTST, 0);
1892 op->ea = xform_ea(instr, regs);
1896 case 278: /* dcbt */
1897 op->type = MKOP(CACHEOP, DCBT, 0);
1898 op->ea = xform_ea(instr, regs);
1902 case 982: /* icbi */
1903 op->type = MKOP(CACHEOP, ICBI, 0);
1904 op->ea = xform_ea(instr, regs);
1907 case 1014: /* dcbz */
1908 op->type = MKOP(CACHEOP, DCBZ, 0);
1909 op->ea = xform_ea(instr, regs);
1919 op->update_reg = ra;
1921 op->val = regs->gpr[rd];
1922 u = (instr >> 20) & UPDATE;
1928 op->ea = xform_ea(instr, regs);
1929 switch ((instr >> 1) & 0x3ff) {
1930 case 20: /* lwarx */
1931 op->type = MKOP(LARX, 0, 4);
1934 case 150: /* stwcx. */
1935 op->type = MKOP(STCX, 0, 4);
1938 #ifdef __powerpc64__
1939 case 84: /* ldarx */
1940 op->type = MKOP(LARX, 0, 8);
1943 case 214: /* stdcx. */
1944 op->type = MKOP(STCX, 0, 8);
1947 case 52: /* lbarx */
1948 op->type = MKOP(LARX, 0, 1);
1951 case 694: /* stbcx. */
1952 op->type = MKOP(STCX, 0, 1);
1955 case 116: /* lharx */
1956 op->type = MKOP(LARX, 0, 2);
1959 case 726: /* sthcx. */
1960 op->type = MKOP(STCX, 0, 2);
1963 case 276: /* lqarx */
1964 if (!((rd & 1) || rd == ra || rd == rb))
1965 op->type = MKOP(LARX, 0, 16);
1968 case 182: /* stqcx. */
1970 op->type = MKOP(STCX, 0, 16);
1975 case 55: /* lwzux */
1976 op->type = MKOP(LOAD, u, 4);
1980 case 119: /* lbzux */
1981 op->type = MKOP(LOAD, u, 1);
1984 #ifdef CONFIG_ALTIVEC
1986 * Note: for the load/store vector element instructions,
1987 * bits of the EA say which field of the VMX register to use.
1990 op->type = MKOP(LOAD_VMX, 0, 1);
1991 op->element_size = 1;
1994 case 39: /* lvehx */
1995 op->type = MKOP(LOAD_VMX, 0, 2);
1996 op->element_size = 2;
1999 case 71: /* lvewx */
2000 op->type = MKOP(LOAD_VMX, 0, 4);
2001 op->element_size = 4;
2005 case 359: /* lvxl */
2006 op->type = MKOP(LOAD_VMX, 0, 16);
2007 op->element_size = 16;
2010 case 135: /* stvebx */
2011 op->type = MKOP(STORE_VMX, 0, 1);
2012 op->element_size = 1;
2015 case 167: /* stvehx */
2016 op->type = MKOP(STORE_VMX, 0, 2);
2017 op->element_size = 2;
2020 case 199: /* stvewx */
2021 op->type = MKOP(STORE_VMX, 0, 4);
2022 op->element_size = 4;
2025 case 231: /* stvx */
2026 case 487: /* stvxl */
2027 op->type = MKOP(STORE_VMX, 0, 16);
2029 #endif /* CONFIG_ALTIVEC */
2031 #ifdef __powerpc64__
2034 op->type = MKOP(LOAD, u, 8);
2037 case 149: /* stdx */
2038 case 181: /* stdux */
2039 op->type = MKOP(STORE, u, 8);
2043 case 151: /* stwx */
2044 case 183: /* stwux */
2045 op->type = MKOP(STORE, u, 4);
2048 case 215: /* stbx */
2049 case 247: /* stbux */
2050 op->type = MKOP(STORE, u, 1);
2053 case 279: /* lhzx */
2054 case 311: /* lhzux */
2055 op->type = MKOP(LOAD, u, 2);
2058 #ifdef __powerpc64__
2059 case 341: /* lwax */
2060 case 373: /* lwaux */
2061 op->type = MKOP(LOAD, SIGNEXT | u, 4);
2065 case 343: /* lhax */
2066 case 375: /* lhaux */
2067 op->type = MKOP(LOAD, SIGNEXT | u, 2);
2070 case 407: /* sthx */
2071 case 439: /* sthux */
2072 op->type = MKOP(STORE, u, 2);
2075 #ifdef __powerpc64__
2076 case 532: /* ldbrx */
2077 op->type = MKOP(LOAD, BYTEREV, 8);
2081 case 533: /* lswx */
2082 op->type = MKOP(LOAD_MULTI, 0, regs->xer & 0x7f);
2085 case 534: /* lwbrx */
2086 op->type = MKOP(LOAD, BYTEREV, 4);
2089 case 597: /* lswi */
2091 rb = 32; /* # bytes to load */
2092 op->type = MKOP(LOAD_MULTI, 0, rb);
2093 op->ea = ra ? regs->gpr[ra] : 0;
2096 #ifdef CONFIG_PPC_FPU
2097 case 535: /* lfsx */
2098 case 567: /* lfsux */
2099 op->type = MKOP(LOAD_FP, u | FPCONV, 4);
2102 case 599: /* lfdx */
2103 case 631: /* lfdux */
2104 op->type = MKOP(LOAD_FP, u, 8);
2107 case 663: /* stfsx */
2108 case 695: /* stfsux */
2109 op->type = MKOP(STORE_FP, u | FPCONV, 4);
2112 case 727: /* stfdx */
2113 case 759: /* stfdux */
2114 op->type = MKOP(STORE_FP, u, 8);
2117 #ifdef __powerpc64__
2118 case 791: /* lfdpx */
2119 op->type = MKOP(LOAD_FP, 0, 16);
2122 case 855: /* lfiwax */
2123 op->type = MKOP(LOAD_FP, SIGNEXT, 4);
2126 case 887: /* lfiwzx */
2127 op->type = MKOP(LOAD_FP, 0, 4);
2130 case 919: /* stfdpx */
2131 op->type = MKOP(STORE_FP, 0, 16);
2134 case 983: /* stfiwx */
2135 op->type = MKOP(STORE_FP, 0, 4);
2137 #endif /* __powerpc64__ */
2138 #endif /* CONFIG_PPC_FPU */
2140 #ifdef __powerpc64__
2141 case 660: /* stdbrx */
2142 op->type = MKOP(STORE, BYTEREV, 8);
2143 op->val = byterev_8(regs->gpr[rd]);
2147 case 661: /* stswx */
2148 op->type = MKOP(STORE_MULTI, 0, regs->xer & 0x7f);
2151 case 662: /* stwbrx */
2152 op->type = MKOP(STORE, BYTEREV, 4);
2153 op->val = byterev_4(regs->gpr[rd]);
2156 case 725: /* stswi */
2158 rb = 32; /* # bytes to store */
2159 op->type = MKOP(STORE_MULTI, 0, rb);
2160 op->ea = ra ? regs->gpr[ra] : 0;
2163 case 790: /* lhbrx */
2164 op->type = MKOP(LOAD, BYTEREV, 2);
2167 case 918: /* sthbrx */
2168 op->type = MKOP(STORE, BYTEREV, 2);
2169 op->val = byterev_2(regs->gpr[rd]);
2173 case 12: /* lxsiwzx */
2174 op->reg = rd | ((instr & 1) << 5);
2175 op->type = MKOP(LOAD_VSX, 0, 4);
2176 op->element_size = 8;
2179 case 76: /* lxsiwax */
2180 op->reg = rd | ((instr & 1) << 5);
2181 op->type = MKOP(LOAD_VSX, SIGNEXT, 4);
2182 op->element_size = 8;
2185 case 140: /* stxsiwx */
2186 op->reg = rd | ((instr & 1) << 5);
2187 op->type = MKOP(STORE_VSX, 0, 4);
2188 op->element_size = 8;
2191 case 268: /* lxvx */
2192 op->reg = rd | ((instr & 1) << 5);
2193 op->type = MKOP(LOAD_VSX, 0, 16);
2194 op->element_size = 16;
2195 op->vsx_flags = VSX_CHECK_VEC;
2198 case 269: /* lxvl */
2199 case 301: { /* lxvll */
2201 op->reg = rd | ((instr & 1) << 5);
2202 op->ea = ra ? regs->gpr[ra] : 0;
2203 nb = regs->gpr[rb] & 0xff;
2206 op->type = MKOP(LOAD_VSX, 0, nb);
2207 op->element_size = 16;
2208 op->vsx_flags = ((instr & 0x20) ? VSX_LDLEFT : 0) |
2212 case 332: /* lxvdsx */
2213 op->reg = rd | ((instr & 1) << 5);
2214 op->type = MKOP(LOAD_VSX, 0, 8);
2215 op->element_size = 8;
2216 op->vsx_flags = VSX_SPLAT;
2219 case 364: /* lxvwsx */
2220 op->reg = rd | ((instr & 1) << 5);
2221 op->type = MKOP(LOAD_VSX, 0, 4);
2222 op->element_size = 4;
2223 op->vsx_flags = VSX_SPLAT | VSX_CHECK_VEC;
2226 case 396: /* stxvx */
2227 op->reg = rd | ((instr & 1) << 5);
2228 op->type = MKOP(STORE_VSX, 0, 16);
2229 op->element_size = 16;
2230 op->vsx_flags = VSX_CHECK_VEC;
2233 case 397: /* stxvl */
2234 case 429: { /* stxvll */
2236 op->reg = rd | ((instr & 1) << 5);
2237 op->ea = ra ? regs->gpr[ra] : 0;
2238 nb = regs->gpr[rb] & 0xff;
2241 op->type = MKOP(STORE_VSX, 0, nb);
2242 op->element_size = 16;
2243 op->vsx_flags = ((instr & 0x20) ? VSX_LDLEFT : 0) |
2247 case 524: /* lxsspx */
2248 op->reg = rd | ((instr & 1) << 5);
2249 op->type = MKOP(LOAD_VSX, 0, 4);
2250 op->element_size = 8;
2251 op->vsx_flags = VSX_FPCONV;
2254 case 588: /* lxsdx */
2255 op->reg = rd | ((instr & 1) << 5);
2256 op->type = MKOP(LOAD_VSX, 0, 8);
2257 op->element_size = 8;
2260 case 652: /* stxsspx */
2261 op->reg = rd | ((instr & 1) << 5);
2262 op->type = MKOP(STORE_VSX, 0, 4);
2263 op->element_size = 8;
2264 op->vsx_flags = VSX_FPCONV;
2267 case 716: /* stxsdx */
2268 op->reg = rd | ((instr & 1) << 5);
2269 op->type = MKOP(STORE_VSX, 0, 8);
2270 op->element_size = 8;
2273 case 780: /* lxvw4x */
2274 op->reg = rd | ((instr & 1) << 5);
2275 op->type = MKOP(LOAD_VSX, 0, 16);
2276 op->element_size = 4;
2279 case 781: /* lxsibzx */
2280 op->reg = rd | ((instr & 1) << 5);
2281 op->type = MKOP(LOAD_VSX, 0, 1);
2282 op->element_size = 8;
2283 op->vsx_flags = VSX_CHECK_VEC;
2286 case 812: /* lxvh8x */
2287 op->reg = rd | ((instr & 1) << 5);
2288 op->type = MKOP(LOAD_VSX, 0, 16);
2289 op->element_size = 2;
2290 op->vsx_flags = VSX_CHECK_VEC;
2293 case 813: /* lxsihzx */
2294 op->reg = rd | ((instr & 1) << 5);
2295 op->type = MKOP(LOAD_VSX, 0, 2);
2296 op->element_size = 8;
2297 op->vsx_flags = VSX_CHECK_VEC;
2300 case 844: /* lxvd2x */
2301 op->reg = rd | ((instr & 1) << 5);
2302 op->type = MKOP(LOAD_VSX, 0, 16);
2303 op->element_size = 8;
2306 case 876: /* lxvb16x */
2307 op->reg = rd | ((instr & 1) << 5);
2308 op->type = MKOP(LOAD_VSX, 0, 16);
2309 op->element_size = 1;
2310 op->vsx_flags = VSX_CHECK_VEC;
2313 case 908: /* stxvw4x */
2314 op->reg = rd | ((instr & 1) << 5);
2315 op->type = MKOP(STORE_VSX, 0, 16);
2316 op->element_size = 4;
2319 case 909: /* stxsibx */
2320 op->reg = rd | ((instr & 1) << 5);
2321 op->type = MKOP(STORE_VSX, 0, 1);
2322 op->element_size = 8;
2323 op->vsx_flags = VSX_CHECK_VEC;
2326 case 940: /* stxvh8x */
2327 op->reg = rd | ((instr & 1) << 5);
2328 op->type = MKOP(STORE_VSX, 0, 16);
2329 op->element_size = 2;
2330 op->vsx_flags = VSX_CHECK_VEC;
2333 case 941: /* stxsihx */
2334 op->reg = rd | ((instr & 1) << 5);
2335 op->type = MKOP(STORE_VSX, 0, 2);
2336 op->element_size = 8;
2337 op->vsx_flags = VSX_CHECK_VEC;
2340 case 972: /* stxvd2x */
2341 op->reg = rd | ((instr & 1) << 5);
2342 op->type = MKOP(STORE_VSX, 0, 16);
2343 op->element_size = 8;
2346 case 1004: /* stxvb16x */
2347 op->reg = rd | ((instr & 1) << 5);
2348 op->type = MKOP(STORE_VSX, 0, 16);
2349 op->element_size = 1;
2350 op->vsx_flags = VSX_CHECK_VEC;
2353 #endif /* CONFIG_VSX */
2359 op->type = MKOP(LOAD, u, 4);
2360 op->ea = dform_ea(instr, regs);
2365 op->type = MKOP(LOAD, u, 1);
2366 op->ea = dform_ea(instr, regs);
2371 op->type = MKOP(STORE, u, 4);
2372 op->ea = dform_ea(instr, regs);
2377 op->type = MKOP(STORE, u, 1);
2378 op->ea = dform_ea(instr, regs);
2383 op->type = MKOP(LOAD, u, 2);
2384 op->ea = dform_ea(instr, regs);
2389 op->type = MKOP(LOAD, SIGNEXT | u, 2);
2390 op->ea = dform_ea(instr, regs);
2395 op->type = MKOP(STORE, u, 2);
2396 op->ea = dform_ea(instr, regs);
2401 break; /* invalid form, ra in range to load */
2402 op->type = MKOP(LOAD_MULTI, 0, 4 * (32 - rd));
2403 op->ea = dform_ea(instr, regs);
2407 op->type = MKOP(STORE_MULTI, 0, 4 * (32 - rd));
2408 op->ea = dform_ea(instr, regs);
2411 #ifdef CONFIG_PPC_FPU
2414 op->type = MKOP(LOAD_FP, u | FPCONV, 4);
2415 op->ea = dform_ea(instr, regs);
2420 op->type = MKOP(LOAD_FP, u, 8);
2421 op->ea = dform_ea(instr, regs);
2425 case 53: /* stfsu */
2426 op->type = MKOP(STORE_FP, u | FPCONV, 4);
2427 op->ea = dform_ea(instr, regs);
2431 case 55: /* stfdu */
2432 op->type = MKOP(STORE_FP, u, 8);
2433 op->ea = dform_ea(instr, regs);
2437 #ifdef __powerpc64__
2439 if (!((rd & 1) || (rd == ra)))
2440 op->type = MKOP(LOAD, 0, 16);
2441 op->ea = dqform_ea(instr, regs);
2446 case 57: /* lfdp, lxsd, lxssp */
2447 op->ea = dsform_ea(instr, regs);
2448 switch (instr & 3) {
2451 break; /* reg must be even */
2452 op->type = MKOP(LOAD_FP, 0, 16);
2456 op->type = MKOP(LOAD_VSX, 0, 8);
2457 op->element_size = 8;
2458 op->vsx_flags = VSX_CHECK_VEC;
2462 op->type = MKOP(LOAD_VSX, 0, 4);
2463 op->element_size = 8;
2464 op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2468 #endif /* CONFIG_VSX */
2470 #ifdef __powerpc64__
2471 case 58: /* ld[u], lwa */
2472 op->ea = dsform_ea(instr, regs);
2473 switch (instr & 3) {
2475 op->type = MKOP(LOAD, 0, 8);
2478 op->type = MKOP(LOAD, UPDATE, 8);
2481 op->type = MKOP(LOAD, SIGNEXT, 4);
2488 case 61: /* stfdp, lxv, stxsd, stxssp, stxv */
2489 switch (instr & 7) {
2490 case 0: /* stfdp with LSB of DS field = 0 */
2491 case 4: /* stfdp with LSB of DS field = 1 */
2492 op->ea = dsform_ea(instr, regs);
2493 op->type = MKOP(STORE_FP, 0, 16);
2497 op->ea = dqform_ea(instr, regs);
2500 op->type = MKOP(LOAD_VSX, 0, 16);
2501 op->element_size = 16;
2502 op->vsx_flags = VSX_CHECK_VEC;
2505 case 2: /* stxsd with LSB of DS field = 0 */
2506 case 6: /* stxsd with LSB of DS field = 1 */
2507 op->ea = dsform_ea(instr, regs);
2509 op->type = MKOP(STORE_VSX, 0, 8);
2510 op->element_size = 8;
2511 op->vsx_flags = VSX_CHECK_VEC;
2514 case 3: /* stxssp with LSB of DS field = 0 */
2515 case 7: /* stxssp with LSB of DS field = 1 */
2516 op->ea = dsform_ea(instr, regs);
2518 op->type = MKOP(STORE_VSX, 0, 4);
2519 op->element_size = 8;
2520 op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2524 op->ea = dqform_ea(instr, regs);
2527 op->type = MKOP(STORE_VSX, 0, 16);
2528 op->element_size = 16;
2529 op->vsx_flags = VSX_CHECK_VEC;
2533 #endif /* CONFIG_VSX */
2535 #ifdef __powerpc64__
2536 case 62: /* std[u] */
2537 op->ea = dsform_ea(instr, regs);
2538 switch (instr & 3) {
2540 op->type = MKOP(STORE, 0, 8);
2543 op->type = MKOP(STORE, UPDATE, 8);
2547 op->type = MKOP(STORE, 0, 16);
2551 #endif /* __powerpc64__ */
2556 if ((GETTYPE(op->type) == LOAD_VSX ||
2557 GETTYPE(op->type) == STORE_VSX) &&
2558 !cpu_has_feature(CPU_FTR_VSX)) {
2561 #endif /* CONFIG_VSX */
2582 op->type = INTERRUPT | 0x700;
2583 op->val = SRR1_PROGPRIV;
2587 op->type = INTERRUPT | 0x700;
2588 op->val = SRR1_PROGTRAP;
2591 EXPORT_SYMBOL_GPL(analyse_instr);
2592 NOKPROBE_SYMBOL(analyse_instr);
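/*
 * Illustrative decode (not from the original source): 0x38610008 is
 * "addi r3,r1,8", so analyse_instr() takes the opcode-14 case above
 * and, given a full register set, returns 1 with op->type =
 * COMPUTE | SETREG, op->reg = 3 and op->val = regs->gpr[1] + 8;
 * emulate_update_regs() can then commit the result without touching
 * memory.
 */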
2595 * For PPC32 we always use stwu with r1 to change the stack pointer.
2596 * This emulated store could therefore corrupt the exception frame, so
2597 * an exception frame trampoline is provided, pushed below the kprobed
2598 * function's stack. We only update gpr[1] here and do not emulate the
2599 * real store operation; the real store is done safely in the exception
2600 * return code, which checks this flag.
2602 static nokprobe_inline int handle_stack_update(unsigned long ea, struct pt_regs *regs)
2606 * Check whether this store would overflow the kernel stack
2608 if (ea - STACK_INT_FRAME_SIZE <= current->thread.ksp_limit) {
2609 printk(KERN_CRIT "Can't kprobe this since kernel stack would overflow.\n");
2612 #endif /* CONFIG_PPC32 */
2614 * Check whether the flag is already set, since that would mean
2615 * we lose the previous value.
2617 WARN_ON(test_thread_flag(TIF_EMULATE_STACK_STORE));
2618 set_thread_flag(TIF_EMULATE_STACK_STORE);
2622 static nokprobe_inline void do_signext(unsigned long *valp, int size)
2626 *valp = (signed short) *valp;
2629 *valp = (signed int) *valp;
2634 static nokprobe_inline void do_byterev(unsigned long *valp, int size)
2638 *valp = byterev_2(*valp);
2641 *valp = byterev_4(*valp);
2643 #ifdef __powerpc64__
2645 *valp = byterev_8(*valp);
2652 * Emulate an instruction that can be executed just by updating
2655 void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op)
2657 unsigned long next_pc;
2659 next_pc = truncate_if_32bit(regs->msr, regs->nip + 4);
2660 switch (GETTYPE(op->type)) {
2662 if (op->type & SETREG)
2663 regs->gpr[op->reg] = op->val;
2664 if (op->type & SETCC)
2665 regs->ccr = op->ccval;
2666 if (op->type & SETXER)
2667 regs->xer = op->xerval;
2671 if (op->type & SETLK)
2672 regs->link = next_pc;
2673 if (op->type & BRTAKEN)
2675 if (op->type & DECCTR)
2680 switch (op->type & BARRIER_MASK) {
2691 case BARRIER_LWSYNC:
2692 asm volatile("lwsync" : : : "memory");
2694 case BARRIER_PTESYNC:
2695 asm volatile("ptesync" : : : "memory");
2704 regs->gpr[op->reg] = regs->xer & 0xffffffffUL;
2707 regs->gpr[op->reg] = regs->link;
2710 regs->gpr[op->reg] = regs->ctr;
2720 regs->xer = op->val & 0xffffffffUL;
2723 regs->link = op->val;
2726 regs->ctr = op->val;
2736 regs->nip = next_pc;
2738 NOKPROBE_SYMBOL(emulate_update_regs);
2741 * Emulate a previously-analysed load or store instruction.
2742 * Return values are:
2743 * 0 = instruction emulated successfully
2744 * -EFAULT = address out of range or access faulted (regs->dar
2745 * contains the faulting address)
2746 * -EACCES = misaligned access, instruction requires alignment
2747 * -EINVAL = unknown operation in *op
2749 int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op)
2751 int err, size, type;
2759 size = GETSIZE(op->type);
2760 type = GETTYPE(op->type);
2761 cross_endian = (regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE);
2762 ea = truncate_if_32bit(regs->msr, op->ea);
2766 if (ea & (size - 1))
2767 return -EACCES; /* can't handle misaligned */
2768 if (!address_ok(regs, ea, size))
2773 #ifdef __powerpc64__
2775 __get_user_asmx(val, ea, err, "lbarx");
2778 __get_user_asmx(val, ea, err, "lharx");
2782 __get_user_asmx(val, ea, err, "lwarx");
2784 #ifdef __powerpc64__
2786 __get_user_asmx(val, ea, err, "ldarx");
2789 err = do_lqarx(ea, &regs->gpr[op->reg]);
2800 regs->gpr[op->reg] = val;
2804 if (ea & (size - 1))
2805 return -EACCES; /* can't handle misaligned */
2806 if (!address_ok(regs, ea, size))
2810 #ifdef __powerpc64__
2812 __put_user_asmx(op->val, ea, err, "stbcx.", cr);
2815 __put_user_asmx(op->val, ea, err, "sthcx.", cr);
2819 __put_user_asmx(op->val, ea, err, "stwcx.", cr);
2821 #ifdef __powerpc64__
2823 __put_user_asmx(op->val, ea, err, "stdcx.", cr);
2826 err = do_stqcx(ea, regs->gpr[op->reg],
2827 regs->gpr[op->reg + 1], &cr);
2834 regs->ccr = (regs->ccr & 0x0fffffff) |
2836 ((regs->xer >> 3) & 0x10000000);
2842 #ifdef __powerpc64__
2844 err = emulate_lq(regs, ea, op->reg, cross_endian);
2848 err = read_mem(&regs->gpr[op->reg], ea, size, regs);
2850 if (op->type & SIGNEXT)
2851 do_signext(&regs->gpr[op->reg], size);
2852 if ((op->type & BYTEREV) == (cross_endian ? 0 : BYTEREV))
2853 do_byterev(&regs->gpr[op->reg], size);
2857 #ifdef CONFIG_PPC_FPU
2860 * If the instruction is in userspace, we can emulate it even
2861 * if the FP/VMX/VSX state is not live, because we have the state
2862 * stored in the thread_struct. If the instruction is in
2863 * the kernel, we must not touch the state in the thread_struct.
2865 if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP))
2867 err = do_fp_load(op, ea, regs, cross_endian);
2870 #ifdef CONFIG_ALTIVEC
2872 if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC))
2874 err = do_vec_load(op->reg, ea, size, regs, cross_endian);
2879 unsigned long msrbit = MSR_VSX;
2882 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
2883 * when the target of the instruction is a vector register.
2885 if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC))
2887 if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit))
2889 err = do_vsx_load(op, ea, regs, cross_endian);
2894 if (!address_ok(regs, ea, size))
2897 for (i = 0; i < size; i += 4) {
2898 unsigned int v32 = 0;
2903 err = copy_mem_in((u8 *) &v32, ea, nb, regs);
2906 if (unlikely(cross_endian))
2907 v32 = byterev_4(v32);
2908 regs->gpr[rd] = v32;
2910 /* reg number wraps from 31 to 0 for lsw[ix] */
2911 rd = (rd + 1) & 0x1f;
2916 #ifdef __powerpc64__
2918 err = emulate_stq(regs, ea, op->reg, cross_endian);
2922 if ((op->type & UPDATE) && size == sizeof(long) &&
2923 op->reg == 1 && op->update_reg == 1 &&
2924 !(regs->msr & MSR_PR) &&
2925 ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) {
2926 err = handle_stack_update(ea, regs);
2929 if (unlikely(cross_endian))
2930 do_byterev(&op->val, size);
2931 err = write_mem(op->val, ea, size, regs);
2934 #ifdef CONFIG_PPC_FPU
2936 if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP))
2938 err = do_fp_store(op, ea, regs, cross_endian);
2941 #ifdef CONFIG_ALTIVEC
2943 if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC))
2945 err = do_vec_store(op->reg, ea, size, regs, cross_endian);
2950 unsigned long msrbit = MSR_VSX;
2953 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
2954 * when the target of the instruction is a vector register.
2956 if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC))
2958 if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit))
2960 err = do_vsx_store(op, ea, regs, cross_endian);
2965 if (!address_ok(regs, ea, size))
2968 for (i = 0; i < size; i += 4) {
2969 unsigned int v32 = regs->gpr[rd];
2974 if (unlikely(cross_endian))
2975 v32 = byterev_4(v32);
2976 err = copy_mem_out((u8 *) &v32, ea, nb, regs);
2980 /* reg number wraps from 31 to 0 for stsw[ix] */
2981 rd = (rd + 1) & 0x1f;
2992 if (op->type & UPDATE)
2993 regs->gpr[op->update_reg] = op->ea;
2997 NOKPROBE_SYMBOL(emulate_loadstore);
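/*
 * Example (illustrative): an lwarx with op->ea = 0x1002 returns -EACCES
 * from the LARX case above, because reservations are only emulated for
 * naturally aligned addresses.
 */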
3000 * Emulate instructions that cause a transfer of control,
3001 * loads and stores, and a few other instructions.
3002 * Returns 1 if the step was emulated, 0 if not,
3003 * or -1 if the instruction is one that should not be stepped,
3004 * such as an rfid, or a mtmsrd that would clear MSR_RI.
3006 int emulate_step(struct pt_regs *regs, unsigned int instr)
3008 struct instruction_op op;
3013 r = analyse_instr(&op, regs, instr);
3017 emulate_update_regs(regs, &op);
3022 type = GETTYPE(op.type);
3024 if (OP_IS_LOAD_STORE(type)) {
3025 err = emulate_loadstore(regs, &op);
3033 ea = truncate_if_32bit(regs->msr, op.ea);
3034 if (!address_ok(regs, ea, 8))
3036 switch (op.type & CACHEOP_MASK) {
3038 __cacheop_user_asmx(ea, err, "dcbst");
3041 __cacheop_user_asmx(ea, err, "dcbf");
3045 prefetchw((void *) ea);
3049 prefetch((void *) ea);
3052 __cacheop_user_asmx(ea, err, "icbi");
3055 err = emulate_dcbz(ea, regs);
3065 regs->gpr[op.reg] = regs->msr & MSR_MASK;
3069 val = regs->gpr[op.reg];
3070 if ((val & MSR_RI) == 0)
3071 /* can't step mtmsr[d] that would clear MSR_RI */
3073 /* here op.val is the mask of bits to change */
3074 regs->msr = (regs->msr & ~op.val) | (val & op.val);
3078 case SYSCALL: /* sc */
3080 * N.B. this uses knowledge about how the syscall
3081 * entry code works. If that is changed, this will
3082 * need to be changed also.
3084 if (regs->gpr[0] == 0x1ebe &&
3085 cpu_has_feature(CPU_FTR_REAL_LE)) {
3086 regs->msr ^= MSR_LE;
3089 regs->gpr[9] = regs->gpr[13];
3090 regs->gpr[10] = MSR_KERNEL;
3091 regs->gpr[11] = regs->nip + 4;
3092 regs->gpr[12] = regs->msr & MSR_MASK;
3093 regs->gpr[13] = (unsigned long) get_paca();
3094 regs->nip = (unsigned long) &system_call_common;
3095 regs->msr = MSR_KERNEL;
3105 regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
3108 NOKPROBE_SYMBOL(emulate_step);
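/*
 * Minimal usage sketch (illustrative, not from the original source),
 * as a single-step handler such as kprobes might use it:
 *
 *	int r = emulate_step(regs, instr);
 *	if (r == 1)
 *		return;		(regs->nip has already been advanced)
 *	if (r < 0)
 *		refuse to single-step this instruction;
 *	otherwise execute the instruction out of line.
 */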