/*
 * Copyright (C) 2013 Huawei Ltd.
 * Author: Jiang Liu <liuj97@gmail.com>
 *
 * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/fixmap.h>
#include <asm/insn.h>
#include <asm/kprobes.h>
#define AARCH64_INSN_SF_BIT	BIT(31)
#define AARCH64_INSN_N_BIT	BIT(22)
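
/*
 * Top-level A64 encoding classes, indexed by instruction bits [28:25]
 * (the field extracted by aarch64_get_insn_class() below).
 */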
static int aarch64_insn_encoding_class[] = {
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_REG,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_FPSIMD,
	AARCH64_INSN_CLS_DP_IMM,
	AARCH64_INSN_CLS_DP_IMM,
	AARCH64_INSN_CLS_BR_SYS,
	AARCH64_INSN_CLS_BR_SYS,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_REG,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_FPSIMD,
};

enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn)
{
	return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
}
/* NOP is an alias of HINT */
bool __kprobes aarch64_insn_is_nop(u32 insn)
{
	if (!aarch64_insn_is_hint(insn))
		return false;

	switch (insn & 0xFE0) {
	case AARCH64_INSN_HINT_YIELD:
	case AARCH64_INSN_HINT_WFE:
	case AARCH64_INSN_HINT_WFI:
	case AARCH64_INSN_HINT_SEV:
	case AARCH64_INSN_HINT_SEVL:
		return false;
	default:
		return true;
	}
}

bool aarch64_insn_is_branch_imm(u32 insn)
{
	return (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) ||
		aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
		aarch64_insn_is_bcond(insn));
}
static DEFINE_RAW_SPINLOCK(patch_lock);
static void __kprobes *patch_map(void *addr, int fixmap)
{
	unsigned long uintaddr = (uintptr_t) addr;
	bool module = !core_kernel_text(uintaddr);
	struct page *page;

	if (module && IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
		page = vmalloc_to_page(addr);
	else if (!module)
		page = phys_to_page(__pa_symbol(addr));
	else
		return addr;

	BUG_ON(!page);
	return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
			(uintaddr & ~PAGE_MASK));
}
static void __kprobes patch_unmap(int fixmap)
{
	clear_fixmap(fixmap);
}
/*
 * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
 * little-endian.
 */
int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
{
	int ret;
	__le32 val;

	ret = probe_kernel_read(&val, addr, AARCH64_INSN_SIZE);
	if (!ret)
		*insnp = le32_to_cpu(val);

	return ret;
}
static int __kprobes __aarch64_insn_write(void *addr, __le32 insn)
{
	void *waddr = addr;
	unsigned long flags = 0;
	int ret;

	raw_spin_lock_irqsave(&patch_lock, flags);
	waddr = patch_map(addr, FIX_TEXT_POKE0);

	ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE);

	patch_unmap(FIX_TEXT_POKE0);
	raw_spin_unlock_irqrestore(&patch_lock, flags);

	return ret;
}
int __kprobes aarch64_insn_write(void *addr, u32 insn)
{
	return __aarch64_insn_write(addr, cpu_to_le32(insn));
}
static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn)
{
	if (aarch64_get_insn_class(insn) != AARCH64_INSN_CLS_BR_SYS)
		return false;

	return	aarch64_insn_is_b(insn) ||
		aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_svc(insn) ||
		aarch64_insn_is_hvc(insn) ||
		aarch64_insn_is_smc(insn) ||
		aarch64_insn_is_brk(insn) ||
		aarch64_insn_is_nop(insn);
}
bool __kprobes aarch64_insn_uses_literal(u32 insn)
{
	/* ldr/ldrsw (literal), prfm */

	return aarch64_insn_is_ldr_lit(insn) ||
		aarch64_insn_is_ldrsw_lit(insn) ||
		aarch64_insn_is_adr_adrp(insn) ||
		aarch64_insn_is_prfm_lit(insn);
}
bool __kprobes aarch64_insn_is_branch(u32 insn)
{
	/* b, bl, cb*, tb*, b.cond, br, blr */

	return aarch64_insn_is_b(insn) ||
		aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_cbz(insn) ||
		aarch64_insn_is_cbnz(insn) ||
		aarch64_insn_is_tbz(insn) ||
		aarch64_insn_is_tbnz(insn) ||
		aarch64_insn_is_ret(insn) ||
		aarch64_insn_is_br(insn) ||
		aarch64_insn_is_blr(insn) ||
		aarch64_insn_is_bcond(insn);
}
/*
 * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a
 * Section B2.6.5 "Concurrent modification and execution of instructions":
 * Concurrent modification and execution of instructions can lead to the
 * resulting instruction performing any behavior that can be achieved by
 * executing any sequence of instructions that can be executed from the
 * same Exception level, except where the instruction before modification
 * and the instruction after modification is a B, BL, NOP, BKPT, SVC, HVC,
 * or SMC instruction.
 */
bool __kprobes aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn)
{
	return __aarch64_insn_hotpatch_safe(old_insn) &&
	       __aarch64_insn_hotpatch_safe(new_insn);
}
int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
{
	u32 *tp = addr;
	int ret;

	/* A64 instructions must be word aligned */
	if ((uintptr_t)tp & 0x3)
		return -EINVAL;

	ret = aarch64_insn_write(tp, insn);
	if (ret == 0)
		flush_icache_range((uintptr_t)tp,
				   (uintptr_t)tp + AARCH64_INSN_SIZE);

	return ret;
}
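
/*
 * Illustrative use only (not a caller in this file): overwriting a single
 * kernel instruction with a NOP, on a path where no other CPU can be
 * executing it concurrently, could look like this ("addr" is a
 * hypothetical variable):
 *
 *	u32 nop = aarch64_insn_gen_nop();
 *
 *	if (aarch64_insn_patch_text_nosync(addr, nop))
 *		pr_warn("failed to patch %p\n", addr);
 *
 * Callers that cannot rule out concurrent execution should use
 * aarch64_insn_patch_text() below instead.
 */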
struct aarch64_insn_patch {
	void		**text_addrs;
	u32		*new_insns;
	int		insn_cnt;
	atomic_t	cpu_count;
};
static int __kprobes aarch64_insn_patch_text_cb(void *arg)
{
	int i, ret = 0;
	struct aarch64_insn_patch *pp = arg;

	/* The last CPU becomes master */
	if (atomic_inc_return(&pp->cpu_count) == num_online_cpus()) {
		for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
			ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
							     pp->new_insns[i]);
		/*
		 * aarch64_insn_patch_text_nosync() calls flush_icache_range(),
		 * which ends with "dsb; isb" pair guaranteeing global
		 * visibility.
		 */
		/* Notify other processors with an additional increment. */
		atomic_inc(&pp->cpu_count);
	} else {
		while (atomic_read(&pp->cpu_count) <= num_online_cpus())
			cpu_relax();
		isb();
	}

	return ret;
}
int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt)
{
	struct aarch64_insn_patch patch = {
		.text_addrs = addrs,
		.new_insns = insns,
		.insn_cnt = cnt,
		.cpu_count = ATOMIC_INIT(0),
	};

	if (cnt <= 0)
		return -EINVAL;

	return stop_machine_cpuslocked(aarch64_insn_patch_text_cb, &patch,
				       cpu_online_mask);
}
int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
{
	int ret;
	u32 insn;

	/* Unsafe to patch multiple instructions without synchronization */
	if (cnt == 1) {
		ret = aarch64_insn_read(addrs[0], &insn);
		if (ret)
			return ret;

		if (aarch64_insn_hotpatch_safe(insn, insns[0])) {
			/*
			 * ARMv8 architecture doesn't guarantee all CPUs see
			 * the new instruction after returning from function
			 * aarch64_insn_patch_text_nosync(). So send IPIs to
			 * all other CPUs to achieve instruction
			 * synchronization.
			 */
			ret = aarch64_insn_patch_text_nosync(addrs[0], insns[0]);
			kick_all_cpus_sync();
			return ret;
		}
	}

	return aarch64_insn_patch_text_sync(addrs, insns, cnt);
}
static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
						u32 *maskp, int *shiftp)
	case AARCH64_INSN_IMM_26:
	case AARCH64_INSN_IMM_19:
	case AARCH64_INSN_IMM_16:
	case AARCH64_INSN_IMM_14:
	case AARCH64_INSN_IMM_12:
	case AARCH64_INSN_IMM_9:
	case AARCH64_INSN_IMM_7:
	case AARCH64_INSN_IMM_6:
	case AARCH64_INSN_IMM_S:
	case AARCH64_INSN_IMM_R:
#define ADR_IMM_HILOSPLIT	2
#define ADR_IMM_SIZE		SZ_2M
#define ADR_IMM_LOMASK		((1 << ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_HIMASK		((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_LOSHIFT		29
#define ADR_IMM_HISHIFT		5
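
/*
 * ADR/ADRP split their 21-bit immediate (ADR_IMM_SIZE == SZ_2M) across the
 * instruction: the low ADR_IMM_HILOSPLIT bits sit in bits [30:29]
 * (ADR_IMM_LOSHIFT) and the remaining high bits in bits [23:5]
 * (ADR_IMM_HISHIFT). The AARCH64_INSN_IMM_ADR cases below splice the two
 * halves together when decoding and apart again when encoding.
 */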
u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn)
{
	u32 immlo, immhi, mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
		immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK;
		insn = (immhi << ADR_IMM_HILOSPLIT) | immlo;
		mask = ADR_IMM_SIZE - 1;
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("aarch64_insn_decode_immediate: unknown immediate encoding %d\n",
			       type);
			return 0;
		}
	}

	return (insn >> shift) & mask;
}
u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
					    u32 insn, u64 imm)
{
	u32 immlo, immhi, mask;
	int shift;

	if (insn == AARCH64_BREAK_FAULT)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT;
		imm >>= ADR_IMM_HILOSPLIT;
		immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT;
		imm = immlo | immhi;
		mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) |
			(ADR_IMM_HIMASK << ADR_IMM_HISHIFT));
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
			       type);
			return AARCH64_BREAK_FAULT;
		}
	}

	/* Update the immediate field. */
	insn &= ~(mask << shift);
	insn |= (imm & mask) << shift;

	return insn;
}
u32 aarch64_insn_decode_register(enum aarch64_insn_register_type type,
				 u32 insn)
	case AARCH64_INSN_REGTYPE_RT:
	case AARCH64_INSN_REGTYPE_RD:
	case AARCH64_INSN_REGTYPE_RN:
	case AARCH64_INSN_REGTYPE_RT2:
	case AARCH64_INSN_REGTYPE_RA:
	case AARCH64_INSN_REGTYPE_RM:
		pr_err("%s: unknown register type encoding %d\n", __func__,
	return (insn >> shift) & GENMASK(4, 0);

static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
					enum aarch64_insn_register reg)
	if (insn == AARCH64_BREAK_FAULT)
		return AARCH64_BREAK_FAULT;
	if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
		pr_err("%s: unknown register encoding %d\n", __func__, reg);
		return AARCH64_BREAK_FAULT;
	case AARCH64_INSN_REGTYPE_RT:
	case AARCH64_INSN_REGTYPE_RD:
	case AARCH64_INSN_REGTYPE_RN:
	case AARCH64_INSN_REGTYPE_RT2:
	case AARCH64_INSN_REGTYPE_RA:
	case AARCH64_INSN_REGTYPE_RM:
	case AARCH64_INSN_REGTYPE_RS:
		pr_err("%s: unknown register type encoding %d\n", __func__,
		return AARCH64_BREAK_FAULT;
	insn &= ~(GENMASK(4, 0) << shift);
	insn |= reg << shift;

static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
	case AARCH64_INSN_SIZE_8:
	case AARCH64_INSN_SIZE_16:
	case AARCH64_INSN_SIZE_32:
	case AARCH64_INSN_SIZE_64:
		pr_err("%s: unknown size encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	insn &= ~GENMASK(31, 30);
static inline long branch_imm_common(unsigned long pc, unsigned long addr,
				     long range)
{
	long offset;

	if ((pc & 0x3) || (addr & 0x3)) {
		pr_err("%s: A64 instructions must be word aligned\n", __func__);
		return range;
	}

	offset = ((long)addr - (long)pc);

	if (offset < -range || offset >= range) {
		pr_err("%s: offset out of range\n", __func__);
		return range;
	}

	return offset;
}
u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
					  enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	/*
	 * B/BL support [-128M, 128M) offset
	 * ARM64 virtual address arrangement guarantees all kernel and module
	 * texts are within +/-128M.
	 */
	offset = branch_imm_common(pc, addr, SZ_128M);
	if (offset >= SZ_128M)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_bl_value();
		break;
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_b_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
					     offset >> 2);
}
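
/*
 * Illustrative use only (not a caller in this file): patching a direct
 * call site to branch-and-link to a new target could look like this,
 * where "pc" and "target" are hypothetical local variables:
 *
 *	u32 insn = aarch64_insn_gen_branch_imm(pc, target,
 *					       AARCH64_INSN_BRANCH_LINK);
 *
 *	if (insn != AARCH64_BREAK_FAULT)
 *		aarch64_insn_patch_text_nosync((void *)pc, insn);
 */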
u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_register reg,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_branch_type type)
	offset = branch_imm_common(pc, addr, SZ_1M);
		return AARCH64_BREAK_FAULT;
	case AARCH64_INSN_BRANCH_COMP_ZERO:
		insn = aarch64_insn_get_cbz_value();
	case AARCH64_INSN_BRANCH_COMP_NONZERO:
		insn = aarch64_insn_get_cbnz_value();
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	case AARCH64_INSN_VARIANT_32BIT:
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,

u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_condition cond)
	offset = branch_imm_common(pc, addr, SZ_1M);
	insn = aarch64_insn_get_bcond_value();
	if (cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL) {
		pr_err("%s: unknown condition encoding %d\n", __func__, cond);
		return AARCH64_BREAK_FAULT;
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op)
{
	return aarch64_insn_get_hint_value() | op;
}

u32 __kprobes aarch64_insn_gen_nop(void)
{
	return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
}
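
/*
 * For reference: the NOP produced above is the HINT #0 encoding,
 * i.e. 0xd503201f.
 */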
u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
				enum aarch64_insn_branch_type type)
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_br_value();
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_blr_value();
	case AARCH64_INSN_BRANCH_RETURN:
		insn = aarch64_insn_get_ret_value();
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg);

u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
				    enum aarch64_insn_register base,
				    enum aarch64_insn_register offset,
				    enum aarch64_insn_size_type size,
				    enum aarch64_insn_ldst_type type)
	case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
		insn = aarch64_insn_get_ldr_reg_value();
	case AARCH64_INSN_LDST_STORE_REG_OFFSET:
		insn = aarch64_insn_get_str_reg_value();
		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	insn = aarch64_insn_encode_ldst_size(size, insn);
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,

u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
				     enum aarch64_insn_register reg2,
				     enum aarch64_insn_register base,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_ldst_type type)
	case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_ldp_pre_value();
	case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_stp_pre_value();
	case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX:
		insn = aarch64_insn_get_ldp_post_value();
	case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX:
		insn = aarch64_insn_get_stp_post_value();
		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	case AARCH64_INSN_VARIANT_32BIT:
		if ((offset & 0x3) || (offset < -256) || (offset > 252)) {
			pr_err("%s: offset must be multiples of 4 in the range of [-256, 252] %d\n",
			return AARCH64_BREAK_FAULT;
	case AARCH64_INSN_VARIANT_64BIT:
		if ((offset & 0x7) || (offset < -512) || (offset > 504)) {
			pr_err("%s: offset must be multiples of 8 in the range of [-512, 504] %d\n",
			return AARCH64_BREAK_FAULT;
		insn |= AARCH64_INSN_SF_BIT;
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn,
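
/*
 * Illustrative use only: the usual frame push, STP x29, x30, [sp, #-16]!,
 * could be generated along these lines (the argument order is assumed to
 * be reg1, reg2, base, offset, variant, type):
 *
 *	insn = aarch64_insn_gen_load_store_pair(AARCH64_INSN_REG_FP,
 *						AARCH64_INSN_REG_LR,
 *						AARCH64_INSN_REG_SP, -16,
 *						AARCH64_INSN_VARIANT_64BIT,
 *						AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX);
 */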
u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
				   enum aarch64_insn_register base,
				   enum aarch64_insn_register state,
				   enum aarch64_insn_size_type size,
				   enum aarch64_insn_ldst_type type)
	case AARCH64_INSN_LDST_LOAD_EX:
		insn = aarch64_insn_get_load_ex_value();
	case AARCH64_INSN_LDST_STORE_EX:
		insn = aarch64_insn_get_store_ex_value();
		pr_err("%s: unknown load/store exclusive encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	insn = aarch64_insn_encode_ldst_size(size, insn);
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    AARCH64_INSN_REG_ZR);
	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,

u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result,
			   enum aarch64_insn_register address,
			   enum aarch64_insn_register value,
			   enum aarch64_insn_size_type size)
	u32 insn = aarch64_insn_get_ldadd_value();
	case AARCH64_INSN_SIZE_32:
	case AARCH64_INSN_SIZE_64:
		pr_err("%s: unimplemented size encoding %d\n", __func__, size);
		return AARCH64_BREAK_FAULT;
	insn = aarch64_insn_encode_ldst_size(size, insn);
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,

u32 aarch64_insn_gen_stadd(enum aarch64_insn_register address,
			   enum aarch64_insn_register value,
			   enum aarch64_insn_size_type size)
	 * STADD is simply encoded as an alias for LDADD with XZR as
	 * the destination register.
	return aarch64_insn_gen_ldadd(AARCH64_INSN_REG_ZR, address,

static u32 aarch64_insn_encode_prfm_imm(enum aarch64_insn_prfm_type type,
					enum aarch64_insn_prfm_target target,
					enum aarch64_insn_prfm_policy policy,
	u32 imm_type = 0, imm_target = 0, imm_policy = 0;
	case AARCH64_INSN_PRFM_TYPE_PLD:
	case AARCH64_INSN_PRFM_TYPE_PLI:
	case AARCH64_INSN_PRFM_TYPE_PST:
		pr_err("%s: unknown prfm type encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	case AARCH64_INSN_PRFM_TARGET_L1:
	case AARCH64_INSN_PRFM_TARGET_L2:
	case AARCH64_INSN_PRFM_TARGET_L3:
		pr_err("%s: unknown prfm target encoding %d\n", __func__, target);
		return AARCH64_BREAK_FAULT;
	case AARCH64_INSN_PRFM_POLICY_KEEP:
	case AARCH64_INSN_PRFM_POLICY_STRM:
		pr_err("%s: unknown prfm policy encoding %d\n", __func__, policy);
		return AARCH64_BREAK_FAULT;
	/* In this case, imm5 is encoded into Rt field. */
	insn &= ~GENMASK(4, 0);
	insn |= imm_policy | (imm_target << 1) | (imm_type << 3);

u32 aarch64_insn_gen_prefetch(enum aarch64_insn_register base,
			      enum aarch64_insn_prfm_type type,
			      enum aarch64_insn_prfm_target target,
			      enum aarch64_insn_prfm_policy policy)
	u32 insn = aarch64_insn_get_prfm_value();
	insn = aarch64_insn_encode_ldst_size(AARCH64_INSN_SIZE_64, insn);
	insn = aarch64_insn_encode_prfm_imm(type, target, policy, insn);
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, 0);

u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
				 enum aarch64_insn_register src,
				 int imm, enum aarch64_insn_variant variant,
				 enum aarch64_insn_adsb_type type)
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_imm_value();
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_imm_value();
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_imm_value();
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_imm_value();
		pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	case AARCH64_INSN_VARIANT_32BIT:
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	if (imm & ~(SZ_4K - 1)) {
		pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
		return AARCH64_BREAK_FAULT;
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);

u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
			      enum aarch64_insn_register src,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_bitfield_type type)
	case AARCH64_INSN_BITFIELD_MOVE:
		insn = aarch64_insn_get_bfm_value();
	case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED:
		insn = aarch64_insn_get_ubfm_value();
	case AARCH64_INSN_BITFIELD_MOVE_SIGNED:
		insn = aarch64_insn_get_sbfm_value();
		pr_err("%s: unknown bitfield encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	case AARCH64_INSN_VARIANT_32BIT:
		mask = GENMASK(4, 0);
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;
		mask = GENMASK(5, 0);
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
		pr_err("%s: invalid immr encoding %d\n", __func__, immr);
		return AARCH64_BREAK_FAULT;
		pr_err("%s: invalid imms encoding %d\n", __func__, imms);
		return AARCH64_BREAK_FAULT;
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);

u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_movewide_type type)
	case AARCH64_INSN_MOVEWIDE_ZERO:
		insn = aarch64_insn_get_movz_value();
	case AARCH64_INSN_MOVEWIDE_KEEP:
		insn = aarch64_insn_get_movk_value();
	case AARCH64_INSN_MOVEWIDE_INVERSE:
		insn = aarch64_insn_get_movn_value();
		pr_err("%s: unknown movewide encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	if (imm & ~(SZ_64K - 1)) {
		pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
		return AARCH64_BREAK_FAULT;
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift != 0 && shift != 16) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			return AARCH64_BREAK_FAULT;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift != 0 && shift != 16 && shift != 32 && shift != 48) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			return AARCH64_BREAK_FAULT;
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	insn |= (shift >> 4) << 21;
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
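
/*
 * Illustrative use only: a 64-bit constant can be materialised with one
 * MOVZ followed by up to three MOVKs, one per non-zero 16-bit chunk,
 * assuming the (dst, imm, shift, variant, type) argument order; "dst" and
 * "val" are hypothetical variables:
 *
 *	insn[0] = aarch64_insn_gen_movewide(dst, val & 0xffff, 0,
 *					    AARCH64_INSN_VARIANT_64BIT,
 *					    AARCH64_INSN_MOVEWIDE_ZERO);
 *	insn[1] = aarch64_insn_gen_movewide(dst, (val >> 16) & 0xffff, 16,
 *					    AARCH64_INSN_VARIANT_64BIT,
 *					    AARCH64_INSN_MOVEWIDE_KEEP);
 */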
u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_adsb_type type)
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_value();
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_value();
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_value();
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_value();
		pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift & ~(SZ_32 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			return AARCH64_BREAK_FAULT;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift & ~(SZ_64 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			return AARCH64_BREAK_FAULT;
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);

u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data1_type type)
	case AARCH64_INSN_DATA1_REVERSE_16:
		insn = aarch64_insn_get_rev16_value();
	case AARCH64_INSN_DATA1_REVERSE_32:
		insn = aarch64_insn_get_rev32_value();
	case AARCH64_INSN_DATA1_REVERSE_64:
		if (variant != AARCH64_INSN_VARIANT_64BIT) {
			pr_err("%s: invalid variant for reverse64 %d\n",
			return AARCH64_BREAK_FAULT;
		insn = aarch64_insn_get_rev64_value();
		pr_err("%s: unknown data1 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	case AARCH64_INSN_VARIANT_32BIT:
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data2_type type)
	case AARCH64_INSN_DATA2_UDIV:
		insn = aarch64_insn_get_udiv_value();
	case AARCH64_INSN_DATA2_SDIV:
		insn = aarch64_insn_get_sdiv_value();
	case AARCH64_INSN_DATA2_LSLV:
		insn = aarch64_insn_get_lslv_value();
	case AARCH64_INSN_DATA2_LSRV:
		insn = aarch64_insn_get_lsrv_value();
	case AARCH64_INSN_DATA2_ASRV:
		insn = aarch64_insn_get_asrv_value();
	case AARCH64_INSN_DATA2_RORV:
		insn = aarch64_insn_get_rorv_value();
		pr_err("%s: unknown data2 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	case AARCH64_INSN_VARIANT_32BIT:
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg1,
			   enum aarch64_insn_register reg2,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data3_type type)
	case AARCH64_INSN_DATA3_MADD:
		insn = aarch64_insn_get_madd_value();
	case AARCH64_INSN_DATA3_MSUB:
		insn = aarch64_insn_get_msub_value();
		pr_err("%s: unknown data3 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	case AARCH64_INSN_VARIANT_32BIT:
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RA, insn, src);
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,

u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_logic_type type)
	case AARCH64_INSN_LOGIC_AND:
		insn = aarch64_insn_get_and_value();
	case AARCH64_INSN_LOGIC_BIC:
		insn = aarch64_insn_get_bic_value();
	case AARCH64_INSN_LOGIC_ORR:
		insn = aarch64_insn_get_orr_value();
	case AARCH64_INSN_LOGIC_ORN:
		insn = aarch64_insn_get_orn_value();
	case AARCH64_INSN_LOGIC_EOR:
		insn = aarch64_insn_get_eor_value();
	case AARCH64_INSN_LOGIC_EON:
		insn = aarch64_insn_get_eon_value();
	case AARCH64_INSN_LOGIC_AND_SETFLAGS:
		insn = aarch64_insn_get_ands_value();
	case AARCH64_INSN_LOGIC_BIC_SETFLAGS:
		insn = aarch64_insn_get_bics_value();
		pr_err("%s: unknown logical encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift & ~(SZ_32 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			return AARCH64_BREAK_FAULT;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift & ~(SZ_64 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			return AARCH64_BREAK_FAULT;
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
/*
 * Decode the imm field of a branch, and return the byte offset as a
 * signed value (so it can be used when computing a new branch
 * target).
 */
s32 aarch64_get_branch_offset(u32 insn)
{
	s32 imm;

	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
		return (imm << 6) >> 4;
	}

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn);
		return (imm << 13) >> 11;
	}

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_14, insn);
		return (imm << 18) >> 16;
	}

	/* Unhandled instruction */
	BUG();
}
/*
 * Encode the displacement of a branch in the imm field and return the
 * updated instruction.
 */
u32 aarch64_set_branch_offset(u32 insn, s32 offset)
{
	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
						     offset >> 2);

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
						     offset >> 2);

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_14, insn,
						     offset >> 2);

	/* Unhandled instruction */
	BUG();
}
s32 aarch64_insn_adrp_get_offset(u32 insn)
{
	BUG_ON(!aarch64_insn_is_adrp(insn));
	return aarch64_insn_decode_immediate(AARCH64_INSN_IMM_ADR, insn) << 12;
}

u32 aarch64_insn_adrp_set_offset(u32 insn, s32 offset)
{
	BUG_ON(!aarch64_insn_is_adrp(insn));
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn,
					     offset >> 12);
}
/*
 * Extract the Op/CR data from a msr/mrs instruction.
 */
u32 aarch64_insn_extract_system_reg(u32 insn)
{
	return (insn & 0x1FFFE0) >> 5;
}

bool aarch32_insn_is_wide(u32 insn)
{
	return insn >= 0xe800;
}

/*
 * Macros/defines for extracting register numbers from instruction.
 */
u32 aarch32_insn_extract_reg_num(u32 insn, int offset)
{
	return (insn & (0xf << offset)) >> offset;
}

#define OPC2_MASK	0x7
#define OPC2_OFFSET	5
u32 aarch32_insn_mcr_extract_opc2(u32 insn)
{
	return (insn & (OPC2_MASK << OPC2_OFFSET)) >> OPC2_OFFSET;
}

#define CRM_MASK	0xf
u32 aarch32_insn_mcr_extract_crm(u32 insn)
{
	return insn & CRM_MASK;
}
static bool __kprobes __check_eq(unsigned long pstate)
{
	return (pstate & PSR_Z_BIT) != 0;
}

static bool __kprobes __check_ne(unsigned long pstate)
{
	return (pstate & PSR_Z_BIT) == 0;
}

static bool __kprobes __check_cs(unsigned long pstate)
{
	return (pstate & PSR_C_BIT) != 0;
}

static bool __kprobes __check_cc(unsigned long pstate)
{
	return (pstate & PSR_C_BIT) == 0;
}

static bool __kprobes __check_mi(unsigned long pstate)
{
	return (pstate & PSR_N_BIT) != 0;
}

static bool __kprobes __check_pl(unsigned long pstate)
{
	return (pstate & PSR_N_BIT) == 0;
}

static bool __kprobes __check_vs(unsigned long pstate)
{
	return (pstate & PSR_V_BIT) != 0;
}

static bool __kprobes __check_vc(unsigned long pstate)
{
	return (pstate & PSR_V_BIT) == 0;
}

static bool __kprobes __check_hi(unsigned long pstate)
{
	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
	return (pstate & PSR_C_BIT) != 0;
}

static bool __kprobes __check_ls(unsigned long pstate)
{
	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
	return (pstate & PSR_C_BIT) == 0;
}

static bool __kprobes __check_ge(unsigned long pstate)
{
	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
	return (pstate & PSR_N_BIT) == 0;
}

static bool __kprobes __check_lt(unsigned long pstate)
{
	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
	return (pstate & PSR_N_BIT) != 0;
}

static bool __kprobes __check_gt(unsigned long pstate)
{
	/*PSR_N_BIT ^= PSR_V_BIT */
	unsigned long temp = pstate ^ (pstate << 3);

	temp |= (pstate << 1);	/*PSR_N_BIT |= PSR_Z_BIT */
	return (temp & PSR_N_BIT) == 0;
}

static bool __kprobes __check_le(unsigned long pstate)
{
	/*PSR_N_BIT ^= PSR_V_BIT */
	unsigned long temp = pstate ^ (pstate << 3);

	temp |= (pstate << 1);	/*PSR_N_BIT |= PSR_Z_BIT */
	return (temp & PSR_N_BIT) != 0;
}
static bool __kprobes __check_al(unsigned long pstate)
{
	return true;
}

/*
 * Note that the ARMv8 ARM calls condition code 0b1111 "nv", but states that
 * it behaves identically to 0b1110 ("al").
 */
pstate_check_t * const aarch32_opcode_cond_checks[16] = {
	__check_eq, __check_ne, __check_cs, __check_cc,
	__check_mi, __check_pl, __check_vs, __check_vc,
	__check_hi, __check_ls, __check_ge, __check_lt,
	__check_gt, __check_le, __check_al, __check_al
};
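
/*
 * Illustrative use only: code emulating or single-stepping a conditional
 * AArch32 instruction can test the instruction's condition field (bits
 * [31:28]) against the saved PSTATE flags along these lines ("insn" and
 * "regs" are hypothetical variables):
 *
 *	if (aarch32_opcode_cond_checks[insn >> 28](regs->pstate))
 *		... the instruction would execute ...
 */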