GNU Linux-libre 5.4.274-gnu1: arch/arm64/kernel/insn.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2013 Huawei Ltd.
4  * Author: Jiang Liu <liuj97@gmail.com>
5  *
6  * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
7  */
8 #include <linux/bitops.h>
9 #include <linux/bug.h>
10 #include <linux/compiler.h>
11 #include <linux/kernel.h>
12 #include <linux/mm.h>
13 #include <linux/smp.h>
14 #include <linux/spinlock.h>
15 #include <linux/stop_machine.h>
16 #include <linux/types.h>
17 #include <linux/uaccess.h>
18
19 #include <asm/cacheflush.h>
20 #include <asm/debug-monitors.h>
21 #include <asm/fixmap.h>
22 #include <asm/insn.h>
23 #include <asm/kprobes.h>
24 #include <asm/sections.h>
25
26 #define AARCH64_INSN_SF_BIT     BIT(31)
27 #define AARCH64_INSN_N_BIT      BIT(22)
28 #define AARCH64_INSN_LSL_12     BIT(22)
29
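/*
 * A64 top-level encoding classes, indexed by bits [28:25] of the
 * instruction (see aarch64_get_insn_class() below).
 */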
30 static const int aarch64_insn_encoding_class[] = {
31         AARCH64_INSN_CLS_UNKNOWN,
32         AARCH64_INSN_CLS_UNKNOWN,
33         AARCH64_INSN_CLS_UNKNOWN,
34         AARCH64_INSN_CLS_UNKNOWN,
35         AARCH64_INSN_CLS_LDST,
36         AARCH64_INSN_CLS_DP_REG,
37         AARCH64_INSN_CLS_LDST,
38         AARCH64_INSN_CLS_DP_FPSIMD,
39         AARCH64_INSN_CLS_DP_IMM,
40         AARCH64_INSN_CLS_DP_IMM,
41         AARCH64_INSN_CLS_BR_SYS,
42         AARCH64_INSN_CLS_BR_SYS,
43         AARCH64_INSN_CLS_LDST,
44         AARCH64_INSN_CLS_DP_REG,
45         AARCH64_INSN_CLS_LDST,
46         AARCH64_INSN_CLS_DP_FPSIMD,
47 };
48
49 enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn)
50 {
51         return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
52 }
53
54 /* NOP is an alias of HINT */
55 bool __kprobes aarch64_insn_is_nop(u32 insn)
56 {
57         if (!aarch64_insn_is_hint(insn))
58                 return false;
59
60         switch (insn & 0xFE0) {
61         case AARCH64_INSN_HINT_YIELD:
62         case AARCH64_INSN_HINT_WFE:
63         case AARCH64_INSN_HINT_WFI:
64         case AARCH64_INSN_HINT_SEV:
65         case AARCH64_INSN_HINT_SEVL:
66                 return false;
67         default:
68                 return true;
69         }
70 }
71
72 bool aarch64_insn_is_branch_imm(u32 insn)
73 {
74         return (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) ||
75                 aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) ||
76                 aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
77                 aarch64_insn_is_bcond(insn));
78 }
79
80 static DEFINE_RAW_SPINLOCK(patch_lock);
81
82 static bool is_exit_text(unsigned long addr)
83 {
84         /* discarded with init text/data */
85         return system_state < SYSTEM_RUNNING &&
86                 addr >= (unsigned long)__exittext_begin &&
87                 addr < (unsigned long)__exittext_end;
88 }
89
90 static bool is_image_text(unsigned long addr)
91 {
92         return core_kernel_text(addr) || is_exit_text(addr);
93 }
94
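/*
 * Map the page containing @addr at a text-poke fixmap slot so that
 * otherwise read-only kernel or module text can be written. Returns the
 * writable alias, or @addr unchanged when no remapping is needed (module
 * text without CONFIG_STRICT_MODULE_RWX).
 */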
95 static void __kprobes *patch_map(void *addr, int fixmap)
96 {
97         unsigned long uintaddr = (uintptr_t) addr;
98         bool image = is_image_text(uintaddr);
99         struct page *page;
100
101         if (image)
102                 page = phys_to_page(__pa_symbol(addr));
103         else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
104                 page = vmalloc_to_page(addr);
105         else
106                 return addr;
107
108         BUG_ON(!page);
109         return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
110                         (uintaddr & ~PAGE_MASK));
111 }
112
113 static void __kprobes patch_unmap(int fixmap)
114 {
115         clear_fixmap(fixmap);
116 }
117 /*
118  * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
119  * little-endian.
120  */
121 int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
122 {
123         int ret;
124         __le32 val;
125
126         ret = probe_kernel_read(&val, addr, AARCH64_INSN_SIZE);
127         if (!ret)
128                 *insnp = le32_to_cpu(val);
129
130         return ret;
131 }
132
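/*
 * Write one little-endian instruction through the FIX_TEXT_POKE0 fixmap
 * alias, serialized by patch_lock. Cache maintenance and cross-CPU
 * synchronization are left to the callers.
 */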
133 static int __kprobes __aarch64_insn_write(void *addr, __le32 insn)
134 {
135         void *waddr = addr;
136         unsigned long flags = 0;
137         int ret;
138
139         raw_spin_lock_irqsave(&patch_lock, flags);
140         waddr = patch_map(addr, FIX_TEXT_POKE0);
141
142         ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE);
143
144         patch_unmap(FIX_TEXT_POKE0);
145         raw_spin_unlock_irqrestore(&patch_lock, flags);
146
147         return ret;
148 }
149
150 int __kprobes aarch64_insn_write(void *addr, u32 insn)
151 {
152         return __aarch64_insn_write(addr, cpu_to_le32(insn));
153 }
154
155 bool __kprobes aarch64_insn_uses_literal(u32 insn)
156 {
157         /* ldr/ldrsw (literal), prfm (literal), adr/adrp */
158
159         return aarch64_insn_is_ldr_lit(insn) ||
160                 aarch64_insn_is_ldrsw_lit(insn) ||
161                 aarch64_insn_is_adr_adrp(insn) ||
162                 aarch64_insn_is_prfm_lit(insn);
163 }
164
165 bool __kprobes aarch64_insn_is_branch(u32 insn)
166 {
167         /* b, bl, cb*, tb*, b.cond, br, blr, ret */
168
169         return aarch64_insn_is_b(insn) ||
170                 aarch64_insn_is_bl(insn) ||
171                 aarch64_insn_is_cbz(insn) ||
172                 aarch64_insn_is_cbnz(insn) ||
173                 aarch64_insn_is_tbz(insn) ||
174                 aarch64_insn_is_tbnz(insn) ||
175                 aarch64_insn_is_ret(insn) ||
176                 aarch64_insn_is_br(insn) ||
177                 aarch64_insn_is_blr(insn) ||
178                 aarch64_insn_is_bcond(insn);
179 }
180
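/*
 * Patch a single instruction and flush the icache range covering it.
 * No cross-CPU synchronization is done here; callers must ensure no
 * other CPU can be executing the old instruction concurrently, or use
 * aarch64_insn_patch_text() which serializes via stop_machine().
 */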
181 int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
182 {
183         u32 *tp = addr;
184         int ret;
185
186         /* A64 instructions must be word aligned */
187         if ((uintptr_t)tp & 0x3)
188                 return -EINVAL;
189
190         ret = aarch64_insn_write(tp, insn);
191         if (ret == 0)
192                 __flush_icache_range((uintptr_t)tp,
193                                      (uintptr_t)tp + AARCH64_INSN_SIZE);
194
195         return ret;
196 }
197
198 struct aarch64_insn_patch {
199         void            **text_addrs;
200         u32             *new_insns;
201         int             insn_cnt;
202         atomic_t        cpu_count;
203 };
204
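/*
 * stop_machine() callback: the last CPU to arrive performs the patching
 * while the others spin until the extra cpu_count increment signals
 * completion, then execute an ISB to discard any stale instructions.
 */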
205 static int __kprobes aarch64_insn_patch_text_cb(void *arg)
206 {
207         int i, ret = 0;
208         struct aarch64_insn_patch *pp = arg;
209
210         /* The last CPU becomes master */
211         if (atomic_inc_return(&pp->cpu_count) == num_online_cpus()) {
212                 for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
213                         ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
214                                                              pp->new_insns[i]);
215                 /* Notify other processors with an additional increment. */
216                 atomic_inc(&pp->cpu_count);
217         } else {
218                 while (atomic_read(&pp->cpu_count) <= num_online_cpus())
219                         cpu_relax();
220                 isb();
221         }
222
223         return ret;
224 }
225
226 int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
227 {
228         struct aarch64_insn_patch patch = {
229                 .text_addrs = addrs,
230                 .new_insns = insns,
231                 .insn_cnt = cnt,
232                 .cpu_count = ATOMIC_INIT(0),
233         };
234
235         if (cnt <= 0)
236                 return -EINVAL;
237
238         return stop_machine_cpuslocked(aarch64_insn_patch_text_cb, &patch,
239                                        cpu_online_mask);
240 }
241
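/*
 * Return the width mask and bit position of the immediate field named by
 * @type, so that callers can extract or insert the immediate with a
 * single shift and mask.
 */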
242 static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
243                                                 u32 *maskp, int *shiftp)
244 {
245         u32 mask;
246         int shift;
247
248         switch (type) {
249         case AARCH64_INSN_IMM_26:
250                 mask = BIT(26) - 1;
251                 shift = 0;
252                 break;
253         case AARCH64_INSN_IMM_19:
254                 mask = BIT(19) - 1;
255                 shift = 5;
256                 break;
257         case AARCH64_INSN_IMM_16:
258                 mask = BIT(16) - 1;
259                 shift = 5;
260                 break;
261         case AARCH64_INSN_IMM_14:
262                 mask = BIT(14) - 1;
263                 shift = 5;
264                 break;
265         case AARCH64_INSN_IMM_12:
266                 mask = BIT(12) - 1;
267                 shift = 10;
268                 break;
269         case AARCH64_INSN_IMM_9:
270                 mask = BIT(9) - 1;
271                 shift = 12;
272                 break;
273         case AARCH64_INSN_IMM_7:
274                 mask = BIT(7) - 1;
275                 shift = 15;
276                 break;
277         case AARCH64_INSN_IMM_6:
278         case AARCH64_INSN_IMM_S:
279                 mask = BIT(6) - 1;
280                 shift = 10;
281                 break;
282         case AARCH64_INSN_IMM_R:
283                 mask = BIT(6) - 1;
284                 shift = 16;
285                 break;
286         case AARCH64_INSN_IMM_N:
287                 mask = 1;
288                 shift = 22;
289                 break;
290         default:
291                 return -EINVAL;
292         }
293
294         *maskp = mask;
295         *shiftp = shift;
296
297         return 0;
298 }
299
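/*
 * ADR/ADRP split their 21-bit immediate into immlo (2 bits at [30:29])
 * and immhi (19 bits at [23:5]); the macros below describe that split.
 */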
300 #define ADR_IMM_HILOSPLIT       2
301 #define ADR_IMM_SIZE            SZ_2M
302 #define ADR_IMM_LOMASK          ((1 << ADR_IMM_HILOSPLIT) - 1)
303 #define ADR_IMM_HIMASK          ((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
304 #define ADR_IMM_LOSHIFT         29
305 #define ADR_IMM_HISHIFT         5
306
307 u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn)
308 {
309         u32 immlo, immhi, mask;
310         int shift;
311
312         switch (type) {
313         case AARCH64_INSN_IMM_ADR:
314                 shift = 0;
315                 immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
316                 immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK;
317                 insn = (immhi << ADR_IMM_HILOSPLIT) | immlo;
318                 mask = ADR_IMM_SIZE - 1;
319                 break;
320         default:
321                 if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
322                         pr_err("aarch64_insn_decode_immediate: unknown immediate encoding %d\n",
323                                type);
324                         return 0;
325                 }
326         }
327
328         return (insn >> shift) & mask;
329 }
330
331 u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
332                                   u32 insn, u64 imm)
333 {
334         u32 immlo, immhi, mask;
335         int shift;
336
337         if (insn == AARCH64_BREAK_FAULT)
338                 return AARCH64_BREAK_FAULT;
339
340         switch (type) {
341         case AARCH64_INSN_IMM_ADR:
342                 shift = 0;
343                 immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT;
344                 imm >>= ADR_IMM_HILOSPLIT;
345                 immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT;
346                 imm = immlo | immhi;
347                 mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) |
348                         (ADR_IMM_HIMASK << ADR_IMM_HISHIFT));
349                 break;
350         default:
351                 if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
352                         pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
353                                type);
354                         return AARCH64_BREAK_FAULT;
355                 }
356         }
357
358         /* Update the immediate field. */
359         insn &= ~(mask << shift);
360         insn |= (imm & mask) << shift;
361
362         return insn;
363 }
364
365 u32 aarch64_insn_decode_register(enum aarch64_insn_register_type type,
366                                         u32 insn)
367 {
368         int shift;
369
370         switch (type) {
371         case AARCH64_INSN_REGTYPE_RT:
372         case AARCH64_INSN_REGTYPE_RD:
373                 shift = 0;
374                 break;
375         case AARCH64_INSN_REGTYPE_RN:
376                 shift = 5;
377                 break;
378         case AARCH64_INSN_REGTYPE_RT2:
379         case AARCH64_INSN_REGTYPE_RA:
380                 shift = 10;
381                 break;
382         case AARCH64_INSN_REGTYPE_RM:
383                 shift = 16;
384                 break;
385         default:
386                 pr_err("%s: unknown register type encoding %d\n", __func__,
387                        type);
388                 return 0;
389         }
390
391         return (insn >> shift) & GENMASK(4, 0);
392 }
393
394 static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
395                                         u32 insn,
396                                         enum aarch64_insn_register reg)
397 {
398         int shift;
399
400         if (insn == AARCH64_BREAK_FAULT)
401                 return AARCH64_BREAK_FAULT;
402
403         if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
404                 pr_err("%s: unknown register encoding %d\n", __func__, reg);
405                 return AARCH64_BREAK_FAULT;
406         }
407
408         switch (type) {
409         case AARCH64_INSN_REGTYPE_RT:
410         case AARCH64_INSN_REGTYPE_RD:
411                 shift = 0;
412                 break;
413         case AARCH64_INSN_REGTYPE_RN:
414                 shift = 5;
415                 break;
416         case AARCH64_INSN_REGTYPE_RT2:
417         case AARCH64_INSN_REGTYPE_RA:
418                 shift = 10;
419                 break;
420         case AARCH64_INSN_REGTYPE_RM:
421         case AARCH64_INSN_REGTYPE_RS:
422                 shift = 16;
423                 break;
424         default:
425                 pr_err("%s: unknown register type encoding %d\n", __func__,
426                        type);
427                 return AARCH64_BREAK_FAULT;
428         }
429
430         insn &= ~(GENMASK(4, 0) << shift);
431         insn |= reg << shift;
432
433         return insn;
434 }
435
436 static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
437                                          u32 insn)
438 {
439         u32 size;
440
441         switch (type) {
442         case AARCH64_INSN_SIZE_8:
443                 size = 0;
444                 break;
445         case AARCH64_INSN_SIZE_16:
446                 size = 1;
447                 break;
448         case AARCH64_INSN_SIZE_32:
449                 size = 2;
450                 break;
451         case AARCH64_INSN_SIZE_64:
452                 size = 3;
453                 break;
454         default:
455                 pr_err("%s: unknown size encoding %d\n", __func__, type);
456                 return AARCH64_BREAK_FAULT;
457         }
458
459         insn &= ~GENMASK(31, 30);
460         insn |= size << 30;
461
462         return insn;
463 }
464
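/*
 * Check that @pc and @addr are word aligned and that the displacement
 * between them fits in [-range, range). On error, @range itself is
 * returned as an out-of-band value for the callers to reject.
 */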
465 static inline long branch_imm_common(unsigned long pc, unsigned long addr,
466                                      long range)
467 {
468         long offset;
469
470         if ((pc & 0x3) || (addr & 0x3)) {
471                 pr_err("%s: A64 instructions must be word aligned\n", __func__);
472                 return range;
473         }
474
475         offset = ((long)addr - (long)pc);
476
477         if (offset < -range || offset >= range) {
478                 pr_err("%s: offset out of range\n", __func__);
479                 return range;
480         }
481
482         return offset;
483 }
484
485 u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
486                                           enum aarch64_insn_branch_type type)
487 {
488         u32 insn;
489         long offset;
490
491         /*
492          * B/BL support a [-128M, 128M) offset. The arm64 virtual address
493          * layout guarantees that all kernel and module text is within
494          * +/-128M.
495          */
496         offset = branch_imm_common(pc, addr, SZ_128M);
497         if (offset >= SZ_128M)
498                 return AARCH64_BREAK_FAULT;
499
500         switch (type) {
501         case AARCH64_INSN_BRANCH_LINK:
502                 insn = aarch64_insn_get_bl_value();
503                 break;
504         case AARCH64_INSN_BRANCH_NOLINK:
505                 insn = aarch64_insn_get_b_value();
506                 break;
507         default:
508                 pr_err("%s: unknown branch encoding %d\n", __func__, type);
509                 return AARCH64_BREAK_FAULT;
510         }
511
512         return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
513                                              offset >> 2);
514 }
515
516 u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
517                                      enum aarch64_insn_register reg,
518                                      enum aarch64_insn_variant variant,
519                                      enum aarch64_insn_branch_type type)
520 {
521         u32 insn;
522         long offset;
523
524         offset = branch_imm_common(pc, addr, SZ_1M);
525         if (offset >= SZ_1M)
526                 return AARCH64_BREAK_FAULT;
527
528         switch (type) {
529         case AARCH64_INSN_BRANCH_COMP_ZERO:
530                 insn = aarch64_insn_get_cbz_value();
531                 break;
532         case AARCH64_INSN_BRANCH_COMP_NONZERO:
533                 insn = aarch64_insn_get_cbnz_value();
534                 break;
535         default:
536                 pr_err("%s: unknown branch encoding %d\n", __func__, type);
537                 return AARCH64_BREAK_FAULT;
538         }
539
540         switch (variant) {
541         case AARCH64_INSN_VARIANT_32BIT:
542                 break;
543         case AARCH64_INSN_VARIANT_64BIT:
544                 insn |= AARCH64_INSN_SF_BIT;
545                 break;
546         default:
547                 pr_err("%s: unknown variant encoding %d\n", __func__, variant);
548                 return AARCH64_BREAK_FAULT;
549         }
550
551         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);
552
553         return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
554                                              offset >> 2);
555 }
556
557 u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
558                                      enum aarch64_insn_condition cond)
559 {
560         u32 insn;
561         long offset;
562
563         offset = branch_imm_common(pc, addr, SZ_1M);
564
565         insn = aarch64_insn_get_bcond_value();
566
567         if (cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL) {
568                 pr_err("%s: unknown condition encoding %d\n", __func__, cond);
569                 return AARCH64_BREAK_FAULT;
570         }
571         insn |= cond;
572
573         return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
574                                              offset >> 2);
575 }
576
577 u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op)
578 {
579         return aarch64_insn_get_hint_value() | op;
580 }
581
582 u32 __kprobes aarch64_insn_gen_nop(void)
583 {
584         return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
585 }
586
587 u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
588                                 enum aarch64_insn_branch_type type)
589 {
590         u32 insn;
591
592         switch (type) {
593         case AARCH64_INSN_BRANCH_NOLINK:
594                 insn = aarch64_insn_get_br_value();
595                 break;
596         case AARCH64_INSN_BRANCH_LINK:
597                 insn = aarch64_insn_get_blr_value();
598                 break;
599         case AARCH64_INSN_BRANCH_RETURN:
600                 insn = aarch64_insn_get_ret_value();
601                 break;
602         default:
603                 pr_err("%s: unknown branch encoding %d\n", __func__, type);
604                 return AARCH64_BREAK_FAULT;
605         }
606
607         return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg);
608 }
609
610 u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
611                                     enum aarch64_insn_register base,
612                                     enum aarch64_insn_register offset,
613                                     enum aarch64_insn_size_type size,
614                                     enum aarch64_insn_ldst_type type)
615 {
616         u32 insn;
617
618         switch (type) {
619         case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
620                 insn = aarch64_insn_get_ldr_reg_value();
621                 break;
622         case AARCH64_INSN_LDST_STORE_REG_OFFSET:
623                 insn = aarch64_insn_get_str_reg_value();
624                 break;
625         default:
626                 pr_err("%s: unknown load/store encoding %d\n", __func__, type);
627                 return AARCH64_BREAK_FAULT;
628         }
629
630         insn = aarch64_insn_encode_ldst_size(size, insn);
631
632         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);
633
634         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
635                                             base);
636
637         return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
638                                             offset);
639 }
640
641 u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
642                                      enum aarch64_insn_register reg2,
643                                      enum aarch64_insn_register base,
644                                      int offset,
645                                      enum aarch64_insn_variant variant,
646                                      enum aarch64_insn_ldst_type type)
647 {
648         u32 insn;
649         int shift;
650
651         switch (type) {
652         case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX:
653                 insn = aarch64_insn_get_ldp_pre_value();
654                 break;
655         case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX:
656                 insn = aarch64_insn_get_stp_pre_value();
657                 break;
658         case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX:
659                 insn = aarch64_insn_get_ldp_post_value();
660                 break;
661         case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX:
662                 insn = aarch64_insn_get_stp_post_value();
663                 break;
664         default:
665                 pr_err("%s: unknown load/store encoding %d\n", __func__, type);
666                 return AARCH64_BREAK_FAULT;
667         }
668
669         switch (variant) {
670         case AARCH64_INSN_VARIANT_32BIT:
671                 if ((offset & 0x3) || (offset < -256) || (offset > 252)) {
672                         pr_err("%s: offset must be multiples of 4 in the range of [-256, 252] %d\n",
673                                __func__, offset);
674                         return AARCH64_BREAK_FAULT;
675                 }
676                 shift = 2;
677                 break;
678         case AARCH64_INSN_VARIANT_64BIT:
679                 if ((offset & 0x7) || (offset < -512) || (offset > 504)) {
680                         pr_err("%s: offset must be multiples of 8 in the range of [-512, 504] %d\n",
681                                __func__, offset);
682                         return AARCH64_BREAK_FAULT;
683                 }
684                 shift = 3;
685                 insn |= AARCH64_INSN_SF_BIT;
686                 break;
687         default:
688                 pr_err("%s: unknown variant encoding %d\n", __func__, variant);
689                 return AARCH64_BREAK_FAULT;
690         }
691
692         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
693                                             reg1);
694
695         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
696                                             reg2);
697
698         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
699                                             base);
700
701         return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn,
702                                              offset >> shift);
703 }
704
705 u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
706                                    enum aarch64_insn_register base,
707                                    enum aarch64_insn_register state,
708                                    enum aarch64_insn_size_type size,
709                                    enum aarch64_insn_ldst_type type)
710 {
711         u32 insn;
712
713         switch (type) {
714         case AARCH64_INSN_LDST_LOAD_EX:
715                 insn = aarch64_insn_get_load_ex_value();
716                 break;
717         case AARCH64_INSN_LDST_STORE_EX:
718                 insn = aarch64_insn_get_store_ex_value();
719                 break;
720         default:
721                 pr_err("%s: unknown load/store exclusive encoding %d\n", __func__, type);
722                 return AARCH64_BREAK_FAULT;
723         }
724
725         insn = aarch64_insn_encode_ldst_size(size, insn);
726
727         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
728                                             reg);
729
730         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
731                                             base);
732
733         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
734                                             AARCH64_INSN_REG_ZR);
735
736         return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
737                                             state);
738 }
739
740 u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result,
741                            enum aarch64_insn_register address,
742                            enum aarch64_insn_register value,
743                            enum aarch64_insn_size_type size)
744 {
745         u32 insn = aarch64_insn_get_ldadd_value();
746
747         switch (size) {
748         case AARCH64_INSN_SIZE_32:
749         case AARCH64_INSN_SIZE_64:
750                 break;
751         default:
752                 pr_err("%s: unimplemented size encoding %d\n", __func__, size);
753                 return AARCH64_BREAK_FAULT;
754         }
755
756         insn = aarch64_insn_encode_ldst_size(size, insn);
757
758         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
759                                             result);
760
761         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
762                                             address);
763
764         return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
765                                             value);
766 }
767
768 u32 aarch64_insn_gen_stadd(enum aarch64_insn_register address,
769                            enum aarch64_insn_register value,
770                            enum aarch64_insn_size_type size)
771 {
772         /*
773          * STADD is simply encoded as an alias for LDADD with XZR as
774          * the destination register.
775          */
776         return aarch64_insn_gen_ldadd(AARCH64_INSN_REG_ZR, address,
777                                       value, size);
778 }
779
780 static u32 aarch64_insn_encode_prfm_imm(enum aarch64_insn_prfm_type type,
781                                         enum aarch64_insn_prfm_target target,
782                                         enum aarch64_insn_prfm_policy policy,
783                                         u32 insn)
784 {
785         u32 imm_type = 0, imm_target = 0, imm_policy = 0;
786
787         switch (type) {
788         case AARCH64_INSN_PRFM_TYPE_PLD:
789                 break;
790         case AARCH64_INSN_PRFM_TYPE_PLI:
791                 imm_type = BIT(0);
792                 break;
793         case AARCH64_INSN_PRFM_TYPE_PST:
794                 imm_type = BIT(1);
795                 break;
796         default:
797                 pr_err("%s: unknown prfm type encoding %d\n", __func__, type);
798                 return AARCH64_BREAK_FAULT;
799         }
800
801         switch (target) {
802         case AARCH64_INSN_PRFM_TARGET_L1:
803                 break;
804         case AARCH64_INSN_PRFM_TARGET_L2:
805                 imm_target = BIT(0);
806                 break;
807         case AARCH64_INSN_PRFM_TARGET_L3:
808                 imm_target = BIT(1);
809                 break;
810         default:
811                 pr_err("%s: unknown prfm target encoding %d\n", __func__, target);
812                 return AARCH64_BREAK_FAULT;
813         }
814
815         switch (policy) {
816         case AARCH64_INSN_PRFM_POLICY_KEEP:
817                 break;
818         case AARCH64_INSN_PRFM_POLICY_STRM:
819                 imm_policy = BIT(0);
820                 break;
821         default:
822                 pr_err("%s: unknown prfm policy encoding %d\n", __func__, policy);
823                 return AARCH64_BREAK_FAULT;
824         }
825
826         /* The 5-bit prefetch operation (prfop) is encoded in the Rt field. */
827         insn &= ~GENMASK(4, 0);
828         insn |= imm_policy | (imm_target << 1) | (imm_type << 3);
829
830         return insn;
831 }
832
833 u32 aarch64_insn_gen_prefetch(enum aarch64_insn_register base,
834                               enum aarch64_insn_prfm_type type,
835                               enum aarch64_insn_prfm_target target,
836                               enum aarch64_insn_prfm_policy policy)
837 {
838         u32 insn = aarch64_insn_get_prfm_value();
839
840         insn = aarch64_insn_encode_ldst_size(AARCH64_INSN_SIZE_64, insn);
841
842         insn = aarch64_insn_encode_prfm_imm(type, target, policy, insn);
843
844         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
845                                             base);
846
847         return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, 0);
848 }
849
850 u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
851                                  enum aarch64_insn_register src,
852                                  int imm, enum aarch64_insn_variant variant,
853                                  enum aarch64_insn_adsb_type type)
854 {
855         u32 insn;
856
857         switch (type) {
858         case AARCH64_INSN_ADSB_ADD:
859                 insn = aarch64_insn_get_add_imm_value();
860                 break;
861         case AARCH64_INSN_ADSB_SUB:
862                 insn = aarch64_insn_get_sub_imm_value();
863                 break;
864         case AARCH64_INSN_ADSB_ADD_SETFLAGS:
865                 insn = aarch64_insn_get_adds_imm_value();
866                 break;
867         case AARCH64_INSN_ADSB_SUB_SETFLAGS:
868                 insn = aarch64_insn_get_subs_imm_value();
869                 break;
870         default:
871                 pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
872                 return AARCH64_BREAK_FAULT;
873         }
874
875         switch (variant) {
876         case AARCH64_INSN_VARIANT_32BIT:
877                 break;
878         case AARCH64_INSN_VARIANT_64BIT:
879                 insn |= AARCH64_INSN_SF_BIT;
880                 break;
881         default:
882                 pr_err("%s: unknown variant encoding %d\n", __func__, variant);
883                 return AARCH64_BREAK_FAULT;
884         }
885
886         /* We can't encode more than a 24bit value (12bit + 12bit shift) */
887         if (imm & ~(BIT(24) - 1))
888                 goto out;
889
890         /* If we have something in the top 12 bits... */
891         if (imm & ~(SZ_4K - 1)) {
892                 /* ... and in the low 12 bits -> error */
893                 if (imm & (SZ_4K - 1))
894                         goto out;
895
896                 imm >>= 12;
897                 insn |= AARCH64_INSN_LSL_12;
898         }
899
900         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
901
902         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
903
904         return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);
905
906 out:
907         pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
908         return AARCH64_BREAK_FAULT;
909 }
910
911 u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
912                               enum aarch64_insn_register src,
913                               int immr, int imms,
914                               enum aarch64_insn_variant variant,
915                               enum aarch64_insn_bitfield_type type)
916 {
917         u32 insn;
918         u32 mask;
919
920         switch (type) {
921         case AARCH64_INSN_BITFIELD_MOVE:
922                 insn = aarch64_insn_get_bfm_value();
923                 break;
924         case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED:
925                 insn = aarch64_insn_get_ubfm_value();
926                 break;
927         case AARCH64_INSN_BITFIELD_MOVE_SIGNED:
928                 insn = aarch64_insn_get_sbfm_value();
929                 break;
930         default:
931                 pr_err("%s: unknown bitfield encoding %d\n", __func__, type);
932                 return AARCH64_BREAK_FAULT;
933         }
934
935         switch (variant) {
936         case AARCH64_INSN_VARIANT_32BIT:
937                 mask = GENMASK(4, 0);
938                 break;
939         case AARCH64_INSN_VARIANT_64BIT:
940                 insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;
941                 mask = GENMASK(5, 0);
942                 break;
943         default:
944                 pr_err("%s: unknown variant encoding %d\n", __func__, variant);
945                 return AARCH64_BREAK_FAULT;
946         }
947
948         if (immr & ~mask) {
949                 pr_err("%s: invalid immr encoding %d\n", __func__, immr);
950                 return AARCH64_BREAK_FAULT;
951         }
952         if (imms & ~mask) {
953                 pr_err("%s: invalid imms encoding %d\n", __func__, imms);
954                 return AARCH64_BREAK_FAULT;
955         }
956
957         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
958
959         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
960
961         insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);
962
963         return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
964 }
965
966 u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
967                               int imm, int shift,
968                               enum aarch64_insn_variant variant,
969                               enum aarch64_insn_movewide_type type)
970 {
971         u32 insn;
972
973         switch (type) {
974         case AARCH64_INSN_MOVEWIDE_ZERO:
975                 insn = aarch64_insn_get_movz_value();
976                 break;
977         case AARCH64_INSN_MOVEWIDE_KEEP:
978                 insn = aarch64_insn_get_movk_value();
979                 break;
980         case AARCH64_INSN_MOVEWIDE_INVERSE:
981                 insn = aarch64_insn_get_movn_value();
982                 break;
983         default:
984                 pr_err("%s: unknown movewide encoding %d\n", __func__, type);
985                 return AARCH64_BREAK_FAULT;
986         }
987
988         if (imm & ~(SZ_64K - 1)) {
989                 pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
990                 return AARCH64_BREAK_FAULT;
991         }
992
993         switch (variant) {
994         case AARCH64_INSN_VARIANT_32BIT:
995                 if (shift != 0 && shift != 16) {
996                         pr_err("%s: invalid shift encoding %d\n", __func__,
997                                shift);
998                         return AARCH64_BREAK_FAULT;
999                 }
1000                 break;
1001         case AARCH64_INSN_VARIANT_64BIT:
1002                 insn |= AARCH64_INSN_SF_BIT;
1003                 if (shift != 0 && shift != 16 && shift != 32 && shift != 48) {
1004                         pr_err("%s: invalid shift encoding %d\n", __func__,
1005                                shift);
1006                         return AARCH64_BREAK_FAULT;
1007                 }
1008                 break;
1009         default:
1010                 pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1011                 return AARCH64_BREAK_FAULT;
1012         }
1013
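        /* shift is 0/16/32/48; encode hw = shift / 16 into bits [22:21] */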
1014         insn |= (shift >> 4) << 21;
1015
1016         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
1017
1018         return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
1019 }
1020
1021 u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
1022                                          enum aarch64_insn_register src,
1023                                          enum aarch64_insn_register reg,
1024                                          int shift,
1025                                          enum aarch64_insn_variant variant,
1026                                          enum aarch64_insn_adsb_type type)
1027 {
1028         u32 insn;
1029
1030         switch (type) {
1031         case AARCH64_INSN_ADSB_ADD:
1032                 insn = aarch64_insn_get_add_value();
1033                 break;
1034         case AARCH64_INSN_ADSB_SUB:
1035                 insn = aarch64_insn_get_sub_value();
1036                 break;
1037         case AARCH64_INSN_ADSB_ADD_SETFLAGS:
1038                 insn = aarch64_insn_get_adds_value();
1039                 break;
1040         case AARCH64_INSN_ADSB_SUB_SETFLAGS:
1041                 insn = aarch64_insn_get_subs_value();
1042                 break;
1043         default:
1044                 pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
1045                 return AARCH64_BREAK_FAULT;
1046         }
1047
1048         switch (variant) {
1049         case AARCH64_INSN_VARIANT_32BIT:
1050                 if (shift & ~(SZ_32 - 1)) {
1051                         pr_err("%s: invalid shift encoding %d\n", __func__,
1052                                shift);
1053                         return AARCH64_BREAK_FAULT;
1054                 }
1055                 break;
1056         case AARCH64_INSN_VARIANT_64BIT:
1057                 insn |= AARCH64_INSN_SF_BIT;
1058                 if (shift & ~(SZ_64 - 1)) {
1059                         pr_err("%s: invalid shift encoding %d\n", __func__,
1060                                shift);
1061                         return AARCH64_BREAK_FAULT;
1062                 }
1063                 break;
1064         default:
1065                 pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1066                 return AARCH64_BREAK_FAULT;
1067         }
1068
1069
1070         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
1071
1072         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
1073
1074         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
1075
1076         return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
1077 }
1078
1079 u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
1080                            enum aarch64_insn_register src,
1081                            enum aarch64_insn_variant variant,
1082                            enum aarch64_insn_data1_type type)
1083 {
1084         u32 insn;
1085
1086         switch (type) {
1087         case AARCH64_INSN_DATA1_REVERSE_16:
1088                 insn = aarch64_insn_get_rev16_value();
1089                 break;
1090         case AARCH64_INSN_DATA1_REVERSE_32:
1091                 insn = aarch64_insn_get_rev32_value();
1092                 break;
1093         case AARCH64_INSN_DATA1_REVERSE_64:
1094                 if (variant != AARCH64_INSN_VARIANT_64BIT) {
1095                         pr_err("%s: invalid variant for reverse64 %d\n",
1096                                __func__, variant);
1097                         return AARCH64_BREAK_FAULT;
1098                 }
1099                 insn = aarch64_insn_get_rev64_value();
1100                 break;
1101         default:
1102                 pr_err("%s: unknown data1 encoding %d\n", __func__, type);
1103                 return AARCH64_BREAK_FAULT;
1104         }
1105
1106         switch (variant) {
1107         case AARCH64_INSN_VARIANT_32BIT:
1108                 break;
1109         case AARCH64_INSN_VARIANT_64BIT:
1110                 insn |= AARCH64_INSN_SF_BIT;
1111                 break;
1112         default:
1113                 pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1114                 return AARCH64_BREAK_FAULT;
1115         }
1116
1117         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
1118
1119         return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
1120 }
1121
1122 u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
1123                            enum aarch64_insn_register src,
1124                            enum aarch64_insn_register reg,
1125                            enum aarch64_insn_variant variant,
1126                            enum aarch64_insn_data2_type type)
1127 {
1128         u32 insn;
1129
1130         switch (type) {
1131         case AARCH64_INSN_DATA2_UDIV:
1132                 insn = aarch64_insn_get_udiv_value();
1133                 break;
1134         case AARCH64_INSN_DATA2_SDIV:
1135                 insn = aarch64_insn_get_sdiv_value();
1136                 break;
1137         case AARCH64_INSN_DATA2_LSLV:
1138                 insn = aarch64_insn_get_lslv_value();
1139                 break;
1140         case AARCH64_INSN_DATA2_LSRV:
1141                 insn = aarch64_insn_get_lsrv_value();
1142                 break;
1143         case AARCH64_INSN_DATA2_ASRV:
1144                 insn = aarch64_insn_get_asrv_value();
1145                 break;
1146         case AARCH64_INSN_DATA2_RORV:
1147                 insn = aarch64_insn_get_rorv_value();
1148                 break;
1149         default:
1150                 pr_err("%s: unknown data2 encoding %d\n", __func__, type);
1151                 return AARCH64_BREAK_FAULT;
1152         }
1153
1154         switch (variant) {
1155         case AARCH64_INSN_VARIANT_32BIT:
1156                 break;
1157         case AARCH64_INSN_VARIANT_64BIT:
1158                 insn |= AARCH64_INSN_SF_BIT;
1159                 break;
1160         default:
1161                 pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1162                 return AARCH64_BREAK_FAULT;
1163         }
1164
1165         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
1166
1167         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
1168
1169         return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
1170 }
1171
1172 u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
1173                            enum aarch64_insn_register src,
1174                            enum aarch64_insn_register reg1,
1175                            enum aarch64_insn_register reg2,
1176                            enum aarch64_insn_variant variant,
1177                            enum aarch64_insn_data3_type type)
1178 {
1179         u32 insn;
1180
1181         switch (type) {
1182         case AARCH64_INSN_DATA3_MADD:
1183                 insn = aarch64_insn_get_madd_value();
1184                 break;
1185         case AARCH64_INSN_DATA3_MSUB:
1186                 insn = aarch64_insn_get_msub_value();
1187                 break;
1188         default:
1189                 pr_err("%s: unknown data3 encoding %d\n", __func__, type);
1190                 return AARCH64_BREAK_FAULT;
1191         }
1192
1193         switch (variant) {
1194         case AARCH64_INSN_VARIANT_32BIT:
1195                 break;
1196         case AARCH64_INSN_VARIANT_64BIT:
1197                 insn |= AARCH64_INSN_SF_BIT;
1198                 break;
1199         default:
1200                 pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1201                 return AARCH64_BREAK_FAULT;
1202         }
1203
1204         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
1205
1206         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RA, insn, src);
1207
1208         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
1209                                             reg1);
1210
1211         return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
1212                                             reg2);
1213 }
1214
1215 u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
1216                                          enum aarch64_insn_register src,
1217                                          enum aarch64_insn_register reg,
1218                                          int shift,
1219                                          enum aarch64_insn_variant variant,
1220                                          enum aarch64_insn_logic_type type)
1221 {
1222         u32 insn;
1223
1224         switch (type) {
1225         case AARCH64_INSN_LOGIC_AND:
1226                 insn = aarch64_insn_get_and_value();
1227                 break;
1228         case AARCH64_INSN_LOGIC_BIC:
1229                 insn = aarch64_insn_get_bic_value();
1230                 break;
1231         case AARCH64_INSN_LOGIC_ORR:
1232                 insn = aarch64_insn_get_orr_value();
1233                 break;
1234         case AARCH64_INSN_LOGIC_ORN:
1235                 insn = aarch64_insn_get_orn_value();
1236                 break;
1237         case AARCH64_INSN_LOGIC_EOR:
1238                 insn = aarch64_insn_get_eor_value();
1239                 break;
1240         case AARCH64_INSN_LOGIC_EON:
1241                 insn = aarch64_insn_get_eon_value();
1242                 break;
1243         case AARCH64_INSN_LOGIC_AND_SETFLAGS:
1244                 insn = aarch64_insn_get_ands_value();
1245                 break;
1246         case AARCH64_INSN_LOGIC_BIC_SETFLAGS:
1247                 insn = aarch64_insn_get_bics_value();
1248                 break;
1249         default:
1250                 pr_err("%s: unknown logical encoding %d\n", __func__, type);
1251                 return AARCH64_BREAK_FAULT;
1252         }
1253
1254         switch (variant) {
1255         case AARCH64_INSN_VARIANT_32BIT:
1256                 if (shift & ~(SZ_32 - 1)) {
1257                         pr_err("%s: invalid shift encoding %d\n", __func__,
1258                                shift);
1259                         return AARCH64_BREAK_FAULT;
1260                 }
1261                 break;
1262         case AARCH64_INSN_VARIANT_64BIT:
1263                 insn |= AARCH64_INSN_SF_BIT;
1264                 if (shift & ~(SZ_64 - 1)) {
1265                         pr_err("%s: invalid shift encoding %d\n", __func__,
1266                                shift);
1267                         return AARCH64_BREAK_FAULT;
1268                 }
1269                 break;
1270         default:
1271                 pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1272                 return AARCH64_BREAK_FAULT;
1273         }
1274
1275
1276         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
1277
1278         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
1279
1280         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
1281
1282         return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
1283 }
1284
1285 u32 aarch64_insn_gen_adr(unsigned long pc, unsigned long addr,
1286                          enum aarch64_insn_register reg,
1287                          enum aarch64_insn_adr_type type)
1288 {
1289         u32 insn;
1290         s32 offset;
1291
1292         switch (type) {
1293         case AARCH64_INSN_ADR_TYPE_ADR:
1294                 insn = aarch64_insn_get_adr_value();
1295                 offset = addr - pc;
1296                 break;
1297         case AARCH64_INSN_ADR_TYPE_ADRP:
1298                 insn = aarch64_insn_get_adrp_value();
1299                 offset = (addr - ALIGN_DOWN(pc, SZ_4K)) >> 12;
1300                 break;
1301         default:
1302                 pr_err("%s: unknown adr encoding %d\n", __func__, type);
1303                 return AARCH64_BREAK_FAULT;
1304         }
1305
1306         if (offset < -SZ_1M || offset >= SZ_1M)
1307                 return AARCH64_BREAK_FAULT;
1308
1309         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, reg);
1310
1311         return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn, offset);
1312 }
1313
1314 /*
1315  * Decode the imm field of a branch, and return the byte offset as a
1316  * signed value (so it can be used when computing a new branch
1317  * target).
1318  */
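/*
 * The immediates are stored in units of 4 bytes, so each shift pair
 * below both sign-extends the field and scales it back to a byte offset
 * (e.g. (imm << 6) >> 4 == sign_extend(imm26) * 4).
 */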
1319 s32 aarch64_get_branch_offset(u32 insn)
1320 {
1321         s32 imm;
1322
1323         if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
1324                 imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
1325                 return (imm << 6) >> 4;
1326         }
1327
1328         if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
1329             aarch64_insn_is_bcond(insn)) {
1330                 imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn);
1331                 return (imm << 13) >> 11;
1332         }
1333
1334         if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) {
1335                 imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_14, insn);
1336                 return (imm << 18) >> 16;
1337         }
1338
1339         /* Unhandled instruction */
1340         BUG();
1341 }
1342
1343 /*
1344  * Encode the displacement of a branch in the imm field and return the
1345  * updated instruction.
1346  */
1347 u32 aarch64_set_branch_offset(u32 insn, s32 offset)
1348 {
1349         if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn))
1350                 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
1351                                                      offset >> 2);
1352
1353         if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
1354             aarch64_insn_is_bcond(insn))
1355                 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
1356                                                      offset >> 2);
1357
1358         if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn))
1359                 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_14, insn,
1360                                                      offset >> 2);
1361
1362         /* Unhandled instruction */
1363         BUG();
1364 }
1365
1366 s32 aarch64_insn_adrp_get_offset(u32 insn)
1367 {
1368         BUG_ON(!aarch64_insn_is_adrp(insn));
1369         return aarch64_insn_decode_immediate(AARCH64_INSN_IMM_ADR, insn) << 12;
1370 }
1371
1372 u32 aarch64_insn_adrp_set_offset(u32 insn, s32 offset)
1373 {
1374         BUG_ON(!aarch64_insn_is_adrp(insn));
1375         return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn,
1376                                                 offset >> 12);
1377 }
1378
1379 /*
1380  * Extract the Op/CR data from a msr/mrs instruction.
1381  */
1382 u32 aarch64_insn_extract_system_reg(u32 insn)
1383 {
1384         return (insn & 0x1FFFE0) >> 5;
1385 }
1386
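/*
 * In the T32 (Thumb-2) instruction set a first halfword of 0xe800 or
 * above marks a 32-bit (wide) encoding; anything below is 16-bit.
 */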
1387 bool aarch32_insn_is_wide(u32 insn)
1388 {
1389         return insn >= 0xe800;
1390 }
1391
1392 /*
1393  * Extract the 4-bit register number at bit @offset of an AArch32 instruction.
1394  */
1395 u32 aarch32_insn_extract_reg_num(u32 insn, int offset)
1396 {
1397         return (insn & (0xf << offset)) >> offset;
1398 }
1399
1400 #define OPC2_MASK       0x7
1401 #define OPC2_OFFSET     5
1402 u32 aarch32_insn_mcr_extract_opc2(u32 insn)
1403 {
1404         return (insn & (OPC2_MASK << OPC2_OFFSET)) >> OPC2_OFFSET;
1405 }
1406
1407 #define CRM_MASK        0xf
1408 u32 aarch32_insn_mcr_extract_crm(u32 insn)
1409 {
1410         return insn & CRM_MASK;
1411 }
1412
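/*
 * Condition code helpers: each returns true if its condition holds for
 * the given PSTATE/CPSR flags. They are indexed by condition number via
 * aarch32_opcode_cond_checks[] below.
 */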
1413 static bool __kprobes __check_eq(unsigned long pstate)
1414 {
1415         return (pstate & PSR_Z_BIT) != 0;
1416 }
1417
1418 static bool __kprobes __check_ne(unsigned long pstate)
1419 {
1420         return (pstate & PSR_Z_BIT) == 0;
1421 }
1422
1423 static bool __kprobes __check_cs(unsigned long pstate)
1424 {
1425         return (pstate & PSR_C_BIT) != 0;
1426 }
1427
1428 static bool __kprobes __check_cc(unsigned long pstate)
1429 {
1430         return (pstate & PSR_C_BIT) == 0;
1431 }
1432
1433 static bool __kprobes __check_mi(unsigned long pstate)
1434 {
1435         return (pstate & PSR_N_BIT) != 0;
1436 }
1437
1438 static bool __kprobes __check_pl(unsigned long pstate)
1439 {
1440         return (pstate & PSR_N_BIT) == 0;
1441 }
1442
1443 static bool __kprobes __check_vs(unsigned long pstate)
1444 {
1445         return (pstate & PSR_V_BIT) != 0;
1446 }
1447
1448 static bool __kprobes __check_vc(unsigned long pstate)
1449 {
1450         return (pstate & PSR_V_BIT) == 0;
1451 }
1452
1453 static bool __kprobes __check_hi(unsigned long pstate)
1454 {
1455         pstate &= ~(pstate >> 1);       /* PSR_C_BIT &= ~PSR_Z_BIT */
1456         return (pstate & PSR_C_BIT) != 0;
1457 }
1458
1459 static bool __kprobes __check_ls(unsigned long pstate)
1460 {
1461         pstate &= ~(pstate >> 1);       /* PSR_C_BIT &= ~PSR_Z_BIT */
1462         return (pstate & PSR_C_BIT) == 0;
1463 }
1464
1465 static bool __kprobes __check_ge(unsigned long pstate)
1466 {
1467         pstate ^= (pstate << 3);        /* PSR_N_BIT ^= PSR_V_BIT */
1468         return (pstate & PSR_N_BIT) == 0;
1469 }
1470
1471 static bool __kprobes __check_lt(unsigned long pstate)
1472 {
1473         pstate ^= (pstate << 3);        /* PSR_N_BIT ^= PSR_V_BIT */
1474         return (pstate & PSR_N_BIT) != 0;
1475 }
1476
1477 static bool __kprobes __check_gt(unsigned long pstate)
1478 {
1479         /* PSR_N_BIT ^= PSR_V_BIT */
1480         unsigned long temp = pstate ^ (pstate << 3);
1481
1482         temp |= (pstate << 1);  /* PSR_N_BIT |= PSR_Z_BIT */
1483         return (temp & PSR_N_BIT) == 0;
1484 }
1485
1486 static bool __kprobes __check_le(unsigned long pstate)
1487 {
1488         /* PSR_N_BIT ^= PSR_V_BIT */
1489         unsigned long temp = pstate ^ (pstate << 3);
1490
1491         temp |= (pstate << 1);  /* PSR_N_BIT |= PSR_Z_BIT */
1492         return (temp & PSR_N_BIT) != 0;
1493 }
1494
1495 static bool __kprobes __check_al(unsigned long pstate)
1496 {
1497         return true;
1498 }
1499
1500 /*
1501  * Note that the ARMv8 ARM calls condition code 0b1111 "nv", but states that
1502  * it behaves identically to 0b1110 ("al").
1503  */
1504 pstate_check_t * const aarch32_opcode_cond_checks[16] = {
1505         __check_eq, __check_ne, __check_cs, __check_cc,
1506         __check_mi, __check_pl, __check_vs, __check_vc,
1507         __check_hi, __check_ls, __check_ge, __check_lt,
1508         __check_gt, __check_le, __check_al, __check_al
1509 };
1510
1511 static bool range_of_ones(u64 val)
1512 {
1513         /* Doesn't handle full ones or full zeroes */
1514         u64 sval = val >> __ffs64(val);
1515
1516         /* One of Sean Eron Anderson's bithack tricks */
1517         return ((sval + 1) & (sval)) == 0;
1518 }
1519
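/*
 * Encode @imm as an A64 "bitmask" logical immediate: the value must be a
 * repeating element of power-of-two size containing a rotated contiguous
 * run of ones; N:immr:imms encode the element size, rotation and run
 * length. Returns AARCH64_BREAK_FAULT if @imm cannot be represented.
 */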
1520 static u32 aarch64_encode_immediate(u64 imm,
1521                                     enum aarch64_insn_variant variant,
1522                                     u32 insn)
1523 {
1524         unsigned int immr, imms, n, ones, ror, esz, tmp;
1525         u64 mask;
1526
1527         switch (variant) {
1528         case AARCH64_INSN_VARIANT_32BIT:
1529                 esz = 32;
1530                 break;
1531         case AARCH64_INSN_VARIANT_64BIT:
1532                 insn |= AARCH64_INSN_SF_BIT;
1533                 esz = 64;
1534                 break;
1535         default:
1536                 pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1537                 return AARCH64_BREAK_FAULT;
1538         }
1539
1540         mask = GENMASK(esz - 1, 0);
1541
1542         /* Can't encode full zeroes, full ones, or value wider than the mask */
1543         if (!imm || imm == mask || imm & ~mask)
1544                 return AARCH64_BREAK_FAULT;
1545
1546         /*
1547          * Inverse of Replicate(). Try to spot a repeating pattern
1548          * with a pow2 stride.
1549          */
1550         for (tmp = esz / 2; tmp >= 2; tmp /= 2) {
1551                 u64 emask = BIT(tmp) - 1;
1552
1553                 if ((imm & emask) != ((imm >> tmp) & emask))
1554                         break;
1555
1556                 esz = tmp;
1557                 mask = emask;
1558         }
1559
1560         /* N is only set if we're encoding a 64bit value */
1561         n = esz == 64;
1562
1563         /* Trim imm to the element size */
1564         imm &= mask;
1565
1566         /* That's how many ones we need to encode */
1567         ones = hweight64(imm);
1568
1569         /*
1570          * imms is set to (ones - 1), prefixed with a string of ones
1571          * and a zero if they fit. Cap it to 6 bits.
1572          */
1573         imms  = ones - 1;
1574         imms |= 0xf << ffs(esz);
1575         imms &= BIT(6) - 1;
1576
1577         /* Compute the rotation */
1578         if (range_of_ones(imm)) {
1579                 /*
1580                  * Pattern: 0..01..10..0
1581                  *
1582                  * Compute how much rotation we need to align it right
1583                  */
1584                 ror = __ffs64(imm);
1585         } else {
1586                 /*
1587                  * Pattern: 0..01..10..01..1
1588                  *
1589                  * Fill the unused top bits with ones, and check if
1590                  * the result is a valid immediate (all ones with a
1591                  * contiguous range of zeroes).
1592                  */
1593                 imm |= ~mask;
1594                 if (!range_of_ones(~imm))
1595                         return AARCH64_BREAK_FAULT;
1596
1597                 /*
1598                  * Compute the rotation to get a continuous set of
1599                  * ones, with the first bit set at position 0
1600                  */
1601                 ror = fls(~imm);
1602         }
1603
1604         /*
1605          * immr is the number of bits we need to rotate back to the
1606          * original set of ones. Note that this is relative to the
1607          * element size...
1608          */
1609         immr = (esz - ror) % esz;
1610
1611         insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, n);
1612         insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);
1613         return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
1614 }
1615
1616 u32 aarch64_insn_gen_logical_immediate(enum aarch64_insn_logic_type type,
1617                                        enum aarch64_insn_variant variant,
1618                                        enum aarch64_insn_register Rn,
1619                                        enum aarch64_insn_register Rd,
1620                                        u64 imm)
1621 {
1622         u32 insn;
1623
1624         switch (type) {
1625         case AARCH64_INSN_LOGIC_AND:
1626                 insn = aarch64_insn_get_and_imm_value();
1627                 break;
1628         case AARCH64_INSN_LOGIC_ORR:
1629                 insn = aarch64_insn_get_orr_imm_value();
1630                 break;
1631         case AARCH64_INSN_LOGIC_EOR:
1632                 insn = aarch64_insn_get_eor_imm_value();
1633                 break;
1634         case AARCH64_INSN_LOGIC_AND_SETFLAGS:
1635                 insn = aarch64_insn_get_ands_imm_value();
1636                 break;
1637         default:
1638                 pr_err("%s: unknown logical encoding %d\n", __func__, type);
1639                 return AARCH64_BREAK_FAULT;
1640         }
1641
1642         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd);
1643         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
1644         return aarch64_encode_immediate(imm, variant, insn);
1645 }
1646
1647 u32 aarch64_insn_gen_extr(enum aarch64_insn_variant variant,
1648                           enum aarch64_insn_register Rm,
1649                           enum aarch64_insn_register Rn,
1650                           enum aarch64_insn_register Rd,
1651                           u8 lsb)
1652 {
1653         u32 insn;
1654
1655         insn = aarch64_insn_get_extr_value();
1656
1657         switch (variant) {
1658         case AARCH64_INSN_VARIANT_32BIT:
1659                 if (lsb > 31)
1660                         return AARCH64_BREAK_FAULT;
1661                 break;
1662         case AARCH64_INSN_VARIANT_64BIT:
1663                 if (lsb > 63)
1664                         return AARCH64_BREAK_FAULT;
1665                 insn |= AARCH64_INSN_SF_BIT;
1666                 insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, 1);
1667                 break;
1668         default:
1669                 pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1670                 return AARCH64_BREAK_FAULT;
1671         }
1672
1673         insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, lsb);
1674         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd);
1675         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
1676         return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, Rm);
1677 }