GNU Linux-libre 4.19.281-gnu1
[releases.git] / arch / arm64 / kernel / insn.c
1 /*
2  * Copyright (C) 2013 Huawei Ltd.
3  * Author: Jiang Liu <liuj97@gmail.com>
4  *
5  * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
18  */
19 #include <linux/bitops.h>
20 #include <linux/bug.h>
21 #include <linux/compiler.h>
22 #include <linux/kernel.h>
23 #include <linux/mm.h>
24 #include <linux/smp.h>
25 #include <linux/spinlock.h>
26 #include <linux/stop_machine.h>
27 #include <linux/types.h>
28 #include <linux/uaccess.h>
29
30 #include <asm/cacheflush.h>
31 #include <asm/debug-monitors.h>
32 #include <asm/fixmap.h>
33 #include <asm/insn.h>
34 #include <asm/kprobes.h>
35
/* Single-bit fields shared by several A64 instruction encodings. */
#define AARCH64_INSN_SF_BIT	BIT(31)	/* 0: 32-bit (Wn), 1: 64-bit (Xn) datasize */
#define AARCH64_INSN_N_BIT	BIT(22)	/* 'N' bit of logical (bitmask) immediates */
#define AARCH64_INSN_LSL_12	BIT(22)	/* add/sub immediate: shift imm12 left by 12 */
/*
 * Top-level decode table, indexed by the op0 field (bits [28:25]) of an
 * A64 instruction word; see aarch64_get_insn_class().
 */
static int aarch64_insn_encoding_class[] = {
	AARCH64_INSN_CLS_UNKNOWN,	/* 0b0000: unallocated */
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_LDST,		/* 0b0100 */
	AARCH64_INSN_CLS_DP_REG,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_FPSIMD,
	AARCH64_INSN_CLS_DP_IMM,	/* 0b1000 */
	AARCH64_INSN_CLS_DP_IMM,
	AARCH64_INSN_CLS_BR_SYS,
	AARCH64_INSN_CLS_BR_SYS,
	AARCH64_INSN_CLS_LDST,		/* 0b1100 */
	AARCH64_INSN_CLS_DP_REG,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_FPSIMD,
};
58
59 enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn)
60 {
61         return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
62 }
63
64 /* NOP is an alias of HINT */
65 bool __kprobes aarch64_insn_is_nop(u32 insn)
66 {
67         if (!aarch64_insn_is_hint(insn))
68                 return false;
69
70         switch (insn & 0xFE0) {
71         case AARCH64_INSN_HINT_YIELD:
72         case AARCH64_INSN_HINT_WFE:
73         case AARCH64_INSN_HINT_WFI:
74         case AARCH64_INSN_HINT_SEV:
75         case AARCH64_INSN_HINT_SEVL:
76                 return false;
77         default:
78                 return true;
79         }
80 }
81
82 bool aarch64_insn_is_branch_imm(u32 insn)
83 {
84         return (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) ||
85                 aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) ||
86                 aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
87                 aarch64_insn_is_bcond(insn));
88 }
89
/* Serialises use of the text-poke fixmap slot across CPUs. */
static DEFINE_RAW_SPINLOCK(patch_lock);

/*
 * Return a writable alias of @addr so text can be patched even when the
 * kernel/module mapping is read-only.  Module text under
 * CONFIG_STRICT_MODULE_RWX lives in vmalloc space, so its page is found
 * with vmalloc_to_page(); core kernel text is resolved via its physical
 * address.  Without STRICT_MODULE_RWX, module text is writable in place
 * and @addr is returned unchanged.  Otherwise the page is installed in
 * slot @fixmap; the caller must undo this with patch_unmap().
 */
static void __kprobes *patch_map(void *addr, int fixmap)
{
	unsigned long uintaddr = (uintptr_t) addr;
	bool module = !core_kernel_text(uintaddr);
	struct page *page;

	if (module && IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
		page = vmalloc_to_page(addr);
	else if (!module)
		page = phys_to_page(__pa_symbol(addr));
	else
		return addr;

	BUG_ON(!page);
	/* Preserve the sub-page offset of @addr within the alias. */
	return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
			(uintaddr & ~PAGE_MASK));
}
109
/* Tear down the temporary fixmap alias installed by patch_map(). */
static void __kprobes patch_unmap(int fixmap)
{
	clear_fixmap(fixmap);
}
114 /*
115  * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
116  * little-endian.
117  */
118 int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
119 {
120         int ret;
121         __le32 val;
122
123         ret = probe_kernel_read(&val, addr, AARCH64_INSN_SIZE);
124         if (!ret)
125                 *insnp = le32_to_cpu(val);
126
127         return ret;
128 }
129
/*
 * Write one instruction through a temporary fixmap alias of @addr so
 * that read-only text mappings can still be patched.  @insn must
 * already be little-endian (the in-memory instruction format).  Runs
 * with IRQs off under patch_lock since the fixmap slot is shared.
 */
static int __kprobes __aarch64_insn_write(void *addr, __le32 insn)
{
	void *waddr = addr;
	unsigned long flags = 0;
	int ret;

	raw_spin_lock_irqsave(&patch_lock, flags);
	waddr = patch_map(addr, FIX_TEXT_POKE0);

	ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE);

	patch_unmap(FIX_TEXT_POKE0);
	raw_spin_unlock_irqrestore(&patch_lock, flags);

	return ret;
}
146
147 int __kprobes aarch64_insn_write(void *addr, u32 insn)
148 {
149         return __aarch64_insn_write(addr, cpu_to_le32(insn));
150 }
151
152 bool __kprobes aarch64_insn_uses_literal(u32 insn)
153 {
154         /* ldr/ldrsw (literal), prfm */
155
156         return aarch64_insn_is_ldr_lit(insn) ||
157                 aarch64_insn_is_ldrsw_lit(insn) ||
158                 aarch64_insn_is_adr_adrp(insn) ||
159                 aarch64_insn_is_prfm_lit(insn);
160 }
161
162 bool __kprobes aarch64_insn_is_branch(u32 insn)
163 {
164         /* b, bl, cb*, tb*, b.cond, br, blr */
165
166         return aarch64_insn_is_b(insn) ||
167                 aarch64_insn_is_bl(insn) ||
168                 aarch64_insn_is_cbz(insn) ||
169                 aarch64_insn_is_cbnz(insn) ||
170                 aarch64_insn_is_tbz(insn) ||
171                 aarch64_insn_is_tbnz(insn) ||
172                 aarch64_insn_is_ret(insn) ||
173                 aarch64_insn_is_br(insn) ||
174                 aarch64_insn_is_blr(insn) ||
175                 aarch64_insn_is_bcond(insn);
176 }
177
178 int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
179 {
180         u32 *tp = addr;
181         int ret;
182
183         /* A64 instructions must be word aligned */
184         if ((uintptr_t)tp & 0x3)
185                 return -EINVAL;
186
187         ret = aarch64_insn_write(tp, insn);
188         if (ret == 0)
189                 __flush_icache_range((uintptr_t)tp,
190                                      (uintptr_t)tp + AARCH64_INSN_SIZE);
191
192         return ret;
193 }
194
/* Work description shared by all CPUs in aarch64_insn_patch_text_cb(). */
struct aarch64_insn_patch {
	void		**text_addrs;	/* instruction addresses to patch */
	u32		*new_insns;	/* replacement instructions */
	int		insn_cnt;	/* number of entries in the arrays above */
	atomic_t	cpu_count;	/* rendezvous counter, see the callback */
};
201
/*
 * stop_machine() callback; every online CPU runs this.  The last CPU to
 * arrive does the patching while the others spin, then all CPUs
 * resynchronise their instruction streams before resuming.
 */
static int __kprobes aarch64_insn_patch_text_cb(void *arg)
{
	int i, ret = 0;
	struct aarch64_insn_patch *pp = arg;

	/* The last CPU becomes master */
	if (atomic_inc_return(&pp->cpu_count) == num_online_cpus()) {
		for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
			ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
							     pp->new_insns[i]);
		/* Notify other processors with an additional increment. */
		atomic_inc(&pp->cpu_count);
	} else {
		/*
		 * The counter only exceeds num_online_cpus() once the
		 * master's extra increment lands, i.e. patching is done.
		 */
		while (atomic_read(&pp->cpu_count) <= num_online_cpus())
			cpu_relax();
		/* Discard any stale prefetched instructions. */
		isb();
	}

	return ret;
}
222
223 int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
224 {
225         struct aarch64_insn_patch patch = {
226                 .text_addrs = addrs,
227                 .new_insns = insns,
228                 .insn_cnt = cnt,
229                 .cpu_count = ATOMIC_INIT(0),
230         };
231
232         if (cnt <= 0)
233                 return -EINVAL;
234
235         return stop_machine_cpuslocked(aarch64_insn_patch_text_cb, &patch,
236                                        cpu_online_mask);
237 }
238
239 static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
240                                                 u32 *maskp, int *shiftp)
241 {
242         u32 mask;
243         int shift;
244
245         switch (type) {
246         case AARCH64_INSN_IMM_26:
247                 mask = BIT(26) - 1;
248                 shift = 0;
249                 break;
250         case AARCH64_INSN_IMM_19:
251                 mask = BIT(19) - 1;
252                 shift = 5;
253                 break;
254         case AARCH64_INSN_IMM_16:
255                 mask = BIT(16) - 1;
256                 shift = 5;
257                 break;
258         case AARCH64_INSN_IMM_14:
259                 mask = BIT(14) - 1;
260                 shift = 5;
261                 break;
262         case AARCH64_INSN_IMM_12:
263                 mask = BIT(12) - 1;
264                 shift = 10;
265                 break;
266         case AARCH64_INSN_IMM_9:
267                 mask = BIT(9) - 1;
268                 shift = 12;
269                 break;
270         case AARCH64_INSN_IMM_7:
271                 mask = BIT(7) - 1;
272                 shift = 15;
273                 break;
274         case AARCH64_INSN_IMM_6:
275         case AARCH64_INSN_IMM_S:
276                 mask = BIT(6) - 1;
277                 shift = 10;
278                 break;
279         case AARCH64_INSN_IMM_R:
280                 mask = BIT(6) - 1;
281                 shift = 16;
282                 break;
283         case AARCH64_INSN_IMM_N:
284                 mask = 1;
285                 shift = 22;
286                 break;
287         default:
288                 return -EINVAL;
289         }
290
291         *maskp = mask;
292         *shiftp = shift;
293
294         return 0;
295 }
296
/*
 * ADR/ADRP split their 21-bit immediate into immlo (2 bits at [30:29])
 * and immhi (19 bits at [23:5]); these constants describe that split.
 */
#define ADR_IMM_HILOSPLIT	2
#define ADR_IMM_SIZE		SZ_2M
#define ADR_IMM_LOMASK		((1 << ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_HIMASK		((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_LOSHIFT		29
#define ADR_IMM_HISHIFT		5
303
/*
 * Extract the immediate field of kind @type from @insn.
 *
 * ADR/ADRP immediates are recombined from their immhi:immlo halves
 * before masking.  The returned value is the raw field contents - it
 * is NOT sign-extended.  Returns 0 for an unknown immediate type.
 */
u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn)
{
	u32 immlo, immhi, mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
		immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK;
		/* Reassemble the split immediate into one 21-bit value. */
		insn = (immhi << ADR_IMM_HILOSPLIT) | immlo;
		mask = ADR_IMM_SIZE - 1;
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("aarch64_insn_decode_immediate: unknown immediate encoding %d\n",
			       type);
			return 0;
		}
	}

	return (insn >> shift) & mask;
}
327
/*
 * Insert @imm into the immediate field of kind @type in @insn and
 * return the updated instruction.  High bits of @imm beyond the field
 * width are silently masked off.  AARCH64_BREAK_FAULT (a BRK) serves as
 * the poison value: it is returned on error and propagated if @insn is
 * already poisoned, so failures flow through chained encode calls.
 */
u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
				  u32 insn, u64 imm)
{
	u32 immlo, immhi, mask;
	int shift;

	if (insn == AARCH64_BREAK_FAULT)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		/* Split @imm into the immlo/immhi fields of ADR/ADRP. */
		immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT;
		imm >>= ADR_IMM_HILOSPLIT;
		immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT;
		imm = immlo | immhi;
		mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) |
			(ADR_IMM_HIMASK << ADR_IMM_HISHIFT));
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
			       type);
			return AARCH64_BREAK_FAULT;
		}
	}

	/* Update the immediate field. */
	insn &= ~(mask << shift);
	insn |= (imm & mask) << shift;

	return insn;
}
361
362 u32 aarch64_insn_decode_register(enum aarch64_insn_register_type type,
363                                         u32 insn)
364 {
365         int shift;
366
367         switch (type) {
368         case AARCH64_INSN_REGTYPE_RT:
369         case AARCH64_INSN_REGTYPE_RD:
370                 shift = 0;
371                 break;
372         case AARCH64_INSN_REGTYPE_RN:
373                 shift = 5;
374                 break;
375         case AARCH64_INSN_REGTYPE_RT2:
376         case AARCH64_INSN_REGTYPE_RA:
377                 shift = 10;
378                 break;
379         case AARCH64_INSN_REGTYPE_RM:
380                 shift = 16;
381                 break;
382         default:
383                 pr_err("%s: unknown register type encoding %d\n", __func__,
384                        type);
385                 return 0;
386         }
387
388         return (insn >> shift) & GENMASK(4, 0);
389 }
390
/*
 * Place register @reg into the @type field of @insn.  Valid registers
 * span AARCH64_INSN_REG_0..AARCH64_INSN_REG_SP (X0-X30 plus ZR/SP).
 * Returns AARCH64_BREAK_FAULT for any invalid input, and propagates it
 * unchanged when @insn is already that poison value.
 */
static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
					u32 insn,
					enum aarch64_insn_register reg)
{
	int shift;

	if (insn == AARCH64_BREAK_FAULT)
		return AARCH64_BREAK_FAULT;

	if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
		pr_err("%s: unknown register encoding %d\n", __func__, reg);
		return AARCH64_BREAK_FAULT;
	}

	switch (type) {
	case AARCH64_INSN_REGTYPE_RT:
	case AARCH64_INSN_REGTYPE_RD:
		shift = 0;
		break;
	case AARCH64_INSN_REGTYPE_RN:
		shift = 5;
		break;
	case AARCH64_INSN_REGTYPE_RT2:
	case AARCH64_INSN_REGTYPE_RA:
		shift = 10;
		break;
	case AARCH64_INSN_REGTYPE_RM:
	case AARCH64_INSN_REGTYPE_RS:
		shift = 16;
		break;
	default:
		pr_err("%s: unknown register type encoding %d\n", __func__,
		       type);
		return AARCH64_BREAK_FAULT;
	}

	insn &= ~(GENMASK(4, 0) << shift);
	insn |= reg << shift;

	return insn;
}
432
433 static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
434                                          u32 insn)
435 {
436         u32 size;
437
438         switch (type) {
439         case AARCH64_INSN_SIZE_8:
440                 size = 0;
441                 break;
442         case AARCH64_INSN_SIZE_16:
443                 size = 1;
444                 break;
445         case AARCH64_INSN_SIZE_32:
446                 size = 2;
447                 break;
448         case AARCH64_INSN_SIZE_64:
449                 size = 3;
450                 break;
451         default:
452                 pr_err("%s: unknown size encoding %d\n", __func__, type);
453                 return AARCH64_BREAK_FAULT;
454         }
455
456         insn &= ~GENMASK(31, 30);
457         insn |= size << 30;
458
459         return insn;
460 }
461
/*
 * Validate and compute the byte offset of a PC-relative branch from
 * @pc to @addr.  On misalignment or range overflow this returns @range
 * itself as an in-band error marker: a valid offset is always strictly
 * less than @range, so callers must treat (offset >= range) as failure.
 */
static inline long branch_imm_common(unsigned long pc, unsigned long addr,
				     long range)
{
	long offset;

	if ((pc & 0x3) || (addr & 0x3)) {
		pr_err("%s: A64 instructions must be word aligned\n", __func__);
		return range;
	}

	offset = ((long)addr - (long)pc);

	if (offset < -range || offset >= range) {
		pr_err("%s: offset out of range\n", __func__);
		return range;
	}

	return offset;
}
481
482 u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
483                                           enum aarch64_insn_branch_type type)
484 {
485         u32 insn;
486         long offset;
487
488         /*
489          * B/BL support [-128M, 128M) offset
490          * ARM64 virtual address arrangement guarantees all kernel and module
491          * texts are within +/-128M.
492          */
493         offset = branch_imm_common(pc, addr, SZ_128M);
494         if (offset >= SZ_128M)
495                 return AARCH64_BREAK_FAULT;
496
497         switch (type) {
498         case AARCH64_INSN_BRANCH_LINK:
499                 insn = aarch64_insn_get_bl_value();
500                 break;
501         case AARCH64_INSN_BRANCH_NOLINK:
502                 insn = aarch64_insn_get_b_value();
503                 break;
504         default:
505                 pr_err("%s: unknown branch encoding %d\n", __func__, type);
506                 return AARCH64_BREAK_FAULT;
507         }
508
509         return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
510                                              offset >> 2);
511 }
512
/*
 * Generate a compare-and-branch (CBZ/CBNZ) testing @reg, branching
 * from @pc to @addr.  The target must be within the signed 19-bit
 * (+/-1M) immediate range.  Returns AARCH64_BREAK_FAULT on failure.
 */
u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_register reg,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	offset = branch_imm_common(pc, addr, SZ_1M);
	if (offset >= SZ_1M)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_BRANCH_COMP_ZERO:
		insn = aarch64_insn_get_cbz_value();
		break;
	case AARCH64_INSN_BRANCH_COMP_NONZERO:
		insn = aarch64_insn_get_cbnz_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		/* sf bit selects the 64-bit (Xn) comparison. */
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	/* imm19 holds the offset in 4-byte units. */
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}
553
554 u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
555                                      enum aarch64_insn_condition cond)
556 {
557         u32 insn;
558         long offset;
559
560         offset = branch_imm_common(pc, addr, SZ_1M);
561
562         insn = aarch64_insn_get_bcond_value();
563
564         if (cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL) {
565                 pr_err("%s: unknown condition encoding %d\n", __func__, cond);
566                 return AARCH64_BREAK_FAULT;
567         }
568         insn |= cond;
569
570         return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
571                                              offset >> 2);
572 }
573
/* Build a HINT instruction with operand @op OR-ed into its CRm:op2 field. */
u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op)
{
	return aarch64_insn_get_hint_value() | op;
}
578
/* NOP is encoded as HINT #0. */
u32 __kprobes aarch64_insn_gen_nop(void)
{
	return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
}
583
584 u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
585                                 enum aarch64_insn_branch_type type)
586 {
587         u32 insn;
588
589         switch (type) {
590         case AARCH64_INSN_BRANCH_NOLINK:
591                 insn = aarch64_insn_get_br_value();
592                 break;
593         case AARCH64_INSN_BRANCH_LINK:
594                 insn = aarch64_insn_get_blr_value();
595                 break;
596         case AARCH64_INSN_BRANCH_RETURN:
597                 insn = aarch64_insn_get_ret_value();
598                 break;
599         default:
600                 pr_err("%s: unknown branch encoding %d\n", __func__, type);
601                 return AARCH64_BREAK_FAULT;
602         }
603
604         return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg);
605 }
606
/*
 * Generate a register-offset load/store: ldr/str @reg, [@base, @offset].
 * @size selects the access width.  Returns AARCH64_BREAK_FAULT on
 * any invalid argument (propagated through the encode helpers).
 */
u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
				    enum aarch64_insn_register base,
				    enum aarch64_insn_register offset,
				    enum aarch64_insn_size_type size,
				    enum aarch64_insn_ldst_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
		insn = aarch64_insn_get_ldr_reg_value();
		break;
	case AARCH64_INSN_LDST_STORE_REG_OFFSET:
		insn = aarch64_insn_get_str_reg_value();
		break;
	default:
		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    offset);
}
637
/*
 * Generate a load/store pair with pre- or post-index writeback:
 * ldp/stp @reg1, @reg2, [@base], #@offset (or the pre-index form).
 * The imm7 field is scaled by the access size, so @offset must be a
 * multiple of 4 (32-bit) or 8 (64-bit) within the scaled signed range.
 * Returns AARCH64_BREAK_FAULT on any invalid argument.
 */
u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
				     enum aarch64_insn_register reg2,
				     enum aarch64_insn_register base,
				     int offset,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_ldst_type type)
{
	u32 insn;
	int shift;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_ldp_pre_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_stp_pre_value();
		break;
	case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX:
		insn = aarch64_insn_get_ldp_post_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX:
		insn = aarch64_insn_get_stp_post_value();
		break;
	default:
		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		/* imm7 is scaled by 4: range [-256, 252], step 4. */
		if ((offset & 0x3) || (offset < -256) || (offset > 252)) {
			pr_err("%s: offset must be multiples of 4 in the range of [-256, 252] %d\n",
			       __func__, offset);
			return AARCH64_BREAK_FAULT;
		}
		shift = 2;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		/* imm7 is scaled by 8: range [-512, 504], step 8. */
		if ((offset & 0x7) || (offset < -512) || (offset > 504)) {
			pr_err("%s: offset must be multiples of 8 in the range of [-512, 504] %d\n",
			       __func__, offset);
			return AARCH64_BREAK_FAULT;
		}
		shift = 3;
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg1);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    reg2);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn,
					     offset >> shift);
}
701
/*
 * Generate a load/store-exclusive: ldxr @reg, [@base] or
 * stxr @state, @reg, [@base].  For loads @state is unused by the CPU
 * but still encoded in Rs.  Rt2 is set to XZR as required by the
 * single-register exclusive encodings.
 */
u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
				   enum aarch64_insn_register base,
				   enum aarch64_insn_register state,
				   enum aarch64_insn_size_type size,
				   enum aarch64_insn_ldst_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_EX:
		insn = aarch64_insn_get_load_ex_value();
		break;
	case AARCH64_INSN_LDST_STORE_EX:
		insn = aarch64_insn_get_store_ex_value();
		break;
	default:
		pr_err("%s: unknown load/store exclusive encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    AARCH64_INSN_REG_ZR);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
					    state);
}
736
737 u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result,
738                            enum aarch64_insn_register address,
739                            enum aarch64_insn_register value,
740                            enum aarch64_insn_size_type size)
741 {
742         u32 insn = aarch64_insn_get_ldadd_value();
743
744         switch (size) {
745         case AARCH64_INSN_SIZE_32:
746         case AARCH64_INSN_SIZE_64:
747                 break;
748         default:
749                 pr_err("%s: unimplemented size encoding %d\n", __func__, size);
750                 return AARCH64_BREAK_FAULT;
751         }
752
753         insn = aarch64_insn_encode_ldst_size(size, insn);
754
755         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
756                                             result);
757
758         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
759                                             address);
760
761         return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
762                                             value);
763 }
764
/*
 * Generate an LSE atomic add without result: stadd @value, [@address].
 * Only 32- and 64-bit sizes are supported (enforced by gen_ldadd).
 */
u32 aarch64_insn_gen_stadd(enum aarch64_insn_register address,
			   enum aarch64_insn_register value,
			   enum aarch64_insn_size_type size)
{
	/*
	 * STADD is simply encoded as an alias for LDADD with XZR as
	 * the destination register.
	 */
	return aarch64_insn_gen_ldadd(AARCH64_INSN_REG_ZR, address,
				      value, size);
}
776
/*
 * Encode the PRFM operand into the Rt slot of @insn.  The 5-bit prfop
 * immediate is laid out as type[4:3] : target[2:1] : policy[0].
 * Returns AARCH64_BREAK_FAULT for any unrecognised component.
 */
static u32 aarch64_insn_encode_prfm_imm(enum aarch64_insn_prfm_type type,
					enum aarch64_insn_prfm_target target,
					enum aarch64_insn_prfm_policy policy,
					u32 insn)
{
	u32 imm_type = 0, imm_target = 0, imm_policy = 0;

	switch (type) {
	case AARCH64_INSN_PRFM_TYPE_PLD:
		break;
	case AARCH64_INSN_PRFM_TYPE_PLI:
		imm_type = BIT(0);
		break;
	case AARCH64_INSN_PRFM_TYPE_PST:
		imm_type = BIT(1);
		break;
	default:
		pr_err("%s: unknown prfm type encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (target) {
	case AARCH64_INSN_PRFM_TARGET_L1:
		break;
	case AARCH64_INSN_PRFM_TARGET_L2:
		imm_target = BIT(0);
		break;
	case AARCH64_INSN_PRFM_TARGET_L3:
		imm_target = BIT(1);
		break;
	default:
		pr_err("%s: unknown prfm target encoding %d\n", __func__, target);
		return AARCH64_BREAK_FAULT;
	}

	switch (policy) {
	case AARCH64_INSN_PRFM_POLICY_KEEP:
		break;
	case AARCH64_INSN_PRFM_POLICY_STRM:
		imm_policy = BIT(0);
		break;
	default:
		pr_err("%s: unknown prfm policy encoding %d\n", __func__, policy);
		return AARCH64_BREAK_FAULT;
	}

	/* In this case, imm5 is encoded into Rt field. */
	insn &= ~GENMASK(4, 0);
	insn |= imm_policy | (imm_target << 1) | (imm_type << 3);

	return insn;
}
829
/*
 * Generate: prfm <prfop>, [@base] with a zero immediate offset, where
 * <prfop> is built from @type/@target/@policy.
 */
u32 aarch64_insn_gen_prefetch(enum aarch64_insn_register base,
			      enum aarch64_insn_prfm_type type,
			      enum aarch64_insn_prfm_target target,
			      enum aarch64_insn_prfm_policy policy)
{
	u32 insn = aarch64_insn_get_prfm_value();

	/* PRFM uses the 64-bit size encoding. */
	insn = aarch64_insn_encode_ldst_size(AARCH64_INSN_SIZE_64, insn);

	insn = aarch64_insn_encode_prfm_imm(type, target, policy, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	/* Zero unsigned-offset immediate. */
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, 0);
}
846
/*
 * Generate add/sub (immediate): add/sub[s] @dst, @src, #@imm.
 * The encoding holds a 12-bit immediate with an optional LSL #12, so
 * @imm must fit in the low 12 bits, or be a multiple of 4096 that fits
 * in bits [23:12].  Returns AARCH64_BREAK_FAULT otherwise.
 */
u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
				 enum aarch64_insn_register src,
				 int imm, enum aarch64_insn_variant variant,
				 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_imm_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_imm_value();
		break;
	default:
		pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	/* We can't encode more than a 24bit value (12bit + 12bit shift) */
	if (imm & ~(BIT(24) - 1))
		goto out;

	/* If we have something in the top 12 bits... */
	if (imm & ~(SZ_4K - 1)) {
		/* ... and in the low 12 bits -> error */
		if (imm & (SZ_4K - 1))
			goto out;

		/* Use the shifted form: imm12 = imm >> 12, LSL #12. */
		imm >>= 12;
		insn |= AARCH64_INSN_LSL_12;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);

out:
	pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
	return AARCH64_BREAK_FAULT;
}
907
/*
 * Generate a bitfield move instruction (BFM/UBFM/SBFM).
 *
 * @immr and @imms must fit in 5 bits for the 32-bit variant or 6 bits
 * for the 64-bit variant (which also sets the SF and N bits).
 */
u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
			      enum aarch64_insn_register src,
			      int immr, int imms,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_bitfield_type type)
{
	u32 insn;
	u32 mask;

	switch (type) {
	case AARCH64_INSN_BITFIELD_MOVE:
		insn = aarch64_insn_get_bfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED:
		insn = aarch64_insn_get_ubfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_SIGNED:
		insn = aarch64_insn_get_sbfm_value();
		break;
	default:
		pr_err("%s: unknown bitfield encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		mask = GENMASK(4, 0);
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		/* The 64-bit variant needs both SF and N set. */
		insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;
		mask = GENMASK(5, 0);
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	if (immr & ~mask) {
		pr_err("%s: invalid immr encoding %d\n", __func__, immr);
		return AARCH64_BREAK_FAULT;
	}
	if (imms & ~mask) {
		pr_err("%s: invalid imms encoding %d\n", __func__, imms);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}
962
/*
 * Generate a move wide instruction (MOVZ/MOVK/MOVN).
 *
 * @imm is a 16-bit immediate; @shift selects which 16-bit lane of the
 * destination it lands in and must be 0 or 16 for the 32-bit variant,
 * or 0/16/32/48 for the 64-bit variant.
 */
u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
			      int imm, int shift,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_movewide_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_MOVEWIDE_ZERO:
		insn = aarch64_insn_get_movz_value();
		break;
	case AARCH64_INSN_MOVEWIDE_KEEP:
		insn = aarch64_insn_get_movk_value();
		break;
	case AARCH64_INSN_MOVEWIDE_INVERSE:
		insn = aarch64_insn_get_movn_value();
		break;
	default:
		pr_err("%s: unknown movewide encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	/* Immediate must fit in 16 bits. */
	if (imm & ~(SZ_64K - 1)) {
		pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift != 0 && shift != 16) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift != 0 && shift != 16 && shift != 32 && shift != 48) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	/* shift/16 is the two-bit lane selector, placed at bit 21. */
	insn |= (shift >> 4) << 21;

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
}
1017
1018 u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
1019                                          enum aarch64_insn_register src,
1020                                          enum aarch64_insn_register reg,
1021                                          int shift,
1022                                          enum aarch64_insn_variant variant,
1023                                          enum aarch64_insn_adsb_type type)
1024 {
1025         u32 insn;
1026
1027         switch (type) {
1028         case AARCH64_INSN_ADSB_ADD:
1029                 insn = aarch64_insn_get_add_value();
1030                 break;
1031         case AARCH64_INSN_ADSB_SUB:
1032                 insn = aarch64_insn_get_sub_value();
1033                 break;
1034         case AARCH64_INSN_ADSB_ADD_SETFLAGS:
1035                 insn = aarch64_insn_get_adds_value();
1036                 break;
1037         case AARCH64_INSN_ADSB_SUB_SETFLAGS:
1038                 insn = aarch64_insn_get_subs_value();
1039                 break;
1040         default:
1041                 pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
1042                 return AARCH64_BREAK_FAULT;
1043         }
1044
1045         switch (variant) {
1046         case AARCH64_INSN_VARIANT_32BIT:
1047                 if (shift & ~(SZ_32 - 1)) {
1048                         pr_err("%s: invalid shift encoding %d\n", __func__,
1049                                shift);
1050                         return AARCH64_BREAK_FAULT;
1051                 }
1052                 break;
1053         case AARCH64_INSN_VARIANT_64BIT:
1054                 insn |= AARCH64_INSN_SF_BIT;
1055                 if (shift & ~(SZ_64 - 1)) {
1056                         pr_err("%s: invalid shift encoding %d\n", __func__,
1057                                shift);
1058                         return AARCH64_BREAK_FAULT;
1059                 }
1060                 break;
1061         default:
1062                 pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1063                 return AARCH64_BREAK_FAULT;
1064         }
1065
1066
1067         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
1068
1069         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
1070
1071         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
1072
1073         return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
1074 }
1075
1076 u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
1077                            enum aarch64_insn_register src,
1078                            enum aarch64_insn_variant variant,
1079                            enum aarch64_insn_data1_type type)
1080 {
1081         u32 insn;
1082
1083         switch (type) {
1084         case AARCH64_INSN_DATA1_REVERSE_16:
1085                 insn = aarch64_insn_get_rev16_value();
1086                 break;
1087         case AARCH64_INSN_DATA1_REVERSE_32:
1088                 insn = aarch64_insn_get_rev32_value();
1089                 break;
1090         case AARCH64_INSN_DATA1_REVERSE_64:
1091                 if (variant != AARCH64_INSN_VARIANT_64BIT) {
1092                         pr_err("%s: invalid variant for reverse64 %d\n",
1093                                __func__, variant);
1094                         return AARCH64_BREAK_FAULT;
1095                 }
1096                 insn = aarch64_insn_get_rev64_value();
1097                 break;
1098         default:
1099                 pr_err("%s: unknown data1 encoding %d\n", __func__, type);
1100                 return AARCH64_BREAK_FAULT;
1101         }
1102
1103         switch (variant) {
1104         case AARCH64_INSN_VARIANT_32BIT:
1105                 break;
1106         case AARCH64_INSN_VARIANT_64BIT:
1107                 insn |= AARCH64_INSN_SF_BIT;
1108                 break;
1109         default:
1110                 pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1111                 return AARCH64_BREAK_FAULT;
1112         }
1113
1114         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
1115
1116         return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
1117 }
1118
/*
 * Generate a two-source data-processing instruction
 * (UDIV/SDIV/LSLV/LSRV/ASRV/RORV): dst = src <op> reg.
 */
u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data2_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA2_UDIV:
		insn = aarch64_insn_get_udiv_value();
		break;
	case AARCH64_INSN_DATA2_SDIV:
		insn = aarch64_insn_get_sdiv_value();
		break;
	case AARCH64_INSN_DATA2_LSLV:
		insn = aarch64_insn_get_lslv_value();
		break;
	case AARCH64_INSN_DATA2_LSRV:
		insn = aarch64_insn_get_lsrv_value();
		break;
	case AARCH64_INSN_DATA2_ASRV:
		insn = aarch64_insn_get_asrv_value();
		break;
	case AARCH64_INSN_DATA2_RORV:
		insn = aarch64_insn_get_rorv_value();
		break;
	default:
		pr_err("%s: unknown data2 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
}
1168
1169 u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
1170                            enum aarch64_insn_register src,
1171                            enum aarch64_insn_register reg1,
1172                            enum aarch64_insn_register reg2,
1173                            enum aarch64_insn_variant variant,
1174                            enum aarch64_insn_data3_type type)
1175 {
1176         u32 insn;
1177
1178         switch (type) {
1179         case AARCH64_INSN_DATA3_MADD:
1180                 insn = aarch64_insn_get_madd_value();
1181                 break;
1182         case AARCH64_INSN_DATA3_MSUB:
1183                 insn = aarch64_insn_get_msub_value();
1184                 break;
1185         default:
1186                 pr_err("%s: unknown data3 encoding %d\n", __func__, type);
1187                 return AARCH64_BREAK_FAULT;
1188         }
1189
1190         switch (variant) {
1191         case AARCH64_INSN_VARIANT_32BIT:
1192                 break;
1193         case AARCH64_INSN_VARIANT_64BIT:
1194                 insn |= AARCH64_INSN_SF_BIT;
1195                 break;
1196         default:
1197                 pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1198                 return AARCH64_BREAK_FAULT;
1199         }
1200
1201         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
1202
1203         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RA, insn, src);
1204
1205         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
1206                                             reg1);
1207
1208         return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
1209                                             reg2);
1210 }
1211
/*
 * Generate a logical (shifted register) instruction:
 * AND/BIC/ORR/ORN/EOR/EON, optionally setting flags (ANDS/BICS).
 * @shift must be 0-31 for the 32-bit variant or 0-63 for the 64-bit
 * variant.
 */
u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_logic_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LOGIC_AND:
		insn = aarch64_insn_get_and_value();
		break;
	case AARCH64_INSN_LOGIC_BIC:
		insn = aarch64_insn_get_bic_value();
		break;
	case AARCH64_INSN_LOGIC_ORR:
		insn = aarch64_insn_get_orr_value();
		break;
	case AARCH64_INSN_LOGIC_ORN:
		insn = aarch64_insn_get_orn_value();
		break;
	case AARCH64_INSN_LOGIC_EOR:
		insn = aarch64_insn_get_eor_value();
		break;
	case AARCH64_INSN_LOGIC_EON:
		insn = aarch64_insn_get_eon_value();
		break;
	case AARCH64_INSN_LOGIC_AND_SETFLAGS:
		insn = aarch64_insn_get_ands_value();
		break;
	case AARCH64_INSN_LOGIC_BIC_SETFLAGS:
		insn = aarch64_insn_get_bics_value();
		break;
	default:
		pr_err("%s: unknown logical encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift & ~(SZ_32 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift & ~(SZ_64 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}


	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}
1281
/*
 * Decode the imm field of a branch, and return the byte offset as a
 * signed value (so it can be used when computing a new branch
 * target).
 *
 * Branch offsets are stored in units of instructions (4 bytes), so
 * each decode below sign-extends the raw field and multiplies by 4
 * using a shift-up/arithmetic-shift-down pair.
 */
s32 aarch64_get_branch_offset(u32 insn)
{
	s32 imm;

	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
		/* Sign-extend the 26-bit field and scale by 4. */
		return (imm << 6) >> 4;
	}

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn)) {
		/* 19-bit field: same sign-extend-and-scale trick. */
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn);
		return (imm << 13) >> 11;
	}

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) {
		/* 14-bit field: same sign-extend-and-scale trick. */
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_14, insn);
		return (imm << 18) >> 16;
	}

	/* Unhandled instruction */
	BUG();
}
1310
/*
 * Encode the displacement of a branch in the imm field and return the
 * updated instruction.
 *
 * @offset is a byte offset; it is stored divided by 4 since branch
 * targets are instruction (4-byte) aligned.  No range check is done
 * here; BUG() fires if @insn is not a recognised branch.
 */
u32 aarch64_set_branch_offset(u32 insn, s32 offset)
{
	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
						     offset >> 2);

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
						     offset >> 2);

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_14, insn,
						     offset >> 2);

	/* Unhandled instruction */
	BUG();
}
1333
/* Return the byte offset encoded in an ADRP (4K-scaled, so always a
 * multiple of SZ_4K). */
s32 aarch64_insn_adrp_get_offset(u32 insn)
{
	BUG_ON(!aarch64_insn_is_adrp(insn));
	return aarch64_insn_decode_immediate(AARCH64_INSN_IMM_ADR, insn) << 12;
}
1339
/* Store byte offset @offset (4K-scaled) into an ADRP's imm field. */
u32 aarch64_insn_adrp_set_offset(u32 insn, s32 offset)
{
	BUG_ON(!aarch64_insn_is_adrp(insn));
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn,
						offset >> 12);
}
1346
1347 /*
1348  * Extract the Op/CR data from a msr/mrs instruction.
1349  */
1350 u32 aarch64_insn_extract_system_reg(u32 insn)
1351 {
1352         return (insn & 0x1FFFE0) >> 5;
1353 }
1354
1355 bool aarch32_insn_is_wide(u32 insn)
1356 {
1357         return insn >= 0xe800;
1358 }
1359
1360 /*
1361  * Macros/defines for extracting register numbers from instruction.
1362  */
1363 u32 aarch32_insn_extract_reg_num(u32 insn, int offset)
1364 {
1365         return (insn & (0xf << offset)) >> offset;
1366 }
1367
1368 #define OPC2_MASK       0x7
1369 #define OPC2_OFFSET     5
1370 u32 aarch32_insn_mcr_extract_opc2(u32 insn)
1371 {
1372         return (insn & (OPC2_MASK << OPC2_OFFSET)) >> OPC2_OFFSET;
1373 }
1374
#define CRM_MASK	0xf
/* Extract the 4-bit CRm field (bits [3:0]) of an mcr instruction. */
u32 aarch32_insn_mcr_extract_crm(u32 insn)
{
	return insn & CRM_MASK;
}
1380
/* Condition EQ: Z set (equal). */
static bool __kprobes __check_eq(unsigned long pstate)
{
	return (pstate & PSR_Z_BIT) != 0;
}
1385
/* Condition NE: Z clear (not equal). */
static bool __kprobes __check_ne(unsigned long pstate)
{
	return (pstate & PSR_Z_BIT) == 0;
}
1390
/* Condition CS/HS: C set (carry set / unsigned higher or same). */
static bool __kprobes __check_cs(unsigned long pstate)
{
	return (pstate & PSR_C_BIT) != 0;
}
1395
/* Condition CC/LO: C clear (carry clear / unsigned lower). */
static bool __kprobes __check_cc(unsigned long pstate)
{
	return (pstate & PSR_C_BIT) == 0;
}
1400
/* Condition MI: N set (negative). */
static bool __kprobes __check_mi(unsigned long pstate)
{
	return (pstate & PSR_N_BIT) != 0;
}
1405
/* Condition PL: N clear (positive or zero). */
static bool __kprobes __check_pl(unsigned long pstate)
{
	return (pstate & PSR_N_BIT) == 0;
}
1410
/* Condition VS: V set (overflow). */
static bool __kprobes __check_vs(unsigned long pstate)
{
	return (pstate & PSR_V_BIT) != 0;
}
1415
/* Condition VC: V clear (no overflow). */
static bool __kprobes __check_vc(unsigned long pstate)
{
	return (pstate & PSR_V_BIT) == 0;
}
1420
/* Condition HI: C set and Z clear (unsigned higher). */
static bool __kprobes __check_hi(unsigned long pstate)
{
	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
	return (pstate & PSR_C_BIT) != 0;
}
1426
/* Condition LS: C clear or Z set (unsigned lower or same). */
static bool __kprobes __check_ls(unsigned long pstate)
{
	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
	return (pstate & PSR_C_BIT) == 0;
}
1432
/* Condition GE: N == V (signed greater than or equal). */
static bool __kprobes __check_ge(unsigned long pstate)
{
	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
	return (pstate & PSR_N_BIT) == 0;
}
1438
/* Condition LT: N != V (signed less than). */
static bool __kprobes __check_lt(unsigned long pstate)
{
	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
	return (pstate & PSR_N_BIT) != 0;
}
1444
/* Condition GT: Z clear and N == V (signed greater than). */
static bool __kprobes __check_gt(unsigned long pstate)
{
	/*PSR_N_BIT ^= PSR_V_BIT */
	unsigned long temp = pstate ^ (pstate << 3);

	temp |= (pstate << 1);	/*PSR_N_BIT |= PSR_Z_BIT */
	return (temp & PSR_N_BIT) == 0;
}
1453
/* Condition LE: Z set or N != V (signed less than or equal). */
static bool __kprobes __check_le(unsigned long pstate)
{
	/*PSR_N_BIT ^= PSR_V_BIT */
	unsigned long temp = pstate ^ (pstate << 3);

	temp |= (pstate << 1);	/*PSR_N_BIT |= PSR_Z_BIT */
	return (temp & PSR_N_BIT) != 0;
}
1462
/* Condition AL: always true (unconditional). */
static bool __kprobes __check_al(unsigned long pstate)
{
	return true;
}
1467
/*
 * Note that the ARMv8 ARM calls condition code 0b1111 "nv", but states that
 * it behaves identically to 0b1110 ("al").
 *
 * Indexed by the 4-bit AArch32 condition code.
 */
pstate_check_t * const aarch32_opcode_cond_checks[16] = {
	__check_eq, __check_ne, __check_cs, __check_cc,
	__check_mi, __check_pl, __check_vs, __check_vc,
	__check_hi, __check_ls, __check_ge, __check_lt,
	__check_gt, __check_le, __check_al, __check_al
};
1478
/*
 * Return true if @val is a single contiguous run of ones, e.g.
 * 0b0011100.  Callers must not pass 0 (__ffs64(0) is undefined) or
 * all-ones; both are filtered out before this is called.
 */
static bool range_of_ones(u64 val)
{
	/* Doesn't handle full ones or full zeroes */
	u64 sval = val >> __ffs64(val);

	/* One of Sean Eron Anderson's bithack tricks */
	return ((sval + 1) & (sval)) == 0;
}
1487
1488 static u32 aarch64_encode_immediate(u64 imm,
1489                                     enum aarch64_insn_variant variant,
1490                                     u32 insn)
1491 {
1492         unsigned int immr, imms, n, ones, ror, esz, tmp;
1493         u64 mask;
1494
1495         switch (variant) {
1496         case AARCH64_INSN_VARIANT_32BIT:
1497                 esz = 32;
1498                 break;
1499         case AARCH64_INSN_VARIANT_64BIT:
1500                 insn |= AARCH64_INSN_SF_BIT;
1501                 esz = 64;
1502                 break;
1503         default:
1504                 pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1505                 return AARCH64_BREAK_FAULT;
1506         }
1507
1508         mask = GENMASK(esz - 1, 0);
1509
1510         /* Can't encode full zeroes, full ones, or value wider than the mask */
1511         if (!imm || imm == mask || imm & ~mask)
1512                 return AARCH64_BREAK_FAULT;
1513
1514         /*
1515          * Inverse of Replicate(). Try to spot a repeating pattern
1516          * with a pow2 stride.
1517          */
1518         for (tmp = esz / 2; tmp >= 2; tmp /= 2) {
1519                 u64 emask = BIT(tmp) - 1;
1520
1521                 if ((imm & emask) != ((imm >> tmp) & emask))
1522                         break;
1523
1524                 esz = tmp;
1525                 mask = emask;
1526         }
1527
1528         /* N is only set if we're encoding a 64bit value */
1529         n = esz == 64;
1530
1531         /* Trim imm to the element size */
1532         imm &= mask;
1533
1534         /* That's how many ones we need to encode */
1535         ones = hweight64(imm);
1536
1537         /*
1538          * imms is set to (ones - 1), prefixed with a string of ones
1539          * and a zero if they fit. Cap it to 6 bits.
1540          */
1541         imms  = ones - 1;
1542         imms |= 0xf << ffs(esz);
1543         imms &= BIT(6) - 1;
1544
1545         /* Compute the rotation */
1546         if (range_of_ones(imm)) {
1547                 /*
1548                  * Pattern: 0..01..10..0
1549                  *
1550                  * Compute how many rotate we need to align it right
1551                  */
1552                 ror = __ffs64(imm);
1553         } else {
1554                 /*
1555                  * Pattern: 0..01..10..01..1
1556                  *
1557                  * Fill the unused top bits with ones, and check if
1558                  * the result is a valid immediate (all ones with a
1559                  * contiguous ranges of zeroes).
1560                  */
1561                 imm |= ~mask;
1562                 if (!range_of_ones(~imm))
1563                         return AARCH64_BREAK_FAULT;
1564
1565                 /*
1566                  * Compute the rotation to get a continuous set of
1567                  * ones, with the first bit set at position 0
1568                  */
1569                 ror = fls(~imm);
1570         }
1571
1572         /*
1573          * immr is the number of bits we need to rotate back to the
1574          * original set of ones. Note that this is relative to the
1575          * element size...
1576          */
1577         immr = (esz - ror) % esz;
1578
1579         insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, n);
1580         insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);
1581         return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
1582 }
1583
/*
 * Generate a logical (immediate) instruction (AND/ORR/EOR/ANDS) of
 * @Rn with bitmask immediate @imm into @Rd.  Returns
 * AARCH64_BREAK_FAULT if @imm is not encodable as a bitmask immediate.
 */
u32 aarch64_insn_gen_logical_immediate(enum aarch64_insn_logic_type type,
				       enum aarch64_insn_variant variant,
				       enum aarch64_insn_register Rn,
				       enum aarch64_insn_register Rd,
				       u64 imm)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LOGIC_AND:
		insn = aarch64_insn_get_and_imm_value();
		break;
	case AARCH64_INSN_LOGIC_ORR:
		insn = aarch64_insn_get_orr_imm_value();
		break;
	case AARCH64_INSN_LOGIC_EOR:
		insn = aarch64_insn_get_eor_imm_value();
		break;
	case AARCH64_INSN_LOGIC_AND_SETFLAGS:
		insn = aarch64_insn_get_ands_imm_value();
		break;
	default:
		pr_err("%s: unknown logical encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd);
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
	return aarch64_encode_immediate(imm, variant, insn);
}
1614
/*
 * Generate an EXTR instruction (extract from the Rn:Rm pair starting
 * at bit @lsb).  @lsb must be 0-31 for the 32-bit variant or 0-63 for
 * the 64-bit variant (which also sets SF and N).
 */
u32 aarch64_insn_gen_extr(enum aarch64_insn_variant variant,
			  enum aarch64_insn_register Rm,
			  enum aarch64_insn_register Rn,
			  enum aarch64_insn_register Rd,
			  u8 lsb)
{
	u32 insn;

	insn = aarch64_insn_get_extr_value();

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (lsb > 31)
			return AARCH64_BREAK_FAULT;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		if (lsb > 63)
			return AARCH64_BREAK_FAULT;
		insn |= AARCH64_INSN_SF_BIT;
		insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, 1);
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, lsb);
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd);
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, Rm);
}