GNU Linux-libre 5.15.72-gnu
[releases.git] / arch / arm64 / kernel / armv8_deprecated.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  Copyright (C) 2014 ARM Limited
4  */
5
6 #include <linux/cpu.h>
7 #include <linux/init.h>
8 #include <linux/list.h>
9 #include <linux/perf_event.h>
10 #include <linux/sched.h>
11 #include <linux/slab.h>
12 #include <linux/sysctl.h>
13 #include <linux/uaccess.h>
14
15 #include <asm/cpufeature.h>
16 #include <asm/insn.h>
17 #include <asm/sysreg.h>
18 #include <asm/system_misc.h>
19 #include <asm/traps.h>
20 #include <asm/kprobes.h>
21
22 #define CREATE_TRACE_POINTS
23 #include "trace-events-emulation.h"
24
25 /*
26  * The runtime support for deprecated instruction support can be in one of
27  * following three states -
28  *
29  * 0 = undef
30  * 1 = emulate (software emulation)
31  * 2 = hw (supported in hardware)
32  */
/* Runtime handling state for one deprecated/obsolete instruction class. */
enum insn_emulation_mode {
	INSN_UNDEF,	/* 0: instruction traps as undefined */
	INSN_EMULATE,	/* 1: instruction is emulated in software */
	INSN_HW,	/* 2: instruction is executed natively by hardware */
};
38
/* How far along the removal path an instruction class is. */
enum legacy_insn_status {
	INSN_DEPRECATED,	/* emulated by default, HW mode may be available */
	INSN_OBSOLETE,		/* undefined by default, emulation is opt-in */
};
43
/* Static description of one emulated instruction class. */
struct insn_emulation_ops {
	const char		*name;		/* sysctl entry and log name */
	enum legacy_insn_status status;		/* deprecated vs obsolete */
	struct undef_hook	*hooks;		/* array terminated by zero instr_mask */
	int			(*set_hw_mode)(bool enable);	/* toggle native support; NULL if none */
};
50
/* Runtime state for one registered emulation, linked on the global list. */
struct insn_emulation {
	struct list_head node;		/* entry in insn_emulation list */
	struct insn_emulation_ops *ops;
	int current_mode;		/* enum insn_emulation_mode; written via sysctl */
	int min;			/* sysctl lower bound (always INSN_UNDEF) */
	int max;			/* sysctl upper bound; depends on ops->status */
};
58
/* All registered emulations; protected by insn_emulation_lock. */
static LIST_HEAD(insn_emulation);
/* Count of registrations, used only while building the sysctl table at boot. */
static int nr_insn_emulated __initdata;
/* Raw spinlock: also taken from the CPU hotplug starting callback. */
static DEFINE_RAW_SPINLOCK(insn_emulation_lock);
/* Serialises sysctl-driven mode changes (emulation_proc_handler()). */
static DEFINE_MUTEX(insn_emulation_mutex);
63
64 static void register_emulation_hooks(struct insn_emulation_ops *ops)
65 {
66         struct undef_hook *hook;
67
68         BUG_ON(!ops->hooks);
69
70         for (hook = ops->hooks; hook->instr_mask; hook++)
71                 register_undef_hook(hook);
72
73         pr_notice("Registered %s emulation handler\n", ops->name);
74 }
75
76 static void remove_emulation_hooks(struct insn_emulation_ops *ops)
77 {
78         struct undef_hook *hook;
79
80         BUG_ON(!ops->hooks);
81
82         for (hook = ops->hooks; hook->instr_mask; hook++)
83                 unregister_undef_hook(hook);
84
85         pr_notice("Removed %s emulation handler\n", ops->name);
86 }
87
88 static void enable_insn_hw_mode(void *data)
89 {
90         struct insn_emulation *insn = (struct insn_emulation *)data;
91         if (insn->ops->set_hw_mode)
92                 insn->ops->set_hw_mode(true);
93 }
94
95 static void disable_insn_hw_mode(void *data)
96 {
97         struct insn_emulation *insn = (struct insn_emulation *)data;
98         if (insn->ops->set_hw_mode)
99                 insn->ops->set_hw_mode(false);
100 }
101
102 /* Run set_hw_mode(mode) on all active CPUs */
103 static int run_all_cpu_set_hw_mode(struct insn_emulation *insn, bool enable)
104 {
105         if (!insn->ops->set_hw_mode)
106                 return -EINVAL;
107         if (enable)
108                 on_each_cpu(enable_insn_hw_mode, (void *)insn, true);
109         else
110                 on_each_cpu(disable_insn_hw_mode, (void *)insn, true);
111         return 0;
112 }
113
114 /*
115  * Run set_hw_mode for all insns on a starting CPU.
116  * Returns:
117  *  0           - If all the hooks ran successfully.
118  * -EINVAL      - At least one hook is not supported by the CPU.
119  */
120 static int run_all_insn_set_hw_mode(unsigned int cpu)
121 {
122         int rc = 0;
123         unsigned long flags;
124         struct insn_emulation *insn;
125
126         raw_spin_lock_irqsave(&insn_emulation_lock, flags);
127         list_for_each_entry(insn, &insn_emulation, node) {
128                 bool enable = (insn->current_mode == INSN_HW);
129                 if (insn->ops->set_hw_mode && insn->ops->set_hw_mode(enable)) {
130                         pr_warn("CPU[%u] cannot support the emulation of %s",
131                                 cpu, insn->ops->name);
132                         rc = -EINVAL;
133                 }
134         }
135         raw_spin_unlock_irqrestore(&insn_emulation_lock, flags);
136         return rc;
137 }
138
/*
 * Switch @insn from handling mode @prev to insn->current_mode: first tear
 * down whatever @prev installed, then install the new mode.  Returns 0 on
 * success, or a negative error if the new mode could not be applied.
 */
static int update_insn_emulation_mode(struct insn_emulation *insn,
				       enum insn_emulation_mode prev)
{
	int ret = 0;

	/* Undo the previous mode's handlers / HW setting. */
	switch (prev) {
	case INSN_UNDEF: /* Nothing to be done */
		break;
	case INSN_EMULATE:
		remove_emulation_hooks(insn->ops);
		break;
	case INSN_HW:
		if (!run_all_cpu_set_hw_mode(insn, false))
			pr_notice("Disabled %s support\n", insn->ops->name);
		break;
	}

	/* Apply the newly requested mode. */
	switch (insn->current_mode) {
	case INSN_UNDEF:
		break;
	case INSN_EMULATE:
		register_emulation_hooks(insn->ops);
		break;
	case INSN_HW:
		ret = run_all_cpu_set_hw_mode(insn, true);
		if (!ret)
			pr_notice("Enabled %s support\n", insn->ops->name);
		break;
	}

	return ret;
}
171
/*
 * Allocate the runtime state for @ops, choose the default mode from its
 * deprecation status, add it to the global list and install the handlers
 * that mode requires.  Boot-time only (__init); allocation failure is
 * silently ignored, leaving the instruction undefined.
 */
static void __init register_insn_emulation(struct insn_emulation_ops *ops)
{
	unsigned long flags;
	struct insn_emulation *insn;

	insn = kzalloc(sizeof(*insn), GFP_KERNEL);
	if (!insn)
		return;

	insn->ops = ops;
	insn->min = INSN_UNDEF;

	switch (ops->status) {
	case INSN_DEPRECATED:
		/* Deprecated insns default to software emulation. */
		insn->current_mode = INSN_EMULATE;
		/* Disable the HW mode if it was turned on at early boot time */
		run_all_cpu_set_hw_mode(insn, false);
		insn->max = INSN_HW;
		break;
	case INSN_OBSOLETE:
		/* Obsolete insns default to faulting as undefined. */
		insn->current_mode = INSN_UNDEF;
		insn->max = INSN_EMULATE;
		break;
	}

	raw_spin_lock_irqsave(&insn_emulation_lock, flags);
	list_add(&insn->node, &insn_emulation);
	nr_insn_emulated++;
	raw_spin_unlock_irqrestore(&insn_emulation_lock, flags);

	/* Register any handlers if required */
	update_insn_emulation_mode(insn, INSN_UNDEF);
}
205
/*
 * sysctl handler for /proc/sys/abi/<name>.  Writes change the emulation
 * mode; insn_emulation_mutex serialises concurrent writers so a failed
 * mode switch can be reverted consistently.
 */
static int emulation_proc_handler(struct ctl_table *table, int write,
				  void *buffer, size_t *lenp,
				  loff_t *ppos)
{
	int ret = 0;
	/* table->data points at insn->current_mode; recover the container. */
	struct insn_emulation *insn = container_of(table->data, struct insn_emulation, current_mode);
	enum insn_emulation_mode prev_mode = insn->current_mode;

	mutex_lock(&insn_emulation_mutex);
	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	/* Nothing to do on read, on error, or if the mode did not change. */
	if (ret || !write || prev_mode == insn->current_mode)
		goto ret;

	ret = update_insn_emulation_mode(insn, prev_mode);
	if (ret) {
		/* Mode change failed, revert to previous mode. */
		insn->current_mode = prev_mode;
		update_insn_emulation_mode(insn, INSN_UNDEF);
	}
ret:
	mutex_unlock(&insn_emulation_mutex);
	return ret;
}
230
/*
 * Build a ctl_table with one entry per registered emulation and publish it
 * under /proc/sys/abi/.  The extra kcalloc'ed entry stays zeroed and acts
 * as the table terminator.
 */
static void __init register_insn_emulation_sysctl(void)
{
	unsigned long flags;
	int i = 0;
	struct insn_emulation *insn;
	struct ctl_table *insns_sysctl, *sysctl;

	insns_sysctl = kcalloc(nr_insn_emulated + 1, sizeof(*sysctl),
			       GFP_KERNEL);
	if (!insns_sysctl)
		return;

	raw_spin_lock_irqsave(&insn_emulation_lock, flags);
	list_for_each_entry(insn, &insn_emulation, node) {
		sysctl = &insns_sysctl[i];

		sysctl->mode = 0644;
		sysctl->maxlen = sizeof(int);

		sysctl->procname = insn->ops->name;
		/* The handler maps data back to insn via container_of(). */
		sysctl->data = &insn->current_mode;
		sysctl->extra1 = &insn->min;
		sysctl->extra2 = &insn->max;
		sysctl->proc_handler = emulation_proc_handler;
		i++;
	}
	raw_spin_unlock_irqrestore(&insn_emulation_lock, flags);

	register_sysctl("abi", insns_sysctl);
}
261
262 /*
263  *  Implement emulation of the SWP/SWPB instructions using load-exclusive and
264  *  store-exclusive.
265  *
266  *  Syntax of SWP{B} instruction: SWP{B}<c> <Rt>, <Rt2>, [<Rn>]
267  *  Where: Rt  = destination
268  *         Rt2 = source
269  *         Rn  = address
270  */
271
/*
 * Error-checking SWP macros implemented using ldxr{b}/stxr{b}
 */

/* Arbitrary constant to ensure forward-progress of the LL/SC loop */
#define __SWP_LL_SC_LOOPS	4

/*
 * Perform one SWP/SWPB-equivalent exchange on user memory with an
 * ldxr/stxr loop:
 *  - retries up to __SWP_LL_SC_LOOPS times, then sets 'res' to -EAGAIN so
 *    the caller can back off instead of livelocking;
 *  - on success 'res' is 0 and 'data' holds the old memory value;
 *  - faulting accesses are redirected via the exception table to the
 *    fixup at label 4, which sets 'res' to -EFAULT.
 * Privileged uaccess is enabled around the loop since the loads/stores
 * target user addresses.  'B' is "" for word or "b" for byte accesses.
 */
#define __user_swpX_asm(data, addr, res, temp, temp2, B)	\
do {								\
	uaccess_enable_privileged();				\
	__asm__ __volatile__(					\
	"	mov		%w3, %w7\n"			\
	"0:	ldxr"B"		%w2, [%4]\n"			\
	"1:	stxr"B"		%w0, %w1, [%4]\n"		\
	"	cbz		%w0, 2f\n"			\
	"	sub		%w3, %w3, #1\n"			\
	"	cbnz		%w3, 0b\n"			\
	"	mov		%w0, %w5\n"			\
	"	b		3f\n"				\
	"2:\n"							\
	"	mov		%w1, %w2\n"			\
	"3:\n"							\
	"	.pushsection	 .fixup,\"ax\"\n"		\
	"	.align		2\n"				\
	"4:	mov		%w0, %w6\n"			\
	"	b		3b\n"				\
	"	.popsection"					\
	_ASM_EXTABLE(0b, 4b)					\
	_ASM_EXTABLE(1b, 4b)					\
	: "=&r" (res), "+r" (data), "=&r" (temp), "=&r" (temp2) \
	: "r" ((unsigned long)addr), "i" (-EAGAIN),		\
	  "i" (-EFAULT),					\
	  "i" (__SWP_LL_SC_LOOPS)				\
	: "memory");						\
	uaccess_disable_privileged();				\
} while (0)

/* Word (SWP) and byte (SWPB) flavours of the macro above. */
#define __user_swp_asm(data, addr, res, temp, temp2) \
	__user_swpX_asm(data, addr, res, temp, temp2, "")
#define __user_swpb_asm(data, addr, res, temp, temp2) \
	__user_swpX_asm(data, addr, res, temp, temp2, "b")
313
/*
 * Bit 22 of the instruction encoding distinguishes between
 * the SWP and SWPB variants (bit set means SWPB).
 */
#define TYPE_SWPB (1 << 22)

/*
 * Do the actual swap for swp_handler(): store *data to 'address' and
 * return the previous memory contents through *data.
 *
 * Returns 0 on success, -EFAULT for a faulting or (word-SWP) unaligned
 * address, or -EAGAIN if the LL/SC loop kept failing while a signal is
 * pending.
 */
static int emulate_swpX(unsigned int address, unsigned int *data,
			unsigned int type)
{
	unsigned int res = 0;

	if ((type != TYPE_SWPB) && (address & 0x3)) {
		/* SWP to unaligned address not permitted */
		pr_debug("SWP instruction on unaligned pointer!\n");
		return -EFAULT;
	}

	while (1) {
		unsigned long temp, temp2;

		if (type == TYPE_SWPB)
			__user_swpb_asm(*data, address, res, temp, temp2);
		else
			__user_swp_asm(*data, address, res, temp, temp2);

		/* Retry -EAGAIN (LL/SC contention) unless a signal is pending. */
		if (likely(res != -EAGAIN) || signal_pending(current))
			break;

		cond_resched();
	}

	return res;
}
347
348 #define ARM_OPCODE_CONDTEST_FAIL   0
349 #define ARM_OPCODE_CONDTEST_PASS   1
350 #define ARM_OPCODE_CONDTEST_UNCOND 2
351
352 #define ARM_OPCODE_CONDITION_UNCOND     0xf
353
354 static unsigned int __kprobes aarch32_check_condition(u32 opcode, u32 psr)
355 {
356         u32 cc_bits  = opcode >> 28;
357
358         if (cc_bits != ARM_OPCODE_CONDITION_UNCOND) {
359                 if ((*aarch32_opcode_cond_checks[cc_bits])(psr))
360                         return ARM_OPCODE_CONDTEST_PASS;
361                 else
362                         return ARM_OPCODE_CONDTEST_FAIL;
363         }
364         return ARM_OPCODE_CONDTEST_UNCOND;
365 }
366
/*
 * swp_handler logs the id of calling process, dissects the instruction, sanity
 * checks the memory location, calls emulate_swpX for the actual operation and
 * deals with fixup/error handling before returning
 */
static int swp_handler(struct pt_regs *regs, u32 instr)
{
	u32 destreg, data, type, address = 0;
	const void __user *user_ptr;
	int rn, rt2, res = 0;

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->pc);

	/* Bit 22 set => SWPB, clear => SWP. */
	type = instr & TYPE_SWPB;

	switch (aarch32_check_condition(instr, regs->pstate)) {
	case ARM_OPCODE_CONDTEST_PASS:
		break;
	case ARM_OPCODE_CONDTEST_FAIL:
		/* Condition failed - return to next instruction */
		goto ret;
	case ARM_OPCODE_CONDTEST_UNCOND:
		/* If unconditional encoding - not a SWP, undef */
		return -EFAULT;
	default:
		return -EINVAL;
	}

	/* Rn holds the address, Rt2 the source value, Rt the destination. */
	rn = aarch32_insn_extract_reg_num(instr, A32_RN_OFFSET);
	rt2 = aarch32_insn_extract_reg_num(instr, A32_RT2_OFFSET);

	address = (u32)regs->user_regs.regs[rn];
	data	= (u32)regs->user_regs.regs[rt2];
	destreg = aarch32_insn_extract_reg_num(instr, A32_RT_OFFSET);

	pr_debug("addr in r%d->0x%08x, dest is r%d, source in r%d->0x%08x)\n",
		rn, address, destreg,
		aarch32_insn_extract_reg_num(instr, A32_RT2_OFFSET), data);

	/* Check access in reasonable access range for both SWP and SWPB */
	user_ptr = (const void __user *)(unsigned long)(address & ~3);
	if (!access_ok(user_ptr, 4)) {
		pr_debug("SWP{B} emulation: access to 0x%08x not allowed!\n",
			address);
		goto fault;
	}

	res = emulate_swpX(address, &data, type);
	if (res == -EFAULT)
		goto fault;
	else if (res == 0)
		/* Success: old memory value lands in the destination reg. */
		regs->user_regs.regs[destreg] = data;

ret:
	if (type == TYPE_SWPB)
		trace_instruction_emulation("swpb", regs->pc);
	else
		trace_instruction_emulation("swp", regs->pc);

	pr_warn_ratelimited("\"%s\" (%ld) uses obsolete SWP{B} instruction at 0x%llx\n",
			current->comm, (unsigned long)current->pid, regs->pc);

	/* Emulation done (or condition failed): step past the 4-byte insn. */
	arm64_skip_faulting_instruction(regs, 4);
	return 0;

fault:
	pr_debug("SWP{B} emulation: access caused memory abort!\n");
	arm64_notify_segfault(address);

	return 0;
}
438
/*
 * Only emulate SWP/SWPB executed in ARM state/User mode.
 * The kernel must be SWP free and SWP{B} does not exist in Thumb.
 */
static struct undef_hook swp_hooks[] = {
	{
		/* A32 SWP/SWPB encoding; condition field is don't-care. */
		.instr_mask	= 0x0fb00ff0,
		.instr_val	= 0x01000090,
		.pstate_mask	= PSR_AA32_MODE_MASK,
		.pstate_val	= PSR_AA32_MODE_USR,
		.fn		= swp_handler
	},
	{ }	/* sentinel: zero instr_mask terminates the array */
};
453
/* SWP{B} is obsolete: undefined by default, software emulation opt-in. */
static struct insn_emulation_ops swp_ops = {
	.name = "swp",
	.status = INSN_OBSOLETE,
	.hooks = swp_hooks,
	.set_hw_mode = NULL,	/* no native SWP support exists on ARMv8 */
};
460
/*
 * Emulate the AArch32 CP15 barrier instructions (mcr p15, 0, Rt, c7, ...)
 * with the equivalent native barriers, then skip the trapped instruction.
 * Returns 0 when handled; -EFAULT/-EINVAL for encodings this hook should
 * never have matched.
 */
static int cp15barrier_handler(struct pt_regs *regs, u32 instr)
{
	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->pc);

	switch (aarch32_check_condition(instr, regs->pstate)) {
	case ARM_OPCODE_CONDTEST_PASS:
		break;
	case ARM_OPCODE_CONDTEST_FAIL:
		/* Condition failed - return to next instruction */
		goto ret;
	case ARM_OPCODE_CONDTEST_UNCOND:
		/* If unconditional encoding - not a barrier instruction */
		return -EFAULT;
	default:
		return -EINVAL;
	}

	/* CRm selects the barrier kind; opc2 disambiguates dmb vs dsb. */
	switch (aarch32_insn_mcr_extract_crm(instr)) {
	case 10:
		/*
		 * dmb - mcr p15, 0, Rt, c7, c10, 5
		 * dsb - mcr p15, 0, Rt, c7, c10, 4
		 */
		if (aarch32_insn_mcr_extract_opc2(instr) == 5) {
			dmb(sy);
			trace_instruction_emulation(
				"mcr p15, 0, Rt, c7, c10, 5 ; dmb", regs->pc);
		} else {
			dsb(sy);
			trace_instruction_emulation(
				"mcr p15, 0, Rt, c7, c10, 4 ; dsb", regs->pc);
		}
		break;
	case 5:
		/*
		 * isb - mcr p15, 0, Rt, c7, c5, 4
		 *
		 * Taking an exception or returning from one acts as an
		 * instruction barrier. So no explicit barrier needed here.
		 */
		trace_instruction_emulation(
			"mcr p15, 0, Rt, c7, c5, 4 ; isb", regs->pc);
		break;
	}

ret:
	pr_warn_ratelimited("\"%s\" (%ld) uses deprecated CP15 Barrier instruction at 0x%llx\n",
			current->comm, (unsigned long)current->pid, regs->pc);

	arm64_skip_faulting_instruction(regs, 4);
	return 0;
}
513
514 static int cp15_barrier_set_hw_mode(bool enable)
515 {
516         if (enable)
517                 sysreg_clear_set(sctlr_el1, 0, SCTLR_EL1_CP15BEN);
518         else
519                 sysreg_clear_set(sctlr_el1, SCTLR_EL1_CP15BEN, 0);
520         return 0;
521 }
522
static struct undef_hook cp15_barrier_hooks[] = {
	{
		/* mcr p15, 0, Rt, c7, c10, {4,5}: dsb/dmb, user mode only. */
		.instr_mask	= 0x0fff0fdf,
		.instr_val	= 0x0e070f9a,
		.pstate_mask	= PSR_AA32_MODE_MASK,
		.pstate_val	= PSR_AA32_MODE_USR,
		.fn		= cp15barrier_handler,
	},
	{
		/* mcr p15, 0, Rt, c7, c5, 4: isb, user mode only. */
		.instr_mask	= 0x0fff0fff,
		.instr_val	= 0x0e070f95,
		.pstate_mask	= PSR_AA32_MODE_MASK,
		.pstate_val	= PSR_AA32_MODE_USR,
		.fn		= cp15barrier_handler,
	},
	{ }	/* sentinel */
};
540
/* CP15 barriers are deprecated: emulated by default, HW mode available. */
static struct insn_emulation_ops cp15_barrier_ops = {
	.name = "cp15_barrier",
	.status = INSN_DEPRECATED,
	.hooks = cp15_barrier_hooks,
	.set_hw_mode = cp15_barrier_set_hw_mode,
};
547
/*
 * Toggle native SETEND support on this CPU.  SCTLR_EL1.SED *disables* the
 * instruction, so the bit sense is inverted: enable => clear SED,
 * disable => set SED.  Fails on CPUs without mixed-endian EL0 support.
 */
static int setend_set_hw_mode(bool enable)
{
	if (!cpu_supports_mixed_endian_el0())
		return -EINVAL;

	if (enable)
		sysreg_clear_set(sctlr_el1, SCTLR_EL1_SED, 0);
	else
		sysreg_clear_set(sctlr_el1, 0, SCTLR_EL1_SED);
	return 0;
}
559
560 static int compat_setend_handler(struct pt_regs *regs, u32 big_endian)
561 {
562         char *insn;
563
564         perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->pc);
565
566         if (big_endian) {
567                 insn = "setend be";
568                 regs->pstate |= PSR_AA32_E_BIT;
569         } else {
570                 insn = "setend le";
571                 regs->pstate &= ~PSR_AA32_E_BIT;
572         }
573
574         trace_instruction_emulation(insn, regs->pc);
575         pr_warn_ratelimited("\"%s\" (%ld) uses deprecated setend instruction at 0x%llx\n",
576                         current->comm, (unsigned long)current->pid, regs->pc);
577
578         return 0;
579 }
580
581 static int a32_setend_handler(struct pt_regs *regs, u32 instr)
582 {
583         int rc = compat_setend_handler(regs, (instr >> 9) & 1);
584         arm64_skip_faulting_instruction(regs, 4);
585         return rc;
586 }
587
588 static int t16_setend_handler(struct pt_regs *regs, u32 instr)
589 {
590         int rc = compat_setend_handler(regs, (instr >> 3) & 1);
591         arm64_skip_faulting_instruction(regs, 2);
592         return rc;
593 }
594
static struct undef_hook setend_hooks[] = {
	{
		/* A32 SETEND encoding, user mode only. */
		.instr_mask	= 0xfffffdff,
		.instr_val	= 0xf1010000,
		.pstate_mask	= PSR_AA32_MODE_MASK,
		.pstate_val	= PSR_AA32_MODE_USR,
		.fn		= a32_setend_handler,
	},
	{
		/* Thumb mode */
		.instr_mask	= 0xfffffff7,
		.instr_val	= 0x0000b650,
		.pstate_mask	= (PSR_AA32_T_BIT | PSR_AA32_MODE_MASK),
		.pstate_val	= (PSR_AA32_T_BIT | PSR_AA32_MODE_USR),
		.fn		= t16_setend_handler,
	},
	{}	/* sentinel */
};
613
/* SETEND is deprecated: emulated by default, HW mode via SCTLR_EL1.SED. */
static struct insn_emulation_ops setend_ops = {
	.name = "setend",
	.status = INSN_DEPRECATED,
	.hooks = setend_hooks,
	.set_hw_mode = setend_set_hw_mode,
};
620
/*
 * Invoked as core_initcall, which guarantees that the instruction
 * emulation is ready for userspace.
 */
static int __init armv8_deprecated_init(void)
{
	if (IS_ENABLED(CONFIG_SWP_EMULATION))
		register_insn_emulation(&swp_ops);

	if (IS_ENABLED(CONFIG_CP15_BARRIER_EMULATION))
		register_insn_emulation(&cp15_barrier_ops);

	if (IS_ENABLED(CONFIG_SETEND_EMULATION)) {
		/* SETEND emulation only makes sense with mixed-endian EL0. */
		if (system_supports_mixed_endian_el0())
			register_insn_emulation(&setend_ops);
		else
			pr_info("setend instruction emulation is not supported on this system\n");
	}

	/* Re-apply each emulation's HW mode on every CPU as it comes online. */
	cpuhp_setup_state_nocalls(CPUHP_AP_ARM64_ISNDEP_STARTING,
				  "arm64/isndep:starting",
				  run_all_insn_set_hw_mode, NULL);
	register_insn_emulation_sysctl();

	return 0;
}

core_initcall(armv8_deprecated_init);