// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Western Digital Corporation or its affiliates.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/perf_event.h>
#include <linux/irq.h>
#include <linux/stringify.h>

#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/csr.h>
#include <asm/entry-common.h>
#include <asm/hwprobe.h>
#include <asm/cpufeature.h>
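
/*
 * Instructions are recognized by masking out their operand fields and
 * comparing what is left against a match value: for instance 0x0087a703
 * encodes "lw a4, 8(a5)", and 0x0087a703 & INSN_MASK_LW == INSN_MATCH_LW.
 * The MATCH/MASK pairs below cover the scalar, floating-point and
 * compressed (RVC) loads and stores that can take a misaligned-address
 * trap.
 */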
#define INSN_MATCH_LB 0x3
#define INSN_MASK_LB 0x707f
#define INSN_MATCH_LH 0x1003
#define INSN_MASK_LH 0x707f
#define INSN_MATCH_LW 0x2003
#define INSN_MASK_LW 0x707f
#define INSN_MATCH_LD 0x3003
#define INSN_MASK_LD 0x707f
#define INSN_MATCH_LBU 0x4003
#define INSN_MASK_LBU 0x707f
#define INSN_MATCH_LHU 0x5003
#define INSN_MASK_LHU 0x707f
#define INSN_MATCH_LWU 0x6003
#define INSN_MASK_LWU 0x707f
#define INSN_MATCH_SB 0x23
#define INSN_MASK_SB 0x707f
#define INSN_MATCH_SH 0x1023
#define INSN_MASK_SH 0x707f
#define INSN_MATCH_SW 0x2023
#define INSN_MASK_SW 0x707f
#define INSN_MATCH_SD 0x3023
#define INSN_MASK_SD 0x707f

#define INSN_MATCH_FLW 0x2007
#define INSN_MASK_FLW 0x707f
#define INSN_MATCH_FLD 0x3007
#define INSN_MASK_FLD 0x707f
#define INSN_MATCH_FLQ 0x4007
#define INSN_MASK_FLQ 0x707f
#define INSN_MATCH_FSW 0x2027
#define INSN_MASK_FSW 0x707f
#define INSN_MATCH_FSD 0x3027
#define INSN_MASK_FSD 0x707f
#define INSN_MATCH_FSQ 0x4027
#define INSN_MASK_FSQ 0x707f

#define INSN_MATCH_C_LD 0x6000
#define INSN_MASK_C_LD 0xe003
#define INSN_MATCH_C_SD 0xe000
#define INSN_MASK_C_SD 0xe003
#define INSN_MATCH_C_LW 0x4000
#define INSN_MASK_C_LW 0xe003
#define INSN_MATCH_C_SW 0xc000
#define INSN_MASK_C_SW 0xe003
#define INSN_MATCH_C_LDSP 0x6002
#define INSN_MASK_C_LDSP 0xe003
#define INSN_MATCH_C_SDSP 0xe002
#define INSN_MASK_C_SDSP 0xe003
#define INSN_MATCH_C_LWSP 0x4002
#define INSN_MASK_C_LWSP 0xe003
#define INSN_MATCH_C_SWSP 0xc002
#define INSN_MASK_C_SWSP 0xe003

#define INSN_MATCH_C_FLD 0x2000
#define INSN_MASK_C_FLD 0xe003
#define INSN_MATCH_C_FLW 0x6000
#define INSN_MASK_C_FLW 0xe003
#define INSN_MATCH_C_FSD 0xa000
#define INSN_MASK_C_FSD 0xe003
#define INSN_MATCH_C_FSW 0xe000
#define INSN_MASK_C_FSW 0xe003
#define INSN_MATCH_C_FLDSP 0x2002
#define INSN_MASK_C_FLDSP 0xe003
#define INSN_MATCH_C_FSDSP 0xa002
#define INSN_MASK_C_FSDSP 0xe003
#define INSN_MATCH_C_FLWSP 0x6002
#define INSN_MASK_C_FLWSP 0xe003
#define INSN_MATCH_C_FSWSP 0xe002
#define INSN_MASK_C_FSWSP 0xe003
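
/*
 * The two low opcode bits of a full 32-bit instruction are 11; any other
 * value marks a 16-bit compressed instruction. INSN_LEN() is used to
 * step epc past the emulated instruction.
 */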
#define INSN_LEN(insn) ((((insn) & 0x3) < 0x3) ? 2 : 4)

#if defined(CONFIG_64BIT)
#define LOG_REGBYTES 3
#define XLEN 64
#else
#define LOG_REGBYTES 2
#define XLEN 32
#endif
#define REGBYTES (1 << LOG_REGBYTES)
#define XLEN_MINUS_16 ((XLEN) - 16)
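
/*
 * RV_X(x, s, n) extracts the n-bit field starting at bit s. The SH_*
 * values are the bit positions of the standard operand fields, and the
 * RVC_*_IMM() helpers reassemble the scattered immediate bits of
 * compressed loads/stores into byte offsets (e.g. for C.LW: imm[5:3] is
 * in insn[12:10], imm[2] in insn[6] and imm[6] in insn[5]).
 */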
#define SH_RD 7
#define SH_RS1 15
#define SH_RS2 20
#define SH_RS2C 2

#define RV_X(x, s, n) (((x) >> (s)) & ((1 << (n)) - 1))
#define RVC_LW_IMM(x) ((RV_X(x, 6, 1) << 2) | \
		       (RV_X(x, 10, 3) << 3) | \
		       (RV_X(x, 5, 1) << 6))
#define RVC_LD_IMM(x) ((RV_X(x, 10, 3) << 3) | \
		       (RV_X(x, 5, 2) << 6))
#define RVC_LWSP_IMM(x) ((RV_X(x, 4, 3) << 2) | \
			 (RV_X(x, 12, 1) << 5) | \
			 (RV_X(x, 2, 2) << 6))
#define RVC_LDSP_IMM(x) ((RV_X(x, 5, 2) << 3) | \
			 (RV_X(x, 12, 1) << 5) | \
			 (RV_X(x, 2, 3) << 6))
#define RVC_SWSP_IMM(x) ((RV_X(x, 9, 4) << 2) | \
			 (RV_X(x, 7, 2) << 6))
#define RVC_SDSP_IMM(x) ((RV_X(x, 10, 3) << 3) | \
			 (RV_X(x, 7, 3) << 6))
#define RVC_RS1S(insn) (8 + RV_X(insn, SH_RD, 3))
#define RVC_RS2S(insn) (8 + RV_X(insn, SH_RS2C, 3))
#define RVC_RS2(insn) RV_X(insn, SH_RS2C, 5)
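
/*
 * The register accessors below (GET_RS1(), SET_RD(), ...) rely on the
 * layout of struct pt_regs, which stores epc followed by x1..x31, so
 * register number n lives at byte offset n * REGBYTES. REG_OFFSET()
 * extracts the register number from the instruction and scales it in one
 * step; SHIFT_RIGHT() makes that work even when the field position is
 * smaller than LOG_REGBYTES and the shift amount would be negative.
 */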
#define SHIFT_RIGHT(x, y)		\
	((y) < 0 ? ((x) << -(y)) : ((x) >> (y)))

#define REG_MASK			\
	((1 << (5 + LOG_REGBYTES)) - (1 << LOG_REGBYTES))

#define REG_OFFSET(insn, pos)		\
	(SHIFT_RIGHT((insn), (pos) - LOG_REGBYTES) & REG_MASK)

#define REG_PTR(insn, pos, regs)	\
	(ulong *)((ulong)(regs) + REG_OFFSET(insn, pos))

#define GET_RM(insn) (((insn) >> 12) & 7)

#define GET_RS1(insn, regs) (*REG_PTR(insn, SH_RS1, regs))
#define GET_RS2(insn, regs) (*REG_PTR(insn, SH_RS2, regs))
#define GET_RS1S(insn, regs) (*REG_PTR(RVC_RS1S(insn), 0, regs))
#define GET_RS2S(insn, regs) (*REG_PTR(RVC_RS2S(insn), 0, regs))
#define GET_RS2C(insn, regs) (*REG_PTR(insn, SH_RS2C, regs))
#define GET_SP(regs) (*REG_PTR(2, 0, regs))
#define SET_RD(insn, regs, val) (*REG_PTR(insn, SH_RD, regs) = (val))
#define IMM_I(insn) ((s32)(insn) >> 20)
#define IMM_S(insn) (((s32)(insn) >> 25 << 5) | \
		     (s32)(((insn) >> 7) & 0x1f))
#define MASK_FUNCT3 0x7000

#define GET_PRECISION(insn) (((insn) >> 25) & 3)
#define PRECISION_S 0
#define PRECISION_D 1
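
/*
 * With CONFIG_FPU, the helpers below move data between the emulated
 * access and the FP register file through the out-of-line accessors
 * put_f32_reg()/put_f64_reg()/get_f32_reg()/get_f64_reg(), and set
 * SR_FS_DIRTY so the context-switch code knows the FP state changed.
 * Without CONFIG_FPU they are stubs; the handlers below refuse FP
 * emulation in that case with -EOPNOTSUPP.
 */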
#ifdef CONFIG_FPU

#define FP_GET_RD(insn) (insn >> 7 & 0x1F)

extern void put_f32_reg(unsigned long fp_reg, unsigned long value);

static int set_f32_rd(unsigned long insn, struct pt_regs *regs,
		      unsigned long val)
{
	unsigned long fp_reg = FP_GET_RD(insn);

	put_f32_reg(fp_reg, val);
	regs->status |= SR_FS_DIRTY;

	return 0;
}

extern void put_f64_reg(unsigned long fp_reg, unsigned long value);

static int set_f64_rd(unsigned long insn, struct pt_regs *regs, u64 val)
{
	unsigned long fp_reg = FP_GET_RD(insn);
	unsigned long value;

#if __riscv_xlen == 32
	value = (unsigned long) &val;
#else
	value = val;
#endif
	put_f64_reg(fp_reg, value);
	regs->status |= SR_FS_DIRTY;

	return 0;
}
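
/*
 * On rv32, a 64-bit FP value does not fit in a single general-purpose
 * register, so it is exchanged with the accessors through a pointer;
 * on rv64 it is passed by value.
 */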
#if __riscv_xlen == 32
extern void get_f64_reg(unsigned long fp_reg, u64 *value);

static u64 get_f64_rs(unsigned long insn, u8 fp_reg_offset,
		      struct pt_regs *regs)
{
	unsigned long fp_reg = (insn >> fp_reg_offset) & 0x1F;
	u64 val;

	get_f64_reg(fp_reg, &val);
	regs->status |= SR_FS_DIRTY;

	return val;
}

#else

extern unsigned long get_f64_reg(unsigned long fp_reg);

static unsigned long get_f64_rs(unsigned long insn, u8 fp_reg_offset,
				struct pt_regs *regs)
{
	unsigned long fp_reg = (insn >> fp_reg_offset) & 0x1F;
	unsigned long val;

	val = get_f64_reg(fp_reg);
	regs->status |= SR_FS_DIRTY;

	return val;
}

#endif

extern unsigned long get_f32_reg(unsigned long fp_reg);

static unsigned long get_f32_rs(unsigned long insn, u8 fp_reg_offset,
				struct pt_regs *regs)
{
	unsigned long fp_reg = (insn >> fp_reg_offset) & 0x1F;
	unsigned long val;

	val = get_f32_reg(fp_reg);
	regs->status |= SR_FS_DIRTY;

	return val;
}

#else /* CONFIG_FPU */
static void set_f32_rd(unsigned long insn, struct pt_regs *regs,
		       unsigned long val) {}

static void set_f64_rd(unsigned long insn, struct pt_regs *regs, u64 val) {}

static unsigned long get_f64_rs(unsigned long insn, u8 fp_reg_offset,
				struct pt_regs *regs)
{
	return 0;
}

static unsigned long get_f32_rs(unsigned long insn, u8 fp_reg_offset,
				struct pt_regs *regs)
{
	return 0;
}
#endif

#define GET_F64_RS2(insn, regs) (get_f64_rs(insn, 20, regs))
#define GET_F64_RS2C(insn, regs) (get_f64_rs(insn, 2, regs))
#define GET_F64_RS2S(insn, regs) (get_f64_rs(RVC_RS2S(insn), 0, regs))

#define GET_F32_RS2(insn, regs) (get_f32_rs(insn, 20, regs))
#define GET_F32_RS2C(insn, regs) (get_f32_rs(insn, 2, regs))
#define GET_F32_RS2S(insn, regs) (get_f32_rs(RVC_RS2S(insn), 0, regs))
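
/*
 * load_u8()/store_u8() move the emulated access one byte at a time;
 * byte accesses can never themselves be misaligned. Under
 * CONFIG_RISCV_M_MODE there is no lower privilege level to fault into,
 * so plain loads and stores are used; otherwise user memory must be
 * reached through __get_user()/__put_user().
 */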
#ifdef CONFIG_RISCV_M_MODE
static inline int load_u8(struct pt_regs *regs, const u8 *addr, u8 *r_val)
{
	u8 val;

	asm volatile("lbu %0, %1" : "=&r" (val) : "m" (*addr));
	*r_val = val;

	return 0;
}

static inline int store_u8(struct pt_regs *regs, u8 *addr, u8 val)
{
	asm volatile ("sb %0, %1\n" : : "r" (val), "m" (*addr));

	return 0;
}
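
/*
 * The instruction fetch is written in assembly so the load width can be
 * chosen at run time: a 32-bit load from a 2-byte-aligned mepc would
 * itself be misaligned and trap again, so in that case the instruction
 * is assembled from two halfword loads instead.
 */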
static inline int get_insn(struct pt_regs *regs, ulong mepc, ulong *r_insn)
{
	register ulong __mepc asm ("a2") = mepc;
	ulong val, rvc_mask = 3, tmp;

	asm ("and %[tmp], %[addr], 2\n"
		"bnez %[tmp], 1f\n"
#if defined(CONFIG_64BIT)
		__stringify(LWU) " %[insn], (%[addr])\n"
#else
		__stringify(LW) " %[insn], (%[addr])\n"
#endif
		"and %[tmp], %[insn], %[rvc_mask]\n"
		"beq %[tmp], %[rvc_mask], 2f\n"
		"sll %[insn], %[insn], %[xlen_minus_16]\n"
		"srl %[insn], %[insn], %[xlen_minus_16]\n"
		"j 2f\n"
		"1:\n"
		"lhu %[insn], (%[addr])\n"
		"and %[tmp], %[insn], %[rvc_mask]\n"
		"bne %[tmp], %[rvc_mask], 2f\n"
		"lhu %[tmp], 2(%[addr])\n"
		"sll %[tmp], %[tmp], 16\n"
		"add %[insn], %[insn], %[tmp]\n"
		"2:"
	: [insn] "=&r" (val), [tmp] "=&r" (tmp)
	: [addr] "r" (__mepc), [rvc_mask] "r" (rvc_mask),
	  [xlen_minus_16] "i" (XLEN_MINUS_16));

	*r_insn = val;

	return 0;
}
#else
static inline int load_u8(struct pt_regs *regs, const u8 *addr, u8 *r_val)
{
	if (user_mode(regs)) {
		return __get_user(*r_val, (u8 __user *)addr);
	} else {
		*r_val = *addr;
		return 0;
	}
}

static inline int store_u8(struct pt_regs *regs, u8 *addr, u8 val)
{
	if (user_mode(regs)) {
		return __put_user(val, (u8 __user *)addr);
	} else {
		*addr = val;
		return 0;
	}
}
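
/*
 * Read one instruction parcel of the given type (u16 or u32) from
 * insn_addr: user accesses go through __get_user(), kernel addresses
 * are dereferenced directly.
 */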
#define __read_insn(regs, insn, insn_addr, type)	\
({							\
	int __ret;					\
							\
	if (user_mode(regs)) {				\
		__ret = __get_user(insn, insn_addr);	\
	} else {					\
		insn = *(__force type *)insn_addr;	\
		__ret = 0;				\
	}						\
							\
	__ret;						\
})
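
/*
 * Fetch the trapping instruction: when epc is only 2-byte aligned, read
 * it as one or two 16-bit parcels so the fetch itself cannot fault on
 * misalignment; when epc is 4-byte aligned a single 32-bit read works.
 */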
static inline int get_insn(struct pt_regs *regs, ulong epc, ulong *r_insn)
{
	ulong insn = 0;

	if (epc & 0x2) {
		ulong tmp = 0;
		u16 __user *insn_addr = (u16 __user *)epc;

		if (__read_insn(regs, insn, insn_addr, u16))
			return -EFAULT;
		/*
		 * __get_user() does a sign-extending load, so clear the
		 * upper bits to keep them from leaking into the "or" with
		 * the upper 16-bit half below.
		 */
		insn &= GENMASK(15, 0);
		if ((insn & __INSN_LENGTH_MASK) != __INSN_LENGTH_32) {
			*r_insn = insn;
			return 0;
		}
		insn_addr++;
		if (__read_insn(regs, tmp, insn_addr, u16))
			return -EFAULT;
		*r_insn = (tmp << 16) | insn;

		return 0;
	} else {
		u32 __user *insn_addr = (u32 __user *)epc;

		if (__read_insn(regs, insn, insn_addr, u32))
			return -EFAULT;
		if ((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32) {
			*r_insn = insn;
			return 0;
		}
		insn &= GENMASK(15, 0);
		*r_insn = insn;

		return 0;
	}
}
#endif

union reg_data {
	u8 data_bytes[8];
	ulong data_ulong;
	u64 data_u64;
};

static bool unaligned_ctl __read_mostly;

int unaligned_enabled __read_mostly = 1;	/* Enabled by default */
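
/*
 * Emulate a misaligned load: fetch and decode the trapping instruction,
 * read the data one byte at a time, extend it as the instruction demands
 * (the "shift" dance below sign-extends narrow values), write the result
 * to the destination register and step epc past the instruction.
 */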
int handle_misaligned_load(struct pt_regs *regs)
{
	union reg_data val;
	unsigned long epc = regs->epc;
	unsigned long insn;
	unsigned long addr = regs->badaddr;
	int i, fp = 0, shift = 0, len = 0;

	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);

	*this_cpu_ptr(&misaligned_access_speed) = RISCV_HWPROBE_MISALIGNED_EMULATED;

	if (!unaligned_enabled)
		return -1;

	if (user_mode(regs) && (current->thread.align_ctl & PR_UNALIGN_SIGBUS))
		return -1;

	if (get_insn(regs, epc, &insn))
		return -1;

	regs->epc = 0;

	if ((insn & INSN_MASK_LW) == INSN_MATCH_LW) {
		len = 4;
		shift = 8 * (sizeof(unsigned long) - len);
#if defined(CONFIG_64BIT)
	} else if ((insn & INSN_MASK_LD) == INSN_MATCH_LD) {
		len = 8;
		shift = 8 * (sizeof(unsigned long) - len);
	} else if ((insn & INSN_MASK_LWU) == INSN_MATCH_LWU) {
		len = 4;
#endif
	} else if ((insn & INSN_MASK_FLD) == INSN_MATCH_FLD) {
		fp = 1;
		len = 8;
	} else if ((insn & INSN_MASK_FLW) == INSN_MATCH_FLW) {
		fp = 1;
		len = 4;
	} else if ((insn & INSN_MASK_LH) == INSN_MATCH_LH) {
		len = 2;
		shift = 8 * (sizeof(unsigned long) - len);
	} else if ((insn & INSN_MASK_LHU) == INSN_MATCH_LHU) {
		len = 2;
#if defined(CONFIG_64BIT)
	} else if ((insn & INSN_MASK_C_LD) == INSN_MATCH_C_LD) {
		len = 8;
		shift = 8 * (sizeof(unsigned long) - len);
		/* rewrite rd' into a regular rd field so SET_RD() applies */
		insn = RVC_RS2S(insn) << SH_RD;
	} else if ((insn & INSN_MASK_C_LDSP) == INSN_MATCH_C_LDSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 8;
		shift = 8 * (sizeof(unsigned long) - len);
#endif
	} else if ((insn & INSN_MASK_C_LW) == INSN_MATCH_C_LW) {
		len = 4;
		shift = 8 * (sizeof(unsigned long) - len);
		insn = RVC_RS2S(insn) << SH_RD;
	} else if ((insn & INSN_MASK_C_LWSP) == INSN_MATCH_C_LWSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 4;
		shift = 8 * (sizeof(unsigned long) - len);
	} else if ((insn & INSN_MASK_C_FLD) == INSN_MATCH_C_FLD) {
		fp = 1;
		len = 8;
		insn = RVC_RS2S(insn) << SH_RD;
	} else if ((insn & INSN_MASK_C_FLDSP) == INSN_MATCH_C_FLDSP) {
		fp = 1;
		len = 8;
#if defined(CONFIG_32BIT)
	} else if ((insn & INSN_MASK_C_FLW) == INSN_MATCH_C_FLW) {
		fp = 1;
		len = 4;
		insn = RVC_RS2S(insn) << SH_RD;
	} else if ((insn & INSN_MASK_C_FLWSP) == INSN_MATCH_C_FLWSP) {
		fp = 1;
		len = 4;
#endif
	} else {
		regs->epc = epc;
		return -1;
	}

	if (!IS_ENABLED(CONFIG_FPU) && fp)
		return -EOPNOTSUPP;

	val.data_u64 = 0;
	for (i = 0; i < len; i++) {
		if (load_u8(regs, (void *)(addr + i), &val.data_bytes[i]))
			return -1;
	}

	if (!fp)
		SET_RD(insn, regs, val.data_ulong << shift >> shift);
	else if (len == 8)
		set_f64_rd(insn, regs, val.data_u64);
	else
		set_f32_rd(insn, regs, val.data_ulong);

	regs->epc = epc + INSN_LEN(insn);

	return 0;
}
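
/*
 * Emulate a misaligned store: fetch and decode the trapping instruction,
 * pull the source value out of the (scalar or FP) register file and
 * write it out one byte at a time.
 */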
int handle_misaligned_store(struct pt_regs *regs)
{
	union reg_data val;
	unsigned long epc = regs->epc;
	unsigned long insn;
	unsigned long addr = regs->badaddr;
	int i, len = 0, fp = 0;

	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);

	if (!unaligned_enabled)
		return -1;

	if (user_mode(regs) && (current->thread.align_ctl & PR_UNALIGN_SIGBUS))
		return -1;

	if (get_insn(regs, epc, &insn))
		return -1;

	regs->epc = 0;

	val.data_ulong = GET_RS2(insn, regs);

	if ((insn & INSN_MASK_SW) == INSN_MATCH_SW) {
		len = 4;
#if defined(CONFIG_64BIT)
	} else if ((insn & INSN_MASK_SD) == INSN_MATCH_SD) {
		len = 8;
#endif
	} else if ((insn & INSN_MASK_FSD) == INSN_MATCH_FSD) {
		fp = 1;
		len = 8;
		val.data_u64 = GET_F64_RS2(insn, regs);
	} else if ((insn & INSN_MASK_FSW) == INSN_MATCH_FSW) {
		fp = 1;
		len = 4;
		val.data_ulong = GET_F32_RS2(insn, regs);
	} else if ((insn & INSN_MASK_SH) == INSN_MATCH_SH) {
		len = 2;
#if defined(CONFIG_64BIT)
	} else if ((insn & INSN_MASK_C_SD) == INSN_MATCH_C_SD) {
		len = 8;
		val.data_ulong = GET_RS2S(insn, regs);
	} else if ((insn & INSN_MASK_C_SDSP) == INSN_MATCH_C_SDSP) {
		len = 8;
		val.data_ulong = GET_RS2C(insn, regs);
#endif
	} else if ((insn & INSN_MASK_C_SW) == INSN_MATCH_C_SW) {
		len = 4;
		val.data_ulong = GET_RS2S(insn, regs);
	} else if ((insn & INSN_MASK_C_SWSP) == INSN_MATCH_C_SWSP) {
		len = 4;
		val.data_ulong = GET_RS2C(insn, regs);
	} else if ((insn & INSN_MASK_C_FSD) == INSN_MATCH_C_FSD) {
		fp = 1;
		len = 8;
		val.data_u64 = GET_F64_RS2S(insn, regs);
	} else if ((insn & INSN_MASK_C_FSDSP) == INSN_MATCH_C_FSDSP) {
		fp = 1;
		len = 8;
		val.data_u64 = GET_F64_RS2C(insn, regs);
#if !defined(CONFIG_64BIT)
	} else if ((insn & INSN_MASK_C_FSW) == INSN_MATCH_C_FSW) {
		fp = 1;
		len = 4;
		val.data_ulong = GET_F32_RS2S(insn, regs);
	} else if ((insn & INSN_MASK_C_FSWSP) == INSN_MATCH_C_FSWSP) {
		fp = 1;
		len = 4;
		val.data_ulong = GET_F32_RS2C(insn, regs);
#endif
	} else {
		regs->epc = epc;
		return -1;
	}

	if (!IS_ENABLED(CONFIG_FPU) && fp)
		return -EOPNOTSUPP;

	for (i = 0; i < len; i++) {
		if (store_u8(regs, (void *)(addr + i), val.data_bytes[i]))
			return -1;
	}

	regs->epc = epc + INSN_LEN(insn);

	return 0;
}
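
/*
 * Check whether misaligned accesses on this CPU are emulated by this
 * handler: tag the CPU as "unknown", issue one deliberately misaligned
 * load and see whether handle_misaligned_load() retagged it as emulated.
 */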
bool check_unaligned_access_emulated(int cpu)
{
	long *mas_ptr = per_cpu_ptr(&misaligned_access_speed, cpu);
	unsigned long tmp_var, tmp_val;
	bool misaligned_emu_detected;

	*mas_ptr = RISCV_HWPROBE_MISALIGNED_UNKNOWN;

	__asm__ __volatile__ (
		"       "REG_L" %[tmp], 1(%[ptr])\n"
		: [tmp] "=r" (tmp_val) : [ptr] "r" (&tmp_var) : "memory");

	misaligned_emu_detected = (*mas_ptr == RISCV_HWPROBE_MISALIGNED_EMULATED);
	/*
	 * If unaligned_ctl is already set, this means that we detected that
	 * all CPUs use emulated misaligned accesses at boot time. If that
	 * changed when hotplugging the new CPU, this is something we don't
	 * handle.
	 */
	if (unlikely(unaligned_ctl && !misaligned_emu_detected)) {
		pr_crit("CPU misaligned accesses non homogeneous (expected all emulated)\n");
		while (true)
			cpu_relax();
	}

	return misaligned_emu_detected;
}

void unaligned_emulation_finish(void)
{
	int cpu;

	/*
	 * We can only support PR_UNALIGN controls if all CPUs have misaligned
	 * accesses emulated, since tasks requesting such control can run on
	 * any CPU.
	 */
	for_each_online_cpu(cpu) {
		if (per_cpu(misaligned_access_speed, cpu) !=
					RISCV_HWPROBE_MISALIGNED_EMULATED) {
			return;
		}
	}
	unaligned_ctl = true;
}

bool unaligned_ctl_available(void)
{
	return unaligned_ctl;
}