// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *    Unaligned memory access handler
 *
 *    Copyright (C) 2001 Randolph Chung <tausq@debian.org>
 *    Copyright (C) 2022 Helge Deller <deller@gmx.de>
 *    Significantly tweaked by LaMont Jones <lamont@debian.org>
 */
#include <linux/sched/signal.h>
#include <linux/signal.h>
#include <linux/ratelimit.h>
#include <linux/uaccess.h>
#include <linux/sysctl.h>
#include <asm/unaligned.h>
#include <asm/hardirq.h>
#include <asm/traps.h>
/* #define DEBUG_UNALIGNED 1 */

#ifdef DEBUG_UNALIGNED
#define DPRINTF(fmt, args...) do { printk(KERN_DEBUG "%s:%d:%s ", __FILE__, __LINE__, __func__ ); printk(KERN_DEBUG fmt, ##args ); } while (0)
#else
#define DPRINTF(fmt, args...)
#endif
/* 1111 1100 0000 0000 0001 0011 1100 0000 */
#define OPCODE1(a,b,c)	((a)<<26|(b)<<12|(c)<<6)
#define OPCODE2(a,b)	((a)<<26|(b)<<1)
#define OPCODE3(a,b)	((a)<<26|(b)<<2)
#define OPCODE4(a)	((a)<<26)
#define OPCODE1_MASK	OPCODE1(0x3f,1,0xf)
#define OPCODE2_MASK	OPCODE2(0x3f,1)
#define OPCODE3_MASK	OPCODE3(0x3f,1)
#define OPCODE4_MASK	OPCODE4(0x3f)
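/*
 * OPCODE1()..OPCODE4() build the match patterns for the instruction formats
 * handled here: the 6-bit major opcode (<<26) plus, depending on the format,
 * a one-bit modifier and a small sub-opcode field.  The bit pattern in the
 * comment above is OPCODE1_MASK written out in binary.
 */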
/* skip LDB - never unaligned (index) */
#define OPCODE_LDH_I	OPCODE1(0x03,0,0x1)
#define OPCODE_LDW_I	OPCODE1(0x03,0,0x2)
#define OPCODE_LDD_I	OPCODE1(0x03,0,0x3)
#define OPCODE_LDDA_I	OPCODE1(0x03,0,0x4)
#define OPCODE_LDCD_I	OPCODE1(0x03,0,0x5)
#define OPCODE_LDWA_I	OPCODE1(0x03,0,0x6)
#define OPCODE_LDCW_I	OPCODE1(0x03,0,0x7)
/* skip LDB - never unaligned (short) */
#define OPCODE_LDH_S	OPCODE1(0x03,1,0x1)
#define OPCODE_LDW_S	OPCODE1(0x03,1,0x2)
#define OPCODE_LDD_S	OPCODE1(0x03,1,0x3)
#define OPCODE_LDDA_S	OPCODE1(0x03,1,0x4)
#define OPCODE_LDCD_S	OPCODE1(0x03,1,0x5)
#define OPCODE_LDWA_S	OPCODE1(0x03,1,0x6)
#define OPCODE_LDCW_S	OPCODE1(0x03,1,0x7)
/* skip STB - never unaligned */
#define OPCODE_STH	OPCODE1(0x03,1,0x9)
#define OPCODE_STW	OPCODE1(0x03,1,0xa)
#define OPCODE_STD	OPCODE1(0x03,1,0xb)
/* skip STBY - never unaligned */
/* skip STDBY - never unaligned */
#define OPCODE_STWA	OPCODE1(0x03,1,0xe)
#define OPCODE_STDA	OPCODE1(0x03,1,0xf)

#define OPCODE_FLDWX	OPCODE1(0x09,0,0x0)
#define OPCODE_FLDWXR	OPCODE1(0x09,0,0x1)
#define OPCODE_FSTWX	OPCODE1(0x09,0,0x8)
#define OPCODE_FSTWXR	OPCODE1(0x09,0,0x9)
#define OPCODE_FLDWS	OPCODE1(0x09,1,0x0)
#define OPCODE_FLDWSR	OPCODE1(0x09,1,0x1)
#define OPCODE_FSTWS	OPCODE1(0x09,1,0x8)
#define OPCODE_FSTWSR	OPCODE1(0x09,1,0x9)
#define OPCODE_FLDDX	OPCODE1(0x0b,0,0x0)
#define OPCODE_FSTDX	OPCODE1(0x0b,0,0x8)
#define OPCODE_FLDDS	OPCODE1(0x0b,1,0x0)
#define OPCODE_FSTDS	OPCODE1(0x0b,1,0x8)

#define OPCODE_LDD_L	OPCODE2(0x14,0)
#define OPCODE_FLDD_L	OPCODE2(0x14,1)
#define OPCODE_STD_L	OPCODE2(0x1c,0)
#define OPCODE_FSTD_L	OPCODE2(0x1c,1)

#define OPCODE_LDW_M	OPCODE3(0x17,1)
#define OPCODE_FLDW_L	OPCODE3(0x17,0)
#define OPCODE_FSTW_L	OPCODE3(0x1f,0)
#define OPCODE_STW_M	OPCODE3(0x1f,1)

#define OPCODE_LDH_L	OPCODE4(0x11)
#define OPCODE_LDW_L	OPCODE4(0x12)
#define OPCODE_LDWM	OPCODE4(0x13)
#define OPCODE_STH_L	OPCODE4(0x19)
#define OPCODE_STW_L	OPCODE4(0x1A)
#define OPCODE_STWM	OPCODE4(0x1B)
#define MAJOR_OP(i) (((i)>>26)&0x3f)
#define R1(i) (((i)>>21)&0x1f)
#define R2(i) (((i)>>16)&0x1f)
#define R3(i) ((i)&0x1f)
#define FR3(i) ((((i)&0x1f)<<1)|(((i)>>6)&1))
#define IM(i,n) (((i)>>1&((1<<(n-1))-1))|((i)&1?((0-1L)<<(n-1)):0))
#define IM5_2(i) IM((i)>>16,5)
#define IM5_3(i) IM((i),5)
#define IM14(i) IM((i),14)
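/*
 * IM() decodes the PA-RISC "low sign extended" immediates used below: the
 * sign lives in bit 0 of the n-bit field and the magnitude in the bits above
 * it, e.g. a 14-bit field of 0x3fff decodes to -1 and 0x0002 to +1.
 */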
#define ERR_NOTHANDLED	-1
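/*
 * Runtime switch for the emulation below; it is exposed as a sysctl (hence
 * the <linux/sysctl.h> include), so unaligned fixups can be disabled
 * system-wide.
 */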
int unaligned_enabled __read_mostly = 1;

static int emulate_ldh(struct pt_regs *regs, int toreg)
{
	unsigned long saddr = regs->ior;
	unsigned long val = 0, temp1;
	ASM_EXCEPTIONTABLE_VAR(ret);

	DPRINTF("load " RFMT ":" RFMT " to r%d for 2 bytes\n",
		regs->isr, regs->ior, toreg);
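	/*
	 * Fetch the halfword as two single byte loads through %sr1 (the space
	 * of the faulting access) and merge them with depw; byte accesses can
	 * never be misaligned themselves.
	 */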
	__asm__ __volatile__  (
"	mtsp	%4, %%sr1\n"
"1:	ldbs	0(%%sr1,%3), %2\n"
"2:	ldbs	1(%%sr1,%3), %0\n"
"	depw	%2, 23, 24, %0\n"
"3:	\n"
	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b)
	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b)
	: "+r" (val), "+r" (ret), "=&r" (temp1)
	: "r" (saddr), "r" (regs->isr) );

	DPRINTF("val = " RFMT "\n", val);

	if (toreg)
		regs->gr[toreg] = val;

	return ret;
}

static int emulate_ldw(struct pt_regs *regs, int toreg, int flop)
{
	unsigned long saddr = regs->ior;
	unsigned long val = 0, temp1, temp2;
	ASM_EXCEPTIONTABLE_VAR(ret);

	DPRINTF("load " RFMT ":" RFMT " to r%d for 4 bytes\n",
		regs->isr, regs->ior, toreg);
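	/*
	 * Read the two naturally aligned words that straddle the target
	 * address, then extract the unaligned word with a %sar-controlled
	 * double shift (vshd).
	 */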
	__asm__ __volatile__  (
"	zdep	%4,28,2,%2\n"		/* r19=(ofs&3)*8 */
"	mtsp	%5, %%sr1\n"
"	depw	%%r0,31,2,%4\n"
"1:	ldw	0(%%sr1,%4),%0\n"
"2:	ldw	4(%%sr1,%4),%3\n"
"	subi	32,%2,%2\n"
"	mtctl	%2,11\n"
"	vshd	%0,%3,%0\n"
"3:	\n"
	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b)
	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b)
	: "+r" (val), "+r" (ret), "=&r" (temp1), "=&r" (temp2)
	: "r" (saddr), "r" (regs->isr) );

	DPRINTF("val = " RFMT "\n", val);

	if (flop)
		((__u32*)(regs->fr))[toreg] = val;
	else if (toreg)
		regs->gr[toreg] = val;

	return ret;
}

static int emulate_ldd(struct pt_regs *regs, int toreg, int flop)
{
	unsigned long saddr = regs->ior;
	__u64 val = 0;
	ASM_EXCEPTIONTABLE_VAR(ret);

	DPRINTF("load " RFMT ":" RFMT " to r%d for 8 bytes\n",
		regs->isr, regs->ior, toreg);

	if (!IS_ENABLED(CONFIG_64BIT) && !flop)
		return ERR_NOTHANDLED;
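	/*
	 * On 64-bit kernels the value is assembled from the two aligned
	 * doublewords around the target (shrpd); 32-bit kernels only get here
	 * for FP loads and build it from three aligned words instead.
	 */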
#ifdef CONFIG_64BIT
	__asm__ __volatile__  (
"	depd,z	%3,60,3,%%r19\n"	/* r19=(ofs&7)*8 */
"	mtsp	%4, %%sr1\n"
"	depd	%%r0,63,3,%3\n"
"1:	ldd	0(%%sr1,%3),%0\n"
"2:	ldd	8(%%sr1,%3),%%r20\n"
"	subi	64,%%r19,%%r19\n"
"	mtsar	%%r19\n"
"	shrpd	%0,%%r20,%%sar,%0\n"
"3:	\n"
	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b)
	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b)
	: "=r" (val), "+r" (ret)
	: "0" (val), "r" (saddr), "r" (regs->isr)
	: "r19", "r20" );
#else
    {
	unsigned long shift, temp1;
	__asm__ __volatile__  (
"	zdep	%2,29,2,%3\n"		/* r19=(ofs&3)*8 */
"	mtsp	%5, %%sr1\n"
"	dep	%%r0,31,2,%2\n"
"1:	ldw	0(%%sr1,%2),%0\n"
"2:	ldw	4(%%sr1,%2),%R0\n"
"3:	ldw	8(%%sr1,%2),%4\n"
"	subi	32,%3,%3\n"
"	mtsar	%3\n"
"	vshd	%0,%R0,%0\n"
"	vshd	%R0,%4,%R0\n"
"4:	\n"
	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 4b)
	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 4b)
	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 4b)
	: "+r" (val), "+r" (ret), "+r" (saddr), "=&r" (shift), "=&r" (temp1)
	: "r" (regs->isr) );
    }
#endif

	DPRINTF("val = 0x%llx\n", val);

	if (flop)
		regs->fr[toreg] = val;
	else if (toreg)
		regs->gr[toreg] = val;

	return ret;
}

static int emulate_sth(struct pt_regs *regs, int frreg)
{
	unsigned long val = regs->gr[frreg], temp1;
	ASM_EXCEPTIONTABLE_VAR(ret);

	if (!frreg)
		val = 0;

	DPRINTF("store r%d (" RFMT ") to " RFMT ":" RFMT " for 2 bytes\n", frreg,
		val, regs->isr, regs->ior);
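	/*
	 * Split the halfword into two single byte stores: extrw,u pulls out
	 * the upper byte, and each byte store is aligned by definition.
	 */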
	__asm__ __volatile__ (
"	mtsp	%4, %%sr1\n"
"	extrw,u	%2, 23, 8, %1\n"
"1:	stb	%1, 0(%%sr1, %3)\n"
"2:	stb	%2, 1(%%sr1, %3)\n"
"3:	\n"
	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b)
	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b)
	: "+r" (ret), "=&r" (temp1)
	: "r" (val), "r" (regs->ior), "r" (regs->isr) );

	return ret;
}

static int emulate_stw(struct pt_regs *regs, int frreg, int flop)
{
	unsigned long val;
	ASM_EXCEPTIONTABLE_VAR(ret);

	if (flop)
		val = ((__u32*)(regs->fr))[frreg];
	else if (frreg)
		val = regs->gr[frreg];
	else
		val = 0;

	DPRINTF("store r%d (" RFMT ") to " RFMT ":" RFMT " for 4 bytes\n", frreg,
		val, regs->isr, regs->ior);
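	/*
	 * Read-modify-write the two aligned words containing the target:
	 * build a byte mask from the offset (depwi,z -2 under %sar), merge
	 * the new bytes with and/andcm/or, then store both words back.
	 */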
	__asm__ __volatile__ (
"	mtsp	%3, %%sr1\n"
"	zdep	%2, 28, 2, %%r19\n"
"	dep	%%r0, 31, 2, %2\n"
"	mtsar	%%r19\n"
"	depwi,z	-2, %%sar, 32, %%r19\n"
"1:	ldw	0(%%sr1,%2),%%r20\n"
"2:	ldw	4(%%sr1,%2),%%r21\n"
"	vshd	%%r0, %1, %%r22\n"
"	vshd	%1, %%r0, %%r1\n"
"	and	%%r20, %%r19, %%r20\n"
"	andcm	%%r21, %%r19, %%r21\n"
"	or	%%r22, %%r20, %%r20\n"
"	or	%%r1, %%r21, %%r21\n"
"	stw	%%r20,0(%%sr1,%2)\n"
"	stw	%%r21,4(%%sr1,%2)\n"
"3:	\n"
	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b)
	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b)
	: "+r" (ret)
	: "r" (val), "r" (regs->ior), "r" (regs->isr)
	: "r19", "r20", "r21", "r22", "r1" );

	return ret;
}

static int emulate_std(struct pt_regs *regs, int frreg, int flop)
{
	__u64 val;
	ASM_EXCEPTIONTABLE_VAR(ret);

	if (flop)
		val = regs->fr[frreg];
	else if (frreg)
		val = regs->gr[frreg];
	else
		val = 0;

	DPRINTF("store r%d (0x%016llx) to " RFMT ":" RFMT " for 8 bytes\n", frreg,
		val, regs->isr, regs->ior);
	if (!IS_ENABLED(CONFIG_64BIT) && !flop)
		return ERR_NOTHANDLED;
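	/*
	 * Same read-modify-write scheme as emulate_stw(), applied to the two
	 * aligned doublewords around the target on 64-bit kernels, or to
	 * three aligned words on 32-bit kernels (FP stores only).
	 */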
#ifdef CONFIG_64BIT
	__asm__ __volatile__ (
"	mtsp	%3, %%sr1\n"
"	depd,z	%2, 60, 3, %%r19\n"
"	depd	%%r0, 63, 3, %2\n"
"	mtsar	%%r19\n"
"	depdi,z	-2, %%sar, 64, %%r19\n"
"1:	ldd	0(%%sr1,%2),%%r20\n"
"2:	ldd	8(%%sr1,%2),%%r21\n"
"	shrpd	%%r0, %1, %%sar, %%r22\n"
"	shrpd	%1, %%r0, %%sar, %%r1\n"
"	and	%%r20, %%r19, %%r20\n"
"	andcm	%%r21, %%r19, %%r21\n"
"	or	%%r22, %%r20, %%r20\n"
"	or	%%r1, %%r21, %%r21\n"
"3:	std	%%r20,0(%%sr1,%2)\n"
"4:	std	%%r21,8(%%sr1,%2)\n"
"5:	\n"
	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 5b)
	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 5b)
	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 5b)
	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(4b, 5b)
	: "+r" (ret)
	: "r" (val), "r" (regs->ior), "r" (regs->isr)
	: "r19", "r20", "r21", "r22", "r1" );
#else
    {
	unsigned long valh = (val >> 32), vall = (val & 0xffffffffl);
	__asm__ __volatile__ (
"	mtsp	%4, %%sr1\n"
"	zdep	%2, 29, 2, %%r19\n"
"	dep	%%r0, 31, 2, %3\n"
"	mtsar	%%r19\n"
"	zvdepi	-2, 32, %%r19\n"
"1:	ldw	0(%%sr1,%3),%%r20\n"
"2:	ldw	8(%%sr1,%3),%%r21\n"
"	vshd	%1, %2, %%r1\n"
"	vshd	%%r0, %1, %1\n"
"	vshd	%2, %%r0, %2\n"
"	and	%%r20, %%r19, %%r20\n"
"	andcm	%%r21, %%r19, %%r21\n"
"	or	%1, %%r20, %1\n"
"	or	%2, %%r21, %2\n"
"3:	stw	%1,0(%%sr1,%3)\n"
"4:	stw	%%r1,4(%%sr1,%3)\n"
"5:	stw	%2,8(%%sr1,%3)\n"
"6:	\n"
	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 6b)
	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 6b)
	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 6b)
	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(4b, 6b)
	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(5b, 6b)
	: "+r" (ret)
	: "r" (valh), "r" (vall), "r" (regs->ior), "r" (regs->isr)
	: "r19", "r20", "r21", "r1" );
    }
#endif

	return ret;
}

void handle_unaligned(struct pt_regs *regs)
{
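	/* Paces the warning below: at most five messages per five seconds. */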
	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
	unsigned long newbase = R1(regs->iir)?regs->gr[R1(regs->iir)]:0;
	int modify = 0;
	int ret = ERR_NOTHANDLED;

	__inc_irq_stat(irq_unaligned_count);

	/* log a message with pacing */
	if (user_mode(regs)) {
		if (current->thread.flags & PARISC_UAC_SIGBUS) {
			goto force_sigbus;
		}

		if (!(current->thread.flags & PARISC_UAC_NOPRINT) &&
			__ratelimit(&ratelimit)) {
			printk(KERN_WARNING "%s(%d): unaligned access to " RFMT
				" at ip " RFMT " (iir " RFMT ")\n",
				current->comm, task_pid_nr(current), regs->ior,
				regs->iaoq[0], regs->iir);
#ifdef DEBUG_UNALIGNED
			show_regs(regs);
#endif
		}
	}

	if (!unaligned_enabled)
		goto force_sigbus;

	/* handle modification - OK, it's ugly, see the instruction manual */
	switch (MAJOR_OP(regs->iir))
	{
	case 0x03:
	case 0x09:
	case 0x0b:
		if (regs->iir&0x20)
		{
			modify = 1;
			if (regs->iir&0x1000)		/* short loads */
				if (regs->iir&0x200)
					newbase += IM5_3(regs->iir);
				else
					newbase += IM5_2(regs->iir);
			else if (regs->iir&0x2000)	/* scaled indexed */
			{
				int shift = 0;
				switch (regs->iir & OPCODE1_MASK)
				{
				case OPCODE_LDH_I:
					shift = 1; break;
				case OPCODE_LDW_I:
					shift = 2; break;
				case OPCODE_LDD_I:
				case OPCODE_LDDA_I:
					shift = 3; break;
				}
				newbase += (R2(regs->iir)?regs->gr[R2(regs->iir)]:0)<<shift;
			} else				/* simple indexed */
				newbase += (R2(regs->iir)?regs->gr[R2(regs->iir)]:0);
		}
		break;
	case 0x13:
	case 0x1b:
		modify = 1;
		newbase += IM14(regs->iir);
		break;
	case 0x14:
	case 0x1c:
		if (regs->iir&8)
		{
			modify = 1;
			newbase += IM14(regs->iir&~0xe);
		}
		break;
	case 0x16:
	case 0x1e:
		modify = 1;
		newbase += IM14(regs->iir&6);
		break;
	case 0x17:
	case 0x1f:
		if (regs->iir&4)
		{
			modify = 1;
			newbase += IM14(regs->iir&~4);
		}
		break;
	}

	/* TODO: make this cleaner... */
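	/*
	 * Dispatch to the matching emulate_*() helper; the trailing argument
	 * ("flop") selects the floating-point register file where one helper
	 * serves both the integer and the FP encoding.
	 */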
	switch (regs->iir & OPCODE1_MASK)
	{
	case OPCODE_LDH_I:
	case OPCODE_LDH_S:
		ret = emulate_ldh(regs, R3(regs->iir));
		break;

	case OPCODE_LDW_I:
	case OPCODE_LDWA_I:
	case OPCODE_LDW_S:
	case OPCODE_LDWA_S:
		ret = emulate_ldw(regs, R3(regs->iir), 0);
		break;

	case OPCODE_STH:
		ret = emulate_sth(regs, R2(regs->iir));
		break;

	case OPCODE_STW:
	case OPCODE_STWA:
		ret = emulate_stw(regs, R2(regs->iir), 0);
		break;

#ifdef CONFIG_64BIT
	case OPCODE_LDD_I:
	case OPCODE_LDDA_I:
	case OPCODE_LDD_S:
	case OPCODE_LDDA_S:
		ret = emulate_ldd(regs, R3(regs->iir), 0);
		break;

	case OPCODE_STD:
	case OPCODE_STDA:
		ret = emulate_std(regs, R2(regs->iir), 0);
		break;
#endif

	case OPCODE_FLDWX:
	case OPCODE_FLDWXR:
	case OPCODE_FLDWS:
	case OPCODE_FLDWSR:
		ret = emulate_ldw(regs, FR3(regs->iir), 1);
		break;

	case OPCODE_FLDDX:
	case OPCODE_FLDDS:
		ret = emulate_ldd(regs, R3(regs->iir), 1);
		break;

	case OPCODE_FSTWX:
	case OPCODE_FSTWXR:
	case OPCODE_FSTWS:
	case OPCODE_FSTWSR:
		ret = emulate_stw(regs, FR3(regs->iir), 1);
		break;

	case OPCODE_FSTDX:
	case OPCODE_FSTDS:
		ret = emulate_std(regs, R3(regs->iir), 1);
		break;

	case OPCODE_LDCD_I:
	case OPCODE_LDCW_I:
	case OPCODE_LDCD_S:
	case OPCODE_LDCW_S:
		ret = ERR_NOTHANDLED;	/* "undefined", but let's kill them. */
		break;
	}
	switch (regs->iir & OPCODE2_MASK)
	{
	case OPCODE_FLDD_L:
		ret = emulate_ldd(regs,R2(regs->iir),1);
		break;
	case OPCODE_FSTD_L:
		ret = emulate_std(regs, R2(regs->iir),1);
		break;
#ifdef CONFIG_64BIT
	case OPCODE_LDD_L:
		ret = emulate_ldd(regs, R2(regs->iir),0);
		break;
	case OPCODE_STD_L:
		ret = emulate_std(regs, R2(regs->iir),0);
		break;
#endif
	}
	switch (regs->iir & OPCODE3_MASK)
	{
	case OPCODE_FLDW_L:
		ret = emulate_ldw(regs, R2(regs->iir), 1);
		break;
	case OPCODE_LDW_M:
		ret = emulate_ldw(regs, R2(regs->iir), 0);
		break;

	case OPCODE_FSTW_L:
		ret = emulate_stw(regs, R2(regs->iir),1);
		break;
	case OPCODE_STW_M:
		ret = emulate_stw(regs, R2(regs->iir),0);
		break;
	}
	switch (regs->iir & OPCODE4_MASK)
	{
	case OPCODE_LDH_L:
		ret = emulate_ldh(regs, R2(regs->iir));
		break;
	case OPCODE_LDW_L:
	case OPCODE_LDWM:
		ret = emulate_ldw(regs, R2(regs->iir),0);
		break;
	case OPCODE_STH_L:
		ret = emulate_sth(regs, R2(regs->iir));
		break;
	case OPCODE_STW_L:
	case OPCODE_STWM:
		ret = emulate_stw(regs, R2(regs->iir),0);
		break;
	}

	if (ret == 0 && modify && R1(regs->iir))
		regs->gr[R1(regs->iir)] = newbase;
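	/* Base-modifying forms: the updated base register above is only
	 * written back when the emulation succeeded. */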

	if (ret == ERR_NOTHANDLED)
		printk(KERN_CRIT "Not-handled unaligned insn 0x%08lx\n", regs->iir);

	DPRINTF("ret = %d\n", ret);

	if (ret)
	{
		/*
		 * The unaligned handler failed.
		 * If we were called by __get_user() or __put_user() jump
		 * to its exception fixup handler instead of crashing.
		 */
		if (!user_mode(regs) && fixup_exception(regs))
			return;
		printk(KERN_CRIT "Unaligned handler failed, ret = %d\n", ret);
		die_if_kernel("Unaligned data reference", regs, 28);

		if (ret == -EFAULT)
		{
			force_sig_fault(SIGSEGV, SEGV_MAPERR,
					(void __user *)regs->ior);
		}
		else
		{
force_sigbus:
			/* couldn't handle it ... */
			force_sig_fault(SIGBUS, BUS_ADRALN,
					(void __user *)regs->ior);
		}

		return;
	}

	/* else we handled it, let life go on. */
}

/*
 * NB: check_unaligned() is only used for PCXS processors right
 * now, so we only check for PA1.1 encodings at this point.
 */
int
check_unaligned(struct pt_regs *regs)
{
	unsigned long align_mask;

	/* Get alignment mask */
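	/* align_mask becomes (access size - 1); a nonzero result below means
	 * the effective address is misaligned for the decoded access. */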
	align_mask = 0UL;
	switch (regs->iir & OPCODE1_MASK) {

	case OPCODE_LDH_I:
	case OPCODE_LDH_S:
	case OPCODE_STH:
		align_mask = 1UL;
		break;

	case OPCODE_LDW_I:
	case OPCODE_LDWA_I:
	case OPCODE_LDW_S:
	case OPCODE_LDWA_S:
	case OPCODE_STW:
	case OPCODE_STWA:
		align_mask = 3UL;
		break;

	default:
		switch (regs->iir & OPCODE4_MASK) {
		case OPCODE_LDH_L:
		case OPCODE_STH_L:
			align_mask = 1UL;
			break;

		case OPCODE_LDW_L:
		case OPCODE_LDWM:
		case OPCODE_STW_L:
		case OPCODE_STWM:
			align_mask = 3UL;
			break;
		}
		break;
	}

	return (int)(regs->ior & align_mask);
}