/*
 * Handle unaligned accesses by emulation.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2014 Imagination Technologies Ltd.
 *
 * This file contains the exception handler for the address error exception,
 * with the special capability to execute faulting instructions in software.
 * The handler does not try to handle the case when the program counter
 * points to an address not aligned to a word boundary.
 *
 * Writing data to unaligned addresses is bad practice even on Intel, where
 * only performance is affected. Much worse is that such code is non-
 * portable. Because several programs die on MIPS due to alignment
 * problems, I decided to implement this handler anyway, though I originally
 * didn't intend to do this at all for user code.
 *
 * For now I enable fixing of address errors by default to make life easier.
 * I however intend to disable this at some point in the future, when the
 * alignment problems with user programs have been fixed. For programmers
 * this is the right way to go.
 *
 * Fixing address errors is a per-process option. The option is inherited
 * across fork(2) and execve(2) calls. If you really want to use the
 * option in your user programs - I strongly discourage use of the software
 * emulation - use the following code in your userland stuff:
 *
 * #include <sys/sysmips.h>
 *
 * sysmips(MIPS_FIXADE, x);
 *
 * The argument x is 0 to disable software emulation; any other value
 * enables it.
 *
 * Below is a little program to play around with this feature.
 *
 * #include <stdio.h>
 * #include <sys/sysmips.h>
 *
 * struct foo {
 *         unsigned char bar[8];
 * };
 *
 * main(int argc, char *argv[])
 * {
 *         struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
 *         unsigned int *p = (unsigned int *) (x.bar + 3);
 *         int i;
 *
 *         if (argc > 1)
 *                 sysmips(MIPS_FIXADE, atoi(argv[1]));
 *
 *         printf("*p = %08lx\n", *p);
 *
 *         *p = 0xdeadface;
 *
 *         for (i = 0; i <= 7; i++)
 *                 printf("%02x ", x.bar[i]);
 *         printf("\n");
 * }
 *
 * Coprocessor loads are not supported; I think this case is unimportant
 * in practice.
 *
 * TODO: Handle ndc (attempted store to doubleword in uncached memory)
 *       exception for the R6000.
 *       A store crossing a page boundary might be executed only partially.
 *       Undo the partial store in this case.
 */
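/*
 * A rough idea of how the program above behaves (illustrative only; the
 * program and binary names, the exact output and the debugfs path are
 * assumptions and depend on the system, toolchain and kernel config):
 *
 *   $ gcc -o fixade-test fixade-test.c
 *   $ ./fixade-test 1     # emulation enabled: the unaligned accesses are
 *                         # fixed up and the bytes of bar[] are printed
 *   $ ./fixade-test 0     # emulation disabled: the first unaligned access
 *                         # raises SIGBUS
 *
 * With CONFIG_DEBUG_FS the number of emulated instructions can be read back
 * from the unaligned_instructions counter created at the bottom of this file.
 */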
#include <linux/context_tracking.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <linux/perf_event.h>

#include <asm/branch.h>
#include <asm/byteorder.h>
#include <asm/debug.h>
#include <asm/fpu_emulator.h>
#include <asm/unaligned-emul.h>
#include <asm/mmu_context.h>
#include <asm/traps.h>
#include <linux/uaccess.h>

#include "access-helper.h"
enum {
	UNALIGNED_ACTION_QUIET,
	UNALIGNED_ACTION_SIGNAL,
	UNALIGNED_ACTION_SHOW,
};

#ifdef CONFIG_DEBUG_FS
static u32 unaligned_instructions;
static u32 unaligned_action;
#else
#define unaligned_action UNALIGNED_ACTION_QUIET
#endif

extern void show_registers(struct pt_regs *regs);
static void emulate_load_store_insn(struct pt_regs *regs,
void __user *addr, unsigned int *pc)
unsigned long origpc, orig31, value;
union mips_instruction insn;
bool user = user_mode(regs);
origpc = (unsigned long)pc;
orig31 = regs->regs[31];
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
* This load never faults.
__get_inst32(&insn.word, pc, user);
switch (insn.i_format.opcode) {
* These are instructions that a compiler doesn't generate. We
* can assume therefore that the code is MIPS-aware and
* really buggy. Emulating these instructions would break the
* semantics anyway.
* For these instructions the only way to create an address
* error is an attempted access to kernel/supervisor address
* space.
* The remaining opcodes are the ones that are really of
* interest.
#ifdef CONFIG_MACH_INGENIC
if (insn.mxu_lx_format.func != mxu_lx_op)
goto sigbus; /* we don't care about other MXU instructions */
switch (insn.mxu_lx_format.op) {
if (user && !access_ok(addr, 4))
LoadW(addr, value, res);
compute_return_epc(regs);
regs->regs[insn.mxu_lx_format.rd] = value;
if (user && !access_ok(addr, 2))
LoadHW(addr, value, res);
compute_return_epc(regs);
regs->regs[insn.dsp_format.rd] = value;
if (user && !access_ok(addr, 2))
LoadHWU(addr, value, res);
compute_return_epc(regs);
regs->regs[insn.dsp_format.rd] = value;
if (insn.dsp_format.func == lx_op) {
switch (insn.dsp_format.op) {
if (user && !access_ok(addr, 4))
LoadW(addr, value, res);
compute_return_epc(regs);
regs->regs[insn.dsp_format.rd] = value;
if (user && !access_ok(addr, 2))
LoadHW(addr, value, res);
compute_return_epc(regs);
regs->regs[insn.dsp_format.rd] = value;
* We can land here only from the kernel accessing user
* memory, so we need to "switch" the address limit to
* user space, so that the address check can work properly.
switch (insn.spec3_format.func) {
if (!access_ok(addr, 2))
LoadHWE(addr, value, res);
compute_return_epc(regs);
regs->regs[insn.spec3_format.rt] = value;
if (!access_ok(addr, 4))
LoadWE(addr, value, res);
compute_return_epc(regs);
regs->regs[insn.spec3_format.rt] = value;
if (!access_ok(addr, 2))
LoadHWUE(addr, value, res);
compute_return_epc(regs);
regs->regs[insn.spec3_format.rt] = value;
if (!access_ok(addr, 2))
compute_return_epc(regs);
value = regs->regs[insn.spec3_format.rt];
StoreHWE(addr, value, res);
if (!access_ok(addr, 4))
compute_return_epc(regs);
value = regs->regs[insn.spec3_format.rt];
StoreWE(addr, value, res);
if (user && !access_ok(addr, 2))
if (IS_ENABLED(CONFIG_EVA) && user)
LoadHWE(addr, value, res);
LoadHW(addr, value, res);
compute_return_epc(regs);
regs->regs[insn.i_format.rt] = value;
if (user && !access_ok(addr, 4))
if (IS_ENABLED(CONFIG_EVA) && user)
LoadWE(addr, value, res);
LoadW(addr, value, res);
compute_return_epc(regs);
regs->regs[insn.i_format.rt] = value;
if (user && !access_ok(addr, 2))
if (IS_ENABLED(CONFIG_EVA) && user)
LoadHWUE(addr, value, res);
LoadHWU(addr, value, res);
compute_return_epc(regs);
regs->regs[insn.i_format.rt] = value;
* A 32-bit kernel might be running on a 64-bit processor. But
* if we're on a 32-bit processor and an i-cache incoherency
* or race makes us see a 64-bit instruction here the sdl/sdr
* would blow up, so for now we don't handle unaligned 64-bit
* instructions on 32-bit kernels.
if (user && !access_ok(addr, 4))
LoadWU(addr, value, res);
compute_return_epc(regs);
regs->regs[insn.i_format.rt] = value;
#endif /* CONFIG_64BIT */
/* Cannot handle 64-bit instructions in 32-bit kernel */
* A 32-bit kernel might be running on a 64-bit processor. But
* if we're on a 32-bit processor and an i-cache incoherency
* or race makes us see a 64-bit instruction here the sdl/sdr
* would blow up, so for now we don't handle unaligned 64-bit
* instructions on 32-bit kernels.
if (user && !access_ok(addr, 8))
LoadDW(addr, value, res);
compute_return_epc(regs);
regs->regs[insn.i_format.rt] = value;
#endif /* CONFIG_64BIT */
/* Cannot handle 64-bit instructions in 32-bit kernel */
if (user && !access_ok(addr, 2))
compute_return_epc(regs);
value = regs->regs[insn.i_format.rt];
if (IS_ENABLED(CONFIG_EVA) && user)
StoreHWE(addr, value, res);
StoreHW(addr, value, res);
if (user && !access_ok(addr, 4))
compute_return_epc(regs);
value = regs->regs[insn.i_format.rt];
if (IS_ENABLED(CONFIG_EVA) && user)
StoreWE(addr, value, res);
StoreW(addr, value, res);
* A 32-bit kernel might be running on a 64-bit processor. But
* if we're on a 32-bit processor and an i-cache incoherency
* or race makes us see a 64-bit instruction here the sdl/sdr
* would blow up, so for now we don't handle unaligned 64-bit
* instructions on 32-bit kernels.
if (user && !access_ok(addr, 8))
compute_return_epc(regs);
value = regs->regs[insn.i_format.rt];
StoreDW(addr, value, res);
#endif /* CONFIG_64BIT */
/* Cannot handle 64-bit instructions in 32-bit kernel */
#ifdef CONFIG_MIPS_FP_SUPPORT
void __user *fault_addr = NULL;
die_if_kernel("Unaligned FP access in kernel code", regs);
BUG_ON(!used_math());
res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
own_fpu(1); /* Restore FPU state. */
/* Signal if something went wrong. */
process_fpemu_return(res, fault_addr, 0);
#endif /* CONFIG_MIPS_FP_SUPPORT */
#ifdef CONFIG_CPU_HAS_MSA
unsigned int wd, preempted;
* If we've reached this point then userland should have taken
* the MSA disabled exception & initialised vector context at
* some point in the past.
BUG_ON(!thread_msa_context_live());
df = insn.msa_mi10_format.df;
wd = insn.msa_mi10_format.wd;
fpr = &current->thread.fpu.fpr[wd];
switch (insn.msa_mi10_format.func) {
if (!access_ok(addr, sizeof(*fpr)))
* If we have live MSA context keep track of
* whether we get preempted in order to avoid
* the register context we load being clobbered
* by the live context as it's saved during
* preemption. If we don't have live context
* then it can't be saved to clobber the value
* we load.
preempted = test_thread_flag(TIF_USEDMSA);
res = __copy_from_user_inatomic(fpr, addr,
* Update the hardware register if it is in use
* by the task in this quantum, in order to
* avoid having to save & restore the whole
* vector context.
if (test_thread_flag(TIF_USEDMSA)) {
write_msa_wr(wd, fpr, df);
if (!access_ok(addr, sizeof(*fpr)))
* Update from the hardware register if it is in use by
* the task in this quantum, in order to avoid having to
* save & restore the whole vector context.
if (test_thread_flag(TIF_USEDMSA))
read_msa_wr(wd, fpr, df);
res = __copy_to_user_inatomic(addr, fpr, sizeof(*fpr));
compute_return_epc(regs);
#endif /* CONFIG_CPU_HAS_MSA */
#ifndef CONFIG_CPU_MIPSR6
* COP2 is available to the implementor for application-specific use.
* It's up to applications to register a notifier chain and do
* whatever they have to do, including possibly sending signals.
* This instruction has been reallocated in Release 6.
cu2_notifier_call_chain(CU2_LWC2_OP, regs);
cu2_notifier_call_chain(CU2_LDC2_OP, regs);
cu2_notifier_call_chain(CU2_SWC2_OP, regs);
cu2_notifier_call_chain(CU2_SDC2_OP, regs);
* Pheeee... We encountered a yet unknown instruction or
* cache coherence problem. Die sucker, die ...
#ifdef CONFIG_DEBUG_FS
unaligned_instructions++;
/* roll back jump/branch */
regs->cp0_epc = origpc;
regs->regs[31] = orig31;
/* Did we have an exception handler installed? */
if (fixup_exception(regs))
die_if_kernel("Unhandled kernel unaligned access", regs);
die_if_kernel("Unhandled kernel unaligned access", regs);
die_if_kernel("Unhandled kernel unaligned access or invalid instruction", regs);

/* Recode table from 16-bit register notation to 32-bit GPR. */
const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 };

/* Recode table from 16-bit STORE register notation to 32-bit GPR. */
static const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 };
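/*
 * For example, encoded register 0 maps to $16 (s0) in the load table but
 * to $0 (zero) in the store table, while encodings 2-7 map straight to
 * GPRs $2-$7.
 */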
static void emulate_load_store_microMIPS(struct pt_regs *regs,
unsigned int reg = 0, rvar;
unsigned long orig31;
unsigned long origpc, contpc;
union mips_instruction insn;
struct mm_decoded_insn mminsn;
bool user = user_mode(regs);
origpc = regs->cp0_epc;
orig31 = regs->regs[31];
mminsn.micro_mips_mode = 1;
* This load never faults.
pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
__get_user(halfword, pc16);
contpc = regs->cp0_epc + 2;
word = ((unsigned int)halfword << 16);
if (!mm_insn_16bit(halfword)) {
__get_user(halfword, pc16);
contpc = regs->cp0_epc + 4;
if (get_user(halfword, pc16))
mminsn.next_pc_inc = 2;
word = ((unsigned int)halfword << 16);
if (!mm_insn_16bit(halfword)) {
if (get_user(halfword, pc16))
mminsn.next_pc_inc = 4;
mminsn.next_insn = word;
insn = (union mips_instruction)(mminsn.insn);
if (mm_isBranchInstr(regs, mminsn, &contpc))
insn = (union mips_instruction)(mminsn.next_insn);
/* Parse instruction to find what to do */
switch (insn.mm_i_format.opcode) {
switch (insn.mm_x_format.func) {
reg = insn.mm_x_format.rd;
switch (insn.mm_m_format.func) {
reg = insn.mm_m_format.rd;
if (user && !access_ok(addr, 8))
LoadW(addr, value, res);
regs->regs[reg] = value;
LoadW(addr, value, res);
regs->regs[reg + 1] = value;
reg = insn.mm_m_format.rd;
if (user && !access_ok(addr, 8))
value = regs->regs[reg];
StoreW(addr, value, res);
value = regs->regs[reg + 1];
StoreW(addr, value, res);
reg = insn.mm_m_format.rd;
if (user && !access_ok(addr, 16))
LoadDW(addr, value, res);
regs->regs[reg] = value;
LoadDW(addr, value, res);
regs->regs[reg + 1] = value;
#endif /* CONFIG_64BIT */
reg = insn.mm_m_format.rd;
if (user && !access_ok(addr, 16))
value = regs->regs[reg];
StoreDW(addr, value, res);
value = regs->regs[reg + 1];
StoreDW(addr, value, res);
#endif /* CONFIG_64BIT */
reg = insn.mm_m_format.rd;
if ((rvar > 9) || !reg)
if (user && !access_ok(addr, 4 * (rvar + 1)))
if (user && !access_ok(addr, 4 * rvar))
for (i = 16; rvar; rvar--, i++) {
LoadW(addr, value, res);
regs->regs[i] = value;
if ((reg & 0xf) == 9) {
LoadW(addr, value, res);
regs->regs[30] = value;
LoadW(addr, value, res);
regs->regs[31] = value;
reg = insn.mm_m_format.rd;
if ((rvar > 9) || !reg)
if (user && !access_ok(addr, 4 * (rvar + 1)))
if (user && !access_ok(addr, 4 * rvar))
for (i = 16; rvar; rvar--, i++) {
value = regs->regs[i];
StoreW(addr, value, res);
if ((reg & 0xf) == 9) {
value = regs->regs[30];
StoreW(addr, value, res);
value = regs->regs[31];
StoreW(addr, value, res);
reg = insn.mm_m_format.rd;
if ((rvar > 9) || !reg)
if (user && !access_ok(addr, 8 * (rvar + 1)))
if (user && !access_ok(addr, 8 * rvar))
for (i = 16; rvar; rvar--, i++) {
LoadDW(addr, value, res);
regs->regs[i] = value;
if ((reg & 0xf) == 9) {
LoadDW(addr, value, res);
regs->regs[30] = value;
LoadDW(addr, value, res);
regs->regs[31] = value;
#endif /* CONFIG_64BIT */
reg = insn.mm_m_format.rd;
if ((rvar > 9) || !reg)
if (user && !access_ok(addr, 8 * (rvar + 1)))
if (user && !access_ok(addr, 8 * rvar))
for (i = 16; rvar; rvar--, i++) {
value = regs->regs[i];
StoreDW(addr, value, res);
if ((reg & 0xf) == 9) {
value = regs->regs[30];
StoreDW(addr, value, res);
value = regs->regs[31];
StoreDW(addr, value, res);
#endif /* CONFIG_64BIT */
/* LWC2, SWC2, LDC2, SDC2 are not serviced */
switch (insn.mm_m_format.func) {
reg = insn.mm_m_format.rd;
/* LL, SC, LLD, SCD are not serviced */
#ifdef CONFIG_MIPS_FP_SUPPORT
switch (insn.mm_x_format.func) {
void __user *fault_addr = NULL;
/* roll back jump/branch */
regs->cp0_epc = origpc;
regs->regs[31] = orig31;
die_if_kernel("Unaligned FP access in kernel code", regs);
BUG_ON(!used_math());
BUG_ON(!is_fpu_owner());
res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
own_fpu(1); /* restore FPU state */
/* If something went wrong, signal */
process_fpemu_return(res, fault_addr, 0);
#endif /* CONFIG_MIPS_FP_SUPPORT */
reg = insn.mm_i_format.rt;
reg = insn.mm_i_format.rt;
reg = insn.mm_i_format.rt;
reg = insn.mm_i_format.rt;
reg = insn.mm_i_format.rt;
reg = insn.mm_i_format.rt;
reg = insn.mm_i_format.rt;
switch (insn.mm16_m_format.func) {
reg = insn.mm16_m_format.rlist;
if (user && !access_ok(addr, 4 * rvar))
for (i = 16; rvar; rvar--, i++) {
LoadW(addr, value, res);
regs->regs[i] = value;
LoadW(addr, value, res);
regs->regs[31] = value;
reg = insn.mm16_m_format.rlist;
if (user && !access_ok(addr, 4 * rvar))
for (i = 16; rvar; rvar--, i++) {
value = regs->regs[i];
StoreW(addr, value, res);
value = regs->regs[31];
StoreW(addr, value, res);
reg = reg16to32[insn.mm16_rb_format.rt];
reg = reg16to32[insn.mm16_rb_format.rt];
reg = reg16to32st[insn.mm16_rb_format.rt];
reg = reg16to32st[insn.mm16_rb_format.rt];
reg = insn.mm16_r5_format.rt;
reg = insn.mm16_r5_format.rt;
reg = reg16to32[insn.mm16_r3_format.rt];
if (user && !access_ok(addr, 2))
LoadHW(addr, value, res);
regs->regs[reg] = value;
if (user && !access_ok(addr, 2))
LoadHWU(addr, value, res);
regs->regs[reg] = value;
if (user && !access_ok(addr, 4))
LoadW(addr, value, res);
regs->regs[reg] = value;
* A 32-bit kernel might be running on a 64-bit processor. But
* if we're on a 32-bit processor and an i-cache incoherency
* or race makes us see a 64-bit instruction here the sdl/sdr
* would blow up, so for now we don't handle unaligned 64-bit
* instructions on 32-bit kernels.
if (user && !access_ok(addr, 4))
LoadWU(addr, value, res);
regs->regs[reg] = value;
#endif /* CONFIG_64BIT */
/* Cannot handle 64-bit instructions in 32-bit kernel */
* A 32-bit kernel might be running on a 64-bit processor. But
* if we're on a 32-bit processor and an i-cache incoherency
* or race makes us see a 64-bit instruction here the sdl/sdr
* would blow up, so for now we don't handle unaligned 64-bit
* instructions on 32-bit kernels.
if (user && !access_ok(addr, 8))
LoadDW(addr, value, res);
regs->regs[reg] = value;
#endif /* CONFIG_64BIT */
/* Cannot handle 64-bit instructions in 32-bit kernel */
if (user && !access_ok(addr, 2))
value = regs->regs[reg];
StoreHW(addr, value, res);
if (user && !access_ok(addr, 4))
value = regs->regs[reg];
StoreW(addr, value, res);
* A 32-bit kernel might be running on a 64-bit processor. But
* if we're on a 32-bit processor and an i-cache incoherency
* or race makes us see a 64-bit instruction here the sdl/sdr
* would blow up, so for now we don't handle unaligned 64-bit
* instructions on 32-bit kernels.
if (user && !access_ok(addr, 8))
value = regs->regs[reg];
StoreDW(addr, value, res);
#endif /* CONFIG_64BIT */
/* Cannot handle 64-bit instructions in 32-bit kernel */
regs->cp0_epc = contpc; /* advance or branch */
#ifdef CONFIG_DEBUG_FS
unaligned_instructions++;
/* roll back jump/branch */
regs->cp0_epc = origpc;
regs->regs[31] = orig31;
/* Did we have an exception handler installed? */
if (fixup_exception(regs))
die_if_kernel("Unhandled kernel unaligned access", regs);
die_if_kernel("Unhandled kernel unaligned access", regs);
die_if_kernel("Unhandled kernel unaligned access or invalid instruction", regs);
static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user *addr)
unsigned long value;
unsigned long orig31;
unsigned long origpc;
union mips16e_instruction mips16inst, oldinst;
unsigned int opcode;
bool user = user_mode(regs);
origpc = regs->cp0_epc;
orig31 = regs->regs[31];
pc16 = (unsigned short __user *)msk_isa16_mode(origpc);
* This load never faults.
__get_user(mips16inst.full, pc16);
oldinst = mips16inst;
/* skip EXTEND instruction */
if (mips16inst.ri.opcode == MIPS16e_extend_op) {
__get_user(mips16inst.full, pc16);
} else if (delay_slot(regs)) {
/* skip jump instructions */
/* JAL/JALX are 32 bits but have OPCODE in first short int */
if (mips16inst.ri.opcode == MIPS16e_jal_op)
if (get_user(mips16inst.full, pc16))
opcode = mips16inst.ri.opcode;
case MIPS16e_i64_op: /* I64 or RI64 instruction */
switch (mips16inst.i64.func) { /* I64/RI64 func field check */
case MIPS16e_ldpc_func:
case MIPS16e_ldsp_func:
reg = reg16to32[mips16inst.ri64.ry];
case MIPS16e_sdsp_func:
reg = reg16to32[mips16inst.ri64.ry];
case MIPS16e_sdrasp_func:
reg = 29; /* GPRSP */
case MIPS16e_swsp_op:
reg = reg16to32[mips16inst.ri.rx];
if (extended && cpu_has_mips16e2)
switch (mips16inst.ri.imm >> 5) {
opcode = MIPS16e_sh_op;
case MIPS16e_lwpc_op:
reg = reg16to32[mips16inst.ri.rx];
case MIPS16e_lwsp_op:
reg = reg16to32[mips16inst.ri.rx];
if (extended && cpu_has_mips16e2)
switch (mips16inst.ri.imm >> 5) {
opcode = MIPS16e_lh_op;
opcode = MIPS16e_lhu_op;
if (mips16inst.i8.func != MIPS16e_swrasp_func)
reg = 29; /* GPRSP */
reg = reg16to32[mips16inst.rri.ry];
case MIPS16e_lbu_op:
if (user && !access_ok(addr, 2))
LoadHW(addr, value, res);
MIPS16e_compute_return_epc(regs, &oldinst);
regs->regs[reg] = value;
case MIPS16e_lhu_op:
if (user && !access_ok(addr, 2))
LoadHWU(addr, value, res);
MIPS16e_compute_return_epc(regs, &oldinst);
regs->regs[reg] = value;
case MIPS16e_lwpc_op:
case MIPS16e_lwsp_op:
if (user && !access_ok(addr, 4))
LoadW(addr, value, res);
MIPS16e_compute_return_epc(regs, &oldinst);
regs->regs[reg] = value;
case MIPS16e_lwu_op:
* A 32-bit kernel might be running on a 64-bit processor. But
* if we're on a 32-bit processor and an i-cache incoherency
* or race makes us see a 64-bit instruction here the sdl/sdr
* would blow up, so for now we don't handle unaligned 64-bit
* instructions on 32-bit kernels.
if (user && !access_ok(addr, 4))
LoadWU(addr, value, res);
MIPS16e_compute_return_epc(regs, &oldinst);
regs->regs[reg] = value;
#endif /* CONFIG_64BIT */
/* Cannot handle 64-bit instructions in 32-bit kernel */
* A 32-bit kernel might be running on a 64-bit processor. But
* if we're on a 32-bit processor and an i-cache incoherency
* or race makes us see a 64-bit instruction here the sdl/sdr
* would blow up, so for now we don't handle unaligned 64-bit
* instructions on 32-bit kernels.
if (user && !access_ok(addr, 8))
LoadDW(addr, value, res);
MIPS16e_compute_return_epc(regs, &oldinst);
regs->regs[reg] = value;
#endif /* CONFIG_64BIT */
/* Cannot handle 64-bit instructions in 32-bit kernel */
if (user && !access_ok(addr, 2))
MIPS16e_compute_return_epc(regs, &oldinst);
value = regs->regs[reg];
StoreHW(addr, value, res);
case MIPS16e_swsp_op:
case MIPS16e_i8_op: /* actually - MIPS16e_swrasp_func */
if (user && !access_ok(addr, 4))
MIPS16e_compute_return_epc(regs, &oldinst);
value = regs->regs[reg];
StoreW(addr, value, res);
* A 32-bit kernel might be running on a 64-bit processor. But
* if we're on a 32-bit processor and an i-cache incoherency
* or race makes us see a 64-bit instruction here the sdl/sdr
* would blow up, so for now we don't handle unaligned 64-bit
* instructions on 32-bit kernels.
if (user && !access_ok(addr, 8))
MIPS16e_compute_return_epc(regs, &oldinst);
value = regs->regs[reg];
StoreDW(addr, value, res);
#endif /* CONFIG_64BIT */
/* Cannot handle 64-bit instructions in 32-bit kernel */
* Pheeee... We encountered a yet unknown instruction or
* cache coherence problem. Die sucker, die ...
#ifdef CONFIG_DEBUG_FS
unaligned_instructions++;
/* roll back jump/branch */
regs->cp0_epc = origpc;
regs->regs[31] = orig31;
/* Did we have an exception handler installed? */
if (fixup_exception(regs))
die_if_kernel("Unhandled kernel unaligned access", regs);
die_if_kernel("Unhandled kernel unaligned access", regs);
die_if_kernel("Unhandled kernel unaligned access or invalid instruction", regs);
asmlinkage void do_ade(struct pt_regs *regs)
enum ctx_state prev_state;
prev_state = exception_enter();
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
1, regs, regs->cp0_badvaddr);
* Check if we are hitting the space between the CPU's implemented maximum
* virtual user address and the 64-bit maximum virtual user address,
* and do exception handling to get EFAULTs for get_user/put_user.
if ((regs->cp0_badvaddr >= (1UL << cpu_vmbits)) &&
(regs->cp0_badvaddr < XKSSEG)) {
if (fixup_exception(regs)) {
current->thread.cp0_baduaddr = regs->cp0_badvaddr;
* Did we catch a fault trying to load an instruction?
if (regs->cp0_badvaddr == regs->cp0_epc)
if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
* Do branch emulation only if we didn't forward the exception.
* This is all so ugly ...
* Are we running in microMIPS mode?
if (get_isa16_mode(regs->cp0_epc)) {
* Did we catch a fault trying to load an instruction in
* 16-bit mode?
if (regs->cp0_badvaddr == msk_isa16_mode(regs->cp0_epc))
if (unaligned_action == UNALIGNED_ACTION_SHOW)
show_registers(regs);
if (cpu_has_mmips) {
emulate_load_store_microMIPS(regs,
(void __user *)regs->cp0_badvaddr);
if (cpu_has_mips16) {
emulate_load_store_MIPS16e(regs,
(void __user *)regs->cp0_badvaddr);
if (unaligned_action == UNALIGNED_ACTION_SHOW)
show_registers(regs);
pc = (unsigned int *)exception_epc(regs);
emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc);
die_if_kernel("Kernel unaligned instruction access", regs);
* XXX On return from the signal handler we should advance the epc
exception_exit(prev_state);
#ifdef CONFIG_DEBUG_FS
static int __init debugfs_unaligned(void)
{
	debugfs_create_u32("unaligned_instructions", S_IRUGO, mips_debugfs_dir,
			   &unaligned_instructions);
	debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR,
			   mips_debugfs_dir, &unaligned_action);
	return 0;
}
arch_initcall(debugfs_unaligned);
#endif
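/*
 * The two debugfs entries created above can be used to observe and tune the
 * handler at run time.  A minimal sketch, assuming debugfs is mounted at
 * /sys/kernel/debug and mips_debugfs_dir is the "mips" directory:
 *
 *   cat /sys/kernel/debug/mips/unaligned_instructions
 *   echo 1 > /sys/kernel/debug/mips/unaligned_action
 *
 * unaligned_action takes the enum values defined near the top of this file:
 * 0 emulates quietly, 1 sends a signal instead of emulating, and 2 emulates
 * but dumps the registers of the faulting context.
 */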