/*
 * Handle unaligned accesses by emulation.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2014 Imagination Technologies Ltd.
 *
 * This file contains the exception handler for the address error exception,
 * with the special capability of executing faulting instructions in software.
 * The handler does not try to handle the case when the program counter points
 * to an address not aligned to a word boundary.
 *
 * Putting data at unaligned addresses is bad practice even on Intel, where
 * only performance is affected. Much worse is that such code is non-portable.
 * Because several programs die on MIPS due to alignment problems, I decided
 * to implement this handler anyway, though I originally didn't intend to do
 * this at all for user code.
 *
 * For now I enable fixing of address errors by default to make life easier.
 * I do, however, intend to disable this at some point in the future, once the
 * alignment problems with user programs have been fixed. For programmers this
 * is the right moment to fix their programs.
 *
 * Fixing address errors is a per-process option. The option is inherited
 * across fork(2) and execve(2) calls. If you really want to use the
 * option in your user programs - I strongly discourage the use of the
 * software emulation - use the following code in your userland stuff:
 *
 * #include <sys/sysmips.h>
 *
 * ...
 * sysmips(MIPS_FIXADE, x);
 * ...
 *
 * The argument x is 0 to disable the software emulation; any other value
 * enables it.
 *
 * Below is a little program to play around with this feature.
 *
 * #include <stdio.h>
 * #include <sys/sysmips.h>
 *
 * struct foo {
 *         unsigned char bar[8];
 * };
 *
 * main(int argc, char *argv[])
 * {
 *         struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
 *         unsigned int *p = (unsigned int *) (x.bar + 3);
 *         int i;
 *
 *         sysmips(MIPS_FIXADE, atoi(argv[1]));
 *         printf("*p = %08lx\n", *p);
 *         for (i = 0; i <= 7; i++)
 *                 printf("%02x ", x.bar[i]);
 *         printf("\n");
 * }
 *
 * Coprocessor loads are not supported; I think this case is unimportant
 * in practice.
 *
 * TODO: Handle ndc (attempted store to doubleword in uncached memory)
 *       exception for the R6000.
 *       A store crossing a page boundary might be executed only partially.
 *       Undo the partial store in this case.
 */
#include <linux/context_tracking.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <linux/perf_event.h>

#include <asm/branch.h>
#include <asm/byteorder.h>
#include <asm/debug.h>
#include <asm/fpu_emulator.h>
#include <asm/unaligned-emul.h>
#include <asm/mmu_context.h>
#include <linux/uaccess.h>

#include "access-helper.h"
enum {
        UNALIGNED_ACTION_QUIET,
        UNALIGNED_ACTION_SIGNAL,
        UNALIGNED_ACTION_SHOW,
};

#ifdef CONFIG_DEBUG_FS
static u32 unaligned_instructions;
static u32 unaligned_action;
#else
#define unaligned_action UNALIGNED_ACTION_QUIET
#endif

extern void show_registers(struct pt_regs *regs);
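
/*
 * Emulate a misaligned load or store encoded as a classic 32-bit MIPS
 * instruction (including the EVA, DSP LX and Ingenic MXU variants handled
 * below).  On success the destination register is updated and the epc is
 * advanced past the faulting instruction; on failure we fall through to
 * the fault/sigbus/sigill paths, which roll back any branch emulation and
 * signal the task or oops as appropriate.
 */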
static void emulate_load_store_insn(struct pt_regs *regs,
void __user *addr, unsigned int *pc)
unsigned long origpc, orig31, value;
union mips_instruction insn;
bool user = user_mode(regs);
origpc = (unsigned long)pc;
orig31 = regs->regs[31];
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
* This load never faults.
__get_inst32(&insn.word, pc, user);
switch (insn.i_format.opcode) {
* These are instructions that a compiler doesn't generate. We
* can therefore assume that the code is MIPS-aware and
* really buggy. Emulating these instructions would break the
* semantics anyway.
* For these instructions the only way to create an address
* error is an attempted access to kernel/supervisor address
* space.
* The remaining opcodes are the ones that are really of
* interest.
#ifdef CONFIG_MACH_INGENIC
if (insn.mxu_lx_format.func != mxu_lx_op)
goto sigbus; /* we don't care about other MXU instructions */
switch (insn.mxu_lx_format.op) {
if (user && !access_ok(addr, 4))
LoadW(addr, value, res);
compute_return_epc(regs);
regs->regs[insn.mxu_lx_format.rd] = value;
if (user && !access_ok(addr, 2))
LoadHW(addr, value, res);
compute_return_epc(regs);
regs->regs[insn.dsp_format.rd] = value;
if (user && !access_ok(addr, 2))
LoadHWU(addr, value, res);
compute_return_epc(regs);
regs->regs[insn.dsp_format.rd] = value;
if (insn.dsp_format.func == lx_op) {
switch (insn.dsp_format.op) {
if (user && !access_ok(addr, 4))
LoadW(addr, value, res);
compute_return_epc(regs);
regs->regs[insn.dsp_format.rd] = value;
if (user && !access_ok(addr, 2))
LoadHW(addr, value, res);
compute_return_epc(regs);
regs->regs[insn.dsp_format.rd] = value;
* We can land here only from the kernel accessing user
* memory, so we need to "switch" the address limit to
* user space so that the address check works properly.
switch (insn.spec3_format.func) {
if (!access_ok(addr, 2))
LoadHWE(addr, value, res);
compute_return_epc(regs);
regs->regs[insn.spec3_format.rt] = value;
if (!access_ok(addr, 4))
LoadWE(addr, value, res);
compute_return_epc(regs);
regs->regs[insn.spec3_format.rt] = value;
if (!access_ok(addr, 2))
LoadHWUE(addr, value, res);
compute_return_epc(regs);
regs->regs[insn.spec3_format.rt] = value;
if (!access_ok(addr, 2))
compute_return_epc(regs);
value = regs->regs[insn.spec3_format.rt];
StoreHWE(addr, value, res);
if (!access_ok(addr, 4))
compute_return_epc(regs);
value = regs->regs[insn.spec3_format.rt];
StoreWE(addr, value, res);
if (user && !access_ok(addr, 2))
if (IS_ENABLED(CONFIG_EVA) && user)
LoadHWE(addr, value, res);
LoadHW(addr, value, res);
compute_return_epc(regs);
regs->regs[insn.i_format.rt] = value;
if (user && !access_ok(addr, 4))
if (IS_ENABLED(CONFIG_EVA) && user)
LoadWE(addr, value, res);
LoadW(addr, value, res);
compute_return_epc(regs);
regs->regs[insn.i_format.rt] = value;
if (user && !access_ok(addr, 2))
if (IS_ENABLED(CONFIG_EVA) && user)
LoadHWUE(addr, value, res);
LoadHWU(addr, value, res);
compute_return_epc(regs);
regs->regs[insn.i_format.rt] = value;
* A 32-bit kernel might be running on a 64-bit processor. But
* if we're on a 32-bit processor and an i-cache incoherency
* or race makes us see a 64-bit instruction here the sdl/sdr
* would blow up, so for now we don't handle unaligned 64-bit
* instructions on 32-bit kernels.
if (user && !access_ok(addr, 4))
LoadWU(addr, value, res);
compute_return_epc(regs);
regs->regs[insn.i_format.rt] = value;
#endif /* CONFIG_64BIT */
/* Cannot handle 64-bit instructions in 32-bit kernel */
* A 32-bit kernel might be running on a 64-bit processor. But
* if we're on a 32-bit processor and an i-cache incoherency
* or race makes us see a 64-bit instruction here the sdl/sdr
* would blow up, so for now we don't handle unaligned 64-bit
* instructions on 32-bit kernels.
if (user && !access_ok(addr, 8))
LoadDW(addr, value, res);
compute_return_epc(regs);
regs->regs[insn.i_format.rt] = value;
#endif /* CONFIG_64BIT */
/* Cannot handle 64-bit instructions in 32-bit kernel */
if (user && !access_ok(addr, 2))
compute_return_epc(regs);
value = regs->regs[insn.i_format.rt];
if (IS_ENABLED(CONFIG_EVA) && user)
StoreHWE(addr, value, res);
StoreHW(addr, value, res);
if (user && !access_ok(addr, 4))
compute_return_epc(regs);
value = regs->regs[insn.i_format.rt];
if (IS_ENABLED(CONFIG_EVA) && user)
StoreWE(addr, value, res);
StoreW(addr, value, res);
* A 32-bit kernel might be running on a 64-bit processor. But
* if we're on a 32-bit processor and an i-cache incoherency
* or race makes us see a 64-bit instruction here the sdl/sdr
* would blow up, so for now we don't handle unaligned 64-bit
* instructions on 32-bit kernels.
if (user && !access_ok(addr, 8))
compute_return_epc(regs);
value = regs->regs[insn.i_format.rt];
StoreDW(addr, value, res);
#endif /* CONFIG_64BIT */
/* Cannot handle 64-bit instructions in 32-bit kernel */
#ifdef CONFIG_MIPS_FP_SUPPORT
void __user *fault_addr = NULL;
die_if_kernel("Unaligned FP access in kernel code", regs);
BUG_ON(!used_math());
res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
own_fpu(1); /* Restore FPU state. */
/* Signal if something went wrong. */
process_fpemu_return(res, fault_addr, 0);
#endif /* CONFIG_MIPS_FP_SUPPORT */
#ifdef CONFIG_CPU_HAS_MSA
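/*
 * MSA MI10-format vector load/store: the unaligned access is emulated by
 * copying the whole FPR image to or from user memory; the live hardware
 * register is only touched when the task owns MSA in the current quantum
 * (TIF_USEDMSA).
 */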
unsigned int wd, preempted;
* If we've reached this point then userland should have taken
* the MSA disabled exception & initialised vector context at
* some point in the past.
BUG_ON(!thread_msa_context_live());
df = insn.msa_mi10_format.df;
wd = insn.msa_mi10_format.wd;
fpr = &current->thread.fpu.fpr[wd];
switch (insn.msa_mi10_format.func) {
if (!access_ok(addr, sizeof(*fpr)))
* If we have live MSA context keep track of
* whether we get preempted in order to avoid
* the register context we load being clobbered
* by the live context as it's saved during
* preemption. If we don't have live context
* then it can't be saved to clobber the value
* we load.
preempted = test_thread_flag(TIF_USEDMSA);
res = __copy_from_user_inatomic(fpr, addr,
sizeof(*fpr));
* Update the hardware register if it is in use
* by the task in this quantum, in order to
* avoid having to save & restore the whole
* vector context.
if (test_thread_flag(TIF_USEDMSA)) {
write_msa_wr(wd, fpr, df);
if (!access_ok(addr, sizeof(*fpr)))
* Update from the hardware register if it is in use by
* the task in this quantum, in order to avoid having to
* save & restore the whole vector context.
if (test_thread_flag(TIF_USEDMSA))
read_msa_wr(wd, fpr, df);
res = __copy_to_user_inatomic(addr, fpr, sizeof(*fpr));
compute_return_epc(regs);
#endif /* CONFIG_CPU_HAS_MSA */
#ifndef CONFIG_CPU_MIPSR6
* COP2 is available to the implementor for application-specific use.
* It's up to applications to register a notifier chain and do
* whatever they have to do, including possibly sending signals.
* This instruction has been reallocated in Release 6.
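*
* A minimal sketch of such a notifier (my_cu2_call and its body are
* hypothetical, not part of this file); it receives the CU2_*_OP action
* raised below and the pt_regs pointer as the notifier data:
*
*	static int my_cu2_call(struct notifier_block *nb,
*			       unsigned long action, void *data)
*	{
*		struct pt_regs *regs = data;
*
*		switch (action) {
*		case CU2_LWC2_OP:
*			... emulate the access using regs ...
*			return NOTIFY_STOP;
*		}
*		return NOTIFY_OK;	/* fall back to the default handler */
*	}
*
* Register it early, e.g. from an initcall: cu2_notifier(my_cu2_call, 0);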
cu2_notifier_call_chain(CU2_LWC2_OP, regs);
cu2_notifier_call_chain(CU2_LDC2_OP, regs);
cu2_notifier_call_chain(CU2_SWC2_OP, regs);
cu2_notifier_call_chain(CU2_SDC2_OP, regs);
* Pheeee... We encountered a yet unknown instruction or
* cache coherence problem. Die sucker, die ...
#ifdef CONFIG_DEBUG_FS
unaligned_instructions++;
/* roll back jump/branch */
regs->cp0_epc = origpc;
regs->regs[31] = orig31;
/* Did we have an exception handler installed? */
if (fixup_exception(regs))
die_if_kernel("Unhandled kernel unaligned access", regs);
die_if_kernel("Unhandled kernel unaligned access", regs);
("Unhandled kernel unaligned access or invalid instruction", regs);
/* Recode table from 16-bit register notation to 32-bit GPR. */
const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 };
/* Recode table from 16-bit STORE register notation to 32-bit GPR. */
static const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 };
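
/*
 * Same job as emulate_load_store_insn(), but for microMIPS: the faulting
 * instruction is re-fetched as one or two halfwords, branches and delay
 * slots are resolved via mm_isBranchInstr(), and besides the simple
 * load/store forms the multi-register load/store sequences are emulated
 * one word (or doubleword, on 64-bit kernels) at a time.
 */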
static void emulate_load_store_microMIPS(struct pt_regs *regs,
unsigned int reg = 0, rvar;
unsigned long orig31;
unsigned long origpc, contpc;
union mips_instruction insn;
struct mm_decoded_insn mminsn;
bool user = user_mode(regs);
origpc = regs->cp0_epc;
orig31 = regs->regs[31];
mminsn.micro_mips_mode = 1;
* This load never faults.
pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
__get_user(halfword, pc16);
contpc = regs->cp0_epc + 2;
word = ((unsigned int)halfword << 16);
if (!mm_insn_16bit(halfword)) {
__get_user(halfword, pc16);
contpc = regs->cp0_epc + 4;
if (get_user(halfword, pc16))
mminsn.next_pc_inc = 2;
word = ((unsigned int)halfword << 16);
if (!mm_insn_16bit(halfword)) {
if (get_user(halfword, pc16))
mminsn.next_pc_inc = 4;
mminsn.next_insn = word;
insn = (union mips_instruction)(mminsn.insn);
if (mm_isBranchInstr(regs, mminsn, &contpc))
insn = (union mips_instruction)(mminsn.next_insn);
/* Parse instruction to find what to do */
switch (insn.mm_i_format.opcode) {
switch (insn.mm_x_format.func) {
reg = insn.mm_x_format.rd;
switch (insn.mm_m_format.func) {
reg = insn.mm_m_format.rd;
if (user && !access_ok(addr, 8))
LoadW(addr, value, res);
regs->regs[reg] = value;
LoadW(addr, value, res);
regs->regs[reg + 1] = value;
reg = insn.mm_m_format.rd;
if (user && !access_ok(addr, 8))
value = regs->regs[reg];
StoreW(addr, value, res);
value = regs->regs[reg + 1];
StoreW(addr, value, res);
reg = insn.mm_m_format.rd;
if (user && !access_ok(addr, 16))
LoadDW(addr, value, res);
regs->regs[reg] = value;
LoadDW(addr, value, res);
regs->regs[reg + 1] = value;
#endif /* CONFIG_64BIT */
reg = insn.mm_m_format.rd;
if (user && !access_ok(addr, 16))
value = regs->regs[reg];
StoreDW(addr, value, res);
value = regs->regs[reg + 1];
StoreDW(addr, value, res);
#endif /* CONFIG_64BIT */
reg = insn.mm_m_format.rd;
if ((rvar > 9) || !reg)
if (user && !access_ok(addr, 4 * (rvar + 1)))
if (user && !access_ok(addr, 4 * rvar))
for (i = 16; rvar; rvar--, i++) {
LoadW(addr, value, res);
regs->regs[i] = value;
if ((reg & 0xf) == 9) {
LoadW(addr, value, res);
regs->regs[30] = value;
LoadW(addr, value, res);
regs->regs[31] = value;
reg = insn.mm_m_format.rd;
if ((rvar > 9) || !reg)
if (user && !access_ok(addr, 4 * (rvar + 1)))
if (user && !access_ok(addr, 4 * rvar))
for (i = 16; rvar; rvar--, i++) {
value = regs->regs[i];
StoreW(addr, value, res);
if ((reg & 0xf) == 9) {
value = regs->regs[30];
StoreW(addr, value, res);
value = regs->regs[31];
StoreW(addr, value, res);
reg = insn.mm_m_format.rd;
if ((rvar > 9) || !reg)
if (user && !access_ok(addr, 8 * (rvar + 1)))
if (user && !access_ok(addr, 8 * rvar))
for (i = 16; rvar; rvar--, i++) {
LoadDW(addr, value, res);
regs->regs[i] = value;
if ((reg & 0xf) == 9) {
LoadDW(addr, value, res);
regs->regs[30] = value;
LoadDW(addr, value, res);
regs->regs[31] = value;
#endif /* CONFIG_64BIT */
reg = insn.mm_m_format.rd;
if ((rvar > 9) || !reg)
if (user && !access_ok(addr, 8 * (rvar + 1)))
if (user && !access_ok(addr, 8 * rvar))
for (i = 16; rvar; rvar--, i++) {
value = regs->regs[i];
StoreDW(addr, value, res);
if ((reg & 0xf) == 9) {
value = regs->regs[30];
StoreDW(addr, value, res);
value = regs->regs[31];
StoreDW(addr, value, res);
#endif /* CONFIG_64BIT */
/* LWC2, SWC2, LDC2, SDC2 are not serviced */
switch (insn.mm_m_format.func) {
reg = insn.mm_m_format.rd;
/* LL, SC, LLD, SCD are not serviced */
#ifdef CONFIG_MIPS_FP_SUPPORT
switch (insn.mm_x_format.func) {
void __user *fault_addr = NULL;
/* roll back jump/branch */
regs->cp0_epc = origpc;
regs->regs[31] = orig31;
die_if_kernel("Unaligned FP access in kernel code", regs);
BUG_ON(!used_math());
BUG_ON(!is_fpu_owner());
res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
own_fpu(1); /* restore FPU state */
/* If something went wrong, signal */
process_fpemu_return(res, fault_addr, 0);
#endif /* CONFIG_MIPS_FP_SUPPORT */
reg = insn.mm_i_format.rt;
reg = insn.mm_i_format.rt;
reg = insn.mm_i_format.rt;
reg = insn.mm_i_format.rt;
reg = insn.mm_i_format.rt;
reg = insn.mm_i_format.rt;
reg = insn.mm_i_format.rt;
switch (insn.mm16_m_format.func) {
reg = insn.mm16_m_format.rlist;
if (user && !access_ok(addr, 4 * rvar))
for (i = 16; rvar; rvar--, i++) {
LoadW(addr, value, res);
regs->regs[i] = value;
LoadW(addr, value, res);
regs->regs[31] = value;
reg = insn.mm16_m_format.rlist;
if (user && !access_ok(addr, 4 * rvar))
for (i = 16; rvar; rvar--, i++) {
value = regs->regs[i];
StoreW(addr, value, res);
value = regs->regs[31];
StoreW(addr, value, res);
reg = reg16to32[insn.mm16_rb_format.rt];
reg = reg16to32[insn.mm16_rb_format.rt];
reg = reg16to32st[insn.mm16_rb_format.rt];
reg = reg16to32st[insn.mm16_rb_format.rt];
reg = insn.mm16_r5_format.rt;
reg = insn.mm16_r5_format.rt;
reg = reg16to32[insn.mm16_r3_format.rt];
if (user && !access_ok(addr, 2))
LoadHW(addr, value, res);
regs->regs[reg] = value;
if (user && !access_ok(addr, 2))
LoadHWU(addr, value, res);
regs->regs[reg] = value;
if (user && !access_ok(addr, 4))
LoadW(addr, value, res);
regs->regs[reg] = value;
* A 32-bit kernel might be running on a 64-bit processor. But
* if we're on a 32-bit processor and an i-cache incoherency
* or race makes us see a 64-bit instruction here the sdl/sdr
* would blow up, so for now we don't handle unaligned 64-bit
* instructions on 32-bit kernels.
if (user && !access_ok(addr, 4))
LoadWU(addr, value, res);
regs->regs[reg] = value;
#endif /* CONFIG_64BIT */
/* Cannot handle 64-bit instructions in 32-bit kernel */
* A 32-bit kernel might be running on a 64-bit processor. But
* if we're on a 32-bit processor and an i-cache incoherency
* or race makes us see a 64-bit instruction here the sdl/sdr
* would blow up, so for now we don't handle unaligned 64-bit
* instructions on 32-bit kernels.
if (user && !access_ok(addr, 8))
LoadDW(addr, value, res);
regs->regs[reg] = value;
#endif /* CONFIG_64BIT */
/* Cannot handle 64-bit instructions in 32-bit kernel */
if (user && !access_ok(addr, 2))
value = regs->regs[reg];
StoreHW(addr, value, res);
if (user && !access_ok(addr, 4))
value = regs->regs[reg];
StoreW(addr, value, res);
* A 32-bit kernel might be running on a 64-bit processor. But
* if we're on a 32-bit processor and an i-cache incoherency
* or race makes us see a 64-bit instruction here the sdl/sdr
* would blow up, so for now we don't handle unaligned 64-bit
* instructions on 32-bit kernels.
if (user && !access_ok(addr, 8))
value = regs->regs[reg];
StoreDW(addr, value, res);
#endif /* CONFIG_64BIT */
/* Cannot handle 64-bit instructions in 32-bit kernel */
regs->cp0_epc = contpc; /* advance or branch */
#ifdef CONFIG_DEBUG_FS
unaligned_instructions++;
/* roll back jump/branch */
regs->cp0_epc = origpc;
regs->regs[31] = orig31;
/* Did we have an exception handler installed? */
if (fixup_exception(regs))
die_if_kernel("Unhandled kernel unaligned access", regs);
die_if_kernel("Unhandled kernel unaligned access", regs);
("Unhandled kernel unaligned access or invalid instruction", regs);
static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user *addr)
unsigned long value;
unsigned long orig31;
unsigned long origpc;
union mips16e_instruction mips16inst, oldinst;
unsigned int opcode;
bool user = user_mode(regs);
origpc = regs->cp0_epc;
orig31 = regs->regs[31];
pc16 = (unsigned short __user *)msk_isa16_mode(origpc);
* This load never faults.
__get_user(mips16inst.full, pc16);
oldinst = mips16inst;
/* skip EXTEND instruction */
if (mips16inst.ri.opcode == MIPS16e_extend_op) {
__get_user(mips16inst.full, pc16);
} else if (delay_slot(regs)) {
/* skip jump instructions */
/* JAL/JALX are 32 bits but have OPCODE in first short int */
if (mips16inst.ri.opcode == MIPS16e_jal_op)
if (get_user(mips16inst.full, pc16))
opcode = mips16inst.ri.opcode;
case MIPS16e_i64_op: /* I64 or RI64 instruction */
switch (mips16inst.i64.func) { /* I64/RI64 func field check */
case MIPS16e_ldpc_func:
case MIPS16e_ldsp_func:
reg = reg16to32[mips16inst.ri64.ry];
case MIPS16e_sdsp_func:
reg = reg16to32[mips16inst.ri64.ry];
case MIPS16e_sdrasp_func:
reg = 29; /* GPRSP */
case MIPS16e_swsp_op:
reg = reg16to32[mips16inst.ri.rx];
if (extended && cpu_has_mips16e2)
switch (mips16inst.ri.imm >> 5) {
opcode = MIPS16e_sh_op;
case MIPS16e_lwpc_op:
reg = reg16to32[mips16inst.ri.rx];
case MIPS16e_lwsp_op:
reg = reg16to32[mips16inst.ri.rx];
if (extended && cpu_has_mips16e2)
switch (mips16inst.ri.imm >> 5) {
opcode = MIPS16e_lh_op;
opcode = MIPS16e_lhu_op;
if (mips16inst.i8.func != MIPS16e_swrasp_func)
reg = 29; /* GPRSP */
reg = reg16to32[mips16inst.rri.ry];
case MIPS16e_lbu_op:
if (user && !access_ok(addr, 2))
LoadHW(addr, value, res);
MIPS16e_compute_return_epc(regs, &oldinst);
regs->regs[reg] = value;
case MIPS16e_lhu_op:
if (user && !access_ok(addr, 2))
LoadHWU(addr, value, res);
MIPS16e_compute_return_epc(regs, &oldinst);
regs->regs[reg] = value;
case MIPS16e_lwpc_op:
case MIPS16e_lwsp_op:
if (user && !access_ok(addr, 4))
LoadW(addr, value, res);
MIPS16e_compute_return_epc(regs, &oldinst);
regs->regs[reg] = value;
case MIPS16e_lwu_op:
* A 32-bit kernel might be running on a 64-bit processor. But
* if we're on a 32-bit processor and an i-cache incoherency
* or race makes us see a 64-bit instruction here the sdl/sdr
* would blow up, so for now we don't handle unaligned 64-bit
* instructions on 32-bit kernels.
if (user && !access_ok(addr, 4))
LoadWU(addr, value, res);
MIPS16e_compute_return_epc(regs, &oldinst);
regs->regs[reg] = value;
#endif /* CONFIG_64BIT */
/* Cannot handle 64-bit instructions in 32-bit kernel */
* A 32-bit kernel might be running on a 64-bit processor. But
* if we're on a 32-bit processor and an i-cache incoherency
* or race makes us see a 64-bit instruction here the sdl/sdr
* would blow up, so for now we don't handle unaligned 64-bit
* instructions on 32-bit kernels.
if (user && !access_ok(addr, 8))
LoadDW(addr, value, res);
MIPS16e_compute_return_epc(regs, &oldinst);
regs->regs[reg] = value;
#endif /* CONFIG_64BIT */
/* Cannot handle 64-bit instructions in 32-bit kernel */
if (user && !access_ok(addr, 2))
MIPS16e_compute_return_epc(regs, &oldinst);
value = regs->regs[reg];
StoreHW(addr, value, res);
case MIPS16e_swsp_op:
case MIPS16e_i8_op: /* actually - MIPS16e_swrasp_func */
if (user && !access_ok(addr, 4))
MIPS16e_compute_return_epc(regs, &oldinst);
value = regs->regs[reg];
StoreW(addr, value, res);
* A 32-bit kernel might be running on a 64-bit processor. But
* if we're on a 32-bit processor and an i-cache incoherency
* or race makes us see a 64-bit instruction here the sdl/sdr
* would blow up, so for now we don't handle unaligned 64-bit
* instructions on 32-bit kernels.
if (user && !access_ok(addr, 8))
MIPS16e_compute_return_epc(regs, &oldinst);
value = regs->regs[reg];
StoreDW(addr, value, res);
#endif /* CONFIG_64BIT */
/* Cannot handle 64-bit instructions in 32-bit kernel */
* Pheeee... We encountered a yet unknown instruction or
* cache coherence problem. Die sucker, die ...
#ifdef CONFIG_DEBUG_FS
unaligned_instructions++;
/* roll back jump/branch */
regs->cp0_epc = origpc;
regs->regs[31] = orig31;
/* Did we have an exception handler installed? */
if (fixup_exception(regs))
die_if_kernel("Unhandled kernel unaligned access", regs);
die_if_kernel("Unhandled kernel unaligned access", regs);
("Unhandled kernel unaligned access or invalid instruction", regs);
asmlinkage void do_ade(struct pt_regs *regs)
enum ctx_state prev_state;
prev_state = exception_enter();
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
1, regs, regs->cp0_badvaddr);
* Check whether we are hitting the space between the CPU-implemented
* maximum virtual user address and the 64-bit maximum virtual user
* address, and do exception handling to get EFAULTs for get_user/put_user.
if ((regs->cp0_badvaddr >= (1UL << cpu_vmbits)) &&
(regs->cp0_badvaddr < XKSSEG)) {
if (fixup_exception(regs)) {
current->thread.cp0_baduaddr = regs->cp0_badvaddr;
* Did we catch a fault trying to load an instruction?
if (regs->cp0_badvaddr == regs->cp0_epc)
if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
* Do branch emulation only if we didn't forward the exception.
* This is all so very ugly ...
* Are we running in microMIPS mode?
if (get_isa16_mode(regs->cp0_epc)) {
* Did we catch a fault trying to load an instruction in
* 16-bit mode?
if (regs->cp0_badvaddr == msk_isa16_mode(regs->cp0_epc))
if (unaligned_action == UNALIGNED_ACTION_SHOW)
show_registers(regs);
if (cpu_has_mmips) {
emulate_load_store_microMIPS(regs,
(void __user *)regs->cp0_badvaddr);
if (cpu_has_mips16) {
emulate_load_store_MIPS16e(regs,
(void __user *)regs->cp0_badvaddr);
if (unaligned_action == UNALIGNED_ACTION_SHOW)
show_registers(regs);
pc = (unsigned int *)exception_epc(regs);
emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc);
die_if_kernel("Kernel unaligned instruction access", regs);
* XXX On return from the signal handler we should advance the epc
exception_exit(prev_state);
#ifdef CONFIG_DEBUG_FS
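/*
 * Expose the emulation counter and the unaligned_action knob through
 * debugfs, under mips_debugfs_dir (typically /sys/kernel/debug/mips).
 */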
static int __init debugfs_unaligned(void)
{
        debugfs_create_u32("unaligned_instructions", S_IRUGO, mips_debugfs_dir,
                           &unaligned_instructions);
        debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR,
                           mips_debugfs_dir, &unaligned_action);
        return 0;
}
arch_initcall(debugfs_unaligned);