2 * Handle unaligned accesses by emulation.
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
8 * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
9 * Copyright (C) 1999 Silicon Graphics, Inc.
10 * Copyright (C) 2014 Imagination Technologies Ltd.
12 * This file contains exception handler for address error exception with the
13 * special capability to execute faulting instructions in software. The
14 * handler does not try to handle the case when the program counter points
15 * to an address not aligned to a word boundary.
17 * Putting data to unaligned addresses is a bad practice even on Intel where
18 * only the performance is affected. Much worse is that such code is non-
19 * portable. Due to several programs that die on MIPS due to alignment
20 * problems I decided to implement this handler anyway though I originally
21 * didn't intend to do this at all for user code.
23 * For now I enable fixing of address errors by default to make life easier.
24 * However, I intend to disable this at some point in the future when the alignment
25 * problems with user programs have been fixed. For programmers this is the
28 * Fixing address errors is a per process option. The option is inherited
29 * across fork(2) and execve(2) calls. If you really want to use the
30 * option in your user programs - I discourage the use of the software
31 * emulation strongly - use the following code in your userland stuff:
33 * #include <sys/sysmips.h>
36 * sysmips(MIPS_FIXADE, x);
39 * The argument x is 0 for disabling software emulation, enabled otherwise.
41 * Below a little program to play around with this feature.
44 * #include <sys/sysmips.h>
47 * unsigned char bar[8];
50 * main(int argc, char *argv[])
52 * struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
53 * unsigned int *p = (unsigned int *) (x.bar + 3);
57 * sysmips(MIPS_FIXADE, atoi(argv[1]));
59 * printf("*p = %08lx\n", *p);
63 * for(i = 0; i <= 7; i++)
64 * printf("%02x ", x.bar[i]);
68 * Coprocessor loads are not supported; I think this case is unimportant
71 * TODO: Handle ndc (attempted store to doubleword in uncached memory)
72 * exception for the R6000.
73 * A store crossing a page boundary might be executed only partially.
74 * Undo the partial store in this case.
76 #include <linux/context_tracking.h>
78 #include <linux/signal.h>
79 #include <linux/smp.h>
80 #include <linux/sched.h>
81 #include <linux/debugfs.h>
82 #include <linux/perf_event.h>
85 #include <asm/branch.h>
86 #include <asm/byteorder.h>
88 #include <asm/debug.h>
90 #include <asm/fpu_emulator.h>
92 #include <asm/unaligned-emul.h>
93 #include <asm/mmu_context.h>
94 #include <linux/uaccess.h>
96 #include "access-helper.h"
99 UNALIGNED_ACTION_QUIET,
100 UNALIGNED_ACTION_SIGNAL,
101 UNALIGNED_ACTION_SHOW,
103 #ifdef CONFIG_DEBUG_FS
104 static u32 unaligned_instructions;
105 static u32 unaligned_action;
107 #define unaligned_action UNALIGNED_ACTION_QUIET
109 extern void show_registers(struct pt_regs *regs);
111 static void emulate_load_store_insn(struct pt_regs *regs,
112 void __user *addr, unsigned int *pc)
114 unsigned long origpc, orig31, value;
115 union mips_instruction insn;
117 bool user = user_mode(regs);
119 origpc = (unsigned long)pc;
120 orig31 = regs->regs[31];
122 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
125 * This load never faults.
127 __get_inst32(&insn.word, pc, user);
129 switch (insn.i_format.opcode) {
131 * These are instructions that a compiler doesn't generate. We
132 * can assume therefore that the code is MIPS-aware and
133 * really buggy. Emulating these instructions would break the
142 * For these instructions the only way to create an address
143 * error is an attempted access to kernel/supervisor address
160 * The remaining opcodes are the ones that are really of
164 if (insn.dsp_format.func == lx_op) {
165 switch (insn.dsp_format.op) {
167 if (user && !access_ok(addr, 4))
169 LoadW(addr, value, res);
172 compute_return_epc(regs);
173 regs->regs[insn.dsp_format.rd] = value;
176 if (user && !access_ok(addr, 2))
178 LoadHW(addr, value, res);
181 compute_return_epc(regs);
182 regs->regs[insn.dsp_format.rd] = value;
191 * we can land here only from kernel accessing user
192 * memory, so we need to "switch" the address limit to
193 * user space, so that address check can work properly.
195 switch (insn.spec3_format.func) {
197 if (!access_ok(addr, 2))
199 LoadHWE(addr, value, res);
202 compute_return_epc(regs);
203 regs->regs[insn.spec3_format.rt] = value;
206 if (!access_ok(addr, 4))
208 LoadWE(addr, value, res);
211 compute_return_epc(regs);
212 regs->regs[insn.spec3_format.rt] = value;
215 if (!access_ok(addr, 2))
217 LoadHWUE(addr, value, res);
220 compute_return_epc(regs);
221 regs->regs[insn.spec3_format.rt] = value;
224 if (!access_ok(addr, 2))
226 compute_return_epc(regs);
227 value = regs->regs[insn.spec3_format.rt];
228 StoreHWE(addr, value, res);
233 if (!access_ok(addr, 4))
235 compute_return_epc(regs);
236 value = regs->regs[insn.spec3_format.rt];
237 StoreWE(addr, value, res);
248 if (user && !access_ok(addr, 2))
251 if (IS_ENABLED(CONFIG_EVA) && user)
252 LoadHWE(addr, value, res);
254 LoadHW(addr, value, res);
258 compute_return_epc(regs);
259 regs->regs[insn.i_format.rt] = value;
263 if (user && !access_ok(addr, 4))
266 if (IS_ENABLED(CONFIG_EVA) && user)
267 LoadWE(addr, value, res);
269 LoadW(addr, value, res);
273 compute_return_epc(regs);
274 regs->regs[insn.i_format.rt] = value;
278 if (user && !access_ok(addr, 2))
281 if (IS_ENABLED(CONFIG_EVA) && user)
282 LoadHWUE(addr, value, res);
284 LoadHWU(addr, value, res);
288 compute_return_epc(regs);
289 regs->regs[insn.i_format.rt] = value;
295 * A 32-bit kernel might be running on a 64-bit processor. But
296 * if we're on a 32-bit processor and an i-cache incoherency
297 * or race makes us see a 64-bit instruction here the sdl/sdr
298 * would blow up, so for now we don't handle unaligned 64-bit
299 * instructions on 32-bit kernels.
301 if (user && !access_ok(addr, 4))
304 LoadWU(addr, value, res);
307 compute_return_epc(regs);
308 regs->regs[insn.i_format.rt] = value;
310 #endif /* CONFIG_64BIT */
312 /* Cannot handle 64-bit instructions in 32-bit kernel */
318 * A 32-bit kernel might be running on a 64-bit processor. But
319 * if we're on a 32-bit processor and an i-cache incoherency
320 * or race makes us see a 64-bit instruction here the sdl/sdr
321 * would blow up, so for now we don't handle unaligned 64-bit
322 * instructions on 32-bit kernels.
324 if (user && !access_ok(addr, 8))
327 LoadDW(addr, value, res);
330 compute_return_epc(regs);
331 regs->regs[insn.i_format.rt] = value;
333 #endif /* CONFIG_64BIT */
335 /* Cannot handle 64-bit instructions in 32-bit kernel */
339 if (user && !access_ok(addr, 2))
342 compute_return_epc(regs);
343 value = regs->regs[insn.i_format.rt];
345 if (IS_ENABLED(CONFIG_EVA) && user)
346 StoreHWE(addr, value, res);
348 StoreHW(addr, value, res);
355 if (user && !access_ok(addr, 4))
358 compute_return_epc(regs);
359 value = regs->regs[insn.i_format.rt];
361 if (IS_ENABLED(CONFIG_EVA) && user)
362 StoreWE(addr, value, res);
364 StoreW(addr, value, res);
373 * A 32-bit kernel might be running on a 64-bit processor. But
374 * if we're on a 32-bit processor and an i-cache incoherency
375 * or race makes us see a 64-bit instruction here the sdl/sdr
376 * would blow up, so for now we don't handle unaligned 64-bit
377 * instructions on 32-bit kernels.
379 if (user && !access_ok(addr, 8))
382 compute_return_epc(regs);
383 value = regs->regs[insn.i_format.rt];
384 StoreDW(addr, value, res);
388 #endif /* CONFIG_64BIT */
390 /* Cannot handle 64-bit instructions in 32-bit kernel */
393 #ifdef CONFIG_MIPS_FP_SUPPORT
400 void __user *fault_addr = NULL;
402 die_if_kernel("Unaligned FP access in kernel code", regs);
403 BUG_ON(!used_math());
405 res = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1,
407 own_fpu(1); /* Restore FPU state. */
409 /* Signal if something went wrong. */
410 process_fpemu_return(res, fault_addr, 0);
416 #endif /* CONFIG_MIPS_FP_SUPPORT */
418 #ifdef CONFIG_CPU_HAS_MSA
421 unsigned int wd, preempted;
429 * If we've reached this point then userland should have taken
430 * the MSA disabled exception & initialised vector context at
431 * some point in the past.
433 BUG_ON(!thread_msa_context_live());
435 df = insn.msa_mi10_format.df;
436 wd = insn.msa_mi10_format.wd;
437 fpr = ¤t->thread.fpu.fpr[wd];
439 switch (insn.msa_mi10_format.func) {
441 if (!access_ok(addr, sizeof(*fpr)))
446 * If we have live MSA context keep track of
447 * whether we get preempted in order to avoid
448 * the register context we load being clobbered
449 * by the live context as it's saved during
450 * preemption. If we don't have live context
451 * then it can't be saved to clobber the value
454 preempted = test_thread_flag(TIF_USEDMSA);
456 res = __copy_from_user_inatomic(fpr, addr,
462 * Update the hardware register if it is in use
463 * by the task in this quantum, in order to
464 * avoid having to save & restore the whole
468 if (test_thread_flag(TIF_USEDMSA)) {
469 write_msa_wr(wd, fpr, df);
477 if (!access_ok(addr, sizeof(*fpr)))
481 * Update from the hardware register if it is in use by
482 * the task in this quantum, in order to avoid having to
483 * save & restore the whole vector context.
486 if (test_thread_flag(TIF_USEDMSA))
487 read_msa_wr(wd, fpr, df);
490 res = __copy_to_user_inatomic(addr, fpr, sizeof(*fpr));
499 compute_return_epc(regs);
502 #endif /* CONFIG_CPU_HAS_MSA */
504 #ifndef CONFIG_CPU_MIPSR6
506 * COP2 is available to implementor for application specific use.
507 * It's up to applications to register a notifier chain and do
508 * whatever they have to do, including possible sending of signals.
510 * This instruction has been reallocated in Release 6
513 cu2_notifier_call_chain(CU2_LWC2_OP, regs);
517 cu2_notifier_call_chain(CU2_LDC2_OP, regs);
521 cu2_notifier_call_chain(CU2_SWC2_OP, regs);
525 cu2_notifier_call_chain(CU2_SDC2_OP, regs);
530 * Pheeee... We encountered an yet unknown instruction or
531 * cache coherence problem. Die sucker, die ...
536 #ifdef CONFIG_DEBUG_FS
537 unaligned_instructions++;
543 /* roll back jump/branch */
544 regs->cp0_epc = origpc;
545 regs->regs[31] = orig31;
546 /* Did we have an exception handler installed? */
547 if (fixup_exception(regs))
550 die_if_kernel("Unhandled kernel unaligned access", regs);
556 die_if_kernel("Unhandled kernel unaligned access", regs);
563 ("Unhandled kernel unaligned access or invalid instruction", regs);
567 /* Recode table from 16-bit register notation to 32-bit GPR. */
568 const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 };
570 /* Recode table from 16-bit STORE register notation to 32-bit GPR. */
571 static const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 };
573 static void emulate_load_store_microMIPS(struct pt_regs *regs,
579 unsigned int reg = 0, rvar;
580 unsigned long orig31;
584 unsigned long origpc, contpc;
585 union mips_instruction insn;
586 struct mm_decoded_insn mminsn;
587 bool user = user_mode(regs);
589 origpc = regs->cp0_epc;
590 orig31 = regs->regs[31];
592 mminsn.micro_mips_mode = 1;
595 * This load never faults.
597 pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
598 __get_user(halfword, pc16);
600 contpc = regs->cp0_epc + 2;
601 word = ((unsigned int)halfword << 16);
604 if (!mm_insn_16bit(halfword)) {
605 __get_user(halfword, pc16);
607 contpc = regs->cp0_epc + 4;
613 if (get_user(halfword, pc16))
615 mminsn.next_pc_inc = 2;
616 word = ((unsigned int)halfword << 16);
618 if (!mm_insn_16bit(halfword)) {
620 if (get_user(halfword, pc16))
622 mminsn.next_pc_inc = 4;
625 mminsn.next_insn = word;
627 insn = (union mips_instruction)(mminsn.insn);
628 if (mm_isBranchInstr(regs, mminsn, &contpc))
629 insn = (union mips_instruction)(mminsn.next_insn);
631 /* Parse instruction to find what to do */
633 switch (insn.mm_i_format.opcode) {
636 switch (insn.mm_x_format.func) {
638 reg = insn.mm_x_format.rd;
645 switch (insn.mm_m_format.func) {
647 reg = insn.mm_m_format.rd;
651 if (user && !access_ok(addr, 8))
654 LoadW(addr, value, res);
657 regs->regs[reg] = value;
659 LoadW(addr, value, res);
662 regs->regs[reg + 1] = value;
666 reg = insn.mm_m_format.rd;
670 if (user && !access_ok(addr, 8))
673 value = regs->regs[reg];
674 StoreW(addr, value, res);
678 value = regs->regs[reg + 1];
679 StoreW(addr, value, res);
686 reg = insn.mm_m_format.rd;
690 if (user && !access_ok(addr, 16))
693 LoadDW(addr, value, res);
696 regs->regs[reg] = value;
698 LoadDW(addr, value, res);
701 regs->regs[reg + 1] = value;
703 #endif /* CONFIG_64BIT */
709 reg = insn.mm_m_format.rd;
713 if (user && !access_ok(addr, 16))
716 value = regs->regs[reg];
717 StoreDW(addr, value, res);
721 value = regs->regs[reg + 1];
722 StoreDW(addr, value, res);
726 #endif /* CONFIG_64BIT */
731 reg = insn.mm_m_format.rd;
733 if ((rvar > 9) || !reg)
736 if (user && !access_ok(addr, 4 * (rvar + 1)))
739 if (user && !access_ok(addr, 4 * rvar))
744 for (i = 16; rvar; rvar--, i++) {
745 LoadW(addr, value, res);
749 regs->regs[i] = value;
751 if ((reg & 0xf) == 9) {
752 LoadW(addr, value, res);
756 regs->regs[30] = value;
759 LoadW(addr, value, res);
762 regs->regs[31] = value;
767 reg = insn.mm_m_format.rd;
769 if ((rvar > 9) || !reg)
772 if (user && !access_ok(addr, 4 * (rvar + 1)))
775 if (user && !access_ok(addr, 4 * rvar))
780 for (i = 16; rvar; rvar--, i++) {
781 value = regs->regs[i];
782 StoreW(addr, value, res);
787 if ((reg & 0xf) == 9) {
788 value = regs->regs[30];
789 StoreW(addr, value, res);
795 value = regs->regs[31];
796 StoreW(addr, value, res);
804 reg = insn.mm_m_format.rd;
806 if ((rvar > 9) || !reg)
809 if (user && !access_ok(addr, 8 * (rvar + 1)))
812 if (user && !access_ok(addr, 8 * rvar))
818 for (i = 16; rvar; rvar--, i++) {
819 LoadDW(addr, value, res);
823 regs->regs[i] = value;
825 if ((reg & 0xf) == 9) {
826 LoadDW(addr, value, res);
830 regs->regs[30] = value;
833 LoadDW(addr, value, res);
836 regs->regs[31] = value;
839 #endif /* CONFIG_64BIT */
845 reg = insn.mm_m_format.rd;
847 if ((rvar > 9) || !reg)
850 if (user && !access_ok(addr, 8 * (rvar + 1)))
853 if (user && !access_ok(addr, 8 * rvar))
859 for (i = 16; rvar; rvar--, i++) {
860 value = regs->regs[i];
861 StoreDW(addr, value, res);
866 if ((reg & 0xf) == 9) {
867 value = regs->regs[30];
868 StoreDW(addr, value, res);
874 value = regs->regs[31];
875 StoreDW(addr, value, res);
880 #endif /* CONFIG_64BIT */
884 /* LWC2, SWC2, LDC2, SDC2 are not serviced */
890 switch (insn.mm_m_format.func) {
892 reg = insn.mm_m_format.rd;
896 /* LL,SC,LLD,SCD are not serviced */
899 #ifdef CONFIG_MIPS_FP_SUPPORT
901 switch (insn.mm_x_format.func) {
915 void __user *fault_addr = NULL;
918 /* roll back jump/branch */
919 regs->cp0_epc = origpc;
920 regs->regs[31] = orig31;
922 die_if_kernel("Unaligned FP access in kernel code", regs);
923 BUG_ON(!used_math());
924 BUG_ON(!is_fpu_owner());
926 res = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1,
928 own_fpu(1); /* restore FPU state */
930 /* If something went wrong, signal */
931 process_fpemu_return(res, fault_addr, 0);
937 #endif /* CONFIG_MIPS_FP_SUPPORT */
940 reg = insn.mm_i_format.rt;
944 reg = insn.mm_i_format.rt;
948 reg = insn.mm_i_format.rt;
952 reg = insn.mm_i_format.rt;
956 reg = insn.mm_i_format.rt;
960 reg = insn.mm_i_format.rt;
964 reg = insn.mm_i_format.rt;
968 switch (insn.mm16_m_format.func) {
970 reg = insn.mm16_m_format.rlist;
972 if (user && !access_ok(addr, 4 * rvar))
975 for (i = 16; rvar; rvar--, i++) {
976 LoadW(addr, value, res);
980 regs->regs[i] = value;
982 LoadW(addr, value, res);
985 regs->regs[31] = value;
990 reg = insn.mm16_m_format.rlist;
992 if (user && !access_ok(addr, 4 * rvar))
995 for (i = 16; rvar; rvar--, i++) {
996 value = regs->regs[i];
997 StoreW(addr, value, res);
1002 value = regs->regs[31];
1003 StoreW(addr, value, res);
1014 reg = reg16to32[insn.mm16_rb_format.rt];
1018 reg = reg16to32[insn.mm16_rb_format.rt];
1022 reg = reg16to32st[insn.mm16_rb_format.rt];
1026 reg = reg16to32st[insn.mm16_rb_format.rt];
1030 reg = insn.mm16_r5_format.rt;
1034 reg = insn.mm16_r5_format.rt;
1038 reg = reg16to32[insn.mm16_r3_format.rt];
1046 if (user && !access_ok(addr, 2))
1049 LoadHW(addr, value, res);
1052 regs->regs[reg] = value;
1056 if (user && !access_ok(addr, 2))
1059 LoadHWU(addr, value, res);
1062 regs->regs[reg] = value;
1066 if (user && !access_ok(addr, 4))
1069 LoadW(addr, value, res);
1072 regs->regs[reg] = value;
1078 * A 32-bit kernel might be running on a 64-bit processor. But
1079 * if we're on a 32-bit processor and an i-cache incoherency
1080 * or race makes us see a 64-bit instruction here the sdl/sdr
1081 * would blow up, so for now we don't handle unaligned 64-bit
1082 * instructions on 32-bit kernels.
1084 if (user && !access_ok(addr, 4))
1087 LoadWU(addr, value, res);
1090 regs->regs[reg] = value;
1092 #endif /* CONFIG_64BIT */
1094 /* Cannot handle 64-bit instructions in 32-bit kernel */
1100 * A 32-bit kernel might be running on a 64-bit processor. But
1101 * if we're on a 32-bit processor and an i-cache incoherency
1102 * or race makes us see a 64-bit instruction here the sdl/sdr
1103 * would blow up, so for now we don't handle unaligned 64-bit
1104 * instructions on 32-bit kernels.
1106 if (user && !access_ok(addr, 8))
1109 LoadDW(addr, value, res);
1112 regs->regs[reg] = value;
1114 #endif /* CONFIG_64BIT */
1116 /* Cannot handle 64-bit instructions in 32-bit kernel */
1120 if (user && !access_ok(addr, 2))
1123 value = regs->regs[reg];
1124 StoreHW(addr, value, res);
1130 if (user && !access_ok(addr, 4))
1133 value = regs->regs[reg];
1134 StoreW(addr, value, res);
1142 * A 32-bit kernel might be running on a 64-bit processor. But
1143 * if we're on a 32-bit processor and an i-cache incoherency
1144 * or race makes us see a 64-bit instruction here the sdl/sdr
1145 * would blow up, so for now we don't handle unaligned 64-bit
1146 * instructions on 32-bit kernels.
1148 if (user && !access_ok(addr, 8))
1151 value = regs->regs[reg];
1152 StoreDW(addr, value, res);
1156 #endif /* CONFIG_64BIT */
1158 /* Cannot handle 64-bit instructions in 32-bit kernel */
1162 regs->cp0_epc = contpc; /* advance or branch */
1164 #ifdef CONFIG_DEBUG_FS
1165 unaligned_instructions++;
1170 /* roll back jump/branch */
1171 regs->cp0_epc = origpc;
1172 regs->regs[31] = orig31;
1173 /* Did we have an exception handler installed? */
1174 if (fixup_exception(regs))
1177 die_if_kernel("Unhandled kernel unaligned access", regs);
1183 die_if_kernel("Unhandled kernel unaligned access", regs);
1190 ("Unhandled kernel unaligned access or invalid instruction", regs);
1194 static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr)
1196 unsigned long value;
1199 unsigned long orig31;
1201 unsigned long origpc;
1202 union mips16e_instruction mips16inst, oldinst;
1203 unsigned int opcode;
1205 bool user = user_mode(regs);
1207 origpc = regs->cp0_epc;
1208 orig31 = regs->regs[31];
1209 pc16 = (unsigned short __user *)msk_isa16_mode(origpc);
1211 * This load never faults.
1213 __get_user(mips16inst.full, pc16);
1214 oldinst = mips16inst;
1216 /* skip EXTEND instruction */
1217 if (mips16inst.ri.opcode == MIPS16e_extend_op) {
1220 __get_user(mips16inst.full, pc16);
1221 } else if (delay_slot(regs)) {
1222 /* skip jump instructions */
1223 /* JAL/JALX are 32 bits but have OPCODE in first short int */
1224 if (mips16inst.ri.opcode == MIPS16e_jal_op)
1227 if (get_user(mips16inst.full, pc16))
1231 opcode = mips16inst.ri.opcode;
1233 case MIPS16e_i64_op: /* I64 or RI64 instruction */
1234 switch (mips16inst.i64.func) { /* I64/RI64 func field check */
1235 case MIPS16e_ldpc_func:
1236 case MIPS16e_ldsp_func:
1237 reg = reg16to32[mips16inst.ri64.ry];
1240 case MIPS16e_sdsp_func:
1241 reg = reg16to32[mips16inst.ri64.ry];
1244 case MIPS16e_sdrasp_func:
1245 reg = 29; /* GPRSP */
1251 case MIPS16e_swsp_op:
1252 reg = reg16to32[mips16inst.ri.rx];
1253 if (extended && cpu_has_mips16e2)
1254 switch (mips16inst.ri.imm >> 5) {
1259 opcode = MIPS16e_sh_op;
1266 case MIPS16e_lwpc_op:
1267 reg = reg16to32[mips16inst.ri.rx];
1270 case MIPS16e_lwsp_op:
1271 reg = reg16to32[mips16inst.ri.rx];
1272 if (extended && cpu_has_mips16e2)
1273 switch (mips16inst.ri.imm >> 5) {
1278 opcode = MIPS16e_lh_op;
1281 opcode = MIPS16e_lhu_op;
1289 if (mips16inst.i8.func != MIPS16e_swrasp_func)
1291 reg = 29; /* GPRSP */
1295 reg = reg16to32[mips16inst.rri.ry];
1302 case MIPS16e_lbu_op:
1307 if (user && !access_ok(addr, 2))
1310 LoadHW(addr, value, res);
1313 MIPS16e_compute_return_epc(regs, &oldinst);
1314 regs->regs[reg] = value;
1317 case MIPS16e_lhu_op:
1318 if (user && !access_ok(addr, 2))
1321 LoadHWU(addr, value, res);
1324 MIPS16e_compute_return_epc(regs, &oldinst);
1325 regs->regs[reg] = value;
1329 case MIPS16e_lwpc_op:
1330 case MIPS16e_lwsp_op:
1331 if (user && !access_ok(addr, 4))
1334 LoadW(addr, value, res);
1337 MIPS16e_compute_return_epc(regs, &oldinst);
1338 regs->regs[reg] = value;
1341 case MIPS16e_lwu_op:
1344 * A 32-bit kernel might be running on a 64-bit processor. But
1345 * if we're on a 32-bit processor and an i-cache incoherency
1346 * or race makes us see a 64-bit instruction here the sdl/sdr
1347 * would blow up, so for now we don't handle unaligned 64-bit
1348 * instructions on 32-bit kernels.
1350 if (user && !access_ok(addr, 4))
1353 LoadWU(addr, value, res);
1356 MIPS16e_compute_return_epc(regs, &oldinst);
1357 regs->regs[reg] = value;
1359 #endif /* CONFIG_64BIT */
1361 /* Cannot handle 64-bit instructions in 32-bit kernel */
1368 * A 32-bit kernel might be running on a 64-bit processor. But
1369 * if we're on a 32-bit processor and an i-cache incoherency
1370 * or race makes us see a 64-bit instruction here the sdl/sdr
1371 * would blow up, so for now we don't handle unaligned 64-bit
1372 * instructions on 32-bit kernels.
1374 if (user && !access_ok(addr, 8))
1377 LoadDW(addr, value, res);
1380 MIPS16e_compute_return_epc(regs, &oldinst);
1381 regs->regs[reg] = value;
1383 #endif /* CONFIG_64BIT */
1385 /* Cannot handle 64-bit instructions in 32-bit kernel */
1389 if (user && !access_ok(addr, 2))
1392 MIPS16e_compute_return_epc(regs, &oldinst);
1393 value = regs->regs[reg];
1394 StoreHW(addr, value, res);
1400 case MIPS16e_swsp_op:
1401 case MIPS16e_i8_op: /* actually - MIPS16e_swrasp_func */
1402 if (user && !access_ok(addr, 4))
1405 MIPS16e_compute_return_epc(regs, &oldinst);
1406 value = regs->regs[reg];
1407 StoreW(addr, value, res);
1416 * A 32-bit kernel might be running on a 64-bit processor. But
1417 * if we're on a 32-bit processor and an i-cache incoherency
1418 * or race makes us see a 64-bit instruction here the sdl/sdr
1419 * would blow up, so for now we don't handle unaligned 64-bit
1420 * instructions on 32-bit kernels.
1422 if (user && !access_ok(addr, 8))
1425 MIPS16e_compute_return_epc(regs, &oldinst);
1426 value = regs->regs[reg];
1427 StoreDW(addr, value, res);
1431 #endif /* CONFIG_64BIT */
1433 /* Cannot handle 64-bit instructions in 32-bit kernel */
1438 * Pheeee... We encountered an yet unknown instruction or
1439 * cache coherence problem. Die sucker, die ...
1444 #ifdef CONFIG_DEBUG_FS
1445 unaligned_instructions++;
1451 /* roll back jump/branch */
1452 regs->cp0_epc = origpc;
1453 regs->regs[31] = orig31;
1454 /* Did we have an exception handler installed? */
1455 if (fixup_exception(regs))
1458 die_if_kernel("Unhandled kernel unaligned access", regs);
1464 die_if_kernel("Unhandled kernel unaligned access", regs);
1471 ("Unhandled kernel unaligned access or invalid instruction", regs);
1475 asmlinkage void do_ade(struct pt_regs *regs)
1477 enum ctx_state prev_state;
1480 prev_state = exception_enter();
1481 perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
1482 1, regs, regs->cp0_badvaddr);
1484 * Did we catch a fault trying to load an instruction?
1486 if (regs->cp0_badvaddr == regs->cp0_epc)
1489 if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
1491 if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
1495 * Do branch emulation only if we didn't forward the exception.
1496 * This is all so but ugly ...
1500 * Are we running in microMIPS mode?
1502 if (get_isa16_mode(regs->cp0_epc)) {
1504 * Did we catch a fault trying to load an instruction in
1507 if (regs->cp0_badvaddr == msk_isa16_mode(regs->cp0_epc))
1509 if (unaligned_action == UNALIGNED_ACTION_SHOW)
1510 show_registers(regs);
1512 if (cpu_has_mmips) {
1513 emulate_load_store_microMIPS(regs,
1514 (void __user *)regs->cp0_badvaddr);
1518 if (cpu_has_mips16) {
1519 emulate_load_store_MIPS16e(regs,
1520 (void __user *)regs->cp0_badvaddr);
1527 if (unaligned_action == UNALIGNED_ACTION_SHOW)
1528 show_registers(regs);
1529 pc = (unsigned int *)exception_epc(regs);
1531 emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc);
1536 die_if_kernel("Kernel unaligned instruction access", regs);
1540 * XXX On return from the signal handler we should advance the epc
1542 exception_exit(prev_state);
1545 #ifdef CONFIG_DEBUG_FS
1546 static int __init debugfs_unaligned(void)
1548 debugfs_create_u32("unaligned_instructions", S_IRUGO, mips_debugfs_dir,
1549 &unaligned_instructions);
1550 debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR,
1551 mips_debugfs_dir, &unaligned_action);
1554 arch_initcall(debugfs_unaligned);