 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle
 * Copyright (C) 1995, 1996 Paul M. Antoine
 * Copyright (C) 1998 Ulf Carlsson
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki
 * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc. All rights reserved.
 * Copyright (C) 2014, Imagination Technologies Ltd.
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/cpu_pm.h>
#include <linux/kexec.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/extable.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/bootmem.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <linux/notifier.h>
#include <linux/kdb.h>
#include <linux/irq.h>
#include <linux/perf_event.h>

#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/cpu-type.h>
#include <asm/fpu_emulator.h>
#include <asm/mips-cm.h>
#include <asm/mips-r2-to-r6-emul.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/module.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/siginfo.h>
#include <asm/tlbdebug.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
#include <asm/watch.h>
#include <asm/mmu_context.h>
#include <asm/types.h>
#include <asm/stacktrace.h>
extern void check_wait(void);
extern asmlinkage void rollback_handle_int(void);
extern asmlinkage void handle_int(void);
extern u32 handle_tlbl[];
extern u32 handle_tlbs[];
extern u32 handle_tlbm[];
extern asmlinkage void handle_adel(void);
extern asmlinkage void handle_ades(void);
extern asmlinkage void handle_ibe(void);
extern asmlinkage void handle_dbe(void);
extern asmlinkage void handle_sys(void);
extern asmlinkage void handle_bp(void);
extern asmlinkage void handle_ri(void);
extern asmlinkage void handle_ri_rdhwr_tlbp(void);
extern asmlinkage void handle_ri_rdhwr(void);
extern asmlinkage void handle_cpu(void);
extern asmlinkage void handle_ov(void);
extern asmlinkage void handle_tr(void);
extern asmlinkage void handle_msa_fpe(void);
extern asmlinkage void handle_fpe(void);
extern asmlinkage void handle_ftlb(void);
extern asmlinkage void handle_msa(void);
extern asmlinkage void handle_mdmx(void);
extern asmlinkage void handle_watch(void);
extern asmlinkage void handle_mt(void);
extern asmlinkage void handle_dsp(void);
extern asmlinkage void handle_mcheck(void);
extern asmlinkage void handle_reserved(void);
extern void tlb_do_page_fault_0(void);

void (*board_be_init)(void);
int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
void (*board_nmi_handler_setup)(void);
void (*board_ejtag_handler_setup)(void);
void (*board_bind_eic_interrupt)(int irq, int regset);
void (*board_ebase_setup)(void);
void (*board_cache_error_setup)(void);
static void show_raw_backtrace(unsigned long reg29)
{
	unsigned long *sp = (unsigned long *)(reg29 & ~3);

	printk("Call Trace:");
#ifdef CONFIG_KALLSYMS
	while (!kstack_end(sp)) {
		unsigned long __user *p =
			(unsigned long __user *)(unsigned long)sp++;
		if (__get_user(addr, p)) {
			printk(" (Bad stack address)");
		if (__kernel_text_address(addr))

#ifdef CONFIG_KALLSYMS
static int __init set_raw_show_trace(char *str)
__setup("raw_show_trace", set_raw_show_trace);
static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
{
	unsigned long sp = regs->regs[29];
	unsigned long ra = regs->regs[31];
	unsigned long pc = regs->cp0_epc;

	if (raw_show_trace || user_mode(regs) || !__kernel_text_address(pc)) {
		show_raw_backtrace(sp);

	printk("Call Trace:\n");
	pc = unwind_stack(task, &sp, pc, &ra);
/*
 * This routine abuses get_user()/put_user() to reference pointers
 * with at least a bit of error checking ...
 */
static void show_stacktrace(struct task_struct *task,
	const struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned long __user *sp = (unsigned long __user *)regs->regs[29];

	while ((unsigned long) sp & (PAGE_SIZE - 1)) {
		if (i && ((i % (64 / field)) == 0)) {

		if (__get_user(stackdata, sp++)) {
			pr_cont(" (Bad stack address)");

		pr_cont(" %0*lx", field, stackdata);

	show_backtrace(task, regs);
void show_stack(struct task_struct *task, unsigned long *sp)
{
	mm_segment_t old_fs = get_fs();

	regs.cp0_status = KSU_KERNEL;
	regs.regs[29] = (unsigned long)sp;

	if (task && task != current) {
		regs.regs[29] = task->thread.reg29;
		regs.cp0_epc = task->thread.reg31;
#ifdef CONFIG_KGDB_KDB
	} else if (atomic_read(&kgdb_active) != -1 &&
		memcpy(&regs, kdb_current_regs, sizeof(regs));
#endif /* CONFIG_KGDB_KDB */

	prepare_frametrace(&regs);

	/*
	 * show_stack() deals exclusively with kernel mode, so be sure to access
	 * the stack in the kernel (not user) address space.
	 */
	show_stacktrace(task, &regs);
static void show_code(unsigned int __user *pc)
{
	unsigned short __user *pc16 = NULL;

	if ((unsigned long)pc & 1)
		pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
	for (i = -3; i < 6; i++) {
		if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) {
			pr_cont(" (Bad address in epc)\n");
		pr_cont("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
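/*
 * Illustrative only (sample words, not captured output): show_code()
 * dumps the nine instruction words around EPC, bracketing the faulting
 * one, e.g.
 *
 *	27bdffe0 afbf001c 0c012345 <8c820000> 03e00008 ...
 */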
static void __show_regs(const struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int cause = regs->cp0_cause;
	unsigned int exccode;

	show_regs_print_info(KERN_DEFAULT);

	/*
	 * Saved main processor registers
	 */
	for (i = 0; i < 32; ) {
			pr_cont(" %0*lx", field, 0UL);
		else if (i == 26 || i == 27)
			pr_cont(" %*s", field, "");
			pr_cont(" %0*lx", field, regs->regs[i]);

#ifdef CONFIG_CPU_HAS_SMARTMIPS
	printk("Acx   : %0*lx\n", field, regs->acx);
	printk("Hi    : %0*lx\n", field, regs->hi);
	printk("Lo    : %0*lx\n", field, regs->lo);

	/*
	 * Saved cp0 registers
	 */
	printk("epc   : %0*lx %pS\n", field, regs->cp0_epc,
	       (void *) regs->cp0_epc);
	printk("ra    : %0*lx %pS\n", field, regs->regs[31],
	       (void *) regs->regs[31]);

	printk("Status: %08x ", (uint32_t) regs->cp0_status);

		if (regs->cp0_status & ST0_KUO)
		if (regs->cp0_status & ST0_IEO)
		if (regs->cp0_status & ST0_KUP)
		if (regs->cp0_status & ST0_IEP)
		if (regs->cp0_status & ST0_KUC)
		if (regs->cp0_status & ST0_IEC)
	} else if (cpu_has_4kex) {
		if (regs->cp0_status & ST0_KX)
		if (regs->cp0_status & ST0_SX)
		if (regs->cp0_status & ST0_UX)
		switch (regs->cp0_status & ST0_KSU) {
			pr_cont("SUPERVISOR ");
			pr_cont("BAD_MODE ");
		if (regs->cp0_status & ST0_ERL)
		if (regs->cp0_status & ST0_EXL)
		if (regs->cp0_status & ST0_IE)

	exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
	printk("Cause : %08x (ExcCode %02x)\n", cause, exccode);

	if (1 <= exccode && exccode <= 5)
		printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);

	printk("PrId  : %08x (%s)\n", read_c0_prid(),

/*
 * FIXME: really the generic show_regs should take a const pointer argument.
 */
void show_regs(struct pt_regs *regs)
{
	__show_regs((struct pt_regs *)regs);
void show_registers(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	mm_segment_t old_fs = get_fs();

	printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
	       current->comm, current->pid, current_thread_info(), current,
	       field, current_thread_info()->tp_value);
	if (cpu_has_userlocal) {

		tls = read_c0_userlocal();
		if (tls != current_thread_info()->tp_value)
			printk("*HwTLS: %0*lx\n", field, tls);

	if (!user_mode(regs))
		/* Necessary for getting the correct stack content */
	show_stacktrace(current, regs);
	show_code((unsigned int __user *) regs->cp0_epc);
static DEFINE_RAW_SPINLOCK(die_lock);

void __noreturn die(const char *str, struct pt_regs *regs)
{
	static int die_counter;

	if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_nr,
		       SIGSEGV) == NOTIFY_STOP)

	raw_spin_lock_irq(&die_lock);

	printk("%s[#%d]:\n", str, ++die_counter);
	show_registers(regs);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	raw_spin_unlock_irq(&die_lock);

		panic("Fatal exception in interrupt");

		panic("Fatal exception");

	if (regs && kexec_should_crash(current))
extern struct exception_table_entry __start___dbe_table[];
extern struct exception_table_entry __stop___dbe_table[];

	"	.section	__dbe_table, \"a\"\n"

/* Given an address, look for it in the exception tables. */
static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
{
	const struct exception_table_entry *e;

	e = search_extable(__start___dbe_table, __stop___dbe_table - 1, addr);
	if (!e)
		e = search_module_dbetables(addr);
asmlinkage void do_be(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	const struct exception_table_entry *fixup = NULL;
	int data = regs->cp0_cause & 4;
	int action = MIPS_BE_FATAL;
	enum ctx_state prev_state;

	prev_state = exception_enter();
	/* XXX For now. Fixme, this searches the wrong table ... */
	if (data && !user_mode(regs))
		fixup = search_dbe_tables(exception_epc(regs));

		action = MIPS_BE_FIXUP;

	if (board_be_handler)
		action = board_be_handler(regs, fixup != NULL);
	else
		mips_cm_error_report();

	case MIPS_BE_DISCARD:

			regs->cp0_epc = fixup->nextinsn;

	/*
	 * Assume it would be too dangerous to continue ...
	 */
	printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
	       data ? "Data" : "Instruction",
	       field, regs->cp0_epc, field, regs->regs[31]);
	if (notify_die(DIE_OOPS, "bus error", regs, 0, current->thread.trap_nr,
		       SIGBUS) == NOTIFY_STOP)

	die_if_kernel("Oops", regs);
	force_sig(SIGBUS, current);

	exception_exit(prev_state);
/*
 * ll/sc, rdhwr, sync emulation
 */

#define OPCODE 0xfc000000
#define BASE   0x03e00000
#define RT     0x001f0000
#define OFFSET 0x0000ffff
#define LL     0xc0000000
#define SC     0xe0000000
#define SPEC0  0x00000000
#define SPEC3  0x7c000000
#define RD     0x0000f800
#define FUNC   0x0000003f
#define SYNC   0x0000000f
#define RDHWR  0x0000003b

/* microMIPS definitions */
#define MM_POOL32A_FUNC	0xfc00ffff
#define MM_RDHWR	0x00006b3c
#define MM_RS		0x001f0000
#define MM_RT		0x03e00000
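/*
 * Worked example (illustrative, not from this file): "ll $9, 0($8)"
 * encodes as 0xc1090000, so (insn & OPCODE) == LL, the base register
 * is (insn & BASE) >> 21 == 8, the target is (insn & RT) >> 16 == 9
 * and the sign-extended offset is insn & OFFSET == 0.
 */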
/*
 * The ll_bit is cleared by r*_switch.S
 */
struct task_struct *ll_task;

static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode)
{
	unsigned long value, __user *vaddr;

	/*
	 * analyse the ll instruction that just caused a ri exception
	 * and put the referenced address to addr.
	 */

	/* sign extend offset */
	offset = opcode & OFFSET;

	vaddr = (unsigned long __user *)
		((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);

	if ((unsigned long)vaddr & 3)
	if (get_user(value, vaddr))

	if (ll_task == NULL || ll_task == current) {

	regs->regs[(opcode & RT) >> 16] = value;
static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
{
	unsigned long __user *vaddr;

	/*
	 * analyse the sc instruction that just caused a ri exception
	 * and put the referenced address to addr.
	 */

	/* sign extend offset */
	offset = opcode & OFFSET;

	vaddr = (unsigned long __user *)
		((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
	reg = (opcode & RT) >> 16;

	if ((unsigned long)vaddr & 3)

	if (ll_bit == 0 || ll_task != current) {

	if (put_user(regs->regs[reg], vaddr))
/*
 * ll uses the opcode of lwc0 and sc uses the opcode of swc0. That is both
 * opcodes are supposed to result in coprocessor unusable exceptions if
 * executed on ll/sc-less processors. That's the theory. In practice a
 * few processors such as NEC's VR4100 throw reserved instruction exceptions
 * instead, so we're doing the emulation thing in both exception handlers.
 */
static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & OPCODE) == LL) {
		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
		return simulate_ll(regs, opcode);
	}
	if ((opcode & OPCODE) == SC) {
		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
		return simulate_sc(regs, opcode);
	}

	return -1;			/* Must be something else ... */
}
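/*
 * Illustrative user-level sequence (not from this file) that the
 * emulation above keeps working on ll/sc-less CPUs -- the canonical
 * atomic increment:
 *
 *	1:	ll	t0, 0(a0)
 *		addiu	t0, t0, 1
 *		sc	t0, 0(a0)
 *		beqz	t0, 1b
 */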
/*
 * Simulate trapping 'rdhwr' instructions to provide user accessible
 * registers not implemented in hardware.
 */
static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
{
	struct thread_info *ti = task_thread_info(current);

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
	case MIPS_HWR_CPUNUM:		/* CPU number */
		regs->regs[rt] = smp_processor_id();
	case MIPS_HWR_SYNCISTEP:	/* SYNCI length */
		regs->regs[rt] = min(current_cpu_data.dcache.linesz,
				     current_cpu_data.icache.linesz);
	case MIPS_HWR_CC:		/* Read count register */
		regs->regs[rt] = read_c0_count();
	case MIPS_HWR_CCRES:		/* Count register resolution */
		switch (current_cpu_type()) {
	case MIPS_HWR_ULR:		/* Read UserLocal register */
		regs->regs[rt] = ti->tp_value;

static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
		int rd = (opcode & RD) >> 11;
		int rt = (opcode & RT) >> 16;

		simulate_rdhwr(regs, rd, rt);

static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
		int rd = (opcode & MM_RS) >> 16;
		int rt = (opcode & MM_RT) >> 21;
		simulate_rdhwr(regs, rd, rt);
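/*
 * Illustrative (not from this file): the TLS pointer read that user
 * code typically emits, which traps to the emulation above on cores
 * without RDHWR support or with the relevant HWREna bit clear:
 *
 *	rdhwr	v1, $29		# $29 == MIPS_HWR_ULR (UserLocal)
 */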
static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,

	return -1;			/* Must be something else ... */
}

asmlinkage void do_ov(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	siginfo_t info = {
		.si_code = FPE_INTOVF,
		.si_addr = (void __user *)regs->cp0_epc,
	};

	prev_state = exception_enter();
	die_if_kernel("Integer overflow", regs);

	force_sig_info(SIGFPE, &info, current);
	exception_exit(prev_state);
/*
 * Send SIGFPE according to FCSR Cause bits, which must have already
 * been masked against Enable bits.  This is important as Inexact can
 * happen together with Overflow or Underflow, and `ptrace' can set
 */
void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
		     struct task_struct *tsk)
{
	struct siginfo si = { .si_addr = fault_addr, .si_signo = SIGFPE };

	if (fcr31 & FPU_CSR_INV_X)
		si.si_code = FPE_FLTINV;
	else if (fcr31 & FPU_CSR_DIV_X)
		si.si_code = FPE_FLTDIV;
	else if (fcr31 & FPU_CSR_OVF_X)
		si.si_code = FPE_FLTOVF;
	else if (fcr31 & FPU_CSR_UDF_X)
		si.si_code = FPE_FLTUND;
	else if (fcr31 & FPU_CSR_INE_X)
		si.si_code = FPE_FLTRES;
	else
		si.si_code = __SI_FAULT;
	force_sig_info(SIGFPE, &si, tsk);
int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
{
	struct siginfo si = { 0 };
	struct vm_area_struct *vma;

		force_fcr31_sig(fcr31, fault_addr, current);

		si.si_addr = fault_addr;
		si.si_code = BUS_ADRERR;
		force_sig_info(sig, &si, current);

		si.si_addr = fault_addr;

		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, (unsigned long)fault_addr);
		if (vma && (vma->vm_start <= (unsigned long)fault_addr))
			si.si_code = SEGV_ACCERR;
		else
			si.si_code = SEGV_MAPERR;
		up_read(&current->mm->mmap_sem);
		force_sig_info(sig, &si, current);

		force_sig(sig, current);
static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
		       unsigned long old_epc, unsigned long old_ra)
{
	union mips_instruction inst = { .word = opcode };
	void __user *fault_addr;

	/* If it's obviously not an FP instruction, skip it */
	switch (inst.i_format.opcode) {

	/*
	 * do_ri skipped over the instruction via compute_return_epc, undo
	 * that for the FPU emulator.
	 */
	regs->cp0_epc = old_epc;
	regs->regs[31] = old_ra;

	/* Save the FP context to struct thread_struct */

	/* Run the emulator */
	sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,

	/*
	 * We can't allow the emulated instruction to leave any
	 * enabled Cause bits set in $fcr31.
	 */
	fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
	current->thread.fpu.fcr31 &= ~fcr31;

	/* Restore the hardware register state */

	/* Send a signal if required. */
	process_fpemu_return(sig, fault_addr, fcr31);
/*
 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
 */
asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
{
	enum ctx_state prev_state;
	void __user *fault_addr;

	prev_state = exception_enter();
	if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
		       SIGFPE) == NOTIFY_STOP)

	/* Clear FCSR.Cause before enabling interrupts */
	write_32bit_cp1_register(CP1_STATUS, fcr31 & ~mask_fcr31_x(fcr31));

	die_if_kernel("FP exception in kernel code", regs);

	if (fcr31 & FPU_CSR_UNI_X) {
		/*
		 * Unimplemented operation exception.  If we've got the full
		 * software emulator on-board, let's use it...
		 *
		 * Force FPU to dump state into task/thread context.  We're
		 * moving a lot of data here for what is probably a single
		 * instruction, but the alternative is to pre-decode the FP
		 * register operands before invoking the emulator, which seems
		 * a bit extreme for what should be an infrequent event.
		 */
		/* Ensure 'resume' does not overwrite saved fp context again. */

		/* Run the emulator */
		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,

		/*
		 * We can't allow the emulated instruction to leave any
		 * enabled Cause bits set in $fcr31.
		 */
		fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
		current->thread.fpu.fcr31 &= ~fcr31;

		/* Restore the hardware register state */
		own_fpu(1);	/* Using the FPU again. */

		fault_addr = (void __user *) regs->cp0_epc;

	/* Send a signal if required. */
	process_fpemu_return(sig, fault_addr, fcr31);

	exception_exit(prev_state);
void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code,
	siginfo_t info = { 0 };

#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
	if (kgdb_ll_trap(DIE_TRAP, str, regs, code, current->thread.trap_nr,
			 SIGTRAP) == NOTIFY_STOP)
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */

	if (notify_die(DIE_TRAP, str, regs, code, current->thread.trap_nr,
		       SIGTRAP) == NOTIFY_STOP)

	/*
	 * A short test says that IRIX 5.3 sends SIGTRAP for all trap
	 * insns, even for trap and break codes that indicate arithmetic
	 * failures.  Weird ...
	 * But should we continue the brokenness???  --macro
	 */
		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
		die_if_kernel(b, regs);
		if (code == BRK_DIVZERO)
			info.si_code = FPE_INTDIV;
		else
			info.si_code = FPE_INTOVF;
		info.si_signo = SIGFPE;
		info.si_addr = (void __user *) regs->cp0_epc;
		force_sig_info(SIGFPE, &info, current);

		die_if_kernel("Kernel bug detected", regs);
		force_sig(SIGTRAP, current);

		/*
		 * This breakpoint code is used by the FPU emulator to retake
		 * control of the CPU after executing the instruction from the
		 * delay slot of an emulated branch.
		 *
		 * Terminate if exception was recognized as a delay slot return
		 * otherwise handle as normal.
		 */
		if (do_dsemulret(regs))

		die_if_kernel("Math emu break/trap", regs);
		force_sig(SIGTRAP, current);

		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
		die_if_kernel(b, regs);

			info.si_signo = SIGTRAP;
			info.si_code = si_code;
			force_sig_info(SIGTRAP, &info, current);

			force_sig(SIGTRAP, current);
asmlinkage void do_bp(struct pt_regs *regs)
{
	unsigned long epc = msk_isa16_mode(exception_epc(regs));
	unsigned int opcode, bcode;
	enum ctx_state prev_state;

	if (!user_mode(regs))

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
	if (get_isa16_mode(regs->cp0_epc)) {

		if (__get_user(instr[0], (u16 __user *)epc))

		if (!cpu_has_mmips) {
			bcode = (instr[0] >> 5) & 0x3f;
		} else if (mm_insn_16bit(instr[0])) {
			/* 16-bit microMIPS BREAK */
			bcode = instr[0] & 0xf;
		} else {
			/* 32-bit microMIPS BREAK */
			if (__get_user(instr[1], (u16 __user *)(epc + 2)))
			opcode = (instr[0] << 16) | instr[1];
			bcode = (opcode >> 6) & ((1 << 20) - 1);
		}
	} else {
		if (__get_user(opcode, (unsigned int __user *)epc))
		bcode = (opcode >> 6) & ((1 << 20) - 1);
	}

	/*
	 * There is an ancient bug in the MIPS assemblers that the break
	 * code starts at bit 16 instead of bit 6 in the opcode.
	 * Gas is bug-compatible, but not always, grrr...
	 * We handle both cases with a simple heuristic.  --macro
	 */
	if (bcode >= (1 << 10))
		bcode = ((bcode & ((1 << 10) - 1)) << 10) | (bcode >> 10);
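	/*
	 * Worked example (illustrative): "break 7" misassembled with the
	 * code at bit 16 yields bcode == 7 << 10 == 0x1c00; since that is
	 * >= 1 << 10, the halves are swapped and bcode == 7 is recovered.
	 */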
	/*
	 * notify the kprobe handlers, if instruction is likely to
	 */
		if (notify_die(DIE_UPROBE, "uprobe", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)

	case BRK_UPROBE_XOL:
		if (notify_die(DIE_UPROBE_XOL, "uprobe_xol", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)

		if (notify_die(DIE_BREAK, "debug", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)

	case BRK_KPROBE_SSTEPBP:
		if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)

	do_trap_or_bp(regs, bcode, TRAP_BRKPT, "Break");

	exception_exit(prev_state);

	force_sig(SIGSEGV, current);
asmlinkage void do_tr(struct pt_regs *regs)
{
	u32 opcode, tcode = 0;
	enum ctx_state prev_state;
	unsigned long epc = msk_isa16_mode(exception_epc(regs));

	if (!user_mode(regs))

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
	if (get_isa16_mode(regs->cp0_epc)) {
		if (__get_user(instr[0], (u16 __user *)(epc + 0)) ||
		    __get_user(instr[1], (u16 __user *)(epc + 2)))
		opcode = (instr[0] << 16) | instr[1];
		/* Immediate versions don't provide a code. */
		if (!(opcode & OPCODE))
			tcode = (opcode >> 12) & ((1 << 4) - 1);
	} else {
		if (__get_user(opcode, (u32 __user *)epc))
		/* Immediate versions don't provide a code. */
		if (!(opcode & OPCODE))
			tcode = (opcode >> 6) & ((1 << 10) - 1);
	}

	do_trap_or_bp(regs, tcode, 0, "Trap");

	exception_exit(prev_state);

	force_sig(SIGSEGV, current);
asmlinkage void do_ri(struct pt_regs *regs)
{
	unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
	unsigned long old_epc = regs->cp0_epc;
	unsigned long old31 = regs->regs[31];
	enum ctx_state prev_state;
	unsigned int opcode = 0;

	/*
	 * Avoid any kernel code.  Just emulate the R2 instruction
	 * as quickly as possible.
	 */
	if (mipsr2_emulation && cpu_has_mips_r6 &&
	    likely(user_mode(regs)) &&
	    likely(get_user(opcode, epc) >= 0)) {
		unsigned long fcr31 = 0;

		status = mipsr2_decoder(regs, opcode, &fcr31);

			task_thread_info(current)->r2_emul_return = 1;

			process_fpemu_return(status,
					     &current->thread.cp0_baduaddr,
			task_thread_info(current)->r2_emul_return = 1;

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;

	if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
		       SIGILL) == NOTIFY_STOP)

	die_if_kernel("Reserved instruction in kernel code", regs);

	if (unlikely(compute_return_epc(regs) < 0))

	if (!get_isa16_mode(regs->cp0_epc)) {
		if (unlikely(get_user(opcode, epc) < 0))

		if (!cpu_has_llsc && status < 0)
			status = simulate_llsc(regs, opcode);

			status = simulate_rdhwr_normal(regs, opcode);

			status = simulate_sync(regs, opcode);

			status = simulate_fp(regs, opcode, old_epc, old31);
	} else if (cpu_has_mmips) {
		unsigned short mmop[2] = { 0 };

		if (unlikely(get_user(mmop[0], (u16 __user *)epc + 0) < 0))
		if (unlikely(get_user(mmop[1], (u16 __user *)epc + 1) < 0))
		opcode = (opcode << 16) | mmop[1];

			status = simulate_rdhwr_mm(regs, opcode);

	if (unlikely(status > 0)) {
		regs->cp0_epc = old_epc;	/* Undo skip-over. */
		regs->regs[31] = old31;
		force_sig(status, current);
	}

	exception_exit(prev_state);
/*
 * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've
 * emulated more than some threshold number of instructions, force migration to
 * a "CPU" that has FP support.
 */
static void mt_ase_fp_affinity(void)
{
#ifdef CONFIG_MIPS_MT_FPAFF
	if (mt_fpemul_threshold > 0 &&
	    ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
		/*
		 * If there's no FPU present, or if the application has already
		 * restricted the allowed set to exclude any CPUs with FPUs,
		 * we'll skip the procedure.
		 */
		if (cpumask_intersects(&current->cpus_allowed, &mt_fpu_cpumask)) {

			current->thread.user_cpus_allowed
				= current->cpus_allowed;
			cpumask_and(&tmask, &current->cpus_allowed,
			set_cpus_allowed_ptr(current, &tmask);
			set_thread_flag(TIF_FPUBOUND);
#endif /* CONFIG_MIPS_MT_FPAFF */
/*
 * No lock; only written during early bootup by CPU 0.
 */
static RAW_NOTIFIER_HEAD(cu2_chain);

int __ref register_cu2_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cu2_chain, nb);
}

int cu2_notifier_call_chain(unsigned long val, void *v)
{
	return raw_notifier_call_chain(&cu2_chain, val, v);
}

static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
	struct pt_regs *regs = data;

	die_if_kernel("COP2: Unhandled kernel unaligned access or invalid "
		      "instruction", regs);
	force_sig(SIGILL, current);
static int wait_on_fp_mode_switch(atomic_t *p)
{
	/*
	 * The FP mode for this task is currently being switched. That may
	 * involve modifications to the format of this task's FP context which
	 * make it unsafe to proceed with execution for the moment. Instead,
	 * schedule some other task.
	 */

static int enable_restore_fp_context(int msa)
{
	int err, was_fpu_owner, prior_msa;

	/*
	 * If an FP mode switch is currently underway, wait for it to
	 * complete before proceeding.
	 */
	wait_on_atomic_t(&current->mm->context.fp_mode_switching,
			 wait_on_fp_mode_switch, TASK_KILLABLE);

		/* First time FP context user. */

			set_thread_flag(TIF_USEDMSA);
			set_thread_flag(TIF_MSA_CTX_LIVE);

	/*
	 * This task has formerly used the FP context.
	 *
	 * If this thread has no live MSA vector context then we can simply
	 * restore the scalar FP context. If it has live MSA vector context
	 * (that is, it has or may have used MSA since last performing a
	 * function call) then we'll need to restore the vector context. This
	 * applies even if we're currently only executing a scalar FP
	 * instruction. This is because if we were to later execute an MSA
	 * instruction then we'd either have to:
	 *
	 *  - Restore the vector context & clobber any registers modified by
	 *    scalar FP instructions between now & then.
	 *
	 *  - Not restore the vector context & lose the most significant bits
	 *    of all vector registers.
	 *
	 * Neither of those options is acceptable. We cannot restore the least
	 * significant bits of the registers now & only restore the most
	 * significant bits later because the most significant bits of any
	 * vector registers whose aliased FP register is modified now will have
	 * been zeroed. We'd have no way to know that when restoring the vector
	 * context & thus may load an outdated value for the most significant
	 * bits of a vector register.
	 */
	if (!msa && !thread_msa_context_live())

	/*
	 * This task is using or has previously used MSA. Thus we require
	 * that Status.FR == 1.
	 */
	was_fpu_owner = is_fpu_owner();
	err = own_fpu_inatomic(0);

	write_msa_csr(current->thread.fpu.msacsr);
	set_thread_flag(TIF_USEDMSA);

	/*
	 * If this is the first time that the task is using MSA and it has
	 * previously used scalar FP in this time slice then we already have
	 * FP context which we shouldn't clobber. We do however need to clear
	 * the upper 64b of each vector register so that this task has no
	 * opportunity to see data left behind by another.
	 */
	prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE);
	if (!prior_msa && was_fpu_owner) {

		/*
		 * Restore the least significant 64b of each vector register
		 * from the existing scalar FP context.
		 */
		_restore_fp(current);

		/*
		 * The task has not formerly used MSA, so clear the upper 64b
		 * of each vector register such that it cannot see data left
		 * behind by another task.
		 */

		/* We need to restore the vector context. */
		restore_msa(current);

		/* Restore the scalar FP control & status register */
		write_32bit_cp1_register(CP1_STATUS,
					 current->thread.fpu.fcr31);
asmlinkage void do_cpu(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	unsigned int __user *epc;
	unsigned long old_epc, old31;
	void __user *fault_addr;
	unsigned int opcode;
	unsigned long fcr31;

	prev_state = exception_enter();
	cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;

		die_if_kernel("do_cpu invoked from kernel context!", regs);

		epc = (unsigned int __user *)exception_epc(regs);
		old_epc = regs->cp0_epc;
		old31 = regs->regs[31];

		if (unlikely(compute_return_epc(regs) < 0))

		if (!get_isa16_mode(regs->cp0_epc)) {
			if (unlikely(get_user(opcode, epc) < 0))

			if (!cpu_has_llsc && status < 0)
				status = simulate_llsc(regs, opcode);

		if (unlikely(status > 0)) {
			regs->cp0_epc = old_epc;	/* Undo skip-over. */
			regs->regs[31] = old31;
			force_sig(status, current);
		}

		/*
		 * The COP3 opcode space and consequently the CP0.Status.CU3
		 * bit and the CP0.Cause.CE=3 encoding have been removed as
		 * of the MIPS III ISA.  From the MIPS IV and MIPS32r2 ISAs
		 * up the space has been reused for COP1X instructions, that
		 * are enabled by the CP0.Status.CU1 bit and consequently
		 * use the CP0.Cause.CE=1 encoding for Coprocessor Unusable
		 * exceptions.  Some FPU-less processors that implement one
		 * of these ISAs however use this code erroneously for COP1X
		 * instructions.  Therefore we redirect this trap to the FP
		 */
		if (raw_cpu_has_fpu || !cpu_has_mips_4_5_64_r2_r6) {
			force_sig(SIGILL, current);

		err = enable_restore_fp_context(0);

		if (raw_cpu_has_fpu && !err)

		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,

		/*
		 * We can't allow the emulated instruction to leave
		 * any enabled Cause bits set in $fcr31.
		 */
		fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
		current->thread.fpu.fcr31 &= ~fcr31;

		/* Send a signal if required. */
		if (!process_fpemu_return(sig, fault_addr, fcr31) && !err)
			mt_ase_fp_affinity();

		raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);

	exception_exit(prev_state);
asmlinkage void do_msa_fpe(struct pt_regs *regs, unsigned int msacsr)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
	if (notify_die(DIE_MSAFP, "MSA FP exception", regs, 0,
		       current->thread.trap_nr, SIGFPE) == NOTIFY_STOP)

	/* Clear MSACSR.Cause before enabling interrupts */
	write_msa_csr(msacsr & ~MSA_CSR_CAUSEF);

	die_if_kernel("do_msa_fpe invoked from kernel context!", regs);
	force_sig(SIGFPE, current);

	exception_exit(prev_state);
asmlinkage void do_msa(struct pt_regs *regs)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();

	if (!cpu_has_msa || test_thread_flag(TIF_32BIT_FPREGS)) {
		force_sig(SIGILL, current);

	die_if_kernel("do_msa invoked from kernel context!", regs);

	err = enable_restore_fp_context(1);
		force_sig(SIGILL, current);

	exception_exit(prev_state);

asmlinkage void do_mdmx(struct pt_regs *regs)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	force_sig(SIGILL, current);
	exception_exit(prev_state);
/*
 * Called with interrupts disabled.
 */
asmlinkage void do_watch(struct pt_regs *regs)
{
	siginfo_t info = { .si_signo = SIGTRAP, .si_code = TRAP_HWBKPT };
	enum ctx_state prev_state;

	prev_state = exception_enter();
	/*
	 * Clear WP (bit 22) bit of cause register so we don't loop
	 */
	clear_c0_cause(CAUSEF_WP);

	/*
	 * If the current thread has the watch registers loaded, save
	 * their values and send SIGTRAP.  Otherwise another thread
	 * left the registers set, clear them and continue.
	 */
	if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
		mips_read_watch_registers();
		force_sig_info(SIGTRAP, &info, current);
	} else {
		mips_clear_watch_registers();

	exception_exit(prev_state);
asmlinkage void do_mcheck(struct pt_regs *regs)
{
	int multi_match = regs->cp0_status & ST0_TS;
	enum ctx_state prev_state;
	mm_segment_t old_fs = get_fs();

	prev_state = exception_enter();

	if (!user_mode(regs))

	show_code((unsigned int __user *) regs->cp0_epc);

	/*
	 * Some chips may have other causes of machine check (e.g. SB1
	 */
	panic("Caught Machine Check exception - %scaused by multiple "
	      "matching entries in the TLB.",
	      (multi_match) ? "" : "not ");
asmlinkage void do_mt(struct pt_regs *regs)
{
	subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
			>> VPECONTROL_EXCPT_SHIFT;
		printk(KERN_DEBUG "Thread Underflow\n");
		printk(KERN_DEBUG "Thread Overflow\n");
		printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
		printk(KERN_DEBUG "Gating Storage Exception\n");
		printk(KERN_DEBUG "YIELD Scheduler Exception\n");
		printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
		printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",

	die_if_kernel("MIPS MT Thread exception in kernel", regs);

	force_sig(SIGILL, current);
asmlinkage void do_dsp(struct pt_regs *regs)
{
		panic("Unexpected DSP exception");

	force_sig(SIGILL, current);

asmlinkage void do_reserved(struct pt_regs *regs)
{
	/*
	 * Game over - no way to handle this if it ever occurs.  Most probably
	 * caused by a new unknown cpu type or after another deadly
	 * hard/software error.
	 */
	panic("Caught reserved exception %ld - should not happen.",
	      (regs->cp0_cause & 0x7f) >> 2);
static int __initdata l1parity = 1;
static int __init nol1parity(char *s)
__setup("nol1par", nol1parity);

static int __initdata l2parity = 1;
static int __init nol2parity(char *s)
__setup("nol2par", nol2parity);
/*
 * Some MIPS CPUs can enable/disable cache parity detection, but they
 * do it in different ways.
 */
static inline void parity_protection_init(void)
{
#define ERRCTL_PE	0x80000000
#define ERRCTL_L2P	0x00800000

	if (mips_cm_revision() >= CM_REV_CM3) {
		ulong gcr_ectl, cp0_ectl;

		/*
		 * With CM3 systems we need to ensure that the L1 & L2
		 * parity enables are set to the same value, since this
		 * is presumed by the hardware engineers.
		 *
		 * If the user disabled either of L1 or L2 ECC checking,
		 */
		l1parity &= l2parity;
		l2parity &= l1parity;

		/* Probe L1 ECC support */
		cp0_ectl = read_c0_ecc();
		write_c0_ecc(cp0_ectl | ERRCTL_PE);
		back_to_back_c0_hazard();
		cp0_ectl = read_c0_ecc();

		/* Probe L2 ECC support */
		gcr_ectl = read_gcr_err_control();

		if (!(gcr_ectl & CM_GCR_ERR_CONTROL_L2_ECC_SUPPORT_MSK) ||
		    !(cp0_ectl & ERRCTL_PE)) {
			/*
			 * One of L1 or L2 ECC checking isn't supported,
			 * so we cannot enable either.
			 */
			l1parity = l2parity = 0;
		}

		/* Configure L1 ECC checking */
		if (l1parity)
			cp0_ectl |= ERRCTL_PE;
		else
			cp0_ectl &= ~ERRCTL_PE;
		write_c0_ecc(cp0_ectl);
		back_to_back_c0_hazard();
		WARN_ON(!!(read_c0_ecc() & ERRCTL_PE) != l1parity);

		/* Configure L2 ECC checking */
		if (l2parity)
			gcr_ectl |= CM_GCR_ERR_CONTROL_L2_ECC_EN_MSK;
		else
			gcr_ectl &= ~CM_GCR_ERR_CONTROL_L2_ECC_EN_MSK;
		write_gcr_err_control(gcr_ectl);
		gcr_ectl = read_gcr_err_control();
		gcr_ectl &= CM_GCR_ERR_CONTROL_L2_ECC_EN_MSK;
		WARN_ON(!!gcr_ectl != l2parity);

		pr_info("Cache parity protection %sabled\n",
			l1parity ? "en" : "dis");
	}

	switch (current_cpu_type()) {
	case CPU_INTERAPTIV:
	case CPU_QEMU_GENERIC:
		unsigned long errctl;
		unsigned int l1parity_present, l2parity_present;

		errctl = read_c0_ecc();
		errctl &= ~(ERRCTL_PE|ERRCTL_L2P);

		/* probe L1 parity support */
		write_c0_ecc(errctl | ERRCTL_PE);
		back_to_back_c0_hazard();
		l1parity_present = (read_c0_ecc() & ERRCTL_PE);

		/* probe L2 parity support */
		write_c0_ecc(errctl|ERRCTL_L2P);
		back_to_back_c0_hazard();
		l2parity_present = (read_c0_ecc() & ERRCTL_L2P);

		if (l1parity_present && l2parity_present) {
			errctl |= ERRCTL_PE;
			if (l1parity ^ l2parity)
				errctl |= ERRCTL_L2P;
		} else if (l1parity_present) {
			errctl |= ERRCTL_PE;
		} else if (l2parity_present) {
			errctl |= ERRCTL_L2P;
		} else {
			/* No parity available */
		}

		printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);

		write_c0_ecc(errctl);
		back_to_back_c0_hazard();
		errctl = read_c0_ecc();
		printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);

		if (l1parity_present)
			printk(KERN_INFO "Cache parity protection %sabled\n",
			       (errctl & ERRCTL_PE) ? "en" : "dis");

		if (l2parity_present) {
			if (l1parity_present && l1parity)
				errctl ^= ERRCTL_L2P;
			printk(KERN_INFO "L2 cache parity protection %sabled\n",
			       (errctl & ERRCTL_L2P) ? "en" : "dis");
		}

		write_c0_ecc(0x80000000);
		back_to_back_c0_hazard();
		/* Set the PE bit (bit 31) in the c0_errctl register. */
		printk(KERN_INFO "Cache parity protection %sabled\n",
		       (read_c0_ecc() & 0x80000000) ? "en" : "dis");

		/* Clear the DE bit (bit 16) in the c0_status register. */
		printk(KERN_INFO "Enable cache parity protection for "
		       "MIPS 20KC/25KF CPUs.\n");
		clear_c0_status(ST0_DE);
asmlinkage void cache_parity_error(void)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int reg_val;

	/* For the moment, report the problem and hang. */
	printk("Cache error exception:\n");
	printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
	reg_val = read_c0_cacheerr();
	printk("c0_cacheerr == %08x\n", reg_val);

	printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
	       reg_val & (1<<30) ? "secondary" : "primary",
	       reg_val & (1<<31) ? "data" : "insn");
	if ((cpu_has_mips_r2_r6) &&
	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
		pr_err("Error bits: %s%s%s%s%s%s%s%s\n",
			reg_val & (1<<29) ? "ED " : "",
			reg_val & (1<<28) ? "ET " : "",
			reg_val & (1<<27) ? "ES " : "",
			reg_val & (1<<26) ? "EE " : "",
			reg_val & (1<<25) ? "EB " : "",
			reg_val & (1<<24) ? "EI " : "",
			reg_val & (1<<23) ? "E1 " : "",
			reg_val & (1<<22) ? "E0 " : "");
	} else {
		pr_err("Error bits: %s%s%s%s%s%s%s\n",
			reg_val & (1<<29) ? "ED " : "",
			reg_val & (1<<28) ? "ET " : "",
			reg_val & (1<<26) ? "EE " : "",
			reg_val & (1<<25) ? "EB " : "",
			reg_val & (1<<24) ? "EI " : "",
			reg_val & (1<<23) ? "E1 " : "",
			reg_val & (1<<22) ? "E0 " : "");
	}
	printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));

#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
	if (reg_val & (1<<22))
		printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());

	if (reg_val & (1<<23))
		printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
#endif

	panic("Can't handle the cache error!");
asmlinkage void do_ftlb(void)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int reg_val;

	/* For the moment, report the problem and hang. */
	if ((cpu_has_mips_r2_r6) &&
	    (((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS) ||
	     ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_LOONGSON))) {
		pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
		pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
		reg_val = read_c0_cacheerr();
		pr_err("c0_cacheerr == %08x\n", reg_val);

		if ((reg_val & 0xc0000000) == 0xc0000000) {
			pr_err("Decoded c0_cacheerr: FTLB parity error\n");
		} else {
			pr_err("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
			       reg_val & (1<<30) ? "secondary" : "primary",
			       reg_val & (1<<31) ? "data" : "insn");
		}
	} else {
		pr_err("FTLB error exception\n");
	}

	/* Just print the cacheerr bits for now */
	cache_parity_error();
/*
 * SDBBP EJTAG debug exception handler.
 * We skip the instruction and return to the next instruction.
 */
void ejtag_exception_handler(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned long depc, old_epc, old_ra;

	printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
	depc = read_c0_depc();
	debug = read_c0_debug();
	printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
	if (debug & 0x80000000) {
		/*
		 * In branch delay slot.
		 * We cheat a little bit here and use EPC to calculate the
		 * debug return address (DEPC). EPC is restored after the
		 */
		old_epc = regs->cp0_epc;
		old_ra = regs->regs[31];
		regs->cp0_epc = depc;
		compute_return_epc(regs);
		depc = regs->cp0_epc;
		regs->cp0_epc = old_epc;
		regs->regs[31] = old_ra;

	write_c0_depc(depc);

	printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n");
	write_c0_debug(debug | 0x100);
/*
 * NMI exception handler.
 * No lock; only written during early bootup by CPU 0.
 */
static RAW_NOTIFIER_HEAD(nmi_chain);

int register_nmi_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&nmi_chain, nb);
}

void __noreturn nmi_exception_handler(struct pt_regs *regs)
{
	raw_notifier_call_chain(&nmi_chain, 0, regs);

	snprintf(str, 100, "CPU%d NMI taken, CP0_EPC=%lx\n",
		 smp_processor_id(), regs->cp0_epc);
	regs->cp0_epc = read_c0_errorepc();
#define VECTORSPACING 0x100	/* for EI/VI mode */

unsigned long ebase;
EXPORT_SYMBOL_GPL(ebase);
unsigned long exception_handlers[32];
unsigned long vi_handlers[64];
void __init *set_except_vector(int n, void *addr)
{
	unsigned long handler = (unsigned long) addr;
	unsigned long old_handler;

#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * Only the TLB handlers are cache aligned with an even
	 * address. All other handlers are on an odd address and
	 * require no modification. Otherwise, MIPS32 mode will
	 * be entered when handling any TLB exceptions. That
	 * would be bad...since we must stay in microMIPS mode.
	 */
	if (!(handler & 0x1))

	old_handler = xchg(&exception_handlers[n], handler);

	if (n == 0 && cpu_has_divec) {
#ifdef CONFIG_CPU_MICROMIPS
		unsigned long jump_mask = ~((1 << 27) - 1);
#else
		unsigned long jump_mask = ~((1 << 28) - 1);
#endif
		u32 *buf = (u32 *)(ebase + 0x200);
		unsigned int k0 = 26;
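		/*
		 * Patch a direct jump if the handler lies in the same
		 * j-reachable region (256MB, or 128MB for microMIPS) as
		 * the vector at ebase + 0x200; otherwise load the full
		 * address into k0 and jump through the register.
		 */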
		if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
			uasm_i_j(&buf, handler & ~jump_mask);
		} else {
			UASM_i_LA(&buf, k0, handler);
			uasm_i_jr(&buf, k0);
		}
		local_flush_icache_range(ebase + 0x200, (unsigned long)buf);
	}
	return (void *)old_handler;
static void do_default_vi(void)
{
	show_regs(get_irq_regs());
	panic("Caught unexpected vectored interrupt.");
static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
{
	unsigned long handler;
	unsigned long old_handler = vi_handlers[n];
	int srssets = current_cpu_data.srsets;

	BUG_ON(!cpu_has_veic && !cpu_has_vint);

		handler = (unsigned long) do_default_vi;

		handler = (unsigned long) addr;
	vi_handlers[n] = handler;

	b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);

		panic("Shadow register set %d not supported", srs);

		if (board_bind_eic_interrupt)
			board_bind_eic_interrupt(n, srs);
	} else if (cpu_has_vint) {
		/* SRSMap is only defined if shadow sets are implemented */
			change_c0_srsmap(0xf << n*4, srs << n*4);

		/*
		 * If no shadow set is selected then use the default handler
		 * that does normal register saving and standard interrupt exit
		 */
		extern const u8 except_vec_vi[], except_vec_vi_lui[];
		extern const u8 except_vec_vi_ori[], except_vec_vi_end[];
		extern const u8 rollback_except_vec_vi[];
		const u8 *vec_start = using_rollback_handler() ?
			rollback_except_vec_vi : except_vec_vi;
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
		const int lui_offset = except_vec_vi_lui - vec_start + 2;
		const int ori_offset = except_vec_vi_ori - vec_start + 2;
#else
		const int lui_offset = except_vec_vi_lui - vec_start;
		const int ori_offset = except_vec_vi_ori - vec_start;
#endif
		const int handler_len = except_vec_vi_end - vec_start;
		if (handler_len > VECTORSPACING) {
			/*
			 * Sigh... panicking won't help as the console
			 * is probably not configured :(
			 */
			panic("VECTORSPACING too small");
		set_handler(((unsigned long)b - ebase), vec_start,
#ifdef CONFIG_CPU_MICROMIPS

		h = (u16 *)(b + lui_offset);
		*h = (handler >> 16) & 0xffff;
		h = (u16 *)(b + ori_offset);
		*h = (handler & 0xffff);
		local_flush_icache_range((unsigned long)b,
					 (unsigned long)(b+handler_len));

		/*
		 * In other cases jump directly to the interrupt handler. It
		 * is the handler's responsibility to save registers if required
		 * (eg hi/lo) and return from the exception using "eret".
		 */

#ifdef CONFIG_CPU_MICROMIPS
		insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1);
#else
		insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2);
#endif
		h[0] = (insn >> 16) & 0xffff;
		h[1] = insn & 0xffff;

		local_flush_icache_range((unsigned long)b,
					 (unsigned long)(b+8));
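		/*
		 * Worked example (illustrative): for a handler at
		 * 0x80400000 the synthesized word is 0x08000000 |
		 * (0x00400000 >> 2) == 0x08100000, i.e. "j 0x80400000"
		 * within the current 256MB segment.
		 */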
	return (void *)old_handler;
}

void *set_vi_handler(int n, vi_handler_t addr)
{
	return set_vi_srs_handler(n, addr, 0);
}
extern void tlb_init(void);

int cp0_compare_irq;
EXPORT_SYMBOL_GPL(cp0_compare_irq);
int cp0_compare_irq_shift;

/*
 * Performance counter IRQ or -1 if shared with timer
 */
int cp0_perfcount_irq;
EXPORT_SYMBOL_GPL(cp0_perfcount_irq);

/*
 * Fast debug channel IRQ or -1 if not present
 */
EXPORT_SYMBOL_GPL(cp0_fdc_irq);
static int __init ulri_disable(char *s)
{
	pr_info("Disabling ulri\n");

__setup("noulri", ulri_disable);
/* configure STATUS register */
static void configure_status(void)
{
	/*
	 * Disable coprocessors and select 32-bit or 64-bit addressing
	 * and the 16/32 or 32/32 FPR register model.  Reset the BEV
	 * flag that some firmware may have left set and the TS bit (for
	 * IP27).  Set XX for ISA IV code to work.
	 */
	unsigned int status_set = ST0_CU0;

	status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;

	if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV)
		status_set |= ST0_XX;

		status_set |= ST0_MX;

	change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
	back_to_back_c0_hazard();
unsigned int hwrena;
EXPORT_SYMBOL_GPL(hwrena);

/* configure HWRENA register */
static void configure_hwrena(void)
{
	hwrena = cpu_hwrena_impl_bits;

	if (cpu_has_mips_r2_r6)
		hwrena |= MIPS_HWRENA_CPUNUM |
			  MIPS_HWRENA_SYNCISTEP |

	if (!noulri && cpu_has_userlocal)
		hwrena |= MIPS_HWRENA_ULR;

	write_c0_hwrena(hwrena);
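/*
 * Note: every HWRENA bit set here lets user mode execute the matching
 * RDHWR read directly in hardware; where a bit stays clear, the access
 * traps and falls back to simulate_rdhwr() above.
 */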
static void configure_exception_vector(void)
{
	if (cpu_has_veic || cpu_has_vint) {
		unsigned long sr = set_c0_status(ST0_BEV);

		/* If available, use WG to set top bits of EBASE */
		if (cpu_has_ebase_wg) {
			write_c0_ebase_64(ebase | MIPS_EBASE_WG);
			write_c0_ebase(ebase | MIPS_EBASE_WG);
		}
		write_c0_ebase(ebase);
		write_c0_status(sr);
		/* Setting vector spacing enables EI/VI mode */
		change_c0_intctl(0x3e0, VECTORSPACING);
	}
	if (cpu_has_divec) {
		if (cpu_has_mipsmt) {
			unsigned int vpflags = dvpe();
			set_c0_cause(CAUSEF_IV);
		} else
			set_c0_cause(CAUSEF_IV);
void per_cpu_trap_init(bool is_boot_cpu)
{
	unsigned int cpu = smp_processor_id();

	configure_exception_vector();

	/*
	 * Before R2 both interrupt numbers were fixed to 7, so on R2 only:
	 *
	 *  o read IntCtl.IPTI to determine the timer interrupt
	 *  o read IntCtl.IPPCI to determine the performance counter interrupt
	 *  o read IntCtl.IPFDC to determine the fast debug channel interrupt
	 */
	if (cpu_has_mips_r2_r6) {
		/*
		 * We shouldn't trust a secondary core has a sane EBASE register
		 * so use the one calculated by the boot CPU.
		 */

		/* If available, use WG to set top bits of EBASE */
		if (cpu_has_ebase_wg) {
			write_c0_ebase_64(ebase | MIPS_EBASE_WG);
			write_c0_ebase(ebase | MIPS_EBASE_WG);
		}
		write_c0_ebase(ebase);

		cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
		cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
		cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
		cp0_fdc_irq = (read_c0_intctl() >> INTCTLB_IPFDC) & 7;
	} else {
		cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
		cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ;
		cp0_perfcount_irq = -1;
	}

	if (!cpu_data[cpu].asid_cache)
		cpu_data[cpu].asid_cache = asid_first_version(cpu);

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);

	/* Boot CPU's cache setup in setup_arch(). */

	TLBMISS_HANDLER_SETUP();
/* Install CPU exception handler */
void set_handler(unsigned long offset, const void *addr, unsigned long size)
{
#ifdef CONFIG_CPU_MICROMIPS
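	/*
	 * microMIPS handler symbols carry the ISA-mode bit (bit 0), so
	 * subtract 1 to get the actual start of the handler code.
	 */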
	memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
#else
	memcpy((void *)(ebase + offset), addr, size);
#endif
	local_flush_icache_range(ebase + offset, ebase + offset + size);
static char panic_null_cerr[] =
	"Trying to set NULL cache error exception handler";

/*
 * Install uncached CPU exception handler.
 * This is suitable only for the cache error exception which is the only
 * exception handler that is being run uncached.
 */
void set_uncached_handler(unsigned long offset, void *addr,
	unsigned long uncached_ebase = CKSEG1ADDR(ebase);

		panic(panic_null_cerr);

	memcpy((void *)(uncached_ebase + offset), addr, size);

static int __initdata rdhwr_noopt;
static int __init set_rdhwr_noopt(char *str)
__setup("rdhwr_noopt", set_rdhwr_noopt);
void __init trap_init(void)
{
	extern char except_vec3_generic;
	extern char except_vec4;
	extern char except_vec3_r4000;

	if (cpu_has_veic || cpu_has_vint) {
		unsigned long size = 0x200 + VECTORSPACING*64;
		phys_addr_t ebase_pa;

		ebase = (unsigned long)
			__alloc_bootmem(size, 1 << fls(size), 0);

		/*
		 * Try to ensure ebase resides in KSeg0 if possible.
		 *
		 * It shouldn't generally be in XKPhys on MIPS64 to avoid
		 * hitting a poorly defined exception base for Cache Errors.
		 * The allocation is likely to be in the low 512MB of physical,
		 * in which case we should be able to convert to KSeg0.
		 *
		 * EVA is special though as it allows segments to be rearranged
		 * and to become uncached during cache error handling.
		 */
		ebase_pa = __pa(ebase);
		if (!IS_ENABLED(CONFIG_EVA) && !WARN_ON(ebase_pa >= 0x20000000))
			ebase = CKSEG0ADDR(ebase_pa);

	if (cpu_has_mips_r2_r6) {
		if (cpu_has_ebase_wg) {
			ebase = (read_c0_ebase_64() & ~0xfff);
			ebase = (read_c0_ebase() & ~0xfff);
		} else {
			ebase += (read_c0_ebase() & 0x3ffff000);
		}
	}

	if (cpu_has_mmips) {
		unsigned int config3 = read_c0_config3();

		if (IS_ENABLED(CONFIG_CPU_MICROMIPS))
			write_c0_config3(config3 | MIPS_CONF3_ISA_OE);
		else
			write_c0_config3(config3 & ~MIPS_CONF3_ISA_OE);
	}

	if (board_ebase_setup)
		board_ebase_setup();
	per_cpu_trap_init(true);

	/*
	 * Copy the generic exception handlers to their final destination.
	 * This will be overridden later as suitable for a particular
	 */
	set_handler(0x180, &except_vec3_generic, 0x80);

	/*
	 * Setup default vectors
	 */
	for (i = 0; i <= 31; i++)
		set_except_vector(i, handle_reserved);
	/*
	 * Copy the EJTAG debug exception vector handler code to its final
	 */
	if (cpu_has_ejtag && board_ejtag_handler_setup)
		board_ejtag_handler_setup();
	/*
	 * Only some CPUs have the watch exceptions.
	 */
		set_except_vector(EXCCODE_WATCH, handle_watch);

	/*
	 * Initialise interrupt handlers
	 */
	if (cpu_has_veic || cpu_has_vint) {
		int nvec = cpu_has_veic ? 64 : 8;
		for (i = 0; i < nvec; i++)
			set_vi_handler(i, NULL);
	}
	else if (cpu_has_divec)
		set_handler(0x200, &except_vec4, 0x8);
	/*
	 * Some CPUs can enable/disable cache parity detection, but they
	 * do it in different ways.
	 */
	parity_protection_init();

	/*
	 * The Data Bus Errors / Instruction Bus Errors are signaled
	 * by external hardware.  Therefore these two exceptions
	 * may have board specific handlers.
	 */

	set_except_vector(EXCCODE_INT, using_rollback_handler() ?
					rollback_handle_int : handle_int);
	set_except_vector(EXCCODE_MOD, handle_tlbm);
	set_except_vector(EXCCODE_TLBL, handle_tlbl);
	set_except_vector(EXCCODE_TLBS, handle_tlbs);

	set_except_vector(EXCCODE_ADEL, handle_adel);
	set_except_vector(EXCCODE_ADES, handle_ades);

	set_except_vector(EXCCODE_IBE, handle_ibe);
	set_except_vector(EXCCODE_DBE, handle_dbe);

	set_except_vector(EXCCODE_SYS, handle_sys);
	set_except_vector(EXCCODE_BP, handle_bp);

		set_except_vector(EXCCODE_RI, handle_ri);

		if (cpu_has_vtag_icache)
			set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
		else if (current_cpu_type() == CPU_LOONGSON3)
			set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
		else
			set_except_vector(EXCCODE_RI, handle_ri_rdhwr);

	set_except_vector(EXCCODE_CPU, handle_cpu);
	set_except_vector(EXCCODE_OV, handle_ov);
	set_except_vector(EXCCODE_TR, handle_tr);
	set_except_vector(EXCCODE_MSAFPE, handle_msa_fpe);

	if (current_cpu_type() == CPU_R6000 ||
	    current_cpu_type() == CPU_R6000A) {
		/*
		 * The R6000 is the only R-series CPU that features a machine
		 * check exception (similar to the R4000 cache error) and
		 * unaligned ldc1/sdc1 exception.  The handlers have not been
		 * written yet.  Well, anyway there is no R6000 machine on the
		 * current list of targets for Linux/MIPS.
		 * (Duh, crap, there is someone with a triple R6k machine)
		 */
		//set_except_vector(14, handle_mc);
		//set_except_vector(15, handle_ndc);
	}

	if (board_nmi_handler_setup)
		board_nmi_handler_setup();

	if (cpu_has_fpu && !cpu_has_nofpuex)
		set_except_vector(EXCCODE_FPE, handle_fpe);

	set_except_vector(MIPS_EXCCODE_TLBPAR, handle_ftlb);

	if (cpu_has_rixiex) {
		set_except_vector(EXCCODE_TLBRI, tlb_do_page_fault_0);
		set_except_vector(EXCCODE_TLBXI, tlb_do_page_fault_0);
	}

	set_except_vector(EXCCODE_MSADIS, handle_msa);
	set_except_vector(EXCCODE_MDMX, handle_mdmx);

	set_except_vector(EXCCODE_MCHECK, handle_mcheck);

	set_except_vector(EXCCODE_THREAD, handle_mt);

	set_except_vector(EXCCODE_DSPDIS, handle_dsp);

	if (board_cache_error_setup)
		board_cache_error_setup();

		/* Special exception: R4[04]00 uses also the divec space. */
		set_handler(0x180, &except_vec3_r4000, 0x100);
	else if (cpu_has_4kex)
		set_handler(0x180, &except_vec3_generic, 0x80);
	else
		set_handler(0x080, &except_vec3_generic, 0x80);

	local_flush_icache_range(ebase, ebase + 0x400);

	sort_extable(__start___dbe_table, __stop___dbe_table);

	cu2_notifier(default_cu2_call, 0x80000000);	/* Run last */
static int trap_pm_notifier(struct notifier_block *self, unsigned long cmd,
	case CPU_PM_ENTER_FAILED:
		configure_exception_vector();

		/* Restore register with CPU number for TLB handlers */
		TLBMISS_HANDLER_RESTORE();

static struct notifier_block trap_pm_notifier_block = {
	.notifier_call = trap_pm_notifier,
};

static int __init trap_pm_init(void)
{
	return cpu_pm_register_notifier(&trap_pm_notifier_block);
}
arch_initcall(trap_pm_init);