// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Regents of the University of California
 */
7 #include <linux/kernel.h>
8 #include <linux/init.h>
9 #include <linux/sched.h>
10 #include <linux/sched/debug.h>
11 #include <linux/sched/signal.h>
12 #include <linux/signal.h>
13 #include <linux/kdebug.h>
14 #include <linux/uaccess.h>
15 #include <linux/kprobes.h>
17 #include <linux/module.h>
18 #include <linux/irq.h>
19 #include <linux/kexec.h>
21 #include <asm/asm-prototypes.h>
24 #include <asm/processor.h>
25 #include <asm/ptrace.h>
26 #include <asm/thread_info.h>
/* When non-zero, do_trap() prints a rate-limited diagnostic for unhandled user signals. */
int show_unhandled_signals = 1;

/* Serializes oops output in die() when several harts crash concurrently. */
static DEFINE_SPINLOCK(die_lock);
32 void die(struct pt_regs *regs, const char *str)
34 static int die_counter;
41 spin_lock_irqsave(&die_lock, flags);
45 pr_emerg("%s [#%d]\n", str, ++die_counter);
50 cause = regs ? regs->cause : -1;
51 ret = notify_die(DIE_OOPS, str, regs, 0, cause, SIGSEGV);
53 if (kexec_should_crash(current))
57 add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
58 spin_unlock_irqrestore(&die_lock, flags);
62 panic("Fatal exception in interrupt");
64 panic("Fatal exception");
65 if (ret != NOTIFY_STOP)
66 make_task_dead(SIGSEGV);
69 void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr)
71 struct task_struct *tsk = current;
73 if (show_unhandled_signals && unhandled_signal(tsk, signo)
74 && printk_ratelimit()) {
75 pr_info("%s[%d]: unhandled signal %d code 0x%x at 0x" REG_FMT,
76 tsk->comm, task_pid_nr(tsk), signo, code, addr);
77 print_vma_addr(KERN_CONT " in ", instruction_pointer(regs));
82 force_sig_fault(signo, code, (void __user *)addr);
85 static void do_trap_error(struct pt_regs *regs, int signo, int code,
86 unsigned long addr, const char *str)
88 current->thread.bad_cause = regs->cause;
90 if (user_mode(regs)) {
91 do_trap(regs, signo, code, addr);
93 if (!fixup_exception(regs))
/*
 * On XIP kernels with alternatives, trap handlers must live in RAM so they
 * can be patched; place them in a dedicated section. Otherwise the
 * annotation expands to nothing.
 */
#if defined(CONFIG_XIP_KERNEL) && defined(CONFIG_RISCV_ALTERNATIVE)
#define __trap_section __section(".xip.traps")
#else
#define __trap_section
#endif
/* Template for trivial trap handlers: forward to do_trap_error() with a
 * fixed signal/si_code and an "Oops - "-prefixed description.
 */
#define DO_ERROR_INFO(name, signo, code, str) \
asmlinkage __visible __trap_section void name(struct pt_regs *regs) \
{ \
	do_trap_error(regs, signo, code, regs->epc, "Oops - " str); \
}
/*
 * Handlers generated from the DO_ERROR_INFO() template above; each forwards
 * to do_trap_error() with a fixed signal number and si_code.
 */
DO_ERROR_INFO(do_trap_unknown,
	SIGILL, ILL_ILLTRP, "unknown exception");
DO_ERROR_INFO(do_trap_insn_misaligned,
	SIGBUS, BUS_ADRALN, "instruction address misaligned");
DO_ERROR_INFO(do_trap_insn_fault,
	SIGSEGV, SEGV_ACCERR, "instruction access fault");
DO_ERROR_INFO(do_trap_insn_illegal,
	SIGILL, ILL_ILLOPC, "illegal instruction");
DO_ERROR_INFO(do_trap_load_fault,
	SIGSEGV, SEGV_ACCERR, "load access fault");
119 #ifndef CONFIG_RISCV_M_MODE
120 DO_ERROR_INFO(do_trap_load_misaligned,
121 SIGBUS, BUS_ADRALN, "Oops - load address misaligned");
122 DO_ERROR_INFO(do_trap_store_misaligned,
123 SIGBUS, BUS_ADRALN, "Oops - store (or AMO) address misaligned");
125 int handle_misaligned_load(struct pt_regs *regs);
126 int handle_misaligned_store(struct pt_regs *regs);
128 asmlinkage void __trap_section do_trap_load_misaligned(struct pt_regs *regs)
130 if (!handle_misaligned_load(regs))
132 do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
133 "Oops - load address misaligned");
136 asmlinkage void __trap_section do_trap_store_misaligned(struct pt_regs *regs)
138 if (!handle_misaligned_store(regs))
140 do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
141 "Oops - store (or AMO) address misaligned");
/*
 * More DO_ERROR_INFO()-generated handlers. The ecall handlers are only
 * reached for environment calls that were not routed to the syscall path.
 */
DO_ERROR_INFO(do_trap_store_fault,
	SIGSEGV, SEGV_ACCERR, "store (or AMO) access fault");
DO_ERROR_INFO(do_trap_ecall_u,
	SIGILL, ILL_ILLTRP, "environment call from U-mode");
DO_ERROR_INFO(do_trap_ecall_s,
	SIGILL, ILL_ILLTRP, "environment call from S-mode");
DO_ERROR_INFO(do_trap_ecall_m,
	SIGILL, ILL_ILLTRP, "environment call from M-mode");
153 static inline unsigned long get_break_insn_length(unsigned long pc)
157 if (get_kernel_nofault(insn, (bug_insn_t *)pc))
160 return GET_INSN_LENGTH(insn);
163 asmlinkage __visible __trap_section void do_trap_break(struct pt_regs *regs)
165 #ifdef CONFIG_KPROBES
166 if (kprobe_single_step_handler(regs))
169 if (kprobe_breakpoint_handler(regs))
172 #ifdef CONFIG_UPROBES
173 if (uprobe_single_step_handler(regs))
176 if (uprobe_breakpoint_handler(regs))
179 current->thread.bad_cause = regs->cause;
182 force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->epc);
184 else if (notify_die(DIE_TRAP, "EBREAK", regs, 0, regs->cause, SIGTRAP)
188 else if (report_bug(regs->epc, regs) == BUG_TRAP_TYPE_WARN)
189 regs->epc += get_break_insn_length(regs->epc);
191 die(regs, "Kernel BUG");
193 NOKPROBE_SYMBOL(do_trap_break);
#ifdef CONFIG_GENERIC_BUG
/*
 * is_valid_bugaddr - check whether @pc points at a BUG() trap instruction.
 *
 * Rejects addresses below VMALLOC_START and unreadable memory, then matches
 * the instruction against the 32-bit or compressed 16-bit ebreak encoding.
 */
int is_valid_bugaddr(unsigned long pc)
{
	bug_insn_t insn;

	if (pc < VMALLOC_START)
		return 0;
	if (get_kernel_nofault(insn, (bug_insn_t *)pc))
		return 0;
	if ((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32)
		return (insn == __BUG_INSN_32);
	else
		return ((insn & __COMPRESSED_INSN_MASK) == __BUG_INSN_16);
}
#endif /* CONFIG_GENERIC_BUG */
#ifdef CONFIG_VMAP_STACK
/* Per-cpu emergency stack used once the kernel stack has overflowed. */
static DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)],
		overflow_stack)__aligned(16);
/*
 * shadow stack, handled_ kernel_ stack_ overflow(in kernel/entry.S) is used
 * to get per-cpu overflow stack(get_overflow_stack).
 */
long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE/sizeof(long)] __aligned(16);

/* Returns the top (highest address) of this hart's overflow stack. */
asmlinkage unsigned long get_overflow_stack(void)
{
	return (unsigned long)this_cpu_ptr(overflow_stack) +
		OVERFLOW_STACK_SIZE;
}

/*
 * A pseudo spinlock to protect the shadow stack from being used by multiple
 * harts concurrently. This isn't a real spinlock because the lock side must
 * be taken without a valid stack and only a single register, it's only taken
 * while in the process of panicing anyway so the performance and error
 * checking a proper spinlock gives us doesn't matter.
 */
unsigned long spin_shadow_stack;

asmlinkage void handle_bad_stack(struct pt_regs *regs)
{
	unsigned long tsk_stk = (unsigned long)current->stack;
	unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);

	/*
	 * We're done with the shadow stack by this point, as we're on the
	 * overflow stack. Tell any other concurrent overflowing harts that
	 * they can proceed with panicing by releasing the pseudo-spinlock.
	 *
	 * This pairs with an amoswap.aq in handle_kernel_stack_overflow.
	 */
	smp_store_release(&spin_shadow_stack, 0);

	console_verbose();

	pr_emerg("Insufficient stack space to handle exception!\n");
	pr_emerg("Task stack:     [0x%016lx..0x%016lx]\n",
			tsk_stk, tsk_stk + THREAD_SIZE);
	pr_emerg("Overflow stack: [0x%016lx..0x%016lx]\n",
			ovf_stk, ovf_stk + OVERFLOW_STACK_SIZE);

	__show_regs(regs);
	panic("Kernel stack overflow");

	/* panic() should not return; idle forever if it somehow does. */
	for (;;)
		wait_for_interrupt();
}
#endif /* CONFIG_VMAP_STACK */