/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/asm.h>
#include <asm/csr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/errata_list.h>
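
/*
 * With CONFIG_PREEMPTION disabled there is no kernel-preemption check on
 * the exception-return path, so resume_kernel can simply alias restore_all.
 */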
#if !IS_ENABLED(CONFIG_PREEMPTION)
.set resume_kernel, restore_all
#endif

ENTRY(handle_exception)
	/*
	 * If coming from userspace, preserve the user thread pointer and load
	 * the kernel thread pointer.  If we came from the kernel, the scratch
	 * register will contain 0, and we should continue on the current TP.
	 */
	csrrw tp, CSR_SCRATCH, tp
	bnez tp, _save_context

_restore_kernel_tpsp:
	csrr tp, CSR_SCRATCH
	REG_S sp, TASK_TI_KERNEL_SP(tp)
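
	/*
	 * With CONFIG_VMAP_STACK the stack is aligned to 2 * THREAD_SIZE, so
	 * bit THREAD_SHIFT of sp flips exactly when carving out the pt_regs
	 * frame underflows the stack; that single bit is tested below.
	 */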
#ifdef CONFIG_VMAP_STACK
	addi sp, sp, -(PT_SIZE_ON_STACK)
	srli sp, sp, THREAD_SHIFT
	andi sp, sp, 0x1
	bnez sp, handle_kernel_stack_overflow
	REG_L sp, TASK_TI_KERNEL_SP(tp)
#endif

_save_context:
	REG_S sp, TASK_TI_USER_SP(tp)
	REG_L sp, TASK_TI_KERNEL_SP(tp)
	addi sp, sp, -(PT_SIZE_ON_STACK)
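	/* sp now points at the pt_regs frame being built on the kernel stack. */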

	/*
	 * Disable user-mode memory access as it should only be set in the
	 * actual user copy routines.
	 *
	 * Disable the FPU to detect illegal usage of floating point in kernel
	 * space.
	 */
	li t0, SR_SUM | SR_FS

	REG_L s0, TASK_TI_USER_SP(tp)
	csrrc s1, CSR_STATUS, t0
	csrr s2, CSR_EPC
	csrr s3, CSR_TVAL
	csrr s4, CSR_CAUSE
	csrr s5, CSR_SCRATCH
	REG_S s0, PT_SP(sp)
	REG_S s1, PT_STATUS(sp)
	REG_S s2, PT_EPC(sp)
	REG_S s3, PT_BADADDR(sp)
	REG_S s4, PT_CAUSE(sp)
	REG_S s5, PT_TP(sp)
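	/*
	 * s1..s5 snapshot STATUS/EPC/TVAL/CAUSE/SCRATCH; s4 (the trap cause)
	 * steers the interrupt/exception dispatch below.
	 */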

	/*
	 * Set the scratch register to 0, so that if a recursive exception
	 * occurs, the exception vector knows it came from the kernel.
	 */
	csrw CSR_SCRATCH, x0

	/* Load the global pointer */
.option push
.option norelax
	la gp, __global_pointer$
.option pop
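	/*
	 * Relaxation is disabled around the load above: the linker would
	 * otherwise relax "la gp, __global_pointer$" into a gp-relative form
	 * before gp holds a valid value.
	 */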

#ifdef CONFIG_TRACE_IRQFLAGS
	call __trace_hardirqs_off
#endif

#ifdef CONFIG_CONTEXT_TRACKING_USER
	/* If previous state is in user mode, call user_exit_callable(). */
	li a0, SR_PP
	and a0, s1, a0
	bnez a0, skip_context_tracking
	call user_exit_callable
skip_context_tracking:
#endif

	/*
	 * MSB of cause differentiates between
	 * interrupts and exceptions
	 */
	bge s4, zero, 1f

	la ra, ret_from_exception
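	/*
	 * With ra preset to ret_from_exception, the jump into the handler
	 * below is effectively a tail call: the C code returns straight to
	 * the common exit path.
	 */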

	/* Handle interrupts */
	move a0, sp /* pt_regs */
	la a1, generic_handle_arch_irq
	jr a1
1:
	/*
	 * Exceptions run with interrupts enabled or disabled depending on the
	 * state of SR_PIE in m/sstatus.
	 */
	andi t0, s1, SR_PIE
	beqz t0, 1f
	/* kprobes, entered via ebreak, must have interrupts disabled. */
	li t0, EXC_BREAKPOINT
	beq s4, t0, 1f
#ifdef CONFIG_TRACE_IRQFLAGS
	call __trace_hardirqs_on
#endif
	csrs CSR_STATUS, SR_IE

1:
	la ra, ret_from_exception
	/* Handle syscalls */
	li t0, EXC_SYSCALL
	beq s4, t0, handle_syscall

	/* Handle other exceptions */
	slli t0, s4, RISCV_LGPTR
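	/* Scale the cause by the pointer size to index excp_vect_table. */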
	la t1, excp_vect_table
	la t2, excp_vect_table_end
	move a0, sp /* pt_regs */
	add t0, t1, t0
	/* Check if exception code lies within bounds */
	bgeu t0, t2, 1f
	REG_L t0, 0(t0)
	jr t0
1:
	tail do_trap_unknown

handle_syscall:
#ifdef CONFIG_RISCV_M_MODE
	/*
	 * When running in M-Mode (no MMU config), MPIE does not get set.
	 * As a result, we need to force enable interrupts here because
	 * handle_exception did not set SR_IE as it always sees SR_PIE being
	 * cleared.
	 */
	csrs CSR_STATUS, SR_IE
#endif
#if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING_USER)
	/* Recover a0 - a7 for system calls */
	REG_L a0, PT_A0(sp)
	REG_L a1, PT_A1(sp)
	REG_L a2, PT_A2(sp)
	REG_L a3, PT_A3(sp)
	REG_L a4, PT_A4(sp)
	REG_L a5, PT_A5(sp)
	REG_L a6, PT_A6(sp)
	REG_L a7, PT_A7(sp)
#endif
	/* save the initial A0 value (needed in signal handlers) */
	REG_S a0, PT_ORIG_A0(sp)
	/*
	 * Advance SEPC to avoid executing the original
	 * scall instruction on sret
	 */
	addi s2, s2, 0x4
	REG_S s2, PT_EPC(sp)

	/* Trace syscalls, but only if requested by the user. */
	REG_L t0, TASK_TI_FLAGS(tp)
	andi t0, t0, _TIF_SYSCALL_WORK
	bnez t0, handle_syscall_trace_enter

check_syscall_nr:
	/* Check to make sure we don't jump to a bogus syscall number. */
	li t0, __NR_syscalls
	la s0, sys_ni_syscall
	/*
	 * Syscall number held in a7.
	 * If syscall number is above allowed value, redirect to ni_syscall.
	 */
	bgeu a7, t0, 3f
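	/*
	 * The SR_UXL field of status records the user-mode XLEN; a value of
	 * SR_UXL_32 identifies a 32-bit (compat) process.
	 */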
#ifdef CONFIG_COMPAT
	REG_L s0, PT_STATUS(sp)
	srli s0, s0, SR_UXL_SHIFT
	andi s0, s0, (SR_UXL >> SR_UXL_SHIFT)
	li t0, (SR_UXL_32 >> SR_UXL_SHIFT)
	sub t0, s0, t0
	bnez t0, 1f

	/* Call compat_syscall */
	la s0, compat_sys_call_table
	j 2f

1:
#endif
	/* Call syscall */
	la s0, sys_call_table

2:
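	/* Each table entry is one pointer (1 << RISCV_LGPTR bytes) wide. */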
	slli t0, a7, RISCV_LGPTR
	add s0, s0, t0
	REG_L s0, 0(s0)

3:
	jalr s0

ret_from_syscall:
	/* Set user a0 to kernel a0 */
	REG_S a0, PT_A0(sp)
	/*
	 * We didn't execute the actual syscall.
	 * Seccomp already set return value for the current task pt_regs.
	 * (If it was configured with SECCOMP_RET_ERRNO/TRACE)
	 */
ret_from_syscall_rejected:
#ifdef CONFIG_DEBUG_RSEQ
	move a0, sp
	call rseq_syscall
#endif
	/* Trace syscalls, but only if requested by the user. */
	REG_L t0, TASK_TI_FLAGS(tp)
	andi t0, t0, _TIF_SYSCALL_WORK
	bnez t0, handle_syscall_trace_exit

ret_from_exception:
	REG_L s0, PT_STATUS(sp)
	csrc CSR_STATUS, SR_IE
#ifdef CONFIG_TRACE_IRQFLAGS
	call __trace_hardirqs_off
#endif
#ifdef CONFIG_RISCV_M_MODE
	/* the MPP value is too large to be used as an immediate arg for andi */
	li t0, SR_MPP
	and s0, s0, t0
#else
	andi s0, s0, SR_SPP
#endif
	bnez s0, resume_kernel

	/* Interrupts must be disabled here so flags are checked atomically */
	REG_L s0, TASK_TI_FLAGS(tp) /* current_thread_info->flags */
	andi s1, s0, _TIF_WORK_MASK
	bnez s1, resume_userspace_slow
resume_userspace:
#ifdef CONFIG_CONTEXT_TRACKING_USER
	call user_enter_callable
#endif

	/* Save unwound kernel stack pointer in thread_info */
	addi s0, sp, PT_SIZE_ON_STACK
	REG_S s0, TASK_TI_KERNEL_SP(tp)

	/*
	 * Save TP into the scratch register, so we can find the kernel data
	 * structures again.
	 */
	csrw CSR_SCRATCH, tp

restore_all:
#ifdef CONFIG_TRACE_IRQFLAGS
	REG_L s1, PT_STATUS(sp)
	andi t0, s1, SR_PIE
	beqz t0, 1f
	call __trace_hardirqs_on
	j 2f
1:
	call __trace_hardirqs_off
2:
#endif
	REG_L a0, PT_STATUS(sp)
	/*
	 * The current load reservation is effectively part of the processor's
	 * state, in the sense that load reservations cannot be shared between
	 * different hart contexts.  We can't actually save and restore a load
	 * reservation, so instead here we clear any existing reservation --
	 * it's always legal for implementations to clear load reservations at
	 * any point (as long as the forward progress guarantee is kept, but
	 * we'll ignore that here).
	 *
	 * Dangling load reservations can be the result of taking a trap in the
	 * middle of an LR/SC sequence, but can also be the result of a taken
	 * forward branch around an SC -- which is how we implement CAS.  As a
	 * result we need to clear reservations between the last CAS and the
	 * jump back to the new context.  While it is unlikely the store
	 * completes, implementations are allowed to expand reservations to be
	 * arbitrarily large.
	 */
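	/*
	 * The store-conditional below writes PT_EPC back unchanged; its only
	 * real effect is to invalidate any reservation the interrupted
	 * context may still hold.
	 */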
	REG_L  a2, PT_EPC(sp)
	REG_SC x0, a2, PT_EPC(sp)

	csrw CSR_STATUS, a0
	csrw CSR_EPC, a2
	REG_L x26, PT_S10(sp)
	REG_L x27, PT_S11(sp)

#ifdef CONFIG_RISCV_M_MODE
	mret
#else
	sret
#endif
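
/*
 * Kernel preemption: reschedule only when preempt_count is zero and
 * TIF_NEED_RESCHED is set; otherwise go straight back through restore_all.
 */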
#if IS_ENABLED(CONFIG_PREEMPTION)
resume_kernel:
	REG_L s0, TASK_TI_PREEMPT_COUNT(tp)
	bnez s0, restore_all
	REG_L s0, TASK_TI_FLAGS(tp)
	andi s0, s0, _TIF_NEED_RESCHED
	beqz s0, restore_all
	call preempt_schedule_irq
	j restore_all
#endif

resume_userspace_slow:
	/* Enter slow path for supplementary processing */
	move a0, sp /* pt_regs */
	move a1, s0 /* current_thread_info->flags */
	call do_work_pending
	j resume_userspace

	/* Slow paths for ptrace. */
handle_syscall_trace_enter:
	move a0, sp
	call do_syscall_trace_enter
	move t0, a0
	REG_L a0, PT_A0(sp)
	REG_L a1, PT_A1(sp)
	REG_L a2, PT_A2(sp)
	REG_L a3, PT_A3(sp)
	REG_L a4, PT_A4(sp)
	REG_L a5, PT_A5(sp)
	REG_L a6, PT_A6(sp)
	REG_L a7, PT_A7(sp)
	bnez t0, ret_from_syscall_rejected
	j check_syscall_nr
handle_syscall_trace_exit:
	move a0, sp
	call do_syscall_trace_exit
	j ret_from_exception

#ifdef CONFIG_VMAP_STACK
handle_kernel_stack_overflow:
	/*
	 * Takes the pseudo-spinlock for the shadow stack, in case multiple
	 * harts are concurrently overflowing their kernel stacks.  We could
	 * store any value here, but since we're overflowing the kernel stack
	 * already we only have SP to use as a scratch register.  So we just
	 * swap in the address of the spinlock, as that's definitely non-zero.
	 *
	 * Pairs with a store_release in handle_bad_stack().
	 */
1:	la sp, spin_shadow_stack
	REG_AMOSWAP_AQ sp, sp, (sp)
	bnez sp, 1b

	la sp, shadow_stack
	addi sp, sp, SHADOW_OVERFLOW_STACK_SIZE
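	/* sp now sits at the top of the shadow stack, which grows downwards. */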

	/* Save the caller registers to the shadow stack. */
	addi sp, sp, -(PT_SIZE_ON_STACK)

	la ra, restore_caller_reg
	tail get_overflow_stack

restore_caller_reg:
	/* Save the per-CPU overflow stack pointer returned in a0. */
	REG_S a0, -8(sp)
	/* Restore the caller registers from the shadow stack. */

	/* Load the per-CPU overflow stack pointer. */
	REG_L sp, -8(sp)
	addi sp, sp, -(PT_SIZE_ON_STACK)

	/* Save context to the overflow stack. */
	REG_S x26, PT_S10(sp)
	REG_S x27, PT_S11(sp)

	REG_L s0, TASK_TI_KERNEL_SP(tp)
	csrr s1, CSR_STATUS
	csrr s2, CSR_EPC
	csrr s3, CSR_TVAL
	csrr s4, CSR_CAUSE
	csrr s5, CSR_SCRATCH
	REG_S s0, PT_SP(sp)
	REG_S s1, PT_STATUS(sp)
	REG_S s2, PT_EPC(sp)
	REG_S s3, PT_BADADDR(sp)
	REG_S s4, PT_CAUSE(sp)
	REG_S s5, PT_TP(sp)
	move a0, sp
	tail handle_bad_stack
#endif

END(handle_exception)

ENTRY(ret_from_fork)
	la ra, ret_from_exception
	tail schedule_tail
ENDPROC(ret_from_fork)
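
/*
 * For kernel threads, copy_thread() parks the thread function in s0 and its
 * argument in s1; they are used once schedule_tail() returns.
 */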
ENTRY(ret_from_kernel_thread)
	call schedule_tail
	/* Call fn(arg) */
	la ra, ret_from_exception
	move a0, s1
	jr s0
ENDPROC(ret_from_kernel_thread)

/*
 * Integer register context switch
 * The callee-saved registers must be saved and restored.
 *
 *   a0: previous task_struct (must be preserved across the switch)
 *   a1: next task_struct
 *
 * The value of a0 and a1 must be preserved by this function, as that's how
 * arguments are passed to schedule_tail.
 */
ENTRY(__switch_to)
	/* Save context into prev->thread */
	li    a4, TASK_THREAD_RA
	add   a3, a0, a4
	add   a4, a1, a4
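	/* a3 = &prev->thread.ra, a4 = &next->thread.ra */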
	REG_S ra, TASK_THREAD_RA_RA(a3)
	REG_S sp, TASK_THREAD_SP_RA(a3)
	REG_S s0, TASK_THREAD_S0_RA(a3)
	REG_S s1, TASK_THREAD_S1_RA(a3)
	REG_S s2, TASK_THREAD_S2_RA(a3)
	REG_S s3, TASK_THREAD_S3_RA(a3)
	REG_S s4, TASK_THREAD_S4_RA(a3)
	REG_S s5, TASK_THREAD_S5_RA(a3)
	REG_S s6, TASK_THREAD_S6_RA(a3)
	REG_S s7, TASK_THREAD_S7_RA(a3)
	REG_S s8, TASK_THREAD_S8_RA(a3)
	REG_S s9, TASK_THREAD_S9_RA(a3)
	REG_S s10, TASK_THREAD_S10_RA(a3)
	REG_S s11, TASK_THREAD_S11_RA(a3)
	/* Restore context from next->thread */
	REG_L ra, TASK_THREAD_RA_RA(a4)
	REG_L sp, TASK_THREAD_SP_RA(a4)
	REG_L s0, TASK_THREAD_S0_RA(a4)
	REG_L s1, TASK_THREAD_S1_RA(a4)
	REG_L s2, TASK_THREAD_S2_RA(a4)
	REG_L s3, TASK_THREAD_S3_RA(a4)
	REG_L s4, TASK_THREAD_S4_RA(a4)
	REG_L s5, TASK_THREAD_S5_RA(a4)
	REG_L s6, TASK_THREAD_S6_RA(a4)
	REG_L s7, TASK_THREAD_S7_RA(a4)
	REG_L s8, TASK_THREAD_S8_RA(a4)
	REG_L s9, TASK_THREAD_S9_RA(a4)
	REG_L s10, TASK_THREAD_S10_RA(a4)
	REG_L s11, TASK_THREAD_S11_RA(a4)
	/* The offset of thread_info in task_struct is zero. */
	move tp, a1
	ret
ENDPROC(__switch_to)

#ifndef CONFIG_MMU
#define do_page_fault do_trap_unknown
#endif

	.section ".rodata"
	.align LGREG
	/* Exception vector table */
ENTRY(excp_vect_table)
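	/* Entries are indexed by the synchronous exception cause code (0-15). */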
	RISCV_PTR do_trap_insn_misaligned
	ALT_INSN_FAULT(RISCV_PTR do_trap_insn_fault)
	RISCV_PTR do_trap_insn_illegal
	RISCV_PTR do_trap_break
	RISCV_PTR do_trap_load_misaligned
	RISCV_PTR do_trap_load_fault
	RISCV_PTR do_trap_store_misaligned
	RISCV_PTR do_trap_store_fault
	RISCV_PTR do_trap_ecall_u /* system call, gets intercepted */
	RISCV_PTR do_trap_ecall_s
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_trap_ecall_m
	/* instruction page fault */
	ALT_PAGE_FAULT(RISCV_PTR do_page_fault)
	RISCV_PTR do_page_fault /* load page fault */
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_page_fault /* store page fault */
excp_vect_table_end:
END(excp_vect_table)
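
/*
 * Signal-return trampoline used when there is no VDSO to provide one
 * (NOMMU); it simply issues the rt_sigreturn syscall.
 */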
#ifndef CONFIG_MMU
ENTRY(__user_rt_sigreturn)
	li a7, __NR_rt_sigreturn
	scall
END(__user_rt_sigreturn)
#endif