/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/asm.h>
#include <asm/csr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>

	.text
	.altmacro

/*
 * Prepares to enter a system call or exception by saving all registers to the
 * stack.
 */
	.macro SAVE_ALL
	LOCAL _restore_kernel_tpsp
	LOCAL _save_context

	/*
	 * If coming from userspace, preserve the user thread pointer and load
	 * the kernel thread pointer. If we came from the kernel, sscratch
	 * will contain 0, and we should continue on the current TP.
	 */
	csrrw tp, CSR_SSCRATCH, tp
	bnez tp, _save_context
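
	/*
	 * Note: after the csrrw above, tp holds the previous sscratch value
	 * (the kernel TP if we trapped from user mode, 0 if from the kernel)
	 * and sscratch temporarily holds the trapping context's tp, which is
	 * read back into s5 further down.
	 */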
_restore_kernel_tpsp:
	csrr tp, CSR_SSCRATCH
	REG_S sp, TASK_TI_KERNEL_SP(tp)
_save_context:
	REG_S sp, TASK_TI_USER_SP(tp)
	REG_L sp, TASK_TI_KERNEL_SP(tp)
	addi sp, sp, -(PT_SIZE_ON_STACK)
	REG_S x1,  PT_RA(sp)
	REG_S x3,  PT_GP(sp)
	REG_S x5,  PT_T0(sp)
	REG_S x6,  PT_T1(sp)
	REG_S x7,  PT_T2(sp)
	REG_S x8,  PT_S0(sp)
	REG_S x9,  PT_S1(sp)
	REG_S x10, PT_A0(sp)
	REG_S x11, PT_A1(sp)
	REG_S x12, PT_A2(sp)
	REG_S x13, PT_A3(sp)
	REG_S x14, PT_A4(sp)
	REG_S x15, PT_A5(sp)
	REG_S x16, PT_A6(sp)
	REG_S x17, PT_A7(sp)
	REG_S x18, PT_S2(sp)
	REG_S x19, PT_S3(sp)
	REG_S x20, PT_S4(sp)
	REG_S x21, PT_S5(sp)
	REG_S x22, PT_S6(sp)
	REG_S x23, PT_S7(sp)
	REG_S x24, PT_S8(sp)
	REG_S x25, PT_S9(sp)
	REG_S x26, PT_S10(sp)
	REG_S x27, PT_S11(sp)
	REG_S x28, PT_T3(sp)
	REG_S x29, PT_T4(sp)
	REG_S x30, PT_T5(sp)
	REG_S x31, PT_T6(sp)
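
	/*
	 * Note: x2 (sp) and x4 (tp) are deliberately absent from the list
	 * above; their trap-time values live in TASK_TI_USER_SP and sscratch
	 * and are staged through s0/s5 into PT_SP/PT_TP below.
	 */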

	/*
	 * Disable user-mode memory access as it should only be set in the
	 * actual user copy routines.
	 *
	 * Disable the FPU to detect illegal usage of floating point in kernel
	 * space.
	 */
	li t0, SR_SUM | SR_FS
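
	/*
	 * Note: csrrc below both reads and clears, so s1 receives the sstatus
	 * value as it was at trap time while SR_SUM and SR_FS are cleared for
	 * kernel execution in the same instruction.
	 */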
	REG_L s0, TASK_TI_USER_SP(tp)
	csrrc s1, CSR_SSTATUS, t0
	csrr s2, CSR_SEPC
	csrr s3, CSR_STVAL
	csrr s4, CSR_SCAUSE
	csrr s5, CSR_SSCRATCH
	REG_S s0, PT_SP(sp)
	REG_S s1, PT_SSTATUS(sp)
	REG_S s2, PT_SEPC(sp)
	REG_S s3, PT_SBADADDR(sp)
	REG_S s4, PT_SCAUSE(sp)
	REG_S s5, PT_TP(sp)
	.endm

/*
 * Prepares to return from a system call or exception by restoring all
 * registers from the stack.
 */
	.macro RESTORE_ALL
	REG_L a0, PT_SSTATUS(sp)
	/*
	 * The current load reservation is effectively part of the processor's
	 * state, in the sense that load reservations cannot be shared between
	 * different hart contexts. We can't actually save and restore a load
	 * reservation, so instead here we clear any existing reservation --
	 * it's always legal for implementations to clear load reservations at
	 * any point (as long as the forward progress guarantee is kept, but
	 * we'll ignore that here).
	 *
	 * Dangling load reservations can be the result of taking a trap in the
	 * middle of an LR/SC sequence, but can also be the result of a taken
	 * forward branch around an SC -- which is how we implement CAS. As a
	 * result we need to clear reservations between the last CAS and the
	 * jump back to the new context. While it is unlikely the store
	 * completes, implementations are allowed to expand reservations to be
	 * arbitrarily large.
	 */
	REG_L  a2, PT_SEPC(sp)
	REG_SC x0, a2, PT_SEPC(sp)

	csrw CSR_SSTATUS, a0
	csrw CSR_SEPC, a2

	REG_L x1,  PT_RA(sp)
	REG_L x3,  PT_GP(sp)
	REG_L x4,  PT_TP(sp)
	REG_L x5,  PT_T0(sp)
	REG_L x6,  PT_T1(sp)
	REG_L x7,  PT_T2(sp)
	REG_L x8,  PT_S0(sp)
	REG_L x9,  PT_S1(sp)
	REG_L x10, PT_A0(sp)
	REG_L x11, PT_A1(sp)
	REG_L x12, PT_A2(sp)
	REG_L x13, PT_A3(sp)
	REG_L x14, PT_A4(sp)
	REG_L x15, PT_A5(sp)
	REG_L x16, PT_A6(sp)
	REG_L x17, PT_A7(sp)
	REG_L x18, PT_S2(sp)
	REG_L x19, PT_S3(sp)
	REG_L x20, PT_S4(sp)
	REG_L x21, PT_S5(sp)
	REG_L x22, PT_S6(sp)
	REG_L x23, PT_S7(sp)
	REG_L x24, PT_S8(sp)
	REG_L x25, PT_S9(sp)
	REG_L x26, PT_S10(sp)
	REG_L x27, PT_S11(sp)
	REG_L x28, PT_T3(sp)
	REG_L x29, PT_T4(sp)
	REG_L x30, PT_T5(sp)
	REG_L x31, PT_T6(sp)

	REG_L x2,  PT_SP(sp)
	.endm
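
/*
 * Note on the REG_L/REG_SC pair at the top of RESTORE_ALL: the SC rewrites
 * PT_SEPC with the value just loaded from it, so memory is unchanged whether
 * it succeeds or fails, the hart's reservation is invalidated either way, and
 * the x0 destination discards the success flag.
 */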

#if !IS_ENABLED(CONFIG_PREEMPT)
	.set resume_kernel, restore_all
#endif
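
/*
 * Without CONFIG_PREEMPT there is no kernel preemption: a trap taken from
 * kernel mode can always restore immediately, so resume_kernel is simply
 * aliased to restore_all and the bnez in ret_from_exception branches
 * straight there.
 */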

ENTRY(handle_exception)
	SAVE_ALL

	/*
	 * Set sscratch register to 0, so that if a recursive exception
	 * occurs, the exception vector knows it came from the kernel
	 */
	csrw CSR_SSCRATCH, x0

	/* Load the global pointer */
.option push
.option norelax
	la gp, __global_pointer$
.option pop

	la ra, ret_from_exception
	/*
	 * MSB of cause differentiates between
	 * interrupts and exceptions
	 */
	bge s4, zero, 1f

	/* Handle interrupts */
	move a0, sp /* pt_regs */
	tail do_IRQ
1:
	/*
	 * Exceptions run with interrupts enabled or disabled depending on the
	 * state of sstatus.SR_SPIE.
	 */
	andi t0, s1, SR_SPIE
	beqz t0, 1f
	csrs CSR_SSTATUS, SR_SIE

1:
	/* Handle syscalls */
	li t0, EXC_SYSCALL
	beq s4, t0, handle_syscall
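
	/*
	 * Anything other than a syscall is dispatched through excp_vect_table,
	 * indexed by the exception code from scause scaled to pointer size
	 * (RISCV_LGPTR is log2 of the pointer width in bytes).
	 */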

	/* Handle other exceptions */
	slli t0, s4, RISCV_LGPTR
	la t1, excp_vect_table
	la t2, excp_vect_table_end
	move a0, sp /* pt_regs */
	add t0, t1, t0
	/* Check if exception code lies within bounds */
	bgeu t0, t2, 1f
	REG_L t0, 0(t0)
	jr t0
1:
	tail do_trap_unknown

handle_syscall:
	/* save the initial A0 value (needed in signal handlers) */
	REG_S a0, PT_ORIG_A0(sp)
	/*
	 * Advance SEPC to avoid executing the original
	 * scall instruction on sret
	 */
	addi s2, s2, 0x4
	REG_S s2, PT_SEPC(sp)
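
	/*
	 * Note: the ecall/scall instruction has no compressed encoding, so it
	 * is always 4 bytes and the fixed 0x4 advance above is safe.
	 */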
	/* Trace syscalls, but only if requested by the user. */
	REG_L t0, TASK_TI_FLAGS(tp)
	andi t0, t0, _TIF_SYSCALL_WORK
	bnez t0, handle_syscall_trace_enter
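
	/*
	 * Below, s0 is preloaded with sys_ni_syscall; only an in-range syscall
	 * number replaces it with the real handler, so a bogus number falls
	 * through to the -ENOSYS stub at the jalr.
	 */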
check_syscall_nr:
	/* Check to make sure we don't jump to a bogus syscall number. */
	li t0, __NR_syscalls
	la s0, sys_ni_syscall
	/* Syscall number held in a7 */
	bgeu a7, t0, 1f
	la s0, sys_call_table
	slli t0, a7, RISCV_LGPTR
	add s0, s0, t0
	REG_L s0, 0(s0)
1:
	jalr s0

ret_from_syscall:
	/* Set user a0 to kernel a0 */
	REG_S a0, PT_A0(sp)
	/* Trace syscalls, but only if requested by the user. */
	REG_L t0, TASK_TI_FLAGS(tp)
	andi t0, t0, _TIF_SYSCALL_WORK
	bnez t0, handle_syscall_trace_exit

ret_from_exception:
	REG_L s0, PT_SSTATUS(sp)
	csrc CSR_SSTATUS, SR_SIE
	andi s0, s0, SR_SPP
	bnez s0, resume_kernel
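
	/*
	 * The andi/bnez pair above tests SR_SPP, which records the privilege
	 * level the trap was taken from: nonzero means supervisor mode, so we
	 * resume kernel context rather than heading back to userspace.
	 */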

resume_userspace:
	/* Interrupts must be disabled here so flags are checked atomically */
	REG_L s0, TASK_TI_FLAGS(tp) /* current_thread_info->flags */
	andi s1, s0, _TIF_WORK_MASK
	bnez s1, work_pending

	/* Save unwound kernel stack pointer in thread_info */
	addi s0, sp, PT_SIZE_ON_STACK
	REG_S s0, TASK_TI_KERNEL_SP(tp)
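
	/*
	 * This is the other half of the sscratch convention used by SAVE_ALL:
	 * while running in user mode sscratch holds the kernel TP, and while
	 * in the kernel it holds 0.
	 */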

	/*
	 * Save TP into sscratch, so we can find the kernel data structures
	 * again.
	 */
	csrw CSR_SSCRATCH, tp

restore_all:
	RESTORE_ALL
	sret

#if IS_ENABLED(CONFIG_PREEMPT)
resume_kernel:
	REG_L s0, TASK_TI_PREEMPT_COUNT(tp)
	bnez s0, restore_all
	REG_L s0, TASK_TI_FLAGS(tp)
	andi s0, s0, _TIF_NEED_RESCHED
	beqz s0, restore_all
	call preempt_schedule_irq
	j restore_all
#endif
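
/*
 * Note: preempt_schedule_irq() expects to be entered with interrupts
 * disabled, which holds here because ret_from_exception cleared SR_SIE;
 * a nonzero preempt count means preemption is currently unsafe, so that
 * case restores immediately.
 */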

work_pending:
	/* Enter slow path for supplementary processing */
	la ra, ret_from_exception
	andi s1, s0, _TIF_NEED_RESCHED
	bnez s1, work_resched
work_notifysig:
	/* Handle pending signals and notify-resume requests */
	csrs CSR_SSTATUS, SR_SIE /* Enable interrupts for do_notify_resume() */
	move a0, sp /* pt_regs */
	move a1, s0 /* current_thread_info->flags */
	tail do_notify_resume
work_resched:
	tail schedule
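
/*
 * Both slow paths leave via tail calls with ra pointed back at
 * ret_from_exception, so when do_notify_resume() or schedule() returns,
 * control re-enters the exit path and the work flags are rechecked with
 * interrupts disabled.
 */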

/* Slow paths for ptrace. */
handle_syscall_trace_enter:
	move a0, sp
	call do_syscall_trace_enter
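	/*
	 * The tracer may have rewritten the syscall number and arguments in
	 * the pt_regs it was shown, so reload a0-a7 from the stack before
	 * dispatching.
	 */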
	REG_L a0, PT_A0(sp)
	REG_L a1, PT_A1(sp)
	REG_L a2, PT_A2(sp)
	REG_L a3, PT_A3(sp)
	REG_L a4, PT_A4(sp)
	REG_L a5, PT_A5(sp)
	REG_L a6, PT_A6(sp)
	REG_L a7, PT_A7(sp)
	j check_syscall_nr
handle_syscall_trace_exit:
	move a0, sp
	call do_syscall_trace_exit
	j ret_from_exception
END(handle_exception)

ENTRY(ret_from_fork)
	la ra, ret_from_exception
	tail schedule_tail
ENDPROC(ret_from_fork)

ENTRY(ret_from_kernel_thread)
	call schedule_tail
	/* Call fn(arg) */
	la ra, ret_from_exception
	move a0, s1
	jr s0
ENDPROC(ret_from_kernel_thread)

/*
 * Integer register context switch
 * The callee-saved registers must be saved and restored.
 *
 *   a0: previous task_struct (must be preserved across the switch)
 *   a1: next task_struct
 *
 * The value of a0 and a1 must be preserved by this function, as that's how
 * arguments are passed to schedule_tail.
 */
ENTRY(__switch_to)
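	/*
	 * Note: only ra, sp and s0-s11 need to be switched; the C calling
	 * convention lets __switch_to clobber the caller-saved registers, so
	 * they never have to live in thread_struct.
	 */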
	/* Save context into prev->thread */
	li a4, TASK_THREAD_RA
	add a3, a0, a4
	add a4, a1, a4
	REG_S ra, TASK_THREAD_RA_RA(a3)
	REG_S sp, TASK_THREAD_SP_RA(a3)
	REG_S s0, TASK_THREAD_S0_RA(a3)
	REG_S s1, TASK_THREAD_S1_RA(a3)
	REG_S s2, TASK_THREAD_S2_RA(a3)
	REG_S s3, TASK_THREAD_S3_RA(a3)
	REG_S s4, TASK_THREAD_S4_RA(a3)
	REG_S s5, TASK_THREAD_S5_RA(a3)
	REG_S s6, TASK_THREAD_S6_RA(a3)
	REG_S s7, TASK_THREAD_S7_RA(a3)
	REG_S s8, TASK_THREAD_S8_RA(a3)
	REG_S s9, TASK_THREAD_S9_RA(a3)
	REG_S s10, TASK_THREAD_S10_RA(a3)
	REG_S s11, TASK_THREAD_S11_RA(a3)
	/* Restore context from next->thread */
	REG_L ra, TASK_THREAD_RA_RA(a4)
	REG_L sp, TASK_THREAD_SP_RA(a4)
	REG_L s0, TASK_THREAD_S0_RA(a4)
	REG_L s1, TASK_THREAD_S1_RA(a4)
	REG_L s2, TASK_THREAD_S2_RA(a4)
	REG_L s3, TASK_THREAD_S3_RA(a4)
	REG_L s4, TASK_THREAD_S4_RA(a4)
	REG_L s5, TASK_THREAD_S5_RA(a4)
	REG_L s6, TASK_THREAD_S6_RA(a4)
	REG_L s7, TASK_THREAD_S7_RA(a4)
	REG_L s8, TASK_THREAD_S8_RA(a4)
	REG_L s9, TASK_THREAD_S9_RA(a4)
	REG_L s10, TASK_THREAD_S10_RA(a4)
	REG_L s11, TASK_THREAD_S11_RA(a4)
	/* Swap the CPU entry around. */
	lw a3, TASK_TI_CPU(a0)
	lw a4, TASK_TI_CPU(a1)
	sw a3, TASK_TI_CPU(a1)
	sw a4, TASK_TI_CPU(a0)
#if TASK_TI != 0
#error "TASK_TI != 0: tp will contain a 'struct thread_info', not a 'struct task_struct' so get_current() won't work."
	addi tp, a1, TASK_TI
#else
	move tp, a1
#endif
	ret
ENDPROC(__switch_to)
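
/*
 * Note: get_current() on RISC-V is just a read of tp, which is why the
 * switch above ends by moving the next task_struct pointer into tp.
 */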

	.section ".rodata"
	/* Exception vector table */
ENTRY(excp_vect_table)
	RISCV_PTR do_trap_insn_misaligned
	RISCV_PTR do_trap_insn_fault
	RISCV_PTR do_trap_insn_illegal
	RISCV_PTR do_trap_break
	RISCV_PTR do_trap_load_misaligned
	RISCV_PTR do_trap_load_fault
	RISCV_PTR do_trap_store_misaligned
	RISCV_PTR do_trap_store_fault
	RISCV_PTR do_trap_ecall_u /* system call, gets intercepted by handle_syscall */
	RISCV_PTR do_trap_ecall_s
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_trap_ecall_m
	RISCV_PTR do_page_fault   /* instruction page fault */
	RISCV_PTR do_page_fault   /* load page fault */
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_page_fault   /* store page fault */
excp_vect_table_end:
END(excp_vect_table)
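
/*
 * Note: the table is indexed by the synchronous exception code in scause
 * (0-15); the do_trap_unknown entries cover codes that are reserved in the
 * privileged spec, and excp_vect_table_end bounds the lookup in
 * handle_exception.
 */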