// SPDX-License-Identifier: GPL-2.0

#include <linux/context_tracking.h>
#include <linux/entry-common.h>
#include <linux/highmem.h>
#include <linux/livepatch.h>
#include <linux/audit.h>
#include <linux/tick.h>

#include "common.h"

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

/* See comment for enter_from_user_mode() in entry-common.h */
static __always_inline void __enter_from_user_mode(struct pt_regs *regs)
{
	arch_check_user_regs(regs);
	lockdep_hardirqs_off(CALLER_ADDR0);

	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit_irqoff();

	instrumentation_begin();
	trace_hardirqs_off_finish();
	instrumentation_end();
}

void noinstr enter_from_user_mode(struct pt_regs *regs)
{
	__enter_from_user_mode(regs);
}

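/* Emit the audit entry record for this syscall when an audit context is active */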
static inline void syscall_enter_audit(struct pt_regs *regs, long syscall)
{
	if (unlikely(audit_context())) {
		unsigned long args[6];

		syscall_get_arguments(current, regs, args);
		audit_syscall_entry(syscall, args[0], args[1], args[2], args[3]);
	}
}

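/*
 * Slow path for syscall entry work: syscall user dispatch, ptrace,
 * seccomp, the syscall entry tracepoint and audit. Returns the possibly
 * rewritten syscall number, or -1 when the syscall should be skipped.
 */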
static long syscall_trace_enter(struct pt_regs *regs, long syscall,
				unsigned long work)
{
	long ret = 0;

	/*
	 * Handle Syscall User Dispatch. This must come first, since
	 * the ABI here can be something that doesn't make sense for
	 * other syscall_work features.
	 */
	if (work & SYSCALL_WORK_SYSCALL_USER_DISPATCH) {
		if (syscall_user_dispatch(regs))
			return -1L;
	}

	/* Handle ptrace and syscall emulation */
	if (work & (SYSCALL_WORK_SYSCALL_TRACE | SYSCALL_WORK_SYSCALL_EMU)) {
		ret = arch_syscall_enter_tracehook(regs);
		if (ret || (work & SYSCALL_WORK_SYSCALL_EMU))
			return -1L;
	}

	/* Do seccomp after ptrace, to catch any tracer changes. */
	if (work & SYSCALL_WORK_SECCOMP) {
		ret = __secure_computing(NULL);
		if (ret == -1L)
			return ret;
	}

	/* Either of the above might have changed the syscall number */
	syscall = syscall_get_nr(current, regs);

	if (unlikely(work & SYSCALL_WORK_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, syscall);

	syscall_enter_audit(regs, syscall);

	return ret ? : syscall;
}

static __always_inline long
__syscall_enter_from_user_work(struct pt_regs *regs, long syscall)
{
	unsigned long work = READ_ONCE(current_thread_info()->syscall_work);

	if (work & SYSCALL_WORK_ENTER)
		syscall = syscall_trace_enter(regs, syscall, work);

	return syscall;
}

long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall)
{
	return __syscall_enter_from_user_work(regs, syscall);
}

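/*
 * All-in-one variant: establishes kernel context, enables interrupts and
 * runs the syscall entry work. Architectures that need to do architecture
 * specific work in between can use syscall_enter_from_user_mode_prepare()
 * followed by syscall_enter_from_user_mode_work() instead.
 */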
noinstr long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall)
{
	long ret;

	__enter_from_user_mode(regs);

	instrumentation_begin();
	local_irq_enable();
	ret = __syscall_enter_from_user_work(regs, syscall);
	instrumentation_end();

	return ret;
}

noinstr void syscall_enter_from_user_mode_prepare(struct pt_regs *regs)
{
	__enter_from_user_mode(regs);
	instrumentation_begin();
	local_irq_enable();
	instrumentation_end();
}

/* See comment for exit_to_user_mode() in entry-common.h */
static __always_inline void __exit_to_user_mode(void)
{
	instrumentation_begin();
	trace_hardirqs_on_prepare();
	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	instrumentation_end();

	user_enter_irqoff();
	arch_exit_to_user_mode();
	lockdep_hardirqs_on(CALLER_ADDR0);
}

void noinstr exit_to_user_mode(void)
{
	__exit_to_user_mode();
}

/* Workaround to allow gradual conversion of architecture code */
void __weak arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal) { }

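/* Handle TIF_NOTIFY_SIGNAL and pending signal delivery / restart work */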
static void handle_signal_work(struct pt_regs *regs, unsigned long ti_work)
{
	if (ti_work & _TIF_NOTIFY_SIGNAL)
		tracehook_notify_signal();

	arch_do_signal_or_restart(regs, ti_work & _TIF_SIGPENDING);
}

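/*
 * Process TIF work bits with interrupts enabled and loop until no work
 * is left. The work flags are re-read with interrupts disabled so nothing
 * can sneak in between the last evaluation and the return to user space.
 */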
static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
					    unsigned long ti_work)
{
	/*
	 * Before returning to user space ensure that all pending work
	 * items have been completed.
	 */
	while (ti_work & EXIT_TO_USER_MODE_WORK) {

		local_irq_enable_exit_to_user(ti_work);

		if (ti_work & _TIF_NEED_RESCHED)
			schedule();

		if (ti_work & _TIF_UPROBE)
			uprobe_notify_resume(regs);

		if (ti_work & _TIF_PATCH_PENDING)
			klp_update_patch_state(current);

		if (ti_work & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
			handle_signal_work(regs, ti_work);

		if (ti_work & _TIF_NOTIFY_RESUME) {
			tracehook_notify_resume(regs);
			rseq_handle_notify_resume(NULL, regs);
		}

		/* Architecture specific TIF work */
		arch_exit_to_user_mode_work(regs, ti_work);

		/*
		 * Disable interrupts and reevaluate the work flags as they
		 * might have changed while interrupts and preemption were
		 * enabled above.
		 */
		local_irq_disable_exit_to_user();

		/* Check if any of the above work has queued a deferred wakeup */
		tick_nohz_user_enter_prepare();

		ti_work = READ_ONCE(current_thread_info()->flags);
	}

	/* Return the latest work state for arch_exit_to_user_mode() */
	return ti_work;
}

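/*
 * Final preparation before returning to user space: run any pending TIF
 * work, give the architecture a last chance and verify that interrupts
 * are still disabled and no locks are held.
 */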
static void exit_to_user_mode_prepare(struct pt_regs *regs)
{
	unsigned long ti_work = READ_ONCE(current_thread_info()->flags);

	lockdep_assert_irqs_disabled();

	/* Flush pending rcuog wakeup before the last need_resched() check */
	tick_nohz_user_enter_prepare();

	if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
		ti_work = exit_to_user_mode_loop(regs, ti_work);

	arch_exit_to_user_mode_prepare(regs, ti_work);

	/* Ensure that the address limit is intact and no locks are held */
	addr_limit_user_check();
	kmap_assert_nomap();
	lockdep_assert_irqs_disabled();
	lockdep_sys_exit();
}

/*
 * If SYSCALL_EMU is set, then the only reason to report is when
 * SINGLESTEP is set (i.e. PTRACE_SYSEMU_SINGLESTEP). This syscall
 * instruction has already been reported in syscall_enter_from_user_mode().
 */
static inline bool report_single_step(unsigned long work)
{
	if (work & SYSCALL_WORK_SYSCALL_EMU)
		return false;

	return work & SYSCALL_WORK_SYSCALL_EXIT_TRAP;
}

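/*
 * One-time syscall exit work: audit, the syscall exit tracepoint and
 * ptrace single-step / syscall-exit reporting.
 */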
static void syscall_exit_work(struct pt_regs *regs, unsigned long work)
{
	bool step;

	/*
	 * If the syscall was rolled back due to syscall user dispatching,
	 * then the tracers below are not invoked for the same reason as
	 * the entry side was not invoked in syscall_trace_enter(): The ABI
	 * of these syscalls is unknown.
	 */
	if (work & SYSCALL_WORK_SYSCALL_USER_DISPATCH) {
		if (unlikely(current->syscall_dispatch.on_dispatch)) {
			current->syscall_dispatch.on_dispatch = false;
			return;
		}
	}

	audit_syscall_exit(regs);

	if (work & SYSCALL_WORK_SYSCALL_TRACEPOINT)
		trace_sys_exit(regs, syscall_get_return_value(current, regs));

	step = report_single_step(work);
	if (step || work & SYSCALL_WORK_SYSCALL_TRACE)
		arch_syscall_exit_tracehook(regs, step);
}

/*
 * Syscall specific exit to user mode preparation. Runs with interrupts
 * enabled.
 */
static void syscall_exit_to_user_mode_prepare(struct pt_regs *regs)
{
	unsigned long work = READ_ONCE(current_thread_info()->syscall_work);
	unsigned long nr = syscall_get_nr(current, regs);

	CT_WARN_ON(ct_state() != CONTEXT_KERNEL);

	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
		if (WARN(irqs_disabled(), "syscall %lu left IRQs disabled", nr))
			local_irq_enable();
	}

	rseq_syscall(regs);

	/*
	 * Do one-time syscall specific work. If these work items are
	 * enabled, we want to run them exactly once per syscall exit with
	 * interrupts enabled.
	 */
	if (unlikely(work & SYSCALL_WORK_EXIT))
		syscall_exit_work(regs, work);
}

static __always_inline void __syscall_exit_to_user_mode_work(struct pt_regs *regs)
{
	syscall_exit_to_user_mode_prepare(regs);
	local_irq_disable_exit_to_user();
	exit_to_user_mode_prepare(regs);
}

void syscall_exit_to_user_mode_work(struct pt_regs *regs)
{
	__syscall_exit_to_user_mode_work(regs);
}

__visible noinstr void syscall_exit_to_user_mode(struct pt_regs *regs)
{
	instrumentation_begin();
	__syscall_exit_to_user_mode_work(regs);
	instrumentation_end();
	__exit_to_user_mode();
}

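/*
 * Roughly, an architecture's syscall entry code is expected to pair the
 * helpers above like this (simplified sketch, details are arch specific):
 *
 *	nr = syscall_enter_from_user_mode(regs, nr);
 *	if (nr != -1)
 *		invoke the syscall
 *	syscall_exit_to_user_mode(regs);
 */
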
noinstr void irqentry_enter_from_user_mode(struct pt_regs *regs)
{
	__enter_from_user_mode(regs);
}

noinstr void irqentry_exit_to_user_mode(struct pt_regs *regs)
{
	instrumentation_begin();
	exit_to_user_mode_prepare(regs);
	instrumentation_end();
	__exit_to_user_mode();
}

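/*
 * Establish kernel state for an interrupt or exception entry. The returned
 * state must be handed to the matching irqentry_exit() call.
 */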
noinstr irqentry_state_t irqentry_enter(struct pt_regs *regs)
{
	irqentry_state_t ret = {
		.exit_rcu = false,
	};

	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);
		return ret;
	}

	/*
	 * If this entry hit the idle task invoke rcu_irq_enter() whether
	 * RCU is watching or not.
	 *
	 * Interrupts can nest when the first interrupt invokes softirq
	 * processing on return which enables interrupts.
	 *
	 * Scheduler ticks in the idle task can mark quiescent state and
	 * terminate a grace period, if and only if the timer interrupt is
	 * not nested into another interrupt.
	 *
	 * Checking for rcu_is_watching() here would prevent the nesting
	 * interrupt from invoking rcu_irq_enter(). If that nested interrupt
	 * is the tick then rcu_flavor_sched_clock_irq() would wrongfully
	 * assume that it is the first interrupt and eventually claim
	 * quiescent state and end grace periods prematurely.
	 *
	 * Unconditionally invoke rcu_irq_enter() so RCU state stays
	 * consistent.
	 *
	 * TINY_RCU does not support EQS, so let the compiler eliminate
	 * this part when enabled.
	 */
	if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
		/*
		 * If RCU is not watching then the same careful
		 * sequence vs. lockdep and tracing is required
		 * as in irqentry_enter_from_user_mode().
		 */
		lockdep_hardirqs_off(CALLER_ADDR0);
		rcu_irq_enter();
		instrumentation_begin();
		trace_hardirqs_off_finish();
		instrumentation_end();

		ret.exit_rcu = true;
		return ret;
	}

	/*
	 * If RCU is watching then RCU only wants to check whether it needs
	 * to restart the tick in NOHZ mode. rcu_irq_enter_check_tick()
	 * already contains a warning when RCU is not watching, so no point
	 * in having another one here.
	 */
	lockdep_hardirqs_off(CALLER_ADDR0);
	instrumentation_begin();
	rcu_irq_enter_check_tick();
	trace_hardirqs_off_finish();
	instrumentation_end();

	return ret;
}

void irqentry_exit_cond_resched(void)
{
	if (!preempt_count()) {
		/* Sanity check RCU and thread stack */
		rcu_irq_exit_check_preempt();
		if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
			WARN_ON_ONCE(!on_thread_stack());
		if (need_resched())
			preempt_schedule_irq();
	}
}
#ifdef CONFIG_PREEMPT_DYNAMIC
DEFINE_STATIC_CALL(irqentry_exit_cond_resched, irqentry_exit_cond_resched);
#endif

noinstr void irqentry_exit(struct pt_regs *regs, irqentry_state_t state)
{
	lockdep_assert_irqs_disabled();

	/* Check whether this returns to user mode */
	if (user_mode(regs)) {
		irqentry_exit_to_user_mode(regs);
	} else if (!regs_irqs_disabled(regs)) {
		/*
		 * If RCU was not watching on entry this needs to be done
		 * carefully and needs the same ordering of lockdep/tracing
		 * and RCU as the return to user mode path.
		 */
		if (state.exit_rcu) {
			instrumentation_begin();
			/* Tell the tracer that IRET will enable interrupts */
			trace_hardirqs_on_prepare();
			lockdep_hardirqs_on_prepare(CALLER_ADDR0);
			instrumentation_end();
			rcu_irq_exit();
			lockdep_hardirqs_on(CALLER_ADDR0);
			return;
		}

		instrumentation_begin();
		if (IS_ENABLED(CONFIG_PREEMPTION)) {
#ifdef CONFIG_PREEMPT_DYNAMIC
			static_call(irqentry_exit_cond_resched)();
#else
			irqentry_exit_cond_resched();
#endif
		}
		/* Covers both tracing and lockdep */
		trace_hardirqs_on();
		instrumentation_end();
	} else {
		/*
		 * IRQ flags state is correct already. Just tell RCU if it
		 * was not watching on entry.
		 */
		if (state.exit_rcu)
			rcu_irq_exit();
	}
}

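/*
 * NMI-safe variants of the entry/exit state handling. An architecture's
 * NMI handler is expected to bracket its work roughly like:
 *
 *	state = irqentry_nmi_enter(regs);
 *	... handle the NMI ...
 *	irqentry_nmi_exit(regs, state);
 */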
irqentry_state_t noinstr irqentry_nmi_enter(struct pt_regs *regs)
{
	irqentry_state_t irq_state;

	irq_state.lockdep = lockdep_hardirqs_enabled();

	__nmi_enter();
	lockdep_hardirqs_off(CALLER_ADDR0);
	lockdep_hardirq_enter();
	rcu_nmi_enter();

	instrumentation_begin();
	trace_hardirqs_off_finish();
	ftrace_nmi_enter();
	instrumentation_end();

	return irq_state;
}

void noinstr irqentry_nmi_exit(struct pt_regs *regs, irqentry_state_t irq_state)
{
	instrumentation_begin();
	ftrace_nmi_exit();
	if (irq_state.lockdep) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	}
	instrumentation_end();

	rcu_nmi_exit();
	lockdep_hardirq_exit();
	if (irq_state.lockdep)
		lockdep_hardirqs_on(CALLER_ADDR0);
	__nmi_exit();
}