// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/context_tracking.h>
#include <linux/err.h>
#include <linux/compat.h>
#include <linux/rseq.h>
#include <linux/sched/debug.h> /* for show_regs */

#include <asm/kup.h>
#include <asm/cputime.h>
#include <asm/hw_irq.h>
#include <asm/interrupt.h>
#include <asm/kprobes.h>
#include <asm/paca.h>
#include <asm/ptrace.h>
#include <asm/reg.h>
#include <asm/signal.h>
#include <asm/switch_to.h>
#include <asm/syscall.h>
#include <asm/time.h>
#include <asm/tm.h>
#include <asm/unistd.h>

#if defined(CONFIG_PPC_ADV_DEBUG_REGS) && defined(CONFIG_PPC32)
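/*
 * Per-CPU stash of the kernel's DBCR0 value, saved by booke_load_dbcr0()
 * below before the user context's DBCR0 is loaded into the SPR.
 */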
unsigned long global_dbcr0[NR_CPUS];
#endif

#ifdef CONFIG_PPC_BOOK3S_64
DEFINE_STATIC_KEY_FALSE(interrupt_exit_not_reentrant);
static inline bool exit_must_hard_disable(void)
{
	return static_branch_unlikely(&interrupt_exit_not_reentrant);
}
#else
static inline bool exit_must_hard_disable(void)
{
	return true;
}
#endif

/*
 * local irqs must be disabled. Returns false if the caller must re-enable
 * them, check for new work, and try again.
 *
 * This should be called with local irqs disabled, but if they were previously
 * enabled when the interrupt handler returns (indicating a process-context /
 * synchronous interrupt) then irqs_enabled should be true.
 *
 * If restartable is true then EE/RI can be left on because interrupts are
 * handled with a restart sequence.
 */
static notrace __always_inline bool prep_irq_for_enabled_exit(bool restartable)
{
	bool must_hard_disable = (exit_must_hard_disable() || !restartable);

	/* This must be done with RI=1 because tracing may touch vmaps */
	trace_hardirqs_on();

	if (must_hard_disable)
		__hard_EE_RI_disable();

#ifdef CONFIG_PPC64
	/* This pattern matches prep_irq_for_idle */
	if (unlikely(lazy_irq_pending_nocheck())) {
		if (must_hard_disable) {
			local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
			__hard_RI_enable();
		}
		trace_hardirqs_off();

		return false;
	}
#endif
	return true;
}
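
/*
 * Load the user context's DBCR0 on BookE CPUs when returning to a context
 * that has internal debug mode (DBCR0[IDM]) enabled, stashing the previous
 * DBCR0 for 32-bit in global_dbcr0.
 */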
static notrace void booke_load_dbcr0(void)
{
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	unsigned long dbcr0 = current->thread.debug.dbcr0;

	/*
	 * Check to see if the dbcr0 register is set up to debug.
	 * Use the internal debug mode bit to do this.
	 */
	if (likely(!(dbcr0 & DBCR0_IDM)))
		return;

	mtmsr(mfmsr() & ~MSR_DE);
	if (IS_ENABLED(CONFIG_PPC32)) {
		isync();
		global_dbcr0[smp_processor_id()] = mfspr(SPRN_DBCR0);
	}
	mtspr(SPRN_DBCR0, dbcr0);
	mtspr(SPRN_DBSR, -1);
#endif
}
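
/*
 * Debug check that the (H)SRR registers still match regs->nip/msr while
 * they are marked valid in the paca. If they were clobbered (e.g., by an
 * NMI), warn once and clear the valid flag so the exit path reloads them.
 */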
static notrace void check_return_regs_valid(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_BOOK3S_64
	unsigned long trap, srr0, srr1;
	static bool warned;
	u8 *validp;
	char *h;

	if (trap_is_scv(regs))
		return;

	trap = TRAP(regs);
	// EE in HV mode sets HSRRs like 0xea0
	if (cpu_has_feature(CPU_FTR_HVMODE) && trap == INTERRUPT_EXTERNAL)
		trap = 0xea0;

	switch (trap) {
	case 0x980:
	case INTERRUPT_H_DATA_STORAGE:
	case 0xe20:
	case 0xe40:
	case INTERRUPT_HMI:
	case 0xe80:
	case 0xea0:
	case INTERRUPT_H_FAC_UNAVAIL:
	case 0x1200:
	case 0x1500:
	case 0x1600:
	case 0x1800:
		validp = &local_paca->hsrr_valid;
		if (!READ_ONCE(*validp))
			return;

		srr0 = mfspr(SPRN_HSRR0);
		srr1 = mfspr(SPRN_HSRR1);
		h = "H";

		break;
	default:
		validp = &local_paca->srr_valid;
		if (!READ_ONCE(*validp))
			return;

		srr0 = mfspr(SPRN_SRR0);
		srr1 = mfspr(SPRN_SRR1);
		h = "";
		break;
	}

	if (srr0 == regs->nip && srr1 == regs->msr)
		return;

	/*
	 * An NMI / soft-NMI interrupt may have come in after we found
	 * srr_valid and before the SRRs are loaded. The interrupt then
	 * comes in and clobbers SRRs and clears srr_valid. Then we load
	 * the SRRs here and test them above and find they don't match.
	 *
	 * Test validity again after that, to catch such false positives.
	 *
	 * This test in general will have some window for false negatives
	 * and may not catch and fix all such cases if an NMI comes in
	 * later and clobbers SRRs without clearing srr_valid, but hopefully
	 * such things will get caught most of the time, statistically
	 * enough to be able to get a warning out.
	 */
	if (!READ_ONCE(*validp))
		return;

	if (!data_race(warned)) {
		data_race(warned = true);
		printk("%sSRR0 was: %lx should be: %lx\n", h, srr0, regs->nip);
		printk("%sSRR1 was: %lx should be: %lx\n", h, srr1, regs->msr);
		show_regs(regs);
	}

	WRITE_ONCE(*validp, 0); /* fixup */
#endif
}
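
/*
 * Common exit-to-user preparation: loop handling reschedule and signal
 * work with irqs enabled, restore FP/VEC/TM register state, then prepare
 * the irq state for exit. Returns ret, possibly with _TIF_RESTOREALL added.
 */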
static notrace unsigned long
interrupt_exit_user_prepare_main(unsigned long ret, struct pt_regs *regs)
{
	unsigned long ti_flags;

again:
	ti_flags = read_thread_flags();
	while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
		local_irq_enable();
		if (ti_flags & _TIF_NEED_RESCHED) {
			schedule();
		} else {
			/*
			 * SIGPENDING must restore signal handler function
			 * argument GPRs, and some non-volatiles (e.g., r1).
			 * Restore all for now. This could be made lighter.
			 */
			if (ti_flags & _TIF_SIGPENDING)
				ret |= _TIF_RESTOREALL;
			do_notify_resume(regs, ti_flags);
		}
		local_irq_disable();
		ti_flags = read_thread_flags();
	}

	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && IS_ENABLED(CONFIG_PPC_FPU)) {
		if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
				unlikely((ti_flags & _TIF_RESTORE_TM))) {
			restore_tm_state(regs);
		} else {
			unsigned long mathflags = MSR_FP;

			if (cpu_has_feature(CPU_FTR_VSX))
				mathflags |= MSR_VEC | MSR_VSX;
			else if (cpu_has_feature(CPU_FTR_ALTIVEC))
				mathflags |= MSR_VEC;

			/*
			 * If userspace MSR has all available FP bits set,
			 * then they are live and no need to restore. If not,
			 * it means the regs were given up and restore_math
			 * may decide to restore them (to avoid taking an FP
			 * fault).
			 */
			if ((regs->msr & mathflags) != mathflags)
				restore_math(regs);
		}
	}

	check_return_regs_valid(regs);

	user_enter_irqoff();
	if (!prep_irq_for_enabled_exit(true)) {
		user_exit_irqoff();
		local_irq_enable();
		local_irq_disable();
		goto again;
	}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	local_paca->tm_scratch = regs->msr;
#endif

	booke_load_dbcr0();

	account_cpu_user_exit();

	/* Restore user access locks last */
	kuap_user_restore(regs);

	return ret;
}

/*
 * This should be called after a syscall returns, with r3 the return value
 * from the syscall. If this function returns non-zero, the system call
 * exit assembly should additionally load all GPR registers and CTR and XER
 * from the interrupt frame.
 *
 * The function graph tracer cannot trace the return side of this function,
 * because RI=0 and soft mask state is "unreconciled", so it is marked notrace.
 */
notrace unsigned long syscall_exit_prepare(unsigned long r3,
					   struct pt_regs *regs,
					   long scv)
{
	unsigned long ti_flags;
	unsigned long ret = 0;
	bool is_not_scv = !IS_ENABLED(CONFIG_PPC_BOOK3S_64) || !scv;
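
	/*
	 * scv syscalls return negative errnos directly and do not use
	 * CR0[SO] to flag an error, so the errno negation and SO fixup
	 * below applies only to the sc path.
	 */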

	CT_WARN_ON(ct_state() == CONTEXT_USER);

	kuap_assert_locked();

	regs->result = r3;

	/* Check whether the syscall is issued inside a restartable sequence */
	rseq_syscall(regs);

	ti_flags = read_thread_flags();

	if (unlikely(r3 >= (unsigned long)-MAX_ERRNO) && is_not_scv) {
		if (likely(!(ti_flags & (_TIF_NOERROR | _TIF_RESTOREALL)))) {
			r3 = -r3;
			regs->ccr |= 0x10000000; /* Set SO bit in CR */
		}
	}

	if (unlikely(ti_flags & _TIF_PERSYSCALL_MASK)) {
		if (ti_flags & _TIF_RESTOREALL)
			ret = _TIF_RESTOREALL;
		else
			regs->gpr[3] = r3;
		clear_bits(_TIF_PERSYSCALL_MASK, &current_thread_info()->flags);
	} else {
		regs->gpr[3] = r3;
	}

	if (unlikely(ti_flags & _TIF_SYSCALL_DOTRACE)) {
		do_syscall_trace_leave(regs);
		ret |= _TIF_RESTOREALL;
	}

	local_irq_disable();
	ret = interrupt_exit_user_prepare_main(ret, regs);

#ifdef CONFIG_PPC64
	regs->exit_result = ret;
#endif

	return ret;
}

#ifdef CONFIG_PPC64
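/*
 * The syscall exit path was interrupted before it completed; called from
 * the restart sequence to hard disable irqs and redo the exit work.
 */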
notrace unsigned long syscall_exit_restart(unsigned long r3, struct pt_regs *regs)
{
	/*
	 * This is called when detecting a soft-pending interrupt as well as
	 * an alternate-return interrupt. So we can't just have the alternate
	 * return path clear SRR1[MSR] and set PACA_IRQ_HARD_DIS (unless
	 * the soft-pending case were to fix things up as well). RI might be
	 * disabled, in which case it gets re-enabled by __hard_irq_disable().
	 */
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

#ifdef CONFIG_PPC_BOOK3S_64
	set_kuap(AMR_KUAP_BLOCKED);
#endif

	trace_hardirqs_off();
	user_exit_irqoff();
	account_cpu_user_entry();

	BUG_ON(!user_mode(regs));

	regs->exit_result = interrupt_exit_user_prepare_main(regs->exit_result, regs);

	return regs->exit_result;
}
#endif
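
/*
 * Prepare to return to userspace from an interrupt. The returned flags
 * tell the exit assembly whether it must restore the full register set.
 */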
notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs)
{
	unsigned long ret;

	BUG_ON(regs_is_unrecoverable(regs));
	BUG_ON(arch_irq_disabled_regs(regs));
	CT_WARN_ON(ct_state() == CONTEXT_USER);

	/*
	 * We don't need to restore AMR on the way back to userspace for KUAP.
	 * AMR can only have been unlocked if we interrupted the kernel.
	 */
	kuap_assert_locked();

	local_irq_disable();

	ret = interrupt_exit_user_prepare_main(0, regs);

#ifdef CONFIG_PPC64
	regs->exit_result = ret;
#endif

	return ret;
}

void preempt_schedule_irq(void);
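
/*
 * Prepare to return from an interrupt to a kernel context. Returns
 * non-zero in the emulated stack store case (_TIF_EMULATE_STACK_STORE),
 * so the exit assembly can handle the clobbered interrupt frame.
 */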
notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs)
{
	unsigned long ret = 0;
	unsigned long kuap;
	bool stack_store = read_thread_flags() & _TIF_EMULATE_STACK_STORE;

	if (regs_is_unrecoverable(regs))
		unrecoverable_exception(regs);
	/*
	 * CT_WARN_ON comes here via program_check_exception, so avoid
	 * recursion.
	 *
	 * Skip the assertion on PMIs on 64e to work around a problem caused
	 * by NMI PMIs incorrectly taking this interrupt return path, it's
	 * possible for this to hit after interrupt exit to user switches
	 * context to user. See also the comment in the performance monitor
	 * handler in exceptions-64e.S
	 */
	if (!IS_ENABLED(CONFIG_PPC_BOOK3E_64) &&
	    TRAP(regs) != INTERRUPT_PROGRAM &&
	    TRAP(regs) != INTERRUPT_PERFMON)
		CT_WARN_ON(ct_state() == CONTEXT_USER);

	kuap = kuap_get_and_assert_locked();

	local_irq_disable();

	if (!arch_irq_disabled_regs(regs)) {
		/* Returning to a kernel context with local irqs enabled. */
		WARN_ON_ONCE(!(regs->msr & MSR_EE));
again:
		if (IS_ENABLED(CONFIG_PREEMPT)) {
			/* Return to preemptible kernel context */
			if (unlikely(read_thread_flags() & _TIF_NEED_RESCHED)) {
				if (preempt_count() == 0)
					preempt_schedule_irq();
			}
		}

		check_return_regs_valid(regs);

		/*
		 * Stack store exit can't be restarted because the interrupt
		 * stack frame might have been clobbered.
		 */
		if (!prep_irq_for_enabled_exit(unlikely(stack_store))) {
			/*
			 * Replay pending soft-masked interrupts now. Don't
			 * just local_irq_enable(); local_irq_disable(); because
			 * if we are returning from an asynchronous interrupt
			 * here, another one might hit after irqs are enabled,
			 * and it would exit via this same path allowing
			 * another to fire, and so on unbounded.
			 */
			hard_irq_disable();
			replay_soft_interrupts();
			/* Took an interrupt, may have more exit work to do. */
			goto again;
		}
#ifdef CONFIG_PPC64
		/*
		 * An interrupt may clear MSR[EE] and set this concurrently,
		 * but it will be marked pending and the exit will be retried.
		 * This leaves a racy window where MSR[EE]=0 and HARD_DIS is
		 * clear, until interrupt_exit_kernel_restart() calls
		 * hard_irq_disable(), which will set HARD_DIS again.
		 */
		local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;

	} else {
		check_return_regs_valid(regs);

		if (unlikely(stack_store))
			__hard_EE_RI_disable();
#endif /* CONFIG_PPC64 */
	}

	if (unlikely(stack_store)) {
		clear_bits(_TIF_EMULATE_STACK_STORE, &current_thread_info()->flags);
		ret = 1;
	}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	local_paca->tm_scratch = regs->msr;
#endif

	/*
	 * 64s does not want to mfspr(SPRN_AMR) here, because this comes after
	 * mtmsr, which would cause Read-After-Write stalls. Hence, take the
	 * AMR value from the check above.
	 */
	kuap_kernel_restore(regs, kuap);

	return ret;
}

#ifdef CONFIG_PPC64
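/*
 * Restart the interrupt-exit-to-user path after the restart sequence
 * detected a pending interrupt: hard disable irqs and redo the exit
 * preparation.
 */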
notrace unsigned long interrupt_exit_user_restart(struct pt_regs *regs)
{
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

#ifdef CONFIG_PPC_BOOK3S_64
	set_kuap(AMR_KUAP_BLOCKED);
#endif

	trace_hardirqs_off();
	user_exit_irqoff();
	account_cpu_user_entry();

	BUG_ON(!user_mode(regs));

	regs->exit_result |= interrupt_exit_user_prepare(regs);

	return regs->exit_result;
}

/*
 * No real need to return a value here because the stack store case does not
 * get restarted.
 */
notrace unsigned long interrupt_exit_kernel_restart(struct pt_regs *regs)
{
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

#ifdef CONFIG_PPC_BOOK3S_64
	set_kuap(AMR_KUAP_BLOCKED);
#endif

	if (regs->softe == IRQS_ENABLED)
		trace_hardirqs_off();

	BUG_ON(user_mode(regs));

	return interrupt_exit_kernel_prepare(regs);
}
#endif