1 /* SPDX-License-Identifier: GPL-2.0 */
3 * linux/arch/x86_64/entry.S
5 * Copyright (C) 1991, 1992 Linus Torvalds
6 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
7 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
9 * entry.S contains the system-call and fault low-level handling routines.
11 * Some of this is documented in Documentation/arch/x86/entry_64.rst
13 * A note on terminology:
14 * - iret frame: Architecture defined interrupt frame from SS to RIP
15 * at the top of the kernel process stack.
18 * - SYM_FUNC_START/END: Define functions in the symbol table.
19 * - idtentry: Define exception entry points.
21 #include <linux/export.h>
22 #include <linux/linkage.h>
23 #include <asm/segment.h>
24 #include <asm/cache.h>
25 #include <asm/errno.h>
26 #include <asm/asm-offsets.h>
28 #include <asm/unistd.h>
29 #include <asm/thread_info.h>
30 #include <asm/hw_irq.h>
31 #include <asm/page_types.h>
32 #include <asm/irqflags.h>
33 #include <asm/paravirt.h>
34 #include <asm/percpu.h>
37 #include <asm/pgtable_types.h>
38 #include <asm/frame.h>
39 #include <asm/trapnr.h>
40 #include <asm/nospec-branch.h>
41 #include <asm/fsgsbase.h>
42 #include <linux/err.h>
47 .section .entry.text, "ax"
50 * 64-bit SYSCALL instruction entry. Up to 6 arguments in registers.
52 * This is the only entry point used for 64-bit system calls. The
53 * hardware interface is reasonably well designed and the register to
54 * argument mapping Linux uses fits well with the registers that are
55 * available when SYSCALL is used.
57 * SYSCALL instructions can be found inlined in libc implementations as
58 * well as some other programs and libraries. There are also a handful
59 * of SYSCALL instructions in the vDSO used, for example, as a
60 * clock_gettimeofday fallback.
62 * 64-bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
63 * then loads new ss, cs, and rip from previously programmed MSRs.
64 * rflags gets masked by a value from another MSR (so CLD and CLAC
65 * are not needed). SYSCALL does not save anything on the stack
66 * and does not change rsp.
69 * rax system call number
71 * r11 saved rflags (note: r11 is a callee-clobbered register in C ABI)
75 * r10 arg3 (needs to be moved to rcx to conform to C ABI)
78 * (note: r12-r15, rbp, rbx are callee-preserved in C ABI)
80 * Only called from user space.
82 * When the user can change pt_regs->foo, always force IRET. That is because
83 * IRET deals with non-canonical addresses better. SYSRET has trouble
84 * with them due to bugs in both AMD and Intel CPUs.
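/*
 * Purely illustrative, not part of the kernel: a minimal userspace .S
 * (assumed to be built standalone, e.g. "gcc -nostdlib example.S") showing
 * the convention described above -- syscall number in rax, arguments in
 * rdi/rsi/rdx (then r10/r8/r9), and rcx/r11 clobbered by the SYSCALL
 * instruction itself.
 */
#if 0	/* example only, never assembled as part of entry.S */
	.text
	.globl	_start
_start:
	movq	$1, %rax		/* __NR_write */
	movq	$1, %rdi		/* fd = stdout */
	leaq	msg(%rip), %rsi		/* buf */
	movq	$6, %rdx		/* count */
	syscall				/* clobbers rcx and r11 */
	movq	$60, %rax		/* __NR_exit */
	xorl	%edi, %edi		/* status = 0 */
	syscall
msg:
	.ascii	"hello\n"
#endif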
87 SYM_CODE_START(entry_SYSCALL_64)
92 /* tss.sp2 is scratch space. */
93 movq %rsp, PER_CPU_VAR(cpu_tss_rw + TSS_sp2)
94 SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
95 movq PER_CPU_VAR(pcpu_hot + X86_top_of_stack), %rsp
97 SYM_INNER_LABEL(entry_SYSCALL_64_safe_stack, SYM_L_GLOBAL)
100 /* Construct struct pt_regs on stack */
101 pushq $__USER_DS /* pt_regs->ss */
102 pushq PER_CPU_VAR(cpu_tss_rw + TSS_sp2) /* pt_regs->sp */
103 pushq %r11 /* pt_regs->flags */
104 pushq $__USER_CS /* pt_regs->cs */
105 pushq %rcx /* pt_regs->ip */
106 SYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL)
107 pushq %rax /* pt_regs->orig_ax */
109 PUSH_AND_CLEAR_REGS rax=$-ENOSYS
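/*
 * At this point the whole struct pt_regs is on the stack: the five
 * hardware-frame words pushed above (ss, sp, flags, cs, ip), orig_ax
 * holding the syscall number, and the general purpose registers saved by
 * PUSH_AND_CLEAR_REGS. The rax=$-ENOSYS argument preloads the saved
 * pt_regs->ax slot, so an out-of-range syscall number falls back to
 * returning -ENOSYS without extra work.
 */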
113 /* Sign extend the lower 32 bits as syscall numbers are treated as int */
116 /* clobbers %rax, make sure it is after saving the syscall nr */
121 call do_syscall_64 /* returns with IRQs disabled */
124 * Try to use SYSRET instead of IRET if we're returning to
125 * a completely clean 64-bit userspace context. If we're not,
126 * go to the slow exit path.
127 * In the Xen PV case we must use iret anyway.
130 ALTERNATIVE "testb %al, %al; jz swapgs_restore_regs_and_return_to_usermode", \
131 "jmp swapgs_restore_regs_and_return_to_usermode", X86_FEATURE_XENPV
134 * We win! This label is here just for ease of understanding
135 * perf profiles. Nothing jumps here.
137 syscall_return_via_sysret:
142 * Now all regs are restored except RSP and RDI.
143 * Save old stack pointer and switch to trampoline stack.
146 movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
147 UNWIND_HINT_END_OF_STACK
149 pushq RSP-RDI(%rdi) /* RSP */
150 pushq (%rdi) /* RDI */
153 * We are on the trampoline stack. All regs except RDI are live.
154 * We can do future final exit work right here.
156 STACKLEAK_ERASE_NOCLOBBER
158 SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
162 SYM_INNER_LABEL(entry_SYSRETQ_unsafe_stack, SYM_L_GLOBAL)
167 SYM_INNER_LABEL(entry_SYSRETQ_end, SYM_L_GLOBAL)
170 SYM_CODE_END(entry_SYSCALL_64)
176 .pushsection .text, "ax"
177 SYM_FUNC_START(__switch_to_asm)
179 * Save callee-saved registers
180 * This must match the order in inactive_task_frame
190 movq %rsp, TASK_threadsp(%rdi)
191 movq TASK_threadsp(%rsi), %rsp
193 #ifdef CONFIG_STACKPROTECTOR
194 movq TASK_stack_canary(%rsi), %rbx
195 movq %rbx, PER_CPU_VAR(fixed_percpu_data) + FIXED_stack_canary
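/*
 * The stack protector canary is read by compiler-generated code via the
 * GS segment from per-cpu fixed_percpu_data, so it has to be refreshed
 * with the incoming task's value on every context switch.
 */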
199 * When switching from a shallower to a deeper call stack
200 * the RSB may either underflow or use entries populated
201 * with userspace addresses. On CPUs where those concerns
202 * exist, overwrite the RSB with entries which capture
203 * speculative execution to prevent attack.
205 FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
207 /* restore callee-saved registers */
216 SYM_FUNC_END(__switch_to_asm)
220 * A newly forked process directly context switches into this address.
222 * rax: prev task we switched from
223 * rbx: kernel thread func (NULL for user thread)
224 * r12: kernel thread arg
226 .pushsection .text, "ax"
227 SYM_CODE_START(ret_from_fork_asm)
229 * This is the start of the kernel stack; even though there's a
230 * register set at the top, the regset isn't necessarily coherent
231 * (consider kthreads) and one cannot unwind further.
233 * This ensures stack unwinds of kernel threads terminate in a known good state.
236 UNWIND_HINT_END_OF_STACK
237 ANNOTATE_NOENDBR // copy_thread
240 movq %rax, %rdi /* prev */
241 movq %rsp, %rsi /* regs */
242 movq %rbx, %rdx /* fn */
243 movq %r12, %rcx /* fn_arg */
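/*
 * The four moves above place prev, regs, fn and fn_arg in rdi, rsi, rdx
 * and rcx, the first four integer argument registers of the SysV calling
 * convention, ahead of the call into the C ret_from_fork().
 */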
247 * Set the stack state to what is expected for the target function
248 * -- at this point the register set should be a valid user set
249 * and unwind should work normally.
252 jmp swapgs_restore_regs_and_return_to_usermode
253 SYM_CODE_END(ret_from_fork_asm)
256 .macro DEBUG_ENTRY_ASSERT_IRQS_OFF
257 #ifdef CONFIG_DEBUG_ENTRY
260 testl $X86_EFLAGS_IF, %eax
268 SYM_CODE_START(xen_error_entry)
271 PUSH_AND_CLEAR_REGS save_ret=1
272 ENCODE_FRAME_POINTER 8
273 UNTRAIN_RET_FROM_CALL
275 SYM_CODE_END(xen_error_entry)
278 * idtentry_body - Macro to emit code calling the C function
279 * @cfunc: C function to be called
280 * @has_error_code: Hardware pushed error code on stack
282 .macro idtentry_body cfunc has_error_code:req
285 * Call error_entry() and switch to the task stack if from userspace.
287 * When in XENPV, it is already in the task stack, and it can't fault
288 * for native_iret() nor native_load_gs_index() since XENPV uses its
289 * own pvops for IRET and load_gs_index(). And it doesn't need to
290 * switch the CR3. So it can skip invoking error_entry().
292 ALTERNATIVE "call error_entry; movq %rax, %rsp", \
293 "call xen_error_entry", X86_FEATURE_XENPV
298 movq %rsp, %rdi /* pt_regs pointer into 1st argument*/
300 .if \has_error_code == 1
301 movq ORIG_RAX(%rsp), %rsi /* get error code into 2nd argument*/
302 movq $-1, ORIG_RAX(%rsp) /* no syscall to restart */
307 /* For some configurations \cfunc ends up being a noreturn. */
314 * idtentry - Macro to generate entry stubs for simple IDT entries
315 * @vector: Vector number
316 * @asmsym: ASM symbol for the entry point
317 * @cfunc: C function to be called
318 * @has_error_code: Hardware pushed error code on stack
320 * The macro emits code to set up the kernel context for straightforward
321 * and simple IDT entries. No IST stack, no paranoid entry checks.
323 .macro idtentry vector asmsym cfunc has_error_code:req
324 SYM_CODE_START(\asmsym)
326 .if \vector == X86_TRAP_BP
327 /* #BP advances %rip to the next instruction */
328 UNWIND_HINT_IRET_ENTRY offset=\has_error_code*8 signal=0
330 UNWIND_HINT_IRET_ENTRY offset=\has_error_code*8
337 .if \has_error_code == 0
338 pushq $-1 /* ORIG_RAX: no syscall to restart */
341 .if \vector == X86_TRAP_BP
343 * If coming from kernel space, create a 6-word gap to allow the
344 * int3 handler to emulate a call instruction.
346 testb $3, CS-ORIG_RAX(%rsp)
347 jnz .Lfrom_usermode_no_gap_\@
351 UNWIND_HINT_IRET_REGS offset=8
352 .Lfrom_usermode_no_gap_\@:
355 idtentry_body \cfunc \has_error_code
357 _ASM_NOKPROBE(\asmsym)
358 SYM_CODE_END(\asmsym)
362 * Interrupt entry/exit.
364 * The interrupt stubs push (vector) onto the stack, which is the error_code
365 * position of idtentry exceptions, and jump to one of the two idtentry points
368 * common_interrupt is a hotpath, align it to a cache line
370 .macro idtentry_irq vector cfunc
371 .p2align CONFIG_X86_L1_CACHE_SHIFT
372 idtentry \vector asm_\cfunc \cfunc has_error_code=1
376 * System vectors which invoke their handlers directly and are not
377 * going through the regular common device interrupt handling code.
379 .macro idtentry_sysvec vector cfunc
380 idtentry \vector asm_\cfunc \cfunc has_error_code=0
384 * idtentry_mce_db - Macro to generate entry stubs for #MC and #DB
385 * @vector: Vector number
386 * @asmsym: ASM symbol for the entry point
387 * @cfunc: C function to be called
389 * The macro emits code to set up the kernel context for #MC and #DB
391 * If the entry comes from user space it uses the normal entry path
392 * including the return to user space work and preemption checks on exit.
395 * If it hits in kernel mode then it needs to go through the paranoid
396 * entry as the exception can hit any random state. No preemption
397 * check on exit to keep the paranoid path simple.
399 .macro idtentry_mce_db vector asmsym cfunc
400 SYM_CODE_START(\asmsym)
401 UNWIND_HINT_IRET_ENTRY
406 pushq $-1 /* ORIG_RAX: no syscall to restart */
409 * If the entry is from userspace, switch stacks and treat it as
412 testb $3, CS-ORIG_RAX(%rsp)
413 jnz .Lfrom_usermode_switch_stack_\@
415 /* paranoid_entry returns GS information for paranoid_exit in EBX. */
420 movq %rsp, %rdi /* pt_regs pointer */
426 /* Switch to the regular task stack and use the noist entry point */
427 .Lfrom_usermode_switch_stack_\@:
428 idtentry_body noist_\cfunc, has_error_code=0
430 _ASM_NOKPROBE(\asmsym)
431 SYM_CODE_END(\asmsym)
434 #ifdef CONFIG_AMD_MEM_ENCRYPT
436 * idtentry_vc - Macro to generate entry stub for #VC
437 * @vector: Vector number
438 * @asmsym: ASM symbol for the entry point
439 * @cfunc: C function to be called
441 * The macro emits code to set up the kernel context for #VC. The #VC handler
442 * runs on an IST stack and needs to be able to cause nested #VC exceptions.
444 * To make this work the #VC entry code tries its best to pretend it doesn't use
445 * an IST stack by switching to the task stack if coming from user-space (which
446 * includes early SYSCALL entry path) or back to the stack in the IRET frame if
447 * entered from kernel-mode.
449 * If entered from kernel-mode the return stack is validated first, and if it is
450 * not safe to use (e.g. because it points to the entry stack) the #VC handler
451 * will switch to a fall-back stack (VC2) and call a special handler function.
453 * The macro is only used for one vector, but it is planned to be extended in
454 * the future for the #HV exception.
456 .macro idtentry_vc vector asmsym cfunc
457 SYM_CODE_START(\asmsym)
458 UNWIND_HINT_IRET_ENTRY
464 * If the entry is from userspace, switch stacks and treat it as
467 testb $3, CS-ORIG_RAX(%rsp)
468 jnz .Lfrom_usermode_switch_stack_\@
471 * paranoid_entry returns SWAPGS flag for paranoid_exit in EBX.
472 * EBX == 0 -> SWAPGS, EBX == 1 -> no SWAPGS
479 * Switch off the IST stack to make it free for nested exceptions. The
480 * vc_switch_off_ist() function will switch back to the interrupted
481 * stack if it is safe to do so. If not, it switches to the VC fall-back stack.
484 movq %rsp, %rdi /* pt_regs pointer */
485 call vc_switch_off_ist
486 movq %rax, %rsp /* Switch to new stack */
492 movq ORIG_RAX(%rsp), %rsi /* get error code into 2nd argument*/
493 movq $-1, ORIG_RAX(%rsp) /* no syscall to restart */
495 movq %rsp, %rdi /* pt_regs pointer */
500 * No need to switch back to the IST stack. The current stack is either
501 * identical to the stack in the IRET frame or the VC fall-back stack,
502 * so it is definitely mapped even with PTI enabled.
506 /* Switch to the regular task stack */
507 .Lfrom_usermode_switch_stack_\@:
508 idtentry_body user_\cfunc, has_error_code=1
510 _ASM_NOKPROBE(\asmsym)
511 SYM_CODE_END(\asmsym)
516 * Double fault entry. Straight paranoid. No checks from which context
517 * this comes because for the espfix induced #DF this would do the wrong thing.
520 .macro idtentry_df vector asmsym cfunc
521 SYM_CODE_START(\asmsym)
522 UNWIND_HINT_IRET_ENTRY offset=8
527 /* paranoid_entry returns GS information for paranoid_exit in EBX. */
531 movq %rsp, %rdi /* pt_regs pointer into first argument */
532 movq ORIG_RAX(%rsp), %rsi /* get error code into 2nd argument*/
533 movq $-1, ORIG_RAX(%rsp) /* no syscall to restart */
536 /* For some configurations \cfunc ends up being a noreturn. */
541 _ASM_NOKPROBE(\asmsym)
542 SYM_CODE_END(\asmsym)
546 * Include the defines which emit the idt entries which are shared
547 * between 32 and 64 bit and emit the __irqentry_text_* markers
548 * so the stacktrace boundary checks work.
551 .globl __irqentry_text_start
552 __irqentry_text_start:
554 #include <asm/idtentry.h>
557 .globl __irqentry_text_end
561 SYM_CODE_START_LOCAL(common_interrupt_return)
562 SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
565 ALTERNATIVE "", "jmp xenpv_restore_regs_and_return_to_usermode", X86_FEATURE_XENPV
567 #ifdef CONFIG_PAGE_TABLE_ISOLATION
568 ALTERNATIVE "", "jmp .Lpti_restore_regs_and_return_to_usermode", X86_FEATURE_PTI
573 add $8, %rsp /* orig_ax */
574 UNWIND_HINT_IRET_REGS
579 /* Assert that the IRET frame indicates user mode. */
584 #ifdef CONFIG_PAGE_TABLE_ISOLATION
585 .Lpti_restore_regs_and_return_to_usermode:
589 * The stack is now user RDI, orig_ax, RIP, CS, EFLAGS, RSP, SS.
590 * Save old stack pointer and switch to trampoline stack.
593 movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
594 UNWIND_HINT_END_OF_STACK
596 /* Copy the IRET frame to the trampoline stack. */
597 pushq 6*8(%rdi) /* SS */
598 pushq 5*8(%rdi) /* RSP */
599 pushq 4*8(%rdi) /* EFLAGS */
600 pushq 3*8(%rdi) /* CS */
601 pushq 2*8(%rdi) /* RIP */
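/*
 * The copy is needed because, with page table isolation, the task stack is
 * not mapped in the user page tables. The trampoline stack in the
 * cpu_entry_area stays mapped after SWITCH_TO_USER_CR3 below, so the final
 * IRET can still read its frame from it.
 */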
603 /* Push user RDI on the trampoline stack. */
607 * We are on the trampoline stack. All regs except RDI are live.
608 * We can do future final exit work right here.
610 STACKLEAK_ERASE_NOCLOBBER
613 SWITCH_TO_USER_CR3 scratch_reg=%rdi scratch_reg2=%rax
618 jmp .Lswapgs_and_iret
621 SYM_INNER_LABEL(restore_regs_and_return_to_kernel, SYM_L_GLOBAL)
622 #ifdef CONFIG_DEBUG_ENTRY
623 /* Assert that pt_regs indicates kernel mode. */
630 addq $8, %rsp /* skip regs->orig_ax */
632 * ARCH_HAS_MEMBARRIER_SYNC_CORE relies on IRET core serialization
633 * when returning from IPI handler.
636 SYM_INNER_LABEL(early_xen_iret_patch, SYM_L_GLOBAL)
639 .long .Lnative_iret - (. + 4)
643 UNWIND_HINT_IRET_REGS
645 * Are we returning to a stack segment from the LDT? Note: in
646 * 64-bit mode SS:RSP on the exception stack is always valid.
648 #ifdef CONFIG_X86_ESPFIX64
649 testb $4, (SS-RIP)(%rsp)
650 jnz native_irq_return_ldt
653 SYM_INNER_LABEL(native_irq_return_iret, SYM_L_GLOBAL)
654 ANNOTATE_NOENDBR // exc_double_fault
656 * This may fault. Non-paranoid faults on return to userspace are
657 * handled by fixup_bad_iret. These include #SS, #GP, and #NP.
658 * Double-faults due to espfix64 are handled in exc_double_fault.
659 * Other faults here are fatal.
663 #ifdef CONFIG_X86_ESPFIX64
664 native_irq_return_ldt:
666 * We are running with user GSBASE. All GPRs contain their user
667 * values. We have a percpu ESPFIX stack that is eight slots
668 * long (see ESPFIX_STACK_SIZE). espfix_waddr points to the bottom
669 * of the ESPFIX stack.
671 * We clobber RAX and RDI in this code. We stash RDI on the
672 * normal stack and RAX on the ESPFIX stack.
674 * The ESPFIX stack layout we set up looks like this:
676 * --- top of ESPFIX stack ---
677 * SS
678 * RSP
679 * RFLAGS
680 * CS
681 * RIP <-- RSP points here when we're done
682 * RAX <-- espfix_waddr points here
683 * --- bottom of ESPFIX stack ---
686 pushq %rdi /* Stash user RDI */
687 swapgs /* to kernel GS */
688 SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi /* to kernel CR3 */
690 movq PER_CPU_VAR(espfix_waddr), %rdi
691 movq %rax, (0*8)(%rdi) /* user RAX */
692 movq (1*8)(%rsp), %rax /* user RIP */
693 movq %rax, (1*8)(%rdi)
694 movq (2*8)(%rsp), %rax /* user CS */
695 movq %rax, (2*8)(%rdi)
696 movq (3*8)(%rsp), %rax /* user RFLAGS */
697 movq %rax, (3*8)(%rdi)
698 movq (5*8)(%rsp), %rax /* user SS */
699 movq %rax, (5*8)(%rdi)
700 movq (4*8)(%rsp), %rax /* user RSP */
701 movq %rax, (4*8)(%rdi)
702 /* Now RAX == RSP. */
704 andl $0xffff0000, %eax /* RAX = (RSP & 0xffff0000) */
707 * espfix_stack[31:16] == 0. The page tables are set up such that
708 * (espfix_stack | (X & 0xffff0000)) points to a read-only alias of
709 * espfix_waddr for any X. That is, there are 65536 RO aliases of
710 * the same page. Set up RSP so that RSP[31:16] contains the
711 * respective 16 bits of the /userspace/ RSP and RSP nonetheless
712 * still points to an RO alias of the ESPFIX stack.
714 orq PER_CPU_VAR(espfix_stack), %rax
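/*
 * Worked example with a purely illustrative user RSP of 0x00007f1234567890:
 * the andl above leaves RAX = 0x34560000, and the orq merges it into the
 * per-cpu espfix_stack value, so the resulting RSP carries the user RSP's
 * bits 31:16 (0x3456) while still pointing into the read-only alias of the
 * ESPFIX stack.
 */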
716 SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
717 swapgs /* to user GS */
718 popq %rdi /* Restore user RDI */
721 UNWIND_HINT_IRET_REGS offset=8
724 * At this point, we cannot write to the stack any more, but we can still read it.
727 popq %rax /* Restore user RAX */
732 * RSP now points to an ordinary IRET frame, except that the page
733 * is read-only and RSP[31:16] are preloaded with the userspace
734 * values. We can now IRET back to userspace.
736 jmp native_irq_return_iret
738 SYM_CODE_END(common_interrupt_return)
739 _ASM_NOKPROBE(common_interrupt_return)
742 * Reload gs selector with exception handling
745 * Is in entry.text as it shouldn't be instrumented.
747 SYM_FUNC_START(asm_load_gs_index)
751 ANNOTATE_NOENDBR // error_entry
753 2: ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
758 /* running with kernelgs */
760 swapgs /* switch back to user gs */
762 /* This can't be a string because the preprocessor needs to see it. */
763 movl $__USER_DS, %eax
766 ALTERNATIVE "", "ZAP_GS", X86_BUG_NULL_SEG
771 _ASM_EXTABLE(.Lgs_change, .Lbad_gs)
773 SYM_FUNC_END(asm_load_gs_index)
774 EXPORT_SYMBOL(asm_load_gs_index)
778 * A note on the "critical region" in our callback handler.
779 * We want to avoid stacking callback handlers due to events occurring
780 * during handling of the last event. To do this, we keep events disabled
781 * until we've done all processing. HOWEVER, we must enable events before
782 * popping the stack frame (can't be done atomically) and so it would still
783 * be possible to get enough handler activations to overflow the stack.
784 * Although unlikely, bugs of that kind are hard to track down, so we'd
785 * like to avoid the possibility.
786 * So, on entry to the handler we detect whether we interrupted an
787 * existing activation in its critical region -- if so, we pop the current
788 * activation and restart the handler using the previous one.
790 * C calling convention: exc_xen_hypervisor_callback(struct pt_regs *)
793 SYM_CODE_START_LOCAL_NOALIGN(exc_xen_hypervisor_callback)
796 * Since we don't modify %rdi, xen_pv_evtchn_do_upcall(struct pt_regs *) will
797 * see the correct pointer to the pt_regs
800 movq %rdi, %rsp /* we don't return, adjust the stack frame */
803 call xen_pv_evtchn_do_upcall
806 SYM_CODE_END(exc_xen_hypervisor_callback)
809 * Hypervisor uses this for application faults while it executes.
810 * We get here for two reasons:
811 * 1. Fault while reloading DS, ES, FS or GS
812 * 2. Fault while executing IRET
813 * Category 1 we do not need to fix up as Xen has already reloaded all segment
814 * registers that could be reloaded and zeroed the others.
815 * Category 2 we fix up by killing the current process. We cannot use the
816 * normal Linux return path in this case because if we use the IRET hypercall
817 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
818 * We distinguish between categories by comparing each saved segment register
819 * with its current contents: any discrepancy means we are in category 1.
822 SYM_CODE_START_NOALIGN(xen_failsafe_callback)
823 UNWIND_HINT_UNDEFINED
837 /* All segments match their saved values => Category 2 (Bad IRET). */
842 UNWIND_HINT_IRET_REGS offset=8
843 jmp asm_exc_general_protection
844 1: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
848 UNWIND_HINT_IRET_REGS
849 pushq $-1 /* orig_ax = -1 => not a system call */
853 SYM_CODE_END(xen_failsafe_callback)
854 #endif /* CONFIG_XEN_PV */
857 * Save all registers in pt_regs. Return GSBASE related information
858 * in EBX depending on the availability of the FSGSBASE instructions:
860 * FSGSBASE    R/EBX
861 *     N       0 -> SWAPGS on exit
862 *             1 -> no SWAPGS on exit
864 *     Y       GSBASE value at entry, must be restored in paranoid_exit
867 * R15 - old SPEC_CTRL
869 SYM_CODE_START(paranoid_entry)
872 PUSH_AND_CLEAR_REGS save_ret=1
873 ENCODE_FRAME_POINTER 8
876 * Always stash CR3 in %r14. This value will be restored,
877 * verbatim, at exit. Needed if paranoid_entry interrupted
878 * another entry that already switched to the user CR3 value
879 * but has not yet returned to userspace.
881 * This is also why CS (stashed in the "iret frame" by the
882 * hardware at entry) can not be used: this may be a return
883 * to kernel code, but with a user CR3 value.
885 * Switching CR3 does not depend on kernel GSBASE so it can
886 * be done before switching to the kernel GSBASE. This is
887 * required for FSGSBASE because the kernel GSBASE has to
888 * be retrieved from a kernel internal table.
890 SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
893 * Handling GSBASE depends on the availability of FSGSBASE.
895 * Without FSGSBASE the kernel enforces that negative GSBASE
896 * values indicate kernel GSBASE. With FSGSBASE no assumptions
897 * can be made about the GSBASE value when entering from user space.
900 ALTERNATIVE "jmp .Lparanoid_entry_checkgs", "", X86_FEATURE_FSGSBASE
903 * Read the current GSBASE and store it in %rbx unconditionally,
904 * retrieve and set the current CPU's kernel GSBASE. The stored value
905 * has to be restored in paranoid_exit unconditionally.
907 * The unconditional write to GS base below ensures that no subsequent
908 * loads based on a mispredicted GS base can happen, therefore no LFENCE is needed here.
911 SAVE_AND_SET_GSBASE scratch_reg=%rax save_reg=%rbx
912 jmp .Lparanoid_gsbase_done
914 .Lparanoid_entry_checkgs:
915 /* EBX = 1 -> kernel GSBASE active, no restore required */
919 * The kernel-enforced convention is a negative GSBASE indicates
920 * a kernel value. No SWAPGS needed on entry and exit.
922 movl $MSR_GS_BASE, %ecx
925 js .Lparanoid_kernel_gsbase
927 /* EBX = 0 -> SWAPGS required on exit */
930 .Lparanoid_kernel_gsbase:
931 FENCE_SWAPGS_KERNEL_ENTRY
932 .Lparanoid_gsbase_done:
935 * Once we have CR3 and %GS set up, save and set SPEC_CTRL. Just like
936 * CR3 above, keep the old value in a callee saved register.
938 IBRS_ENTER save_reg=%r15
939 UNTRAIN_RET_FROM_CALL
942 SYM_CODE_END(paranoid_entry)
945 * "Paranoid" exit path from exception stack. This is invoked
946 * only on return from non-NMI IST interrupts that came
949 * We may be returning to very strange contexts (e.g. very early
950 * in syscall entry), so checking for preemption here would
951 * be complicated. Fortunately, there's no good reason to try
952 * to handle preemption here.
954 * R/EBX contains the GSBASE related information depending on the
955 * availability of the FSGSBASE instructions:
957 * FSGSBASE    R/EBX
958 *     N       0 -> SWAPGS on exit
959 *             1 -> no SWAPGS on exit
961 *     Y       User space GSBASE, must be restored unconditionally
964 * R15 - old SPEC_CTRL
966 SYM_CODE_START_LOCAL(paranoid_exit)
970 * Must restore IBRS state before both CR3 and %GS since we need access
971 * to the per-CPU x86_spec_ctrl_shadow variable.
973 IBRS_EXIT save_reg=%r15
976 * The order of operations is important. RESTORE_CR3 requires kernel GSBASE.
979 * NB to anyone to try to optimize this code: this code does
980 * not execute at all for exceptions from user mode. Those
981 * exceptions go through error_return instead.
983 RESTORE_CR3 scratch_reg=%rax save_reg=%r14
985 /* Handle the three GSBASE cases */
986 ALTERNATIVE "jmp .Lparanoid_exit_checkgs", "", X86_FEATURE_FSGSBASE
988 /* With FSGSBASE enabled, unconditionally restore GSBASE */
990 jmp restore_regs_and_return_to_kernel
992 .Lparanoid_exit_checkgs:
993 /* On non-FSGSBASE systems, conditionally do SWAPGS */
995 jnz restore_regs_and_return_to_kernel
997 /* We are returning to a context with user GSBASE */
999 jmp restore_regs_and_return_to_kernel
1000 SYM_CODE_END(paranoid_exit)
1003 * Switch GS and CR3 if needed.
1005 SYM_CODE_START(error_entry)
1009 PUSH_AND_CLEAR_REGS save_ret=1
1010 ENCODE_FRAME_POINTER 8
1012 testb $3, CS+8(%rsp)
1013 jz .Lerror_kernelspace
1016 * We entered from user mode or we're pretending to have entered
1017 * from user mode due to an IRET fault.
1020 FENCE_SWAPGS_USER_ENTRY
1021 /* We have user CR3. Change to kernel CR3. */
1022 SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
1024 UNTRAIN_RET_FROM_CALL
1026 leaq 8(%rsp), %rdi /* arg0 = pt_regs pointer */
1027 /* Put us onto the real thread stack. */
1031 * There are two places in the kernel that can potentially fault with
1032 * usergs. Handle them here. B stepping K8s sometimes report a
1033 * truncated RIP for IRET exceptions returning to compat mode. Check
1034 * for these here too.
1036 .Lerror_kernelspace:
1037 leaq native_irq_return_iret(%rip), %rcx
1038 cmpq %rcx, RIP+8(%rsp)
1040 movl %ecx, %eax /* zero extend */
1041 cmpq %rax, RIP+8(%rsp)
1043 cmpq $.Lgs_change, RIP+8(%rsp)
1044 jne .Lerror_entry_done_lfence
1047 * hack: .Lgs_change can fail with user gsbase. If this happens, fix up
1048 * gsbase and proceed. We'll fix up the exception and land in
1049 * .Lgs_change's error handler with kernel gsbase.
1054 * Issue an LFENCE to prevent GS speculation, regardless of whether it is a
1055 * kernel or user gsbase.
1057 .Lerror_entry_done_lfence:
1058 FENCE_SWAPGS_KERNEL_ENTRY
1060 leaq 8(%rsp), %rax /* return pt_regs pointer */
1065 /* Fix truncated RIP */
1066 movq %rcx, RIP+8(%rsp)
1071 * We came from an IRET to user mode, so we have user
1072 * gsbase and CR3. Switch to kernel gsbase and CR3:
1075 FENCE_SWAPGS_USER_ENTRY
1076 SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
1078 UNTRAIN_RET_FROM_CALL
1081 * Pretend that the exception came from user mode: set up pt_regs
1082 * as if we faulted immediately after IRET.
1084 leaq 8(%rsp), %rdi /* arg0 = pt_regs pointer */
1088 SYM_CODE_END(error_entry)
1090 SYM_CODE_START_LOCAL(error_return)
1092 DEBUG_ENTRY_ASSERT_IRQS_OFF
1094 jz restore_regs_and_return_to_kernel
1095 jmp swapgs_restore_regs_and_return_to_usermode
1096 SYM_CODE_END(error_return)
1099 * Runs on exception stack. Xen PV does not go through this path at all,
1100 * so we can use real assembly here.
1103 * %r14: Used to save/restore the CR3 of the interrupted context
1104 * when PAGE_TABLE_ISOLATION is in use. Do not clobber.
1106 SYM_CODE_START(asm_exc_nmi)
1107 UNWIND_HINT_IRET_ENTRY
1111 * We allow breakpoints in NMIs. If a breakpoint occurs, then
1112 * the iretq it performs will take us out of NMI context.
1113 * This means that we can have nested NMIs where the next
1114 * NMI is using the top of the stack of the previous NMI. We
1115 * can't let it execute because the nested NMI will corrupt the
1116 * stack of the previous NMI. NMI handlers are not re-entrant
1119 * To handle this case we do the following:
1120 * Check a special location on the stack that contains a
1121 * variable that is set when NMIs are executing.
1122 * The interrupted task's stack is also checked to see if it is an NMI stack.
1124 * If the variable is not set and the stack is not the NMI stack, then:
1126 * o Set the special variable on the stack
1127 * o Copy the interrupt frame into an "outermost" location on the stack
1129 * o Copy the interrupt frame into an "iret" location on the stack
1130 * o Continue processing the NMI
1131 * If the variable is set or the previous stack is the NMI stack:
1132 * o Modify the "iret" location to jump to the repeat_nmi
1133 * o return back to the first NMI
1135 * Now on exit of the first NMI, we first clear the stack variable.
1136 * The NMI stack will tell any nested NMIs at that point that it is
1137 * nested. Then we pop the stack normally with iret, and if there was
1138 * a nested NMI that updated the copied interrupt stack frame, a
1139 * jump will be made to the repeat_nmi code that will handle the second NMI.
1142 * However, espfix prevents us from directly returning to userspace
1143 * with a single IRET instruction. Similarly, IRET to user mode
1144 * can fault. We therefore handle NMIs from user space like
1145 * other IST entries.
1151 /* Use %rdx as our temp variable throughout */
1154 testb $3, CS-RIP+8(%rsp)
1155 jz .Lnmi_from_kernel
1158 * NMI from user mode. We need to run on the thread stack, but we
1159 * can't go through the normal entry paths: NMIs are masked, and
1160 * we don't want to enable interrupts, because then we'll end
1161 * up in an awkward situation in which IRQs are on but NMIs are off.
1164 * We also must not push anything to the stack before switching
1165 * stacks lest we corrupt the "NMI executing" variable.
1169 FENCE_SWAPGS_USER_ENTRY
1170 SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx
1172 movq PER_CPU_VAR(pcpu_hot + X86_top_of_stack), %rsp
1173 UNWIND_HINT_IRET_REGS base=%rdx offset=8
1174 pushq 5*8(%rdx) /* pt_regs->ss */
1175 pushq 4*8(%rdx) /* pt_regs->rsp */
1176 pushq 3*8(%rdx) /* pt_regs->flags */
1177 pushq 2*8(%rdx) /* pt_regs->cs */
1178 pushq 1*8(%rdx) /* pt_regs->rip */
1179 UNWIND_HINT_IRET_REGS
1180 pushq $-1 /* pt_regs->orig_ax */
1181 PUSH_AND_CLEAR_REGS rdx=(%rdx)
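/*
 * The rdx=(%rdx) argument makes PUSH_AND_CLEAR_REGS store the user RDX
 * value, stashed at the old stack slot that %rdx still points to, into
 * pt_regs->dx instead of the live %rdx, which currently holds the old
 * stack pointer.
 */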
1182 ENCODE_FRAME_POINTER
1188 * At this point we no longer need to worry about stack damage
1189 * due to nesting -- we're on the normal thread stack and we're
1190 * done with the NMI stack.
1197 * Return back to user mode. We must *not* do the normal exit
1198 * work, because we don't want to enable interrupts.
1200 jmp swapgs_restore_regs_and_return_to_usermode
1204 * Here's what our stack frame will look like:
1205 * +---------------------------------------------------------+
1206 * | original SS |
1207 * | original Return RSP |
1208 * | original RFLAGS |
1209 * | original CS |
1210 * | original RIP |
1211 * +---------------------------------------------------------+
1212 * | temp storage for rdx |
1213 * +---------------------------------------------------------+
1214 * | "NMI executing" variable |
1215 * +---------------------------------------------------------+
1216 * | iret SS } Copied from "outermost" frame |
1217 * | iret Return RSP } on each loop iteration; overwritten |
1218 * | iret RFLAGS } by a nested NMI to force another |
1219 * | iret CS } iteration if needed. |
1220 * | iret RIP } |
1221 * +---------------------------------------------------------+
1222 * | outermost SS } initialized in first_nmi; |
1223 * | outermost Return RSP } will not be changed before |
1224 * | outermost RFLAGS } NMI processing is done. |
1225 * | outermost CS } Copied to "iret" frame on each |
1226 * | outermost RIP } iteration. |
1227 * +---------------------------------------------------------+
1228 * | pt_regs |
1229 * +---------------------------------------------------------+
1231 * The "original" frame is used by hardware. Before re-enabling
1232 * NMIs, we need to be done with it, and we need to leave enough
1233 * space for the asm code here.
1235 * We return by executing IRET while RSP points to the "iret" frame.
1236 * That will either return for real or it will loop back into NMI processing.
1239 * The "outermost" frame is copied to the "iret" frame on each
1240 * iteration of the loop, so each iteration starts with the "iret"
1241 * frame pointing to the final return target.
1245 * Determine whether we're a nested NMI.
1247 * If we interrupted kernel code between repeat_nmi and
1248 * end_repeat_nmi, then we are a nested NMI. We must not
1249 * modify the "iret" frame because it's being written by
1250 * the outer NMI. That's okay; the outer NMI handler is
1251 * about to call exc_nmi() anyway, so we can just resume the outer NMI.
1255 movq $repeat_nmi, %rdx
1258 movq $end_repeat_nmi, %rdx
1264 * Now check "NMI executing". If it's set, then we're nested.
1265 * This will not detect if we interrupted an outer NMI just before IRET.
1272 * Now test if the previous stack was an NMI stack. This covers
1273 * the case where we interrupt an outer NMI after it clears
1274 * "NMI executing" but before IRET. We need to be careful, though:
1275 * there is one case in which RSP could point to the NMI stack
1276 * despite there being no NMI active: naughty userspace controls
1277 * RSP at the very beginning of the SYSCALL targets. We can
1278 * pull a fast one on naughty userspace, though: we program
1279 * SYSCALL to mask DF, so userspace cannot cause DF to be set
1280 * if it controls the kernel's RSP. We set DF before we clear "NMI executing".
1284 /* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */
1285 cmpq %rdx, 4*8(%rsp)
1286 /* If the stack pointer is above the NMI stack, this is a normal NMI */
1289 subq $EXCEPTION_STKSZ, %rdx
1290 cmpq %rdx, 4*8(%rsp)
1291 /* If it is below the NMI stack, it is a normal NMI */
1294 /* Ah, it is within the NMI stack. */
1296 testb $(X86_EFLAGS_DF >> 8), (3*8 + 1)(%rsp)
1297 jz first_nmi /* RSP was user controlled. */
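/*
 * X86_EFLAGS_DF is bit 10, so (X86_EFLAGS_DF >> 8) tests bit 2 of the
 * second byte of the saved RFLAGS, which sits at offset 3*8 on this stack
 * (rdx temp, then RIP, CS, RFLAGS, RSP, SS). DF set means we really are in
 * the middle of NMI processing; DF clear means RSP was user controlled, so
 * treat this as the first NMI.
 */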
1299 /* This is a nested NMI. */
1303 * Modify the "iret" frame to point to repeat_nmi, forcing another
1304 * iteration of NMI handling.
1307 leaq -10*8(%rsp), %rdx
1314 /* Put stack back */
1320 /* We are returning to kernel mode, so this cannot result in a fault. */
1327 /* Make room for "NMI executing". */
1330 /* Leave room for the "iret" frame */
1333 /* Copy the "original" frame to the "outermost" frame */
1337 UNWIND_HINT_IRET_REGS
1339 /* Everything up to here is safe from nested NMIs */
1341 #ifdef CONFIG_DEBUG_ENTRY
1343 * For ease of testing, unmask NMIs right away. Disabled by
1344 * default because IRET is very expensive.
1347 pushq %rsp /* RSP (minus 8 because of the previous push) */
1348 addq $8, (%rsp) /* Fix up RSP */
1350 pushq $__KERNEL_CS /* CS */
1352 iretq /* continues at repeat_nmi below */
1353 UNWIND_HINT_IRET_REGS
1358 ANNOTATE_NOENDBR // this code
1360 * If there was a nested NMI, the first NMI's iret will return
1361 * here. But NMIs are still enabled and we can take another
1362 * nested NMI. The nested NMI checks the interrupted RIP to see
1363 * if it is between repeat_nmi and end_repeat_nmi, and if so
1364 * it will just return, as we are about to repeat an NMI anyway.
1365 * This makes it safe to copy to the stack frame that a nested NMI will update.
1368 * RSP is pointing to "outermost RIP". gsbase is unknown, but, if
1369 * we're repeating an NMI, gsbase has the same value that it had on
1370 * the first iteration. paranoid_entry will load the kernel
1371 * gsbase if needed before we call exc_nmi(). "NMI executing"
1374 movq $1, 10*8(%rsp) /* Set "NMI executing". */
1377 * Copy the "outermost" frame to the "iret" frame. NMIs that nest
1378 * here must not modify the "iret" frame while we're writing to
1379 * it or it will end up containing garbage.
1387 ANNOTATE_NOENDBR // this code
1390 * Everything below this point can be preempted by a nested NMI.
1391 * If this happens, then the inner NMI will change the "iret"
1392 * frame to point back to repeat_nmi.
1394 pushq $-1 /* ORIG_RAX: no syscall to restart */
1397 * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit
1398 * as we should not be calling schedule in NMI context.
1399 * Even with normal interrupts enabled. An NMI should not be
1400 * setting NEED_RESCHED or anything that normal interrupts and
1401 * exceptions might do.
1409 /* Always restore stashed SPEC_CTRL value (see paranoid_entry) */
1410 IBRS_EXIT save_reg=%r15
1412 /* Always restore stashed CR3 value (see paranoid_entry) */
1413 RESTORE_CR3 scratch_reg=%r15 save_reg=%r14
1416 * The above invocation of paranoid_entry stored the GSBASE
1417 * related information in R/EBX depending on the availability of FSGSBASE.
1420 * If FSGSBASE is enabled, restore the saved GSBASE value
1421 * unconditionally, otherwise take the conditional SWAPGS path.
1423 ALTERNATIVE "jmp nmi_no_fsgsbase", "", X86_FEATURE_FSGSBASE
1429 /* EBX == 0 -> invoke SWAPGS */
1440 * Skip orig_ax and the "outermost" frame to point RSP at the
1441 * "iret" frame.
1446 * Clear "NMI executing". Set DF first so that we can easily
1447 * distinguish the remaining code between here and IRET from
1448 * the SYSCALL entry and exit paths.
1450 * We arguably should just inspect RIP instead, but I (Andy) wrote
1451 * this code when I had the misapprehension that Xen PV supported
1452 * NMIs, and Xen PV would break that approach.
1455 movq $0, 5*8(%rsp) /* clear "NMI executing" */
1458 * Skip CLEAR_CPU_BUFFERS here, since it only helps in rare cases like
1459 * NMI in kernel after user state is restored. For an unprivileged user
1460 * these conditions are hard to meet.
1464 * iretq reads the "iret" frame and exits the NMI stack in a
1465 * single instruction. We are returning to kernel mode, so this
1466 * cannot result in a fault. Similarly, we don't need to worry
1467 * about espfix64 on the way back to kernel mode.
1470 SYM_CODE_END(asm_exc_nmi)
1473 * This handles SYSCALL from 32-bit code. There is no way to program
1474 * MSRs to fully disable 32-bit SYSCALL.
1476 SYM_CODE_START(entry_SYSCALL32_ignore)
1477 UNWIND_HINT_END_OF_STACK
1482 SYM_CODE_END(entry_SYSCALL32_ignore)
1484 .pushsection .text, "ax"
1486 SYM_CODE_START_NOALIGN(rewind_stack_and_make_dead)
1488 /* Prevent any naive code from trying to unwind to our caller. */
1491 movq PER_CPU_VAR(pcpu_hot + X86_top_of_stack), %rax
1492 leaq -PTREGS_SIZE(%rax), %rsp
1496 SYM_CODE_END(rewind_stack_and_make_dead)
1500 * This sequence executes branches in order to remove user branch information
1501 * from the branch history tracker in the Branch Predictor, therefore removing
1502 * user influence on subsequent BTB lookups.
1504 * It should be used on parts prior to Alder Lake. Newer parts should use the
1505 * BHI_DIS_S hardware control instead. If a pre-Alder Lake part is being
1506 * virtualized on newer hardware the VMM should protect against BHI attacks by
1507 * setting BHI_DIS_S for the guests.
1509 * CALLs/RETs are necessary to prevent the Loop Stream Detector (LSD) from engaging
1510 * and not clearing the branch history. The call tree looks like:
1525 * This means that the stack is non-constant and ORC can't unwind it with %rsp
1526 * alone. Therefore we unconditionally set up the frame pointer, which allows
1527 * ORC to unwind properly.
1529 * The alignment is for performance and not for safety, and may be safely
1530 * refactored in the future if needed.
1532 SYM_FUNC_START(clear_bhb_loop)
1536 ANNOTATE_INTRA_FUNCTION_CALL
1540 ANNOTATE_INTRA_FUNCTION_CALL
1555 SYM_FUNC_END(clear_bhb_loop)
1556 EXPORT_SYMBOL_GPL(clear_bhb_loop)
1557 STACK_FRAME_NON_STANDARD(clear_bhb_loop)