2 * Copyright (C) 1991,1992 Linus Torvalds
4 * entry_32.S contains the system-call and low-level fault and trap handling routines.
6 * Stack layout while running C code:
7 * ptrace needs to have all registers on the stack.
8 * If the order here is changed, it needs to be
9 * updated in fork.c:copy_process(), signal.c:do_signal(),
10 * ptrace.c and ptrace.h
22 * 28(%esp) - %gs saved iff !CONFIG_X86_32_LAZY_GS
31 #include <linux/linkage.h>
32 #include <linux/err.h>
33 #include <asm/thread_info.h>
34 #include <asm/irqflags.h>
35 #include <asm/errno.h>
36 #include <asm/segment.h>
38 #include <asm/page_types.h>
39 #include <asm/percpu.h>
40 #include <asm/processor-flags.h>
41 #include <asm/ftrace.h>
42 #include <asm/irq_vectors.h>
43 #include <asm/cpufeatures.h>
44 #include <asm/alternative-asm.h>
47 #include <asm/nospec-branch.h>
49 .section .entry.text, "ax"
52 * We use macros for low-level operations which need to be overridden
53 * for paravirtualization. The following will never clobber any registers:
54 * INTERRUPT_RETURN (aka. "iret")
55 * GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
56 * ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
58 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
59 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
60 * Allowing a register to be clobbered can shrink the paravirt replacement
61 * enough to patch inline, increasing performance.
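 * On a native (non-paravirt) build these macros collapse to the plain
 * instructions (e.g. DISABLE_INTERRUPTS(CLBR_ANY) is just "cli" and
 * INTERRUPT_RETURN is just "iret"); the CLBR_* argument only matters
 * when a hypervisor patches in replacement code.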
65 # define preempt_stop(clobbers) DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
67 # define preempt_stop(clobbers)
68 # define resume_kernel restore_all
71 .macro TRACE_IRQS_IRET
72 #ifdef CONFIG_TRACE_IRQFLAGS
73 testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off?
81 * User gs save/restore
83 * %gs is used for userland TLS; the kernel uses it only for the stack
84 * canary, which gcc requires to be at %gs:20. Read the comment
85 * at the top of stackprotector.h for more info.
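 * (With CONFIG_CC_STACKPROTECTOR, gcc's prologue typically loads the
 * canary with something like "movl %gs:20, %reg" and checks it again in
 * the epilogue, which is why the slot has to stay at %gs:20.)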
87 * Local labels 98 and 99 are used.
89 #ifdef CONFIG_X86_32_LAZY_GS
91 /* unfortunately push/pop can't be no-ops */
96 addl $(4 + \pop), %esp
101 /* all the rest are no-ops */
108 .macro REG_TO_PTGS reg
110 .macro SET_KERNEL_GS reg
113 #else /* CONFIG_X86_32_LAZY_GS */
126 .pushsection .fixup, "ax"
130 _ASM_EXTABLE(98b, 99b)
134 98: mov PT_GS(%esp), %gs
137 .pushsection .fixup, "ax"
138 99: movl $0, PT_GS(%esp)
141 _ASM_EXTABLE(98b, 99b)
147 .macro REG_TO_PTGS reg
148 movl \reg, PT_GS(%esp)
150 .macro SET_KERNEL_GS reg
151 movl $(__KERNEL_STACK_CANARY), \reg
155 #endif /* CONFIG_X86_32_LAZY_GS */
157 .macro SAVE_ALL pt_regs_ax=%eax
170 movl $(__USER_DS), %edx
173 movl $(__KERNEL_PERCPU), %edx
178 .macro RESTORE_INT_REGS
188 .macro RESTORE_REGS pop=0
194 .pushsection .fixup, "ax"
211 GET_THREAD_INFO(%ebp)
213 pushl $0x0202 # Reset kernel eflags
216 /* When we fork, we trace the syscall return in the child, too. */
218 call syscall_return_slowpath
222 ENTRY(ret_from_kernel_thread)
225 GET_THREAD_INFO(%ebp)
227 pushl $0x0202 # Reset kernel eflags
229 movl PT_EBP(%esp), %eax
230 movl PT_EBX(%esp), %edx
232 movl $0, PT_EAX(%esp)
235 * Kernel threads return to userspace as if returning from a syscall.
236 * We should check whether anything actually uses this path and, if so,
237 * consider switching it over to ret_from_fork.
240 call syscall_return_slowpath
242 ENDPROC(ret_from_kernel_thread)
245 * Return to user mode is not as complex as all this looks,
246 * but we want the default path for a system call return to
247 * go as quickly as possible, which is why some of this is
248 * less clear than it otherwise should be.
251 # userspace resumption stub bypassing syscall exit tracing
254 preempt_stop(CLBR_ANY)
256 GET_THREAD_INFO(%ebp)
258 movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS
259 movb PT_CS(%esp), %al
260 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
263 * We can be coming here from child spawned by kernel_thread().
265 movl PT_CS(%esp), %eax
266 andl $SEGMENT_RPL_MASK, %eax
269 jb resume_kernel # not returning to v8086 or userspace
271 ENTRY(resume_userspace)
272 DISABLE_INTERRUPTS(CLBR_ANY)
275 call prepare_exit_to_usermode
277 END(ret_from_exception)
279 #ifdef CONFIG_PREEMPT
281 DISABLE_INTERRUPTS(CLBR_ANY)
283 cmpl $0, PER_CPU_VAR(__preempt_count)
285 testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ?
287 call preempt_schedule_irq
292 # SYSENTER call handler stub
293 ENTRY(entry_SYSENTER_32)
294 movl TSS_sysenter_sp0(%esp), %esp
296 pushl $__USER_DS /* pt_regs->ss */
297 pushl %ebp /* pt_regs->sp (stashed in bp) */
298 pushfl /* pt_regs->flags (except IF = 0) */
299 orl $X86_EFLAGS_IF, (%esp) /* Fix IF */
300 pushl $__USER_CS /* pt_regs->cs */
301 pushl $0 /* pt_regs->ip = 0 (placeholder) */
302 pushl %eax /* pt_regs->orig_ax */
303 SAVE_ALL pt_regs_ax=$-ENOSYS /* save rest */
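/*
 * pt_regs->ax is pre-loaded with -ENOSYS: if the syscall number turns out
 * to be invalid, nothing overwrites it and that is what user space gets
 * back as the return value.
 */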
306 * User mode is traced as though IRQs are on, and SYSENTER turned them off.
312 call do_fast_syscall_32
313 /* XEN PV guests always use IRET path */
314 ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
315 "jmp .Lsyscall_32_done", X86_FEATURE_XENPV
317 /* Opportunistic SYSEXIT */
318 TRACE_IRQS_ON /* User mode traces as IRQs on. */
319 movl PT_EIP(%esp), %edx /* pt_regs->ip */
320 movl PT_OLDESP(%esp), %ecx /* pt_regs->sp */
321 1: mov PT_FS(%esp), %fs
323 popl %ebx /* pt_regs->bx */
324 addl $2*4, %esp /* skip pt_regs->cx and pt_regs->dx */
325 popl %esi /* pt_regs->si */
326 popl %edi /* pt_regs->di */
327 popl %ebp /* pt_regs->bp */
328 popl %eax /* pt_regs->ax */
331 * Return to the vDSO, which will pop ecx and edx.
332 * Don't bother with DS and ES (they already contain __USER_DS).
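 * (SYSEXIT itself loads %eip from %edx and %esp from %ecx, which is why
 * PT_EIP and PT_OLDESP were staged in those two registers above.)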
337 .pushsection .fixup, "ax"
338 2: movl $0, PT_FS(%esp)
343 ENDPROC(entry_SYSENTER_32)
345 # system call handler stub
346 ENTRY(entry_INT80_32)
348 pushl %eax /* pt_regs->orig_ax */
349 SAVE_ALL pt_regs_ax=$-ENOSYS /* save rest */
352 * User mode is traced as though IRQs are on. Unlike the 64-bit
353 * case, INT80 is a trap gate on 32-bit kernels, so interrupts
354 * are already on (unless user code is messing around with iopl).
358 call do_syscall_32_irqs_on
364 #ifdef CONFIG_X86_ESPFIX32
365 movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
367 * Warning: PT_OLDSS(%esp) contains the wrong/random values if we
368 * are returning to the kernel.
369 * See comments in process.c:copy_thread() for details.
371 movb PT_OLDSS(%esp), %ah
372 movb PT_CS(%esp), %al
373 andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
374 cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
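# i.e. VM flag clear, SS selector in the LDT, CS RPL == USER_RPL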
375 je ldt_ss # returning to user-space with LDT SS
378 RESTORE_REGS 4 # skip orig_eax/error_code
381 .section .fixup, "ax"
383 pushl $0 # no error code
387 _ASM_EXTABLE(irq_return, iret_exc)
389 #ifdef CONFIG_X86_ESPFIX32
391 #ifdef CONFIG_PARAVIRT
393 * The kernel can't run on a non-flat stack if paravirt mode
394 * is active. Rather than try to fix up the high bits of
395 * ESP, bypass this code entirely. This may break DOSemu
396 * and/or Wine support in a paravirt VM, although the option
397 * is still available to implement the setting of the high
398 * 16 bits in the INTERRUPT_RETURN paravirt-op.
400 cmpl $0, pv_info+PARAVIRT_enabled
405 * Set up and switch to the ESPFIX stack
407 * We're returning to userspace with a 16-bit stack.
408 * restore the high word of ESP for us on executing iret... This is an
409 * "official" bug of all the x86-compatible CPUs, which we can work
410 * around to make dosemu and wine happy. We do this by preloading the
411 * high word of ESP with the high word of the userspace ESP while
412 * compensating for the offset by changing to the ESPFIX segment with
413 * a base address that makes up for the difference.
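 *
 * A worked example with made-up addresses: if the kernel stack pointer is
 * 0xc1234f80 and the saved user ESP is 0x0804ff20, %eax below ends up as
 * 0x08044f80 and the offset 0xb91f0000 goes into the ESPFIX segment base,
 * so base + new ESP still resolves to 0xc1234f80 (the kernel stack) while
 * the visible high word of ESP is already the user's 0x0804, which iret
 * leaves alone when returning to the 16-bit stack segment.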
415 #define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
416 mov %esp, %edx /* load kernel esp */
417 mov PT_OLDESP(%esp), %eax /* load userspace esp */
418 mov %dx, %ax /* eax: new kernel esp */
419 sub %eax, %edx /* offset (low word is 0) */
421 mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
422 mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
424 pushl %eax /* new kernel esp */
426 * Disable interrupts, but do not irqtrace this section: we
427 * will soon execute iret and the tracer was already set to
428 * the irqstate after the IRET:
430 DISABLE_INTERRUPTS(CLBR_EAX)
431 lss (%esp), %esp /* switch to espfix segment */
434 ENDPROC(entry_INT80_32)
436 .macro FIXUP_ESPFIX_STACK
438 * Switch back from the ESPFIX stack to the normal zero-based stack
440 * We can't call C functions using the ESPFIX stack. This code reads
441 * the high word of the segment base from the GDT and switches to the
442 * normal stack and adjusts ESP with the matching offset.
444 #ifdef CONFIG_X86_ESPFIX32
445 /* fixup the stack */
446 mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
447 mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
449 addl %esp, %eax /* the adjusted stack pointer */
452 lss (%esp), %esp /* switch to the normal stack segment */
455 .macro UNWIND_ESPFIX_STACK
456 #ifdef CONFIG_X86_ESPFIX32
458 /* see if on espfix stack */
459 cmpw $__ESPFIX_SS, %ax
461 movl $__KERNEL_DS, %eax
464 /* switch to normal stack */
471 * Build the entry stubs with some assembler magic.
472 * We pack 1 stub into every 8-byte block.
475 ENTRY(irq_entries_start)
476 vector=FIRST_EXTERNAL_VECTOR
477 .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
478 pushl $(~vector+0x80) /* Note: always in signed byte range */
483 END(irq_entries_start)
486 * the CPU automatically disables interrupts when executing an IRQ vector,
487 * so IRQ-flags tracing has to follow that:
489 .p2align CONFIG_X86_L1_CACHE_SHIFT
492 addl $-0x80, (%esp) /* Adjust vector into the [-256, -1] range */
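/*
 * e.g. vector 0x20 is pushed by its stub as ~0x20 + 0x80 = 0x5f, a
 * signed-byte immediate that keeps the push short; the addl above turns
 * it back into ~0x20 (-33), and do_IRQ recovers the vector number as
 * ~regs->orig_ax.
 */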
498 ENDPROC(common_interrupt)
500 #define BUILD_INTERRUPT3(name, nr, fn) \
512 #ifdef CONFIG_TRACING
513 # define TRACE_BUILD_INTERRUPT(name, nr) BUILD_INTERRUPT3(trace_##name, nr, smp_trace_##name)
515 # define TRACE_BUILD_INTERRUPT(name, nr)
518 #define BUILD_INTERRUPT(name, nr) \
519 BUILD_INTERRUPT3(name, nr, smp_##name); \
520 TRACE_BUILD_INTERRUPT(name, nr)
522 /* The include is where all of the SMP etc. interrupts come from */
523 #include <asm/entry_arch.h>
525 ENTRY(coprocessor_error)
528 pushl $do_coprocessor_error
530 END(coprocessor_error)
532 ENTRY(simd_coprocessor_error)
535 #ifdef CONFIG_X86_INVD_BUG
536 /* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
537 ALTERNATIVE "pushl $do_general_protection", \
538 "pushl $do_simd_coprocessor_error", \
541 pushl $do_simd_coprocessor_error
544 END(simd_coprocessor_error)
546 ENTRY(device_not_available)
548 pushl $-1 # mark this as an int
549 pushl $do_device_not_available
551 END(device_not_available)
553 #ifdef CONFIG_PARAVIRT
556 _ASM_EXTABLE(native_iret, iret_exc)
581 ENTRY(coprocessor_segment_overrun)
584 pushl $do_coprocessor_segment_overrun
586 END(coprocessor_segment_overrun)
590 pushl $do_invalid_TSS
594 ENTRY(segment_not_present)
596 pushl $do_segment_not_present
598 END(segment_not_present)
602 pushl $do_stack_segment
606 ENTRY(alignment_check)
608 pushl $do_alignment_check
614 pushl $0 # no error code
615 pushl $do_divide_error
619 #ifdef CONFIG_X86_MCE
623 pushl machine_check_vector
628 ENTRY(spurious_interrupt_bug)
631 pushl $do_spurious_interrupt_bug
633 END(spurious_interrupt_bug)
637 * Xen doesn't set %esp to be precisely what the normal SYSENTER
638 * entry point expects, so fix it up before using the normal path.
640 ENTRY(xen_sysenter_target)
641 addl $5*4, %esp /* remove xen-provided frame */
642 jmp sysenter_past_esp
644 ENTRY(xen_hypervisor_callback)
645 pushl $-1 /* orig_ax = -1 => not a system call */
650 * Check to see if we got the event in the critical
651 * region in xen_iret_direct, after we've reenabled
652 * events and checked for pending events. This simulates
653 * the iret instruction's behaviour, where it delivers a
654 * pending interrupt when enabling interrupts:
656 movl PT_EIP(%esp), %eax
657 cmpl $xen_iret_start_crit, %eax
659 cmpl $xen_iret_end_crit, %eax
662 jmp xen_iret_crit_fixup
666 call xen_evtchn_do_upcall
667 #ifndef CONFIG_PREEMPT
668 call xen_maybe_preempt_hcall
671 ENDPROC(xen_hypervisor_callback)
674 * The hypervisor uses this for application faults while it executes.
675 * We get here for two reasons:
676 * 1. Fault while reloading DS, ES, FS or GS
677 * 2. Fault while executing IRET
678 * Category 1 we fix up by reattempting the load, and zeroing the segment
679 * register if the load fails.
680 * Category 2 we fix up by jumping to do_iret_error. We cannot use the
681 * normal Linux return path in this case because if we use the IRET hypercall
682 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
683 * We distinguish between categories by maintaining a status value in EAX.
685 ENTRY(xen_failsafe_callback)
692 /* EAX == 0 => Category 1 (Bad segment)
693 EAX != 0 => Category 2 (Bad IRET) */
699 5: pushl $-1 /* orig_ax = -1 => not a system call */
701 jmp ret_from_exception
703 .section .fixup, "ax"
721 ENDPROC(xen_failsafe_callback)
723 BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
724 xen_evtchn_do_upcall)
726 #endif /* CONFIG_XEN */
728 #if IS_ENABLED(CONFIG_HYPERV)
730 BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
731 hyperv_vector_handler)
733 #endif /* CONFIG_HYPERV */
735 #ifdef CONFIG_FUNCTION_TRACER
736 #ifdef CONFIG_DYNAMIC_FTRACE
746 pushl $0 /* Pass NULL as regs pointer */
749 movl function_trace_op, %ecx
750 subl $MCOUNT_INSN_SIZE, %eax
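/* %eax now holds the return address minus MCOUNT_INSN_SIZE, i.e. the
   address of the mcount call site, which is passed as the ip argument */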
756 addl $4, %esp /* skip NULL pointer */
761 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
762 .globl ftrace_graph_call
767 /* This is weak to keep gas from relaxing the jumps */
772 ENTRY(ftrace_regs_caller)
773 pushf /* push flags before compare (in cs location) */
776 * i386 does not save SS and ESP when coming from kernel.
777 * Instead, to get sp, &regs->sp is used (see ptrace.h).
778 * Unfortunately, that means eflags must be at the same location
779 * as the current return ip is. We move the return ip into the
780 * ip location, and move flags into the return ip location.
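 *
 * After the pushes below, 12*4(%esp) is regs->ip (a copy of the return
 * address), 13*4(%esp) is the cs slot currently holding the saved flags,
 * and 14*4(%esp) is the flags slot still holding the original return
 * address; the moves further down put the flags and __KERNEL_CS where
 * they belong.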
782 pushl 4(%esp) /* save return ip into ip slot */
784 pushl $0 /* Load 0 into orig_ax */
797 movl 13*4(%esp), %eax /* Get the saved flags */
798 movl %eax, 14*4(%esp) /* Move saved flags into regs->flags location */
799 /* clobbering return ip */
800 movl $__KERNEL_CS, 13*4(%esp)
802 movl 12*4(%esp), %eax /* Load ip (1st parameter) */
803 subl $MCOUNT_INSN_SIZE, %eax /* Adjust ip */
804 movl 0x4(%ebp), %edx /* Load parent ip (2nd parameter) */
805 movl function_trace_op, %ecx /* Save ftrace_ops in 3rd parameter */
806 pushl %esp /* Save pt_regs as 4th parameter */
808 GLOBAL(ftrace_regs_call)
811 addl $4, %esp /* Skip pt_regs */
812 movl 14*4(%esp), %eax /* Move flags back into cs */
813 movl %eax, 13*4(%esp) /* Needed to keep addl from modifying flags */
814 movl 12*4(%esp), %eax /* Get return ip from regs->ip */
815 movl %eax, 14*4(%esp) /* Put return ip back for ret */
828 addl $8, %esp /* Skip orig_ax and ip */
829 popf /* Pop flags at end (no addl to corrupt flags) */
834 #else /* ! CONFIG_DYNAMIC_FTRACE */
837 cmpl $__PAGE_OFFSET, %esp
838 jb ftrace_stub /* Paging not enabled yet? */
840 cmpl $ftrace_stub, ftrace_trace_function
842 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
843 cmpl $ftrace_stub, ftrace_graph_return
844 jnz ftrace_graph_caller
846 cmpl $ftrace_graph_entry_stub, ftrace_graph_entry
847 jnz ftrace_graph_caller
853 /* taken from glibc */
860 subl $MCOUNT_INSN_SIZE, %eax
862 movl ftrace_trace_function, %ecx
870 #endif /* CONFIG_DYNAMIC_FTRACE */
871 #endif /* CONFIG_FUNCTION_TRACER */
873 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
874 ENTRY(ftrace_graph_caller)
881 subl $MCOUNT_INSN_SIZE, %eax
882 call prepare_ftrace_return
887 END(ftrace_graph_caller)
889 .globl return_to_handler
894 call ftrace_return_to_handler
901 #ifdef CONFIG_TRACING
902 ENTRY(trace_page_fault)
904 pushl $trace_do_page_fault
906 END(trace_page_fault)
914 /* the function address is in %gs's slot on the stack */
926 movl $(__KERNEL_PERCPU), %ecx
930 movl PT_GS(%esp), %edi # get the function address
931 movl PT_ORIG_EAX(%esp), %edx # get the error code
932 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
935 movl $(__USER_DS), %ecx
939 movl %esp, %eax # pt_regs pointer
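# the handler is called with regparm(3): pt_regs in %eax, error code in %edx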
941 jmp ret_from_exception
945 * Debug traps and NMI can happen at the one SYSENTER instruction
946 * that sets up the real kernel stack. Check here, since we can't
947 * allow the wrong stack to be used.
949 * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have
950 * already pushed 3 words if it hits on the sysenter instruction:
951 * eflags, cs and eip.
953 * We just load the right stack, and push the three (known) values
954 * by hand onto the new stack - while updating the return eip past
955 * the instruction that would have done it for sysenter.
957 .macro FIX_STACK offset ok label
958 cmpw $__KERNEL_CS, 4(%esp)
961 movl TSS_sysenter_sp0 + \offset(%esp), %esp
964 pushl $sysenter_past_esp
969 cmpl $entry_SYSENTER_32, (%esp)
970 jne debug_stack_correct
971 FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
973 pushl $-1 # mark this as an int
976 xorl %edx, %edx # error code 0
977 movl %esp, %eax # pt_regs pointer
979 jmp ret_from_exception
983 * NMI is doubly nasty. It can happen _while_ we're handling
984 * a debug fault, and the debug fault hasn't yet been able to
985 * clear up the stack. So we first check whether we got an
986 * NMI on the sysenter entry path, but after that we need to
987 * check whether we got an NMI on the debug path where the debug
988 * fault happened on the sysenter path.
992 #ifdef CONFIG_X86_ESPFIX32
995 cmpw $__ESPFIX_SS, %ax
999 cmpl $entry_SYSENTER_32, (%esp)
1004 * Do not access memory above the end of our stack page;
1005 * it might not exist.
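 * (The sysenter/debug checks below peek a few words up the stack, hence
 * the 20-byte safety margin in the compare.)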
1007 andl $(THREAD_SIZE-1), %eax
1008 cmpl $(THREAD_SIZE-20), %eax
1010 jae nmi_stack_correct
1011 cmpl $entry_SYSENTER_32, 12(%esp)
1012 je nmi_debug_stack_check
1016 xorl %edx, %edx # zero error code
1017 movl %esp, %eax # pt_regs pointer
1019 jmp restore_all_notrace
1022 FIX_STACK 12, nmi_stack_correct, 1
1023 jmp nmi_stack_correct
1025 nmi_debug_stack_check:
1026 cmpw $__KERNEL_CS, 16(%esp)
1027 jne nmi_stack_correct
1029 jb nmi_stack_correct
1030 cmpl $debug_esp_fix_insn, (%esp)
1031 ja nmi_stack_correct
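# offset 24: the debug handler's three-word frame plus NMI's own three words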
1032 FIX_STACK 24, nmi_stack_correct, 1
1033 jmp nmi_stack_correct
1035 #ifdef CONFIG_X86_ESPFIX32
1038 * create the ss:esp pointer that lss uses to switch back
1043 /* copy the iret frame of 12 bytes */
1049 FIXUP_ESPFIX_STACK # %eax == %esp
1050 xorl %edx, %edx # zero error code
1053 lss 12+4(%esp), %esp # back to espfix stack
1060 pushl $-1 # mark this as an int
1063 xorl %edx, %edx # zero error code
1064 movl %esp, %eax # pt_regs pointer
1066 jmp ret_from_exception
1069 ENTRY(general_protection)
1071 pushl $do_general_protection
1073 END(general_protection)
1075 #ifdef CONFIG_KVM_GUEST
1076 ENTRY(async_page_fault)
1078 pushl $do_async_page_fault
1080 END(async_page_fault)