/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Asm versions of Xen pv-ops, suitable for direct use.
 *
 * We only bother with direct forms (ie, vcpu in percpu data) of the
 * operations here; the indirect forms are better handled in C.
 */

#include <asm/asm-offsets.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/segment.h>
#include <asm/thread_info.h>
#include <asm/asm.h>
#include <asm/frame.h>
#include <asm/unwind_hints.h>

#include <xen/interface/xen.h>

#include <linux/init.h>
#include <linux/linkage.h>
#include <../entry/calling.h>

/*
 * Enable events. This clears the event mask and tests the pending
 * event status with a single "and" operation. If there are pending
 * events, then enter the hypervisor to get them handled.
 */
SYM_FUNC_START(xen_irq_enable_direct)
	FRAME_BEGIN
	/* Unmask events */
	movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask

	/*
	 * Preempt here doesn't matter because that will deal with any
	 * pending interrupts. The pending check may end up being run
	 * on the wrong CPU, but that doesn't hurt.
	 */

	/* Test for pending */
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jz 1f

	call check_events
1:
	FRAME_END
	ret
SYM_FUNC_END(xen_irq_enable_direct)
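
/*
 * For reference, a rough C equivalent of the fast path above (an
 * illustrative sketch only; compare the C fallback xen_irq_enable()
 * in arch/x86/xen/irq.c):
 *
 *	vcpu->evtchn_upcall_mask = 0;
 *	barrier();	// unmask before checking for pending events
 *	if (vcpu->evtchn_upcall_pending)
 *		xen_force_evtchn_callback();
 */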

/*
 * Disabling events is simply a matter of making the event mask
 * non-zero.
 */
SYM_FUNC_START(xen_irq_disable_direct)
	movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	ret
SYM_FUNC_END(xen_irq_disable_direct)

/*
 * (xen_)save_fl is used to get the current interrupt enable status.
 * Callers expect the status to be in X86_EFLAGS_IF, and other bits
 * may be set in the return value. We take advantage of this by
 * making sure that X86_EFLAGS_IF has the right value (and other bits
 * in that byte are 0), but other bits in the return value are
 * undefined. We need to toggle the state of the bit, because Xen and
 * x86 use opposite senses (mask vs enable).
 */
SYM_FUNC_START(xen_save_fl_direct)
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	setz %ah
	addb %ah, %ah	/* %ah bit 1 is EFLAGS bit 9 (X86_EFLAGS_IF) */
	ret
SYM_FUNC_END(xen_save_fl_direct)
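
/*
 * The sense inversion above, roughly in C (illustrative sketch only;
 * compare xen_save_fl() in arch/x86/xen/irq.c):
 *
 *	// Xen: mask == 1 means "events disabled"; x86: IF == 1 means
 *	// "interrupts enabled", so the bit must be inverted.
 *	return vcpu->evtchn_upcall_mask ? 0 : X86_EFLAGS_IF;
 */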

/*
 * In principle the caller should be passing us a value returned from
 * xen_save_fl_direct, but for robustness' sake we test only the
 * X86_EFLAGS_IF flag rather than the whole byte. After setting the
 * interrupt mask state, it checks for unmasked pending events and
 * enters the hypervisor to get them delivered if so.
 */
SYM_FUNC_START(xen_restore_fl_direct)
	FRAME_BEGIN
	testw $X86_EFLAGS_IF, %di
	setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	/*
	 * Preempt here doesn't matter because that will deal with any
	 * pending interrupts. The pending check may end up being run
	 * on the wrong CPU, but that doesn't hurt.
	 */

	/* check for unmasked and pending */
	cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jnz 1f

	call check_events
1:
	FRAME_END
	ret
SYM_FUNC_END(xen_restore_fl_direct)
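
/*
 * The cmpw above reads evtchn_upcall_pending and evtchn_upcall_mask
 * (adjacent bytes in struct vcpu_info) as a single 16-bit word, so
 * 0x0001 is the only value that means "pending and unmasked". A rough
 * C sketch of the whole function (illustrative only):
 *
 *	vcpu->evtchn_upcall_mask = !(flags & X86_EFLAGS_IF);
 *	barrier();
 *	if (vcpu->evtchn_upcall_pending && !vcpu->evtchn_upcall_mask)
 *		xen_force_evtchn_callback();
 */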

/*
 * Force an event check by making a hypercall, but preserve regs
 * before making the call.
 */
SYM_FUNC_START(check_events)
	FRAME_BEGIN
	/* Save all caller-clobbered registers around the hypercall */
	pushq %rax
	pushq %rcx
	pushq %rdx
	pushq %rsi
	pushq %rdi
	pushq %r8
	pushq %r9
	pushq %r10
	pushq %r11
	call xen_force_evtchn_callback
	popq %r11
	popq %r10
	popq %r9
	popq %r8
	popq %rdi
	popq %rsi
	popq %rdx
	popq %rcx
	popq %rax
	FRAME_END
	ret
SYM_FUNC_END(check_events)

/*
 * A PV guest cannot read %cr2 directly; Xen saves the faulting
 * address in vcpu_info instead.
 */
SYM_FUNC_START(xen_read_cr2)
	FRAME_BEGIN
	_ASM_MOV PER_CPU_VAR(xen_vcpu), %_ASM_AX
	_ASM_MOV XEN_vcpu_info_arch_cr2(%_ASM_AX), %_ASM_AX
	FRAME_END
	ret
SYM_FUNC_END(xen_read_cr2);

SYM_FUNC_START(xen_read_cr2_direct)
	FRAME_BEGIN
	_ASM_MOV PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_arch_cr2, %_ASM_AX
	FRAME_END
	ret
SYM_FUNC_END(xen_read_cr2_direct);
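
/*
 * Roughly, in C (illustrative sketch only; there is no C counterpart
 * for this pv-op):
 *
 *	return this_cpu_read(xen_vcpu)->arch.cr2;
 */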

/*
 * Xen pushes %rcx and %r11 when it delivers an exception to the
 * guest; pop them and hand off to the regular kernel entry point.
 */
.macro xen_pv_trap name
SYM_CODE_START(xen_\name)
	UNWIND_HINT_EMPTY
	pop %rcx
	pop %r11
	jmp \name
SYM_CODE_END(xen_\name)
_ASM_NOKPROBE(xen_\name)
.endm

xen_pv_trap asm_exc_divide_error
xen_pv_trap asm_xenpv_exc_debug
xen_pv_trap asm_exc_int3
xen_pv_trap asm_xenpv_exc_nmi
xen_pv_trap asm_exc_overflow
xen_pv_trap asm_exc_bounds
xen_pv_trap asm_exc_invalid_op
xen_pv_trap asm_exc_device_not_available
xen_pv_trap asm_exc_double_fault
xen_pv_trap asm_exc_coproc_segment_overrun
xen_pv_trap asm_exc_invalid_tss
xen_pv_trap asm_exc_segment_not_present
xen_pv_trap asm_exc_stack_segment
xen_pv_trap asm_exc_general_protection
xen_pv_trap asm_exc_page_fault
xen_pv_trap asm_exc_spurious_interrupt_bug
xen_pv_trap asm_exc_coprocessor_error
xen_pv_trap asm_exc_alignment_check
#ifdef CONFIG_X86_MCE
xen_pv_trap asm_exc_machine_check
#endif /* CONFIG_X86_MCE */
xen_pv_trap asm_exc_simd_coprocessor_error
#ifdef CONFIG_IA32_EMULATION
xen_pv_trap entry_INT80_compat
#endif /* CONFIG_IA32_EMULATION */
xen_pv_trap asm_exc_xen_unknown_trap
xen_pv_trap asm_exc_xen_hypervisor_callback

	__INIT
SYM_CODE_START(xen_early_idt_handler_array)
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	UNWIND_HINT_EMPTY
	pop %rcx
	pop %r11
	jmp early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE
	i = i + 1
	.fill xen_early_idt_handler_array + i*XEN_EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
SYM_CODE_END(xen_early_idt_handler_array)
	__FINIT

hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
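
/*
 * Each stub in the shared hypercall page is 32 bytes, so hypercall N
 * lives at hypercall_page + N * 32; hypercall_iret is simply the
 * address of the iret stub.
 */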

/*
 * Xen64 iret frame:
 *
 *	ss
 *	rsp
 *	rflags
 *	cs
 *	rip		<-- standard iret frame
 *
 *	flags
 *
 *	rcx		}
 *	r11		}<-- pushed by hypercall page
 * rsp->rax		}
 */
SYM_CODE_START(xen_iret)
	UNWIND_HINT_EMPTY
	pushq $0	/* flags (see frame layout above) */
	jmp hypercall_iret
SYM_CODE_END(xen_iret)

SYM_CODE_START(xen_sysret64)
	UNWIND_HINT_EMPTY
	/*
	 * We're already on the usermode stack at this point, but
	 * still with the kernel gs, so we can easily switch back.
	 *
	 * tss.sp2 is scratch space.
	 */
	movq %rsp, PER_CPU_VAR(cpu_tss_rw + TSS_sp2)
	movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp

	/* Build an iret frame: ss, rsp, rflags, cs, rip */
	pushq $__USER_DS
	pushq PER_CPU_VAR(cpu_tss_rw + TSS_sp2)
	pushq %r11
	pushq $__USER_CS
	pushq %rcx

	pushq $VGCF_in_syscall
	jmp hypercall_iret
SYM_CODE_END(xen_sysret64)

/*
 * Xen PV doesn't use the trampoline stack: PER_CPU_VAR(cpu_tss_rw + TSS_sp0)
 * is also the kernel stack. Reusing swapgs_restore_regs_and_return_to_usermode()
 * in Xen PV would move %rsp up to the top of the kernel stack and leave the
 * IRET frame below %rsp, where it could be corrupted if an NMI interrupts.
 * Also, swapgs_restore_regs_and_return_to_usermode() would push the IRET
 * frame to the same address it already occupies, which is pointless.
 */
SYM_CODE_START(xenpv_restore_regs_and_return_to_usermode)
	UNWIND_HINT_REGS
	POP_REGS

	/* stackleak_erase() can work safely on the kernel stack. */
	STACKLEAK_ERASE_NOCLOBBER

	addq $8, %rsp	/* skip regs->orig_ax */
	jmp xen_iret
SYM_CODE_END(xenpv_restore_regs_and_return_to_usermode)

/*
 * Xen handles syscall callbacks much like ordinary exceptions, which
 * means we have:
 * - kernel gs
 * - kernel rsp
 * - an iret-like stack frame on the stack (including rcx and r11):
 *	ss
 *	rsp
 *	rflags
 *	cs
 *	rip
 *	r11
 * rsp->rcx
 */

/* Normal 64-bit system call target */
SYM_CODE_START(xen_entry_SYSCALL_64)
	UNWIND_HINT_EMPTY
	popq %rcx
	popq %r11

	/*
	 * Neither Xen nor the kernel really knows what the old SS and
	 * CS were. The kernel expects __USER_DS and __USER_CS, so
	 * report those values even though Xen will guess its own values.
	 */
	movq $__USER_DS, 4*8(%rsp)	/* SS */
	movq $__USER_CS, 1*8(%rsp)	/* CS */

	jmp entry_SYSCALL_64_after_hwframe
SYM_CODE_END(xen_entry_SYSCALL_64)
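
/*
 * After the two pops above, the iret-like frame starts at %rsp:
 * rip at 0*8, cs at 1*8, rflags at 2*8, rsp at 3*8, ss at 4*8 —
 * which is why the movs patch the 1*8 and 4*8 slots.
 */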

#ifdef CONFIG_IA32_EMULATION

/* 32-bit compat syscall target */
SYM_CODE_START(xen_entry_SYSCALL_compat)
	UNWIND_HINT_EMPTY
	popq %rcx
	popq %r11

	/*
	 * Neither Xen nor the kernel really knows what the old SS and
	 * CS were. The kernel expects __USER32_DS and __USER32_CS, so
	 * report those values even though Xen will guess its own values.
	 */
	movq $__USER32_DS, 4*8(%rsp)	/* SS */
	movq $__USER32_CS, 1*8(%rsp)	/* CS */

	jmp entry_SYSCALL_compat_after_hwframe
SYM_CODE_END(xen_entry_SYSCALL_compat)

/* 32-bit compat sysenter target */
SYM_CODE_START(xen_entry_SYSENTER_compat)
	/*
	 * NB: Xen is polite and clears TF from EFLAGS for us. This means
	 * that we don't need to guard against single step exceptions here.
	 */
	UNWIND_HINT_EMPTY
	popq %rcx
	popq %r11

	/*
	 * Neither Xen nor the kernel really knows what the old SS and
	 * CS were. The kernel expects __USER32_DS and __USER32_CS, so
	 * report those values even though Xen will guess its own values.
	 */
	movq $__USER32_DS, 4*8(%rsp)	/* SS */
	movq $__USER32_CS, 1*8(%rsp)	/* CS */

	jmp entry_SYSENTER_compat_after_hwframe
SYM_CODE_END(xen_entry_SYSENTER_compat)

#else /* !CONFIG_IA32_EMULATION */

SYM_CODE_START(xen_entry_SYSCALL_compat)
SYM_CODE_START(xen_entry_SYSENTER_compat)
	UNWIND_HINT_EMPTY
	lea 16(%rsp), %rsp	/* strip %rcx, %r11 */
	mov $-ENOSYS, %rax
	pushq $0
	jmp hypercall_iret
SYM_CODE_END(xen_entry_SYSENTER_compat)
SYM_CODE_END(xen_entry_SYSCALL_compat)

#endif /* CONFIG_IA32_EMULATION */