// SPDX-License-Identifier: GPL-2.0
#include <asm/paravirt.h>
#include <asm/asm-offsets.h>
#include <linux/stringify.h>
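
/*
 * DEF_NATIVE(ops, name, code) emits the native instruction sequence "code"
 * for ops.name, bracketed by start_##ops##_##name/end_##ops##_##name
 * symbols so that native_patch() can copy it over the paravirt call site.
 */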
DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
DEF_NATIVE(pv_irq_ops, restore_fl, "pushq %rdi; popfq");
DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");

DEF_NATIVE(pv_cpu_ops, usergs_sysret64, "swapgs; sysretq");
DEF_NATIVE(pv_cpu_ops, swapgs, "swapgs");
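
/* Identity templates: return the first argument unchanged. */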
DEF_NATIVE(, mov32, "mov %edi, %eax");
DEF_NATIVE(, mov64, "mov %rdi, %rax");

#if defined(CONFIG_PARAVIRT_SPINLOCKS)
DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%rdi)");
DEF_NATIVE(pv_lock_ops, vcpu_is_preempted, "xor %rax, %rax");
#endif
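
/*
 * Patch identity call sites with the "mov" templates above; the patched
 * code just returns its argument.
 */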
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
{
	return paravirt_patch_insns(insnbuf, len,
				    start__mov32, end__mov32);
}

unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
{
	return paravirt_patch_insns(insnbuf, len,
				    start__mov64, end__mov64);
}

extern bool pv_is_native_spin_unlock(void);
extern bool pv_is_native_vcpu_is_preempted(void);
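
/*
 * native_patch() is called for each paravirt call site: it copies the
 * matching native template over the site when one is defined above and
 * defers to paravirt_patch_default() for everything else.
 */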
unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
		      unsigned long addr, unsigned len)
{
	const unsigned char *start, *end;
	unsigned ret;

	/* Pick the template bounds for ops.x, then jump to the patch code. */
#define PATCH_SITE(ops, x)					\
	case PARAVIRT_PATCH(ops.x):				\
		start = start_##ops##_##x;			\
		end = end_##ops##_##x;				\
		goto patch_site

	switch (type) {
	PATCH_SITE(pv_irq_ops, restore_fl);
	PATCH_SITE(pv_irq_ops, save_fl);
	PATCH_SITE(pv_irq_ops, irq_enable);
	PATCH_SITE(pv_irq_ops, irq_disable);
	PATCH_SITE(pv_cpu_ops, usergs_sysret64);
	PATCH_SITE(pv_cpu_ops, swapgs);
	PATCH_SITE(pv_mmu_ops, read_cr2);
	PATCH_SITE(pv_mmu_ops, read_cr3);
	PATCH_SITE(pv_mmu_ops, write_cr3);
	PATCH_SITE(pv_cpu_ops, wbinvd);
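
	/*
	 * The lock ops may be rewritten by a hypervisor, so only patch in
	 * the native sequences while they still point at the native
	 * implementations; otherwise fall back to the default patcher.
	 */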
#if defined(CONFIG_PARAVIRT_SPINLOCKS)
	case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):
		if (pv_is_native_spin_unlock()) {
			start = start_pv_lock_ops_queued_spin_unlock;
			end   = end_pv_lock_ops_queued_spin_unlock;
			goto patch_site;
		}
		goto patch_default;

	case PARAVIRT_PATCH(pv_lock_ops.vcpu_is_preempted):
		if (pv_is_native_vcpu_is_preempted()) {
			start = start_pv_lock_ops_vcpu_is_preempted;
			end   = end_pv_lock_ops_vcpu_is_preempted;
			goto patch_site;
		}
		goto patch_default;
#endif

	default:
patch_default: __maybe_unused
		ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
		break;

patch_site:
		ret = paravirt_patch_insns(ibuf, len, start, end);
		break;
	}
#undef PATCH_SITE
	return ret;
}