GNU Linux-libre 5.15.137-gnu — arch/x86/include/asm/paravirt.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PARAVIRT_H
#define _ASM_X86_PARAVIRT_H
/*
 * Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here.
 */

#ifdef CONFIG_PARAVIRT
#include <asm/pgtable_types.h>
#include <asm/asm.h>
#include <asm/nospec-branch.h>

#include <asm/paravirt_types.h>

#ifndef __ASSEMBLY__
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/cpumask.h>
#include <linux/static_call_types.h>
#include <asm/frame.h>

u64 dummy_steal_clock(int cpu);
u64 dummy_sched_clock(void);

DECLARE_STATIC_CALL(pv_steal_clock, dummy_steal_clock);
DECLARE_STATIC_CALL(pv_sched_clock, dummy_sched_clock);

void paravirt_set_sched_clock(u64 (*func)(void));

static inline u64 paravirt_sched_clock(void)
{
	return static_call(pv_sched_clock)();
}
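
/*
 * Illustrative sketch (not part of this header): a guest platform would
 * register its clock callback early in boot, after which
 * paravirt_sched_clock() dispatches to it through the static call. The
 * callback name below is hypothetical.
 *
 *	static u64 example_sched_clock(void)
 *	{
 *		return 0;	// e.g. read a hypervisor-shared tick counter
 *	}
 *	...
 *	paravirt_set_sched_clock(example_sched_clock);
 */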

struct static_key;
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;

__visible void __native_queued_spin_unlock(struct qspinlock *lock);
bool pv_is_native_spin_unlock(void);
__visible bool __native_vcpu_is_preempted(long cpu);
bool pv_is_native_vcpu_is_preempted(void);

static inline u64 paravirt_steal_clock(int cpu)
{
	return static_call(pv_steal_clock)(cpu);
}

#ifdef CONFIG_PARAVIRT_SPINLOCKS
void __init paravirt_set_cap(void);
#endif

/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
	pv_ops.cpu.io_delay();
#ifdef REALLY_SLOW_IO
	pv_ops.cpu.io_delay();
	pv_ops.cpu.io_delay();
	pv_ops.cpu.io_delay();
#endif
}

void native_flush_tlb_local(void);
void native_flush_tlb_global(void);
void native_flush_tlb_one_user(unsigned long addr);
void native_flush_tlb_multi(const struct cpumask *cpumask,
			    const struct flush_tlb_info *info);

static inline void __flush_tlb_local(void)
{
	PVOP_VCALL0(mmu.flush_tlb_user);
}

static inline void __flush_tlb_global(void)
{
	PVOP_VCALL0(mmu.flush_tlb_kernel);
}

static inline void __flush_tlb_one_user(unsigned long addr)
{
	PVOP_VCALL1(mmu.flush_tlb_one_user, addr);
}

static inline void __flush_tlb_multi(const struct cpumask *cpumask,
				     const struct flush_tlb_info *info)
{
	PVOP_VCALL2(mmu.flush_tlb_multi, cpumask, info);
}

static inline void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	PVOP_VCALL2(mmu.tlb_remove_table, tlb, table);
}

static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
	PVOP_VCALL1(mmu.exit_mmap, mm);
}

#ifdef CONFIG_PARAVIRT_XXL
static inline void load_sp0(unsigned long sp0)
{
	PVOP_VCALL1(cpu.load_sp0, sp0);
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
			   unsigned int *ecx, unsigned int *edx)
{
	PVOP_VCALL4(cpu.cpuid, eax, ebx, ecx, edx);
}
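
/*
 * Illustrative sketch (callers normally use the cpuid() wrappers built
 * on top of this): the leaf is passed in *eax (and the subleaf in *ecx),
 * and all four registers are overwritten with the result.
 *
 *	unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
 *
 *	__cpuid(&eax, &ebx, &ecx, &edx);
 *	// eax = highest basic leaf; ebx/edx/ecx spell the vendor string
 */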

/*
 * These special macros can be used to get or set a debugging register
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
	return PVOP_CALL1(unsigned long, cpu.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
	PVOP_VCALL2(cpu.set_debugreg, reg, val);
}
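
/*
 * Illustrative sketch: reading and writing debug register 7 through the
 * hooks above. The bit manipulated below (DR7 bit 0, local enable for
 * breakpoint 0) is only an example.
 *
 *	unsigned long dr7;
 *
 *	get_debugreg(dr7, 7);
 *	set_debugreg(dr7 | 0x1, 7);
 */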

static inline unsigned long read_cr0(void)
{
	return PVOP_CALL0(unsigned long, cpu.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
	PVOP_VCALL1(cpu.write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
	return PVOP_ALT_CALLEE0(unsigned long, mmu.read_cr2,
				"mov %%cr2, %%rax;",
				ALT_NOT(X86_FEATURE_XENPV));
}
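
/*
 * The PVOP_ALT_*() forms used here and below pair the paravirt call with
 * a native instruction string: everywhere except a Xen PV guest
 * (ALT_NOT(X86_FEATURE_XENPV)), the alternatives machinery patches the
 * call site into the inline native code at boot, so bare metal does not
 * pay for an indirect call.
 */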

static inline void write_cr2(unsigned long x)
{
	PVOP_VCALL1(mmu.write_cr2, x);
}

static inline unsigned long __read_cr3(void)
{
	return PVOP_ALT_CALL0(unsigned long, mmu.read_cr3,
			      "mov %%cr3, %%rax;", ALT_NOT(X86_FEATURE_XENPV));
}

static inline void write_cr3(unsigned long x)
{
	PVOP_ALT_VCALL1(mmu.write_cr3, x,
			"mov %%rdi, %%cr3", ALT_NOT(X86_FEATURE_XENPV));
}

static inline void __write_cr4(unsigned long x)
{
	PVOP_VCALL1(cpu.write_cr4, x);
}

static inline void arch_safe_halt(void)
{
	PVOP_VCALL0(irq.safe_halt);
}

static inline void halt(void)
{
	PVOP_VCALL0(irq.halt);
}

static inline void wbinvd(void)
{
	PVOP_ALT_VCALL0(cpu.wbinvd, "wbinvd", ALT_NOT(X86_FEATURE_XENPV));
}

static inline u64 paravirt_read_msr(unsigned msr)
{
	return PVOP_CALL1(u64, cpu.read_msr, msr);
}

static inline void paravirt_write_msr(unsigned msr,
				      unsigned low, unsigned high)
{
	PVOP_VCALL3(cpu.write_msr, msr, low, high);
}

static inline u64 paravirt_read_msr_safe(unsigned msr, int *err)
{
	return PVOP_CALL2(u64, cpu.read_msr_safe, msr, err);
}

static inline int paravirt_write_msr_safe(unsigned msr,
					  unsigned low, unsigned high)
{
	return PVOP_CALL3(int, cpu.write_msr_safe, msr, low, high);
}

#define rdmsr(msr, val1, val2)			\
do {						\
	u64 _l = paravirt_read_msr(msr);	\
	val1 = (u32)_l;				\
	val2 = _l >> 32;			\
} while (0)

#define wrmsr(msr, val1, val2)			\
do {						\
	paravirt_write_msr(msr, val1, val2);	\
} while (0)

#define rdmsrl(msr, val)			\
do {						\
	val = paravirt_read_msr(msr);		\
} while (0)

static inline void wrmsrl(unsigned msr, u64 val)
{
	wrmsr(msr, (u32)val, (u32)(val >> 32));
}

#define wrmsr_safe(msr, a, b)	paravirt_write_msr_safe(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b)				\
({							\
	int _err;					\
	u64 _l = paravirt_read_msr_safe(msr, &_err);	\
	(*a) = (u32)_l;					\
	(*b) = _l >> 32;				\
	_err;						\
})
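
/*
 * Illustrative sketch: probing an MSR that may not exist. rdmsr_safe()
 * turns the #GP of an unimplemented MSR into a nonzero return value
 * instead of a fault.
 *
 *	u32 lo, hi;
 *
 *	if (rdmsr_safe(msr, &lo, &hi) == 0) {
 *		// MSR present: 64-bit value is ((u64)hi << 32) | lo
 *	}
 */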

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
	int err;

	*p = paravirt_read_msr_safe(msr, &err);
	return err;
}

static inline unsigned long long paravirt_read_pmc(int counter)
{
	return PVOP_CALL1(u64, cpu.read_pmc, counter);
}

#define rdpmc(counter, low, high)		\
do {						\
	u64 _l = paravirt_read_pmc(counter);	\
	low = (u32)_l;				\
	high = _l >> 32;			\
} while (0)

#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter))

static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(cpu.alloc_ldt, ldt, entries);
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(cpu.free_ldt, ldt, entries);
}

static inline void load_TR_desc(void)
{
	PVOP_VCALL0(cpu.load_tr_desc);
}

static inline void load_gdt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(cpu.load_gdt, dtr);
}

static inline void load_idt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(cpu.load_idt, dtr);
}

static inline void set_ldt(const void *addr, unsigned entries)
{
	PVOP_VCALL2(cpu.set_ldt, addr, entries);
}

static inline unsigned long paravirt_store_tr(void)
{
	return PVOP_CALL0(unsigned long, cpu.store_tr);
}

#define store_tr(tr)	((tr) = paravirt_store_tr())

static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
	PVOP_VCALL2(cpu.load_tls, t, cpu);
}

static inline void load_gs_index(unsigned int gs)
{
	PVOP_VCALL1(cpu.load_gs_index, gs);
}

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
				   const void *desc)
{
	PVOP_VCALL3(cpu.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
				   void *desc, int type)
{
	PVOP_VCALL4(cpu.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
	PVOP_VCALL3(cpu.write_idt_entry, dt, entry, g);
}

#ifdef CONFIG_X86_IOPL_IOPERM
static inline void tss_invalidate_io_bitmap(void)
{
	PVOP_VCALL0(cpu.invalidate_io_bitmap);
}

static inline void tss_update_io_bitmap(void)
{
	PVOP_VCALL0(cpu.update_io_bitmap);
}
#endif

static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
	PVOP_VCALL2(mmu.activate_mm, prev, next);
}

static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
					  struct mm_struct *mm)
{
	PVOP_VCALL2(mmu.dup_mmap, oldmm, mm);
}

static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
	return PVOP_CALL1(int, mmu.pgd_alloc, mm);
}

static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	PVOP_VCALL2(mmu.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(mmu.alloc_pte, mm, pfn);
}

static inline void paravirt_release_pte(unsigned long pfn)
{
	PVOP_VCALL1(mmu.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(mmu.alloc_pmd, mm, pfn);
}

static inline void paravirt_release_pmd(unsigned long pfn)
{
	PVOP_VCALL1(mmu.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(mmu.alloc_pud, mm, pfn);
}

static inline void paravirt_release_pud(unsigned long pfn)
{
	PVOP_VCALL1(mmu.release_pud, pfn);
}

static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(mmu.alloc_p4d, mm, pfn);
}

static inline void paravirt_release_p4d(unsigned long pfn)
{
	PVOP_VCALL1(mmu.release_p4d, pfn);
}

static inline pte_t __pte(pteval_t val)
{
	return (pte_t) { PVOP_ALT_CALLEE1(pteval_t, mmu.make_pte, val,
					  "mov %%rdi, %%rax",
					  ALT_NOT(X86_FEATURE_XENPV)) };
}

static inline pteval_t pte_val(pte_t pte)
{
	return PVOP_ALT_CALLEE1(pteval_t, mmu.pte_val, pte.pte,
				"mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV));
}

static inline pgd_t __pgd(pgdval_t val)
{
	return (pgd_t) { PVOP_ALT_CALLEE1(pgdval_t, mmu.make_pgd, val,
					  "mov %%rdi, %%rax",
					  ALT_NOT(X86_FEATURE_XENPV)) };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
	return PVOP_ALT_CALLEE1(pgdval_t, mmu.pgd_val, pgd.pgd,
				"mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV));
}
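
/*
 * For the make_*()/{pte,pgd,pmd,pud,p4d}_val() conversions above and
 * below, the native hook is the identity function, so the patched-in
 * alternative is a single "mov %rdi, %rax": copy the first argument
 * into the return register.
 */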

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
					   pte_t *ptep)
{
	pteval_t ret;

	ret = PVOP_CALL3(pteval_t, mmu.ptep_modify_prot_start, vma, addr, ptep);

	return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
					   pte_t *ptep, pte_t old_pte, pte_t pte)
{
	PVOP_VCALL4(mmu.ptep_modify_prot_commit, vma, addr, ptep, pte.pte);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	PVOP_VCALL2(mmu.set_pte, ptep, pte.pte);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	PVOP_VCALL2(mmu.set_pmd, pmdp, native_pmd_val(pmd));
}

static inline pmd_t __pmd(pmdval_t val)
{
	return (pmd_t) { PVOP_ALT_CALLEE1(pmdval_t, mmu.make_pmd, val,
					  "mov %%rdi, %%rax",
					  ALT_NOT(X86_FEATURE_XENPV)) };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
	return PVOP_ALT_CALLEE1(pmdval_t, mmu.pmd_val, pmd.pmd,
				"mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV));
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	PVOP_VCALL2(mmu.set_pud, pudp, native_pud_val(pud));
}

static inline pud_t __pud(pudval_t val)
{
	pudval_t ret;

	ret = PVOP_ALT_CALLEE1(pudval_t, mmu.make_pud, val,
			       "mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV));

	return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
	return PVOP_ALT_CALLEE1(pudval_t, mmu.pud_val, pud.pud,
				"mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV));
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, native_make_pud(0));
}

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	p4dval_t val = native_p4d_val(p4d);

	PVOP_VCALL2(mmu.set_p4d, p4dp, val);
}

#if CONFIG_PGTABLE_LEVELS >= 5

static inline p4d_t __p4d(p4dval_t val)
{
	p4dval_t ret = PVOP_ALT_CALLEE1(p4dval_t, mmu.make_p4d, val,
					"mov %%rdi, %%rax",
					ALT_NOT(X86_FEATURE_XENPV));

	return (p4d_t) { ret };
}

static inline p4dval_t p4d_val(p4d_t p4d)
{
	return PVOP_ALT_CALLEE1(p4dval_t, mmu.p4d_val, p4d.p4d,
				"mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV));
}

static inline void __set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	PVOP_VCALL2(mmu.set_pgd, pgdp, native_pgd_val(pgd));
}

#define set_pgd(pgdp, pgdval) do {					\
	if (pgtable_l5_enabled())					\
		__set_pgd(pgdp, pgdval);				\
	else								\
		set_p4d((p4d_t *)(pgdp), (p4d_t) { (pgdval).pgd });	\
} while (0)

#define pgd_clear(pgdp) do {						\
	if (pgtable_l5_enabled())					\
		set_pgd(pgdp, native_make_pgd(0));			\
} while (0)
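
/*
 * With 5-level paging compiled in but disabled at runtime
 * (!pgtable_l5_enabled()), the p4d level is folded into the pgd: a pgd
 * write is routed to set_p4d() on the folded entry, and pgd_clear()
 * becomes a no-op.
 */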

#endif	/* CONFIG_PGTABLE_LEVELS >= 5 */

static inline void p4d_clear(p4d_t *p4dp)
{
	set_p4d(p4dp, native_make_p4d(0));
}

static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	set_pte(ptep, native_make_pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, native_make_pmd(0));
}

#define __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
	PVOP_VCALL1(cpu.start_context_switch, prev);
}

static inline void arch_end_context_switch(struct task_struct *next)
{
	PVOP_VCALL1(cpu.end_context_switch, next);
}

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
	PVOP_VCALL0(mmu.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	PVOP_VCALL0(mmu.lazy_mode.leave);
}

static inline void arch_flush_lazy_mmu_mode(void)
{
	PVOP_VCALL0(mmu.lazy_mode.flush);
}
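
/*
 * Illustrative sketch: batching page-table updates under lazy MMU mode,
 * which lets a hypervisor coalesce the updates into fewer round trips.
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (i = 0; i < n; i++)
 *		set_pte(&ptep[i], ptes[i]);	// may be queued, not applied
 *	arch_leave_lazy_mmu_mode();		// queued updates take effect
 */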

static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
				phys_addr_t phys, pgprot_t flags)
{
	pv_ops.mmu.set_fixmap(idx, phys, flags);
}
#endif /* CONFIG_PARAVIRT_XXL */

#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)

static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
							 u32 val)
{
	PVOP_VCALL2(lock.queued_spin_lock_slowpath, lock, val);
}

static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
{
	PVOP_ALT_VCALLEE1(lock.queued_spin_unlock, lock,
			  "movb $0, (%%" _ASM_ARG1 ");",
			  ALT_NOT(X86_FEATURE_PVUNLOCK));
}

static __always_inline void pv_wait(u8 *ptr, u8 val)
{
	PVOP_VCALL2(lock.wait, ptr, val);
}

static __always_inline void pv_kick(int cpu)
{
	PVOP_VCALL1(lock.kick, cpu);
}
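
/*
 * pv_wait()/pv_kick() are the sleep/wake primitives behind the paravirt
 * qspinlock slow path: pv_wait(ptr, val) may halt the vCPU while *ptr
 * still reads val, and the lock holder wakes it with pv_kick(cpu). A
 * hypothetical waiter loop:
 *
 *	while (READ_ONCE(node->state) == WAITING)
 *		pv_wait(&node->state, WAITING);
 */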

static __always_inline bool pv_vcpu_is_preempted(long cpu)
{
	return PVOP_ALT_CALLEE1(bool, lock.vcpu_is_preempted, cpu,
				"xor %%" _ASM_AX ", %%" _ASM_AX ";",
				ALT_NOT(X86_FEATURE_VCPUPREEMPT));
}

void __raw_callee_save___native_queued_spin_unlock(struct qspinlock *lock);
bool __raw_callee_save___native_vcpu_is_preempted(long cpu);

#endif /* SMP && PARAVIRT_SPINLOCKS */

#ifdef CONFIG_X86_32
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS		"pushl %ecx;"
#define PV_RESTORE_ALL_CALLER_REGS	"popl  %ecx;"
#else
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS						\
	"push %rcx;"							\
	"push %rdx;"							\
	"push %rsi;"							\
	"push %rdi;"							\
	"push %r8;"							\
	"push %r9;"							\
	"push %r10;"							\
	"push %r11;"
#define PV_RESTORE_ALL_CALLER_REGS					\
	"pop %r11;"							\
	"pop %r10;"							\
	"pop %r9;"							\
	"pop %r8;"							\
	"pop %rdi;"							\
	"pop %rsi;"							\
	"pop %rdx;"							\
	"pop %rcx;"
#endif

/*
 * Generate a thunk around a function which saves all caller-save
 * registers except for the return value.  This allows C functions to
 * be called from assembler code where fewer than normal registers are
 * available.  It may also help code generation around calls from C
 * code if the common case doesn't use many registers.
 *
 * When a callee is wrapped in a thunk, the caller can assume that all
 * arg regs and all scratch registers are preserved across the
 * call. The return value in rax/eax will not be saved, even for void
 * functions.
 */
#define PV_THUNK_NAME(func) "__raw_callee_save_" #func
#define PV_CALLEE_SAVE_REGS_THUNK(func)					\
	extern typeof(func) __raw_callee_save_##func;			\
									\
	asm(".pushsection .text;"					\
	    ".globl " PV_THUNK_NAME(func) ";"				\
	    ".type " PV_THUNK_NAME(func) ", @function;"			\
	    PV_THUNK_NAME(func) ":"					\
	    FRAME_BEGIN							\
	    PV_SAVE_ALL_CALLER_REGS					\
	    "call " #func ";"						\
	    PV_RESTORE_ALL_CALLER_REGS					\
	    FRAME_END							\
	    ASM_RET							\
	    ".size " PV_THUNK_NAME(func) ", .-" PV_THUNK_NAME(func) ";" \
	    ".popsection")
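
/*
 * Illustrative example (the actual wrapping happens elsewhere in the
 * tree): the native helpers declared earlier in this file are wrapped
 * this way, e.g.
 *
 *	PV_CALLEE_SAVE_REGS_THUNK(__native_vcpu_is_preempted);
 *
 * which emits __raw_callee_save___native_vcpu_is_preempted: save all
 * caller-save regs, call __native_vcpu_is_preempted, restore, return.
 */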

/* Get a reference to a callee-save function */
#define PV_CALLEE_SAVE(func)						\
	((struct paravirt_callee_save) { __raw_callee_save_##func })

/* Promise that "func" already uses the right calling convention */
#define __PV_IS_CALLEE_SAVE(func)			\
	((struct paravirt_callee_save) { func })

#ifdef CONFIG_PARAVIRT_XXL
static inline notrace unsigned long arch_local_save_flags(void)
{
	return PVOP_ALT_CALLEE0(unsigned long, irq.save_fl, "pushf; pop %%rax;",
				ALT_NOT(X86_FEATURE_XENPV));
}

static inline notrace void arch_local_irq_disable(void)
{
	PVOP_ALT_VCALLEE0(irq.irq_disable, "cli;", ALT_NOT(X86_FEATURE_XENPV));
}

static inline notrace void arch_local_irq_enable(void)
{
	PVOP_ALT_VCALLEE0(irq.irq_enable, "sti;", ALT_NOT(X86_FEATURE_XENPV));
}

static inline notrace unsigned long arch_local_irq_save(void)
{
	unsigned long f;

	f = arch_local_save_flags();
	arch_local_irq_disable();
	return f;
}
#endif /* CONFIG_PARAVIRT_XXL */
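
/*
 * Illustrative sketch: the usual flags pattern built from the hooks
 * above (arch_local_irq_restore() comes from <asm/irqflags.h>, not from
 * this header):
 *
 *	unsigned long flags = arch_local_irq_save();
 *	// ... critical section with interrupts disabled ...
 *	arch_local_irq_restore(flags);
 */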

/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

extern void default_banner(void);

#else  /* __ASSEMBLY__ */

#define _PVSITE(ptype, ops, word, algn)		\
771:;						\
	ops;					\
772:;						\
	.pushsection .parainstructions,"a";	\
	 .align algn;				\
	 word 771b;				\
	 .byte ptype;				\
	 .byte 772b-771b;			\
	.popsection
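
/*
 * Each _PVSITE use emits the instruction sequence "ops" and records a
 * patch-site descriptor in .parainstructions: the site address (771b),
 * the paravirt op type byte, and the site length in bytes (772b-771b).
 * The boot-time patching code walks these records.
 */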

#ifdef CONFIG_X86_64
#ifdef CONFIG_PARAVIRT_XXL

#define PARA_PATCH(off)		((off) / 8)
#define PARA_SITE(ptype, ops)	_PVSITE(ptype, ops, .quad, 8)
#define PARA_INDIRECT(addr)	*addr(%rip)

#define INTERRUPT_RETURN						\
	ANNOTATE_RETPOLINE_SAFE;					\
	ALTERNATIVE_TERNARY("jmp *paravirt_iret(%rip);",		\
		X86_FEATURE_XENPV, "jmp xen_iret;", "jmp native_iret;")
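
/*
 * ALTERNATIVE_TERNARY acts like "feature ? insn1 : insn2": the indirect
 * jump through paravirt_iret is only the compile-time default, and boot
 * patching turns it into "jmp xen_iret" on Xen PV or "jmp native_iret"
 * everywhere else.
 */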

#ifdef CONFIG_DEBUG_ENTRY
.macro PARA_IRQ_save_fl
	PARA_SITE(PARA_PATCH(PV_IRQ_save_fl),
		  ANNOTATE_RETPOLINE_SAFE;
		  call PARA_INDIRECT(pv_ops+PV_IRQ_save_fl);)
.endm

#define SAVE_FLAGS	ALTERNATIVE "PARA_IRQ_save_fl;", "pushf; pop %rax;", \
				    ALT_NOT(X86_FEATURE_XENPV)
#endif /* CONFIG_DEBUG_ENTRY */
#endif /* CONFIG_PARAVIRT_XXL */
#endif /* CONFIG_X86_64 */

#endif /* __ASSEMBLY__ */
#else  /* CONFIG_PARAVIRT */
# define default_banner x86_init_noop
#endif /* !CONFIG_PARAVIRT */

#ifndef __ASSEMBLY__
#ifndef CONFIG_PARAVIRT_XXL
static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
					  struct mm_struct *mm)
{
}
#endif

#ifndef CONFIG_PARAVIRT
static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
}
#endif

#ifndef CONFIG_PARAVIRT_SPINLOCKS
static inline void paravirt_set_cap(void)
{
}
#endif
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_PARAVIRT_H */