arch/arm64/include/asm/kvm_asm.h (GNU Linux-libre 4.9.337-gnu1)
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM_KVM_ASM_H__
#define __ARM_KVM_ASM_H__

#include <asm/virt.h>

#define ARM_EXIT_WITH_SERROR_BIT  31
#define ARM_EXCEPTION_CODE(x)     ((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
#define ARM_SERROR_PENDING(x)     !!((x) & (1U << ARM_EXIT_WITH_SERROR_BIT))

#define ARM_EXCEPTION_IRQ         0
#define ARM_EXCEPTION_EL1_SERROR  1
#define ARM_EXCEPTION_TRAP        2
/* The hyp-stub will return this for any kvm_call_hyp() call */
#define ARM_EXCEPTION_HYP_GONE    3
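
/*
 * Illustrative sketch, not part of this header: the exit code returned
 * by __kvm_vcpu_run() (declared below) folds a pending SError into bit
 * 31, so a caller can decode it roughly as follows, where
 * handle_serror() is a hypothetical helper used only for this example:
 *
 *      int ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
 *
 *      if (ARM_SERROR_PENDING(ret))
 *              handle_serror(vcpu);
 *      switch (ARM_EXCEPTION_CODE(ret)) {
 *      case ARM_EXCEPTION_IRQ:
 *              ...
 *      }
 */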

#define KVM_ARM64_DEBUG_DIRTY_SHIFT     0
#define KVM_ARM64_DEBUG_DIRTY           (1 << KVM_ARM64_DEBUG_DIRTY_SHIFT)
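
/*
 * Illustrative sketch, assuming the per-vcpu debug_flags field used by
 * the arm64 KVM debug code: the flag marks the hardware debug registers
 * as needing a save/restore on the next world switch, e.g.
 *
 *      if (vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY)
 *              ... save/restore the debug registers ...
 */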

#define VCPU_WORKAROUND_2_FLAG_SHIFT    0
#define VCPU_WORKAROUND_2_FLAG          (_AC(1, UL) << VCPU_WORKAROUND_2_FLAG_SHIFT)
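
/*
 * Illustrative sketch, assuming the per-vcpu workaround_flags field from
 * the Spectre-v4 (SSBD) mitigation backport: the flag tracks whether the
 * guest currently has ARCH_WORKAROUND_2 enabled, e.g.
 *
 *      if (vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG)
 *              ...
 */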

/* Translate a kernel address of @sym into its equivalent linear mapping */
#define kvm_ksym_ref(sym)                                               \
        ({                                                              \
                void *val = &sym;                                       \
                if (!is_kernel_in_hyp_mode())                           \
                        val = phys_to_virt((u64)&sym - kimage_voffset); \
                val;                                                    \
         })
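
/*
 * Illustrative sketch, not part of this header: hyp symbols are passed
 * through kvm_ksym_ref() when building the EL2 mappings, e.g.
 *
 *      err = create_hyp_mappings(kvm_ksym_ref(__hyp_text_start),
 *                                kvm_ksym_ref(__hyp_text_end),
 *                                PAGE_HYP_EXEC);
 */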

#ifndef __ASSEMBLY__
struct kvm;
struct kvm_vcpu;

extern char __kvm_hyp_init[];
extern char __kvm_hyp_init_end[];
extern char __kvm_hyp_reset[];

extern char __kvm_hyp_vector[];

extern void __kvm_flush_vm_context(void);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);

extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);

extern u64 __vgic_v3_get_ich_vtr_el2(void);
extern void __vgic_v3_init_lrs(void);

extern u32 __kvm_get_mdcr_el2(void);

extern u32 __init_stage2_translation(void);
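
/*
 * Illustrative sketch, not part of this header: with the kernel running
 * at EL1, these hyp entry points are reached through the kvm_call_hyp()
 * HVC trampoline rather than called directly, e.g.
 *
 *      kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
 */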

/* Home-grown __this_cpu_{ptr,read} variants that always work at HYP */
#define __hyp_this_cpu_ptr(sym)                                         \
        ({                                                              \
                void *__ptr = hyp_symbol_addr(sym);                     \
                __ptr += read_sysreg(tpidr_el2);                        \
                (typeof(&sym))__ptr;                                    \
         })

#define __hyp_this_cpu_read(sym)                                        \
        ({                                                              \
                *__hyp_this_cpu_ptr(sym);                               \
         })
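
/*
 * Illustrative sketch, not part of this header: at HYP, TPIDR_EL2 holds
 * this CPU's per-cpu offset, so hyp code can read a per-cpu variable as
 * in the SSBD mitigation, e.g.
 *
 *      if (__hyp_this_cpu_read(arm64_ssbd_callback_required))
 *              ...
 */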

/*
 * Inline-asm counterpart of the _kvm_extable macro below: emit an entry
 * into the (unsorted) __kvm_ex_table section as a pair of 32-bit
 * offsets, relative to the entry itself, from the faulting instruction
 * to its fixup.
 */
#define __KVM_EXTABLE(from, to)                                         \
        "       .pushsection    __kvm_ex_table, \"a\"\n"                \
        "       .align          3\n"                                    \
        "       .long           (" #from " - .), (" #to " - .)\n"       \
        "       .popsection\n"

/*
 * Execute an AT (address translation) instruction, returning 0 on
 * success or -EFAULT if it took an unexpected exception.  SPSR_EL2 and
 * ELR_EL2 are saved beforehand and restored by the fixup path.
 */
#define __kvm_at(at_op, addr)                                           \
({                                                                      \
        int __kvm_at_err = 0;                                           \
        u64 spsr, elr;                                                  \
        asm volatile(                                                   \
        "       mrs     %1, spsr_el2\n"                                 \
        "       mrs     %2, elr_el2\n"                                  \
        "1:     at      "at_op", %3\n"                                  \
        "       isb\n"                                                  \
        "       b       9f\n"                                           \
        "2:     msr     spsr_el2, %1\n"                                 \
        "       msr     elr_el2, %2\n"                                  \
        "       mov     %w0, %4\n"                                      \
        "9:\n"                                                          \
        __KVM_EXTABLE(1b, 2b)                                           \
        : "+r" (__kvm_at_err), "=&r" (spsr), "=&r" (elr)                \
        : "r" (addr), "i" (-EFAULT));                                   \
        __kvm_at_err;                                                   \
})
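
/*
 * Illustrative sketch, not part of this header: the stage-2 fault path
 * uses this to walk the guest's stage-1 tables safely, e.g.
 *
 *      if (!__kvm_at("s1e1r", far))
 *              par = read_sysreg(par_el1);
 *      else
 *              ... the walk itself faulted ...
 */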

#else /* __ASSEMBLY__ */

.macro hyp_adr_this_cpu reg, sym, tmp
        adr_l   \reg, \sym
        mrs     \tmp, tpidr_el2
        add     \reg, \reg, \tmp
.endm

.macro hyp_ldr_this_cpu reg, sym, tmp
        adr_l   \reg, \sym
        mrs     \tmp, tpidr_el2
        ldr     \reg, [\reg, \tmp]
.endm
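
/*
 * Illustrative sketch, not part of this header: the assembly-side
 * equivalent of __hyp_this_cpu_read(), e.g. (wa2_end is a hypothetical
 * label used only for this example):
 *
 *      hyp_ldr_this_cpu x2, arm64_ssbd_callback_required, x3
 *      cbz     x2, wa2_end
 */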

.macro get_host_ctxt reg, tmp
        hyp_adr_this_cpu \reg, kvm_host_cpu_state, \tmp
.endm

.macro get_vcpu_ptr vcpu, ctxt
        get_host_ctxt \ctxt, \vcpu
        ldr     \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
        kern_hyp_va     \vcpu
.endm
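
/*
 * Illustrative sketch, not part of this header: hyp entry code recovers
 * the current vcpu pointer from the host context, e.g.
 *
 *      get_vcpu_ptr    x1, x0          // x1 = vcpu, x0 = host context (clobbered)
 */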

/*
 * KVM extable for unexpected exceptions.
 * Entries are in the same format as _asm_extable, but output to a
 * different section so that it can be mapped to EL2.  The KVM version is
 * not sorted.  The caller must ensure that x18 holds the hypervisor
 * value, so that any Shadow-Call-Stack instrumented code can write to
 * it, and that SPSR_EL2 and ELR_EL2 are restored by the fixup.
 */
.macro  _kvm_extable, from, to
        .pushsection    __kvm_ex_table, "a"
        .align          3
        .long           (\from - .), (\to - .)
        .popsection
.endm
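
/*
 * Illustrative sketch, not part of this header: an EL2 instruction that
 * may take an unexpected exception registers a fixup like the below
 * (labels are hypothetical, for this example only):
 *
 * 1:   ldr     x1, [x0]                // access that may fault at EL2
 *      ...
 *      _kvm_extable    1b, 2f
 *      ...
 * 2:   // fixup: recover, with SPSR_EL2/ELR_EL2 restored
 */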

#endif

#endif /* __ARM_KVM_ASM_H__ */