/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015-2018 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/arm-smccc.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/mmu.h>

.macro save_caller_saved_regs_vect
	/* x0 and x1 were saved in the vector entry */
	stp	x2, x3,   [sp, #-16]!
	stp	x4, x5,   [sp, #-16]!
	stp	x6, x7,   [sp, #-16]!
	stp	x8, x9,   [sp, #-16]!
	stp	x10, x11, [sp, #-16]!
	stp	x12, x13, [sp, #-16]!
	stp	x14, x15, [sp, #-16]!
	stp	x16, x17, [sp, #-16]!
.endm

.macro restore_caller_saved_regs_vect
	ldp	x16, x17, [sp], #16
	ldp	x14, x15, [sp], #16
	ldp	x12, x13, [sp], #16
	ldp	x10, x11, [sp], #16
	ldp	x8, x9,   [sp], #16
	ldp	x6, x7,   [sp], #16
	ldp	x4, x5,   [sp], #16
	ldp	x2, x3,   [sp], #16
.endm

	.pushsection	.hyp.text, "ax"

.macro do_el2_call
	/*
	 * Shuffle the parameters before calling the function
	 * pointed to in x0. Assumes parameters in x[1,2,3].
	 */
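	/*
	 * The macro body was elided in this excerpt; a sketch of the
	 * shuffle the comment above describes (matching the upstream
	 * do_el2_call): stash lr, rotate the arguments down by one so
	 * the callee sees them in x0-x2, and call through lr.
	 */
	str	lr, [sp, #-16]!
	mov	lr, x0
	mov	x0, x1
	mov	x1, x2
	mov	x2, x3
	blr	lr
	ldr	lr, [sp], #16
.endm
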
el1_sync:				// Guest trapped into EL2

	mrs	x0, esr_el2
	lsr	x0, x0, #ESR_ELx_EC_SHIFT
	cmp	x0, #ESR_ELx_EC_HVC64
	ccmp	x0, #ESR_ELx_EC_HVC32, #4, ne
	b.ne	el1_trap

	mrs	x1, vttbr_el2		// If vttbr is valid, the guest
	cbnz	x1, el1_hvc_guest	// called HVC

	/* Here, we're pretty sure the host called HVC. */
	ldp	x0, x1, [sp], #16

	/* Check for a stub HVC call */
	cmp	x0, #HVC_STUB_HCALL_NR
	b.hs	1f

	/*
	 * Compute the idmap address of __kvm_handle_stub_hvc and
	 * jump there. Since we use kimage_voffset, do not use the
	 * HYP VA for __kvm_handle_stub_hvc, but the kernel VA instead
	 * (by loading it from the constant pool).
	 *
	 * Preserve x0-x4, which may contain stub parameters.
	 */
	ldr	x5, =__kvm_handle_stub_hvc
	ldr_l	x6, kimage_voffset
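	/*
	 * The jump itself was elided in this excerpt; a sketch of the
	 * upstream sequence: subtract kimage_voffset to turn the kernel
	 * VA into its physical (idmap) address, then branch there.
	 */
	/* x5 = __pa(x5) */
	sub	x5, x5, x6
	br	x5
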
1:
	/*
	 * Perform the EL2 call
	 */
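	/*
	 * The call sequence was elided here; a sketch, assuming the
	 * do_el2_call helper defined above: convert the function pointer
	 * in x0 to a HYP VA, make the call, then return to the host.
	 */
	kern_hyp_va	x0
	do_el2_call
	eret
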
el1_hvc_guest:
	/*
	 * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1.
	 * The workaround has already been applied on the host,
	 * so let's quickly get back to the guest. We don't bother
	 * restoring x1, as it can be clobbered anyway.
	 */
	ldr	x1, [sp]				// Guest's x0
	eor	w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
	cbz	w1, wa_epilogue

	/* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
	eor	w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \
			  ARM_SMCCC_ARCH_WORKAROUND_2)
	cbz	w1, wa_epilogue

	eor	w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_2 ^ \
			  ARM_SMCCC_ARCH_WORKAROUND_3)
	cbnz	w1, el1_trap
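	/*
	 * Worked example of the chained-eor dispatch above (not in the
	 * original): after the first eor, w1 == 0 iff the guest passed
	 * WORKAROUND_1. Each further eor with (PREV ^ NEXT) cancels the
	 * previous ID and re-tests the same original value against the
	 * next one, so the guest's x0 only has to be loaded once.
	 */
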
#ifdef CONFIG_ARM64_SSBD
alternative_cb	arm64_enable_wa2_handling
	b	wa2_end
alternative_cb_end
	get_vcpu_ptr	x2, x0
	ldr	x0, [x2, #VCPU_WORKAROUND_FLAGS]

	// Sanitize the argument and update the guest flags
	ldr	x1, [sp, #8]			// Guest's x1
	clz	w1, w1				// Murphy's device:
	lsr	w1, w1, #5			// w1 = !!w1 without using
	eor	w1, w1, #1			// the flags...
	bfi	x0, x1, #VCPU_WORKAROUND_2_FLAG_SHIFT, #1
	str	x0, [x2, #VCPU_WORKAROUND_FLAGS]
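	/*
	 * Worked example of the clz/lsr/eor trick (not in the original):
	 *   w1 = 0        -> clz = 32 -> lsr #5 = 1 -> eor #1 = 0
	 *   w1 = nonzero  -> clz < 32 -> lsr #5 = 0 -> eor #1 = 1
	 * i.e. w1 becomes !!w1 without touching the condition flags,
	 * which a cmp/cset sequence would clobber.
	 */
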
	/* Check that we actually need to perform the call */
	hyp_ldr_this_cpu x0, arm64_ssbd_callback_required, x2
	cbz	x0, wa2_end

	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
	smc	#0

	/* Don't leak data from the SMC call */
	mov	x3, xzr
wa2_end:
	mov	x2, xzr
	mov	x1, xzr
#endif
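
/*
 * The common return path was elided in this excerpt; a sketch of the
 * upstream wa_epilogue used by the workaround fast paths above: report
 * success in x0, drop the x0/x1 pair pushed by the vector preamble,
 * and return straight to the guest.
 */
wa_epilogue:
	mov	x0, xzr
	add	sp, sp, #16
	eret
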
el1_trap:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_TRAP
	b	__guest_exit

el1_irq:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_IRQ
	b	__guest_exit

el1_error:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_EL1_SERROR
	b	__guest_exit

el2_sync:
	/* Check for illegal exception return */
	mrs	x0, spsr_el2
	tbnz	x0, #20, 1f		// SPSR_EL2.IL

	save_caller_saved_regs_vect
	stp	x29, x30, [sp, #-16]!
	bl	kvm_unexpected_el2_exception
	ldp	x29, x30, [sp], #16
	restore_caller_saved_regs_vect

	eret

1:
	/* Let's attempt a recovery from the illegal exception return */
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_IL
	b	__guest_exit

el2_error:
	save_caller_saved_regs_vect
	stp	x29, x30, [sp, #-16]!

	bl	kvm_unexpected_el2_exception

	ldp	x29, x30, [sp], #16
	restore_caller_saved_regs_vect

	eret

ENTRY(__hyp_do_panic)
	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr
	ldr	lr, =panic
	msr	elr_el2, lr
	eret
ENDPROC(__hyp_do_panic)

ENTRY(__hyp_panic)
	get_host_ctxt x0, x1
	b	hyp_panic
ENDPROC(__hyp_panic)

.macro invalid_vector	label, target = __hyp_panic
	.align	2
\label:
	b \target
ENDPROC(\label)
.endm

	/* None of these should ever happen */
	invalid_vector	el2t_sync_invalid
	invalid_vector	el2t_irq_invalid
	invalid_vector	el2t_fiq_invalid
	invalid_vector	el2t_error_invalid
	invalid_vector	el2h_sync_invalid
	invalid_vector	el2h_irq_invalid
	invalid_vector	el2h_fiq_invalid
	invalid_vector	el1_fiq_invalid

	.ltorg

.macro check_preamble_length start, end
/* kvm_patch_vector_branch() generates code that jumps over the preamble. */
.if ((\end-\start) != KVM_VECTOR_PREAMBLE)
	.error "KVM vector preamble length mismatch"
.endif
.endm

.macro valid_vect target
	.align 7
661:
	esb
	stp	x0, x1, [sp, #-16]!
662:
	b	\target

check_preamble_length 661b, 662b
.endm

.macro invalid_vect target
	.align 7
661:
	b	\target
	nop
662:
	ldp	x0, x1, [sp], #16
	b	\target

check_preamble_length 661b, 662b
.endm

	.align	11
ENTRY(__kvm_hyp_vector)
	invalid_vect	el2t_sync_invalid	// Synchronous EL2t
	invalid_vect	el2t_irq_invalid	// IRQ EL2t
	invalid_vect	el2t_fiq_invalid	// FIQ EL2t
	invalid_vect	el2t_error_invalid	// Error EL2t

	valid_vect	el2_sync		// Synchronous EL2h
	invalid_vect	el2h_irq_invalid	// IRQ EL2h
	invalid_vect	el2h_fiq_invalid	// FIQ EL2h
	valid_vect	el2_error		// Error EL2h

	valid_vect	el1_sync		// Synchronous 64-bit EL1
	valid_vect	el1_irq			// IRQ 64-bit EL1
	invalid_vect	el1_fiq_invalid		// FIQ 64-bit EL1
	valid_vect	el1_error		// Error 64-bit EL1

	valid_vect	el1_sync		// Synchronous 32-bit EL1
	valid_vect	el1_irq			// IRQ 32-bit EL1
	invalid_vect	el1_fiq_invalid		// FIQ 32-bit EL1
	valid_vect	el1_error		// Error 32-bit EL1
ENDPROC(__kvm_hyp_vector)

#ifdef CONFIG_KVM_INDIRECT_VECTORS
.macro hyp_ventry
	.align 7
1:	esb
	.rept 26
	nop
	.endr
/*
 * The default sequence is to directly branch to the KVM vectors,
 * using the computed offset. This applies for VHE as well as
 * !ARM64_HARDEN_EL2_VECTORS. The first vector must always run the preamble.
 *
 * For ARM64_HARDEN_EL2_VECTORS configurations, this gets replaced
 * with:
 *
 * stp	x0, x1, [sp, #-16]!
 * movz	x0, #(addr & 0xffff)
 * movk	x0, #((addr >> 16) & 0xffff), lsl #16
 * movk	x0, #((addr >> 32) & 0xffff), lsl #32
 * br	x0
 *
 * Where:
 * addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + KVM_VECTOR_PREAMBLE.
 * See kvm_patch_vector_branch for details.
 */
alternative_cb	kvm_patch_vector_branch
	stp	x0, x1, [sp, #-16]!
	b	__kvm_hyp_vector + (1b - 0b + KVM_VECTOR_PREAMBLE)
	nop
	nop
	nop
alternative_cb_end
.endm

.macro generate_vectors
0:
	.rept 16
	hyp_ventry
	.endr
	.org 0b + SZ_2K		// Safety measure
.endm

	.align	11
ENTRY(__bp_harden_hyp_vecs_start)
	.rept BP_HARDEN_EL2_SLOTS
	generate_vectors
	.endr
ENTRY(__bp_harden_hyp_vecs_end)

	.popsection

ENTRY(__smccc_workaround_1_smc_start)
	esb
	sub	sp, sp, #(8 * 4)
	stp	x2, x3, [sp, #(8 * 0)]
	stp	x0, x1, [sp, #(8 * 2)]
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_1
	smc	#0
	ldp	x2, x3, [sp, #(8 * 0)]
	ldp	x0, x1, [sp, #(8 * 2)]
	add	sp, sp, #(8 * 4)
ENTRY(__smccc_workaround_1_smc_end)
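
/*
 * Design note (not in the original excerpt): each *_start/*_end pair
 * here only brackets a template. The hardening code measures the span
 * between the two symbols and memcpy()s it over the preamble of every
 * vector in a 2K hardening slot, so each template must fit within the
 * preamble it replaces and preserve all registers it touches.
 */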

ENTRY(__smccc_workaround_3_smc_start)
	esb
	sub	sp, sp, #(8 * 4)
	stp	x2, x3, [sp, #(8 * 0)]
	stp	x0, x1, [sp, #(8 * 2)]
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_3
	smc	#0
	ldp	x2, x3, [sp, #(8 * 0)]
	ldp	x0, x1, [sp, #(8 * 2)]
	add	sp, sp, #(8 * 4)
ENTRY(__smccc_workaround_3_smc_end)

ENTRY(__spectre_bhb_loop_k8_start)
	esb
	sub	sp, sp, #(8 * 2)
	stp	x0, x1, [sp, #(8 * 0)]
	mov	x0, #8
2:	b	. + 4
	subs	x0, x0, #1
	b.ne	2b
	dsb	nsh
	isb
	ldp	x0, x1, [sp, #(8 * 0)]
	add	sp, sp, #(8 * 2)
ENTRY(__spectre_bhb_loop_k8_end)

ENTRY(__spectre_bhb_loop_k24_start)
	esb
	sub	sp, sp, #(8 * 2)
	stp	x0, x1, [sp, #(8 * 0)]
	mov	x0, #24
2:	b	. + 4
	subs	x0, x0, #1
	b.ne	2b
	dsb	nsh
	isb
	ldp	x0, x1, [sp, #(8 * 0)]
	add	sp, sp, #(8 * 2)
ENTRY(__spectre_bhb_loop_k24_end)

ENTRY(__spectre_bhb_loop_k32_start)
	esb
	sub	sp, sp, #(8 * 2)
	stp	x0, x1, [sp, #(8 * 0)]
	mov	x0, #32
2:	b	. + 4
	subs	x0, x0, #1
	b.ne	2b
	dsb	nsh
	isb
	ldp	x0, x1, [sp, #(8 * 0)]
	add	sp, sp, #(8 * 2)
ENTRY(__spectre_bhb_loop_k32_end)

ENTRY(__spectre_bhb_clearbhb_start)
	esb
	clearbhb
	isb
ENTRY(__spectre_bhb_clearbhb_end)
#endif