// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/irqflags.h>

#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/tlbflush.h>
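
/*
 * Host state saved for the duration of a TLB operation that runs with
 * the guest's VMID loaded: the stage-2 MMU context to restore (if we
 * switched away from one), the caller's interrupt flags, and the
 * TCR_EL1/SCTLR_EL1 values stashed by the SPECULATIVE_AT workaround.
 */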
struct tlb_inv_context {
	struct kvm_s2_mmu	*mmu;
	unsigned long		flags;
	u64			tcr;
	u64			sctlr;
};

static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
				  struct tlb_inv_context *cxt)
{
	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
	u64 val;

	local_irq_save(cxt->flags);

	if (vcpu && mmu != vcpu->arch.hw_mmu)
		cxt->mmu = vcpu->arch.hw_mmu;
	else
		cxt->mmu = NULL;

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		/*
		 * For CPUs that are affected by ARM errata 1165522 or 1530923,
		 * we cannot trust stage-1 to be in a correct state at this
		 * point. Since we do not want to force a full load of the
		 * vcpu state, we prevent the EL1 page-table walker from
		 * allocating new TLB entries. This is done by setting the
		 * EPD bits in the TCR_EL1 register. We also need to prevent
		 * it from allocating IPA->PA walks, so we enable the S1 MMU...
		 */
		val = cxt->tcr = read_sysreg_el1(SYS_TCR);
		val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
		write_sysreg_el1(val, SYS_TCR);
		val = cxt->sctlr = read_sysreg_el1(SYS_SCTLR);
		val |= SCTLR_ELx_M;
		write_sysreg_el1(val, SYS_SCTLR);
	}

	/*
	 * With VHE enabled, we have HCR_EL2.{E2H,TGE} = {1,1}, and
	 * most TLB operations target EL2/EL0. In order to affect the
	 * guest TLBs (EL1/EL0), we need to change one of these two
	 * bits. Changing E2H is impossible (goodbye TTBR1_EL2), so
	 * let's flip TGE before executing the TLB operation.
	 *
	 * ARM erratum 1165522 requires some special handling (again),
	 * as we need to make sure both stages of translation are in
	 * place before clearing TGE. __load_stage2() already
	 * has an ISB in order to deal with this.
	 */
	__load_stage2(mmu, mmu->arch);
	val = read_sysreg(hcr_el2);
	val &= ~HCR_TGE;
	write_sysreg(val, hcr_el2);
	isb();
}
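
/*
 * Undo __tlb_switch_to_guest(): restore the host's view of HCR_EL2
 * (setting TGE again), reload the stage-2 context we switched away
 * from (if any), undo the SPECULATIVE_AT register changes, and
 * re-enable interrupts.
 */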
static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
{
	/*
	 * We're done with the TLB operation, let's restore the host's
	 * view of HCR_EL2.
	 */
	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
	isb();

	/* ... and the stage-2 MMU context that we switched away from */
	if (cxt->mmu)
		__load_stage2(cxt->mmu, cxt->mmu->arch);

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		/* Restore the registers to what they were */
		write_sysreg_el1(cxt->tcr, SYS_TCR);
		write_sysreg_el1(cxt->sctlr, SYS_SCTLR);
	}

	local_irq_restore(cxt->flags);
}
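
/*
 * Invalidate the stage-2 TLB entries covering @ipa at table @level for
 * the VMID of @mmu, broadcast to the Inner Shareable domain. Stage-1
 * entries for that VMID are dropped as well, since we only know the
 * IPA and not the guest VA.
 */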
void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
			      phys_addr_t ipa, int level)
{
	struct tlb_inv_context cxt;

	dsb(ishst);

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt);

	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */
	ipa >>= 12;
	__tlbi_level(ipas2e1is, ipa, level);

	/*
	 * We have to ensure completion of the invalidation at Stage-2,
	 * since a table walk on another CPU could refill a TLB with a
	 * complete (S1 + S2) walk based on the old Stage-2 mapping if
	 * the Stage-1 invalidation happened first.
	 */
	dsb(ish);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();

	__tlb_switch_to_host(&cxt);
}
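
/*
 * Same as __kvm_tlb_flush_vmid_ipa(), but without broadcast: the TLBIs
 * and barriers are non-shareable (ipas2e1/vmalle1, dsb(nsh)), so only
 * the local CPU's TLB is invalidated.
 */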
void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
				  phys_addr_t ipa, int level)
{
	struct tlb_inv_context cxt;

	dsb(nshst);

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt);

	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */
	ipa >>= 12;
	__tlbi_level(ipas2e1, ipa, level);

	/*
	 * We have to ensure completion of the invalidation at Stage-2,
	 * since a table walk on another CPU could refill a TLB with a
	 * complete (S1 + S2) walk based on the old Stage-2 mapping if
	 * the Stage-1 invalidation happened first.
	 */
	dsb(nsh);
	__tlbi(vmalle1);
	dsb(nsh);
	isb();

	__tlb_switch_to_host(&cxt);
}
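
/*
 * Invalidate the stage-2 TLB entries for @pages pages of IPA space
 * starting at @start for the VMID of @mmu, using the range-based TLBI
 * helper with an Inner Shareable broadcast.
 */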
void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
				phys_addr_t start, unsigned long pages)
{
	struct tlb_inv_context cxt;
	unsigned long stride;

	/*
	 * Since the range of addresses may not be mapped at
	 * the same level, assume the worst case and use a
	 * PAGE_SIZE stride.
	 */
	stride = PAGE_SIZE;
	start = round_down(start, stride);

	dsb(ishst);

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt);

	__flush_s2_tlb_range_op(ipas2e1is, start, pages, stride, 0);

	dsb(ish);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();

	__tlb_switch_to_host(&cxt);
}
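
/*
 * Invalidate all stage-1 and stage-2 TLB entries for the VMID of @mmu
 * (TLBI VMALLS12E1IS), broadcast to the Inner Shareable domain.
 */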
void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
{
	struct tlb_inv_context cxt;

	dsb(ishst);

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt);

	__tlbi(vmalls12e1is);
	dsb(ish);
	isb();

	__tlb_switch_to_host(&cxt);
}
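
/*
 * Flush this CPU's view of the guest's context: invalidate the local
 * TLB entries for the VMID of @mmu and the local instruction cache.
 * All operations are non-shareable.
 */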
void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
{
	struct tlb_inv_context cxt;

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt);

	__tlbi(vmalle1);
	asm volatile("ic iallu");
	dsb(nsh);
	isb();

	__tlb_switch_to_host(&cxt);
}
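
/*
 * Invalidate the TLB entries of all VMIDs (TLBI ALLE1IS), typically on
 * a VMID rollover, together with the icache maintenance that VPIPT
 * caches require in that case.
 */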
void __kvm_flush_vm_context(void)
{
	dsb(ishst);
	__tlbi(alle1is);

	/*
	 * VIPT and PIPT caches are not affected by VMID, so no maintenance
	 * is necessary across a VMID rollover.
	 *
	 * VPIPT caches constrain lookup and maintenance to the active VMID,
	 * so we need to invalidate lines with a stale VMID to avoid an ABA
	 * race after multiple rollovers.
	 */
	if (icache_is_vpipt())
		asm volatile("ic ialluis");

	dsb(ish);
}