// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/irqflags.h>

#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/tlbflush.h>
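
/*
 * Host state saved across a TLB operation: the interrupt flags, plus
 * the EL1 TCR and SCTLR values that the SPECULATIVE_AT workaround
 * below temporarily rewrites.
 */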
struct tlb_inv_context {
	unsigned long	flags;
	u64		tcr;
	u64		sctlr;
};
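
/*
 * Point the TLB instructions at the guest: load the guest's stage-2
 * configuration and clear HCR_EL2.TGE so that TLBIs issued at EL2
 * apply to the guest's EL1/EL0 translation regime.
 */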
static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
				  struct tlb_inv_context *cxt)
{
	u64 val;

	local_irq_save(cxt->flags);

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		/*
		 * For CPUs that are affected by ARM errata 1165522 or 1530923,
		 * we cannot trust stage-1 to be in a correct state at that
		 * point. Since we do not want to force a full load of the
		 * vcpu state, we prevent the EL1 page-table walker from
		 * allocating new TLBs. This is done by setting the EPD bits
		 * in the TCR_EL1 register. We also need to prevent it from
		 * allocating IPA->PA walks, so we enable the S1 MMU...
		 */
		val = cxt->tcr = read_sysreg_el1(SYS_TCR);
		val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
		write_sysreg_el1(val, SYS_TCR);
		val = cxt->sctlr = read_sysreg_el1(SYS_SCTLR);
		val |= SCTLR_ELx_M;
		write_sysreg_el1(val, SYS_SCTLR);
	}

	/*
	 * With VHE enabled, we have HCR_EL2.{E2H,TGE} = {1,1}, and
	 * most TLB operations target EL2/EL0. In order to affect the
	 * guest TLBs (EL1/EL0), we need to change one of these two
	 * bits. Changing E2H is impossible (goodbye TTBR1_EL2), so
	 * let's flip TGE before executing the TLB operation.
	 *
	 * ARM erratum 1165522 requires some special handling (again),
	 * as we need to make sure both stages of translation are in
	 * place before clearing TGE. __load_guest_stage2() already
	 * has an ISB in order to deal with this.
	 */
	__load_guest_stage2(mmu);
	val = read_sysreg(hcr_el2);
	val &= ~HCR_TGE;
	write_sysreg(val, hcr_el2);
	isb();
}
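
/*
 * Undo __tlb_switch_to_guest(): park stage-2 by zeroing VTTBR_EL2,
 * restore the host's view of HCR_EL2, and (if the workaround was
 * applied) put the saved EL1 registers back.
 */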
static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
{
	/*
	 * We're done with the TLB operation, let's restore the host's
	 * view of HCR_EL2.
	 */
	write_sysreg(0, vttbr_el2);
	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
	isb();

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		/* Restore the registers to what they were */
		write_sysreg_el1(cxt->tcr, SYS_TCR);
		write_sysreg_el1(cxt->sctlr, SYS_SCTLR);
	}

	local_irq_restore(cxt->flags);
}
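
/*
 * The VMID-scoped operations below all follow the same bracket
 * pattern:
 *
 *	__tlb_switch_to_guest(mmu, &cxt);	- enter guest VMID context
 *	__tlbi(...);				- issue the invalidation
 *	dsb(...); isb();			- wait, then resync this PE
 *	__tlb_switch_to_host(&cxt);		- restore the host context
 */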
void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
			      phys_addr_t ipa, int level)
{
	struct tlb_inv_context cxt;

	dsb(ishst);

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt);

	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */
	ipa >>= 12;
	__tlbi_level(ipas2e1is, ipa, level);

	/*
	 * We have to ensure completion of the invalidation at Stage-2,
	 * since a table walk on another CPU could refill a TLB with a
	 * complete (S1 + S2) walk based on the old Stage-2 mapping if
	 * the Stage-1 invalidation happened first.
	 */
	dsb(ish);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();

	__tlb_switch_to_host(&cxt);
}
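
/*
 * Invalidate all stage-1 and stage-2 TLB entries for the current VMID
 * across the Inner Shareable domain (TLBI VMALLS12E1IS).
 */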
void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
{
	struct tlb_inv_context cxt;

	dsb(ishst);

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt);

	__tlbi(vmalls12e1is);
	dsb(ish);
	isb();

	__tlb_switch_to_host(&cxt);
}
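
/*
 * Local variant: TLBI VMALLE1 is not broadcast, so only this CPU's TLB
 * is invalidated, and a non-shareable barrier is sufficient.
 */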
void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu)
{
	struct tlb_inv_context cxt;

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt);

	__tlbi(vmalle1);
	dsb(nsh);
	isb();

	__tlb_switch_to_host(&cxt);
}
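
/*
 * Flush guest TLB entries for all VMIDs (TLBI ALLE1IS), plus the
 * instruction cache on VPIPT CPUs; this is what a VMID generation
 * rollover relies on.
 */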
void __kvm_flush_vm_context(void)
{
	dsb(ishst);
	__tlbi(alle1is);

	/*
	 * VIPT and PIPT caches are not affected by VMID, so no maintenance
	 * is necessary across a VMID rollover.
	 *
	 * VPIPT caches constrain lookup and maintenance to the active VMID,
	 * so we need to invalidate lines with a stale VMID to avoid an ABA
	 * race after multiple rollovers.
	 */
	if (icache_is_vpipt())
		asm volatile("ic ialluis");

	dsb(ish);
}