/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM64_KVM_MMU_H__
#define __ARM64_KVM_MMU_H__

#include <asm/memory.h>
#include <asm/cpufeature.h>

/*
 * As ARMv8.0 only has the TTBR0_EL2 register, we cannot express
 * "negative" addresses. This makes it impossible to directly share
 * mappings with the kernel.
 *
 * Instead, give the HYP mode its own VA region at a fixed offset from
 * the kernel by just masking the top bits (which are all ones for a
 * kernel address). We need to find out how many bits to mask.
 *
 * We want to build a set of page tables that cover both parts of the
 * idmap (the trampoline page used to initialize EL2), and our normal
 * runtime VA space, at the same time.
 *
 * Given that the kernel uses VA_BITS for its entire address space,
 * and that half of that space (VA_BITS - 1) is used for the linear
 * mapping, we can also limit the EL2 space to (VA_BITS - 1).
 *
 * The main question is "Within the VA_BITS space, does EL2 use the
 * top or the bottom half of that space to shadow the kernel's linear
 * mapping?". As we need to idmap the trampoline page, this is
 * determined by the range in which this page lives.
 *
 * If the page is in the bottom half, we have to use the top half. If
 * the page is in the top half, we have to use the bottom half:
 *
 * T = __pa_symbol(__hyp_idmap_text_start)
 * if (T & BIT(VA_BITS - 1))
 *	HYP_VA_MIN = 0  //idmap in upper half
 * else
 *	HYP_VA_MIN = 1 << (VA_BITS - 1)
 * HYP_VA_MAX = HYP_VA_MIN + (1 << (VA_BITS - 1)) - 1
 *
 * When using VHE, there are no separate hyp mappings and all KVM
 * functionality is already mapped as part of the main kernel
 * mappings, and none of this applies in that case.
 */
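
/*
 * Worked example (illustrative only, not from the original header),
 * assuming VA_BITS = 48: if the idmap page's physical address has bit
 * 47 set (i.e. it lies in the upper half of the 48-bit range), HYP
 * gets the bottom half, [0, 1 << 47); otherwise HYP gets the top half,
 * [1 << 47, 1 << 48). Either way the EL2 VA window spans 1 << 47 bytes.
 */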

#ifdef __ASSEMBLY__

#include <asm/alternative.h>

/*
 * Convert a kernel VA into a HYP VA.
 * reg: VA to be converted.
 *
 * The actual code generation takes place in kvm_update_va_mask, and
 * the instructions below are only there to reserve the space and
 * perform the register allocation (kvm_update_va_mask uses the
 * specific registers encoded in the instructions).
 */
.macro kern_hyp_va	reg
alternative_cb kvm_update_va_mask
	and	\reg, \reg, #1		/* mask with va_mask */
	ror	\reg, \reg, #1		/* rotate to the first tag bit */
	add	\reg, \reg, #0		/* insert the low 12 bits of the tag */
	add	\reg, \reg, #0, lsl 12	/* insert the top 12 bits of the tag */
	ror	\reg, \reg, #63		/* rotate back */
alternative_cb_end
.endm
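
/*
 * Illustrative use (an assumption, not taken from this header):
 * "kern_hyp_va x0" rewrites the kernel VA held in x0 into its HYP
 * alias before EL2 dereferences it. The five instructions above are
 * patched at boot by kvm_update_va_mask with the real mask/tag values.
 */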

/*
 * Convert a hypervisor VA to a PA
 * reg: hypervisor address to be converted in place
 * tmp: temporary register
 */
.macro hyp_pa reg, tmp
	ldr_l	\tmp, hyp_physvirt_offset
	add	\reg, \reg, \tmp
.endm

/*
 * Convert a hypervisor VA to a kernel image address
 * reg: hypervisor address to be converted in place
 * tmp: temporary register
 *
 * The actual code generation takes place in kvm_get_kimage_voffset, and
 * the instructions below are only there to reserve the space and
 * perform the register allocation (kvm_get_kimage_voffset uses the
 * specific registers encoded in the instructions).
 */
.macro hyp_kimg_va reg, tmp
	/* Convert hyp VA -> PA. */
	hyp_pa	\reg, \tmp

	/* Load kimage_voffset. */
alternative_cb kvm_get_kimage_voffset
	movz	\tmp, #0
	movk	\tmp, #0, lsl #16
	movk	\tmp, #0, lsl #32
	movk	\tmp, #0, lsl #48
alternative_cb_end

	/* Convert PA -> kimg VA. */
	add	\reg, \reg, \tmp
.endm
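
/*
 * Informational note (added, not in the original): the movz/movk
 * sequence above is patched at boot with the 64-bit value of
 * kimage_voffset, so the macro effectively computes
 * kimg VA = (hyp VA + hyp_physvirt_offset) + kimage_voffset.
 */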

#else

#include <linux/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

void kvm_update_va_mask(struct alt_instr *alt,
			__le32 *origptr, __le32 *updptr, int nr_inst);
void kvm_compute_layout(void);
void kvm_apply_hyp_relocations(void);

static __always_inline unsigned long __kern_hyp_va(unsigned long v)
{
	asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n"
				    "ror %0, %0, #1\n"
				    "add %0, %0, #0\n"
				    "add %0, %0, #0, lsl 12\n"
				    "ror %0, %0, #63\n",
				    kvm_update_va_mask)
		     : "+r" (v));
	return v;
}

#define kern_hyp_va(v)	((typeof(v))(__kern_hyp_va((unsigned long)(v))))
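
/*
 * Illustrative use (an assumption for clarity, not from this header):
 * a kernel pointer must be converted before being dereferenced at EL2,
 * e.g.
 *
 *	struct kvm_s2_mmu *hyp_mmu = kern_hyp_va(mmu);
 *
 * At boot, kvm_update_va_mask() patches the instruction sequence above
 * with the mask and tag computed by kvm_compute_layout().
 */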

/*
 * We currently support using a VM-specified IPA size. For backward
 * compatibility, the default IPA size is fixed to 40 bits.
 */
#define KVM_PHYS_SHIFT	(40)

#define kvm_phys_shift(kvm)		VTCR_EL2_IPA(kvm->arch.vtcr)
#define kvm_phys_size(kvm)		(_AC(1, ULL) << kvm_phys_shift(kvm))
#define kvm_phys_mask(kvm)		(kvm_phys_size(kvm) - _AC(1, ULL))
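
/*
 * Example (informational, added for clarity): with the default 40-bit
 * IPA size, kvm_phys_size() is 1ULL << 40 = 1 TiB and kvm_phys_mask()
 * is 0xffffffffff, i.e. guest IPAs are confined to the low 40 bits.
 */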

#include <asm/kvm_pgtable.h>
#include <asm/stage2_pgtable.h>

int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot);
int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
			   void __iomem **kaddr,
			   void __iomem **haddr);
int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
			     void **haddr);
void free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu);
void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(void);

#define kvm_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))

static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	/* Both SCTLR_EL1.M (bit 0) and SCTLR_EL1.C (bit 2) must be set */
	return (vcpu_read_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}

static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
{
	void *va = page_address(pfn_to_page(pfn));

	/*
	 * With FWB, we ensure that the guest always accesses memory using
	 * cacheable attributes, and we don't have to clean to PoC when
	 * faulting in pages. Furthermore, FWB implies IDC, so cleaning to
	 * PoU is not required either in this case.
	 */
	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		return;

	kvm_flush_dcache_to_poc(va, size);
}

static inline void __invalidate_icache_guest_page(kvm_pfn_t pfn,
						  unsigned long size)
{
	if (icache_is_aliasing()) {
		/* any kind of VIPT cache */
		__flush_icache_all();
	} else if (is_kernel_in_hyp_mode() || !icache_is_vpipt()) {
		/* PIPT or VPIPT at EL2 (see comment in __kvm_tlb_flush_vmid_ipa) */
		void *va = page_address(pfn_to_page(pfn));

		invalidate_icache_range((unsigned long)va,
					(unsigned long)va + size);
	}
}

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

static inline unsigned int kvm_get_vmid_bits(void)
{
	int reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);

	return get_vmid_bits(reg);
}

/*
 * We are not in the kvm->srcu critical section most of the time, so we take
 * the SRCU read lock here. Since we copy the data from the user page, we
 * can immediately drop the lock again.
 */
static inline int kvm_read_guest_lock(struct kvm *kvm,
				      gpa_t gpa, void *data, unsigned long len)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);
	int ret = kvm_read_guest(kvm, gpa, data, len);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return ret;
}

static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
				       const void *data, unsigned long len)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);
	int ret = kvm_write_guest(kvm, gpa, data, len);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return ret;
}
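
/*
 * Illustrative use (an assumption, not from this header): callers that
 * run outside the kvm->srcu critical section, such as device emulation
 * code walking guest tables, can simply do
 *
 *	ret = kvm_read_guest_lock(kvm, gpa, &val, sizeof(val));
 *
 * and rely on the wrappers above to take and drop kvm->srcu around the
 * memslot lookup.
 */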

#define kvm_phys_to_vttbr(addr)		phys_to_ttbr(addr)

static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
{
	struct kvm_vmid *vmid = &mmu->vmid;
	u64 vmid_field, baddr;
	u64 cnp = system_supports_cnp() ? VTTBR_CNP_BIT : 0;

	baddr = mmu->pgd_phys;
	vmid_field = (u64)vmid->vmid << VTTBR_VMID_SHIFT;
	return kvm_phys_to_vttbr(baddr) | vmid_field | cnp;
}

/*
 * Must be called from hyp code running at EL2 with an updated VTTBR
 * and interrupts disabled.
 */
static __always_inline void __load_guest_stage2(struct kvm_s2_mmu *mmu)
{
	write_sysreg(kern_hyp_va(mmu->kvm)->arch.vtcr, vtcr_el2);
	write_sysreg(kvm_get_vttbr(mmu), vttbr_el2);

	/*
	 * ARM errata 1165522 and 1530923 require the actual execution of the
	 * above before we can switch to the EL1/EL0 translation regime used by
	 * the guest.
	 */
	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
}

#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */