1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2020 Google LLC
4 * Author: Will Deacon <will@kernel.org>
7 #ifndef __ARM64_KVM_PGTABLE_H__
8 #define __ARM64_KVM_PGTABLE_H__
10 #include <linux/bits.h>
11 #include <linux/kvm_host.h>
12 #include <linux/types.h>
/* Raw 64-bit page-table entry value, used by both the hyp stage-1 and guest stage-2 APIs below. */
14 typedef u64 kvm_pte_t;
17 * struct kvm_pgtable - KVM page-table.
18 * @ia_bits: Maximum input address size, in bits.
19 * @start_level: Level at which the page-table walk starts.
20 * @pgd: Pointer to the first top-level entry of the page-table.
21 * @mmu: Stage-2 KVM MMU struct. Unused for stage-1 page-tables.
29 struct kvm_s2_mmu *mmu;
33 * enum kvm_pgtable_prot - Page-table permissions and attributes.
34 * @KVM_PGTABLE_PROT_X: Execute permission.
35 * @KVM_PGTABLE_PROT_W: Write permission.
36 * @KVM_PGTABLE_PROT_R: Read permission.
37 * @KVM_PGTABLE_PROT_DEVICE: Device attributes.
39 enum kvm_pgtable_prot {
40 KVM_PGTABLE_PROT_X = BIT(0),
41 KVM_PGTABLE_PROT_W = BIT(1),
42 KVM_PGTABLE_PROT_R = BIT(2),
44 KVM_PGTABLE_PROT_DEVICE = BIT(3),
/*
 * Shorthand attribute combinations for hypervisor stage-1 mappings, built
 * from the kvm_pgtable_prot bits: read+write, read+exec, read-only, and
 * device (read+write with device memory attributes).
 */
47 #define PAGE_HYP (KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W)
48 #define PAGE_HYP_EXEC (KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_X)
49 #define PAGE_HYP_RO (KVM_PGTABLE_PROT_R)
50 #define PAGE_HYP_DEVICE (PAGE_HYP | KVM_PGTABLE_PROT_DEVICE)
53 * enum kvm_pgtable_walk_flags - Flags to control a depth-first page-table walk.
54 * @KVM_PGTABLE_WALK_LEAF: Visit leaf entries, including invalid entries.
56 * @KVM_PGTABLE_WALK_TABLE_PRE: Visit table entries before their children.
58 * @KVM_PGTABLE_WALK_TABLE_POST: Visit table entries after their children.
61 enum kvm_pgtable_walk_flags {
62 KVM_PGTABLE_WALK_LEAF = BIT(0),
63 KVM_PGTABLE_WALK_TABLE_PRE = BIT(1),
64 KVM_PGTABLE_WALK_TABLE_POST = BIT(2),
67 typedef int (*kvm_pgtable_visitor_fn_t)(u64 addr, u64 end, u32 level,
69 enum kvm_pgtable_walk_flags flag,
73 * struct kvm_pgtable_walker - Hook into a page-table walk.
74 * @cb: Callback function to invoke during the walk.
75 * @arg: Argument passed to the callback function.
76 * @flags: Bitwise-OR of flags to identify the entry types on which to
77 * invoke the callback function.
79 struct kvm_pgtable_walker {
80 const kvm_pgtable_visitor_fn_t cb;
82 const enum kvm_pgtable_walk_flags flags;
86 * kvm_pgtable_hyp_init() - Initialise a hypervisor stage-1 page-table.
87 * @pgt: Uninitialised page-table structure to initialise.
88 * @va_bits: Maximum virtual address bits.
90 * Return: 0 on success, negative error code on failure.
92 int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits);
95 * kvm_pgtable_hyp_destroy() - Destroy an unused hypervisor stage-1 page-table.
96 * @pgt: Page-table structure initialised by kvm_pgtable_hyp_init().
98 * The page-table is assumed to be unreachable by any hardware walkers prior
99 * to freeing and therefore no TLB invalidation is performed.
101 void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt);
104 * kvm_pgtable_hyp_map() - Install a mapping in a hypervisor stage-1 page-table.
105 * @pgt: Page-table structure initialised by kvm_pgtable_hyp_init().
106 * @addr: Virtual address at which to place the mapping.
107 * @size: Size of the mapping.
108 * @phys: Physical address of the memory to map.
109 * @prot: Permissions and attributes for the mapping.
111 * The offset of @addr within a page is ignored, @size is rounded-up to
112 * the next page boundary and @phys is rounded-down to the previous page boundary.
115 * If device attributes are not explicitly requested in @prot, then the
116 * mapping will be normal, cacheable. Attempts to install a new mapping
117 * for a virtual address that is already mapped will be rejected with an
118 * error and a WARN().
120 * Return: 0 on success, negative error code on failure.
122 int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
123 enum kvm_pgtable_prot prot);
126 * kvm_pgtable_stage2_init() - Initialise a guest stage-2 page-table.
127 * @pgt: Uninitialised page-table structure to initialise.
128 * @kvm: KVM structure representing the guest virtual machine.
130 * Return: 0 on success, negative error code on failure.
132 int kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm *kvm);
135 * kvm_pgtable_stage2_destroy() - Destroy an unused guest stage-2 page-table.
136 * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init().
138 * The page-table is assumed to be unreachable by any hardware walkers prior
139 * to freeing and therefore no TLB invalidation is performed.
141 void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
144 * kvm_pgtable_stage2_map() - Install a mapping in a guest stage-2 page-table.
145 * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init().
146 * @addr: Intermediate physical address at which to place the mapping.
147 * @size: Size of the mapping.
148 * @phys: Physical address of the memory to map.
149 * @prot: Permissions and attributes for the mapping.
150 * @mc: Cache of pre-allocated GFP_PGTABLE_USER memory from which to
151 * allocate page-table pages.
153 * The offset of @addr within a page is ignored, @size is rounded-up to
154 * the next page boundary and @phys is rounded-down to the previous page boundary.
157 * If device attributes are not explicitly requested in @prot, then the
158 * mapping will be normal, cacheable.
160 * Note that this function will both coalesce existing table entries and split
161 * existing block mappings, relying on page-faults to fault back areas outside
162 * of the new mapping lazily.
164 * Return: 0 on success, negative error code on failure.
166 int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
167 u64 phys, enum kvm_pgtable_prot prot,
168 struct kvm_mmu_memory_cache *mc);
171 * kvm_pgtable_stage2_unmap() - Remove a mapping from a guest stage-2 page-table.
172 * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init().
173 * @addr: Intermediate physical address from which to remove the mapping.
174 * @size: Size of the mapping.
176 * The offset of @addr within a page is ignored and @size is rounded-up to
177 * the next page boundary.
179 * TLB invalidation is performed for each page-table entry cleared during the
180 * unmapping operation and the reference count for the page-table page
181 * containing the cleared entry is decremented, with unreferenced pages being
182 * freed. Unmapping a cacheable page will ensure that it is clean to the PoC if
183 * FWB is not supported by the CPU.
185 * Return: 0 on success, negative error code on failure.
187 int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);
190 * kvm_pgtable_stage2_wrprotect() - Write-protect guest stage-2 address range
191 * without TLB invalidation.
192 * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init().
193 * @addr: Intermediate physical address from which to write-protect.
194 * @size: Size of the range.
196 * The offset of @addr within a page is ignored and @size is rounded-up to
197 * the next page boundary.
199 * Note that it is the caller's responsibility to invalidate the TLB after
200 * calling this function to ensure that the updated permissions are visible to the hardware.
203 * Return: 0 on success, negative error code on failure.
205 int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size);
208 * kvm_pgtable_stage2_mkyoung() - Set the access flag in a page-table entry.
209 * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init().
210 * @addr: Intermediate physical address to identify the page-table entry.
212 * The offset of @addr within a page is ignored.
214 * If there is a valid, leaf page-table entry used to translate @addr, then
215 * set the access flag in that entry.
217 * Return: The old page-table entry prior to setting the flag, 0 on failure.
219 kvm_pte_t kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr);
222 * kvm_pgtable_stage2_mkold() - Clear the access flag in a page-table entry.
223 * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init().
224 * @addr: Intermediate physical address to identify the page-table entry.
226 * The offset of @addr within a page is ignored.
228 * If there is a valid, leaf page-table entry used to translate @addr, then
229 * clear the access flag in that entry.
231 * Note that it is the caller's responsibility to invalidate the TLB after
232 * calling this function to ensure that the updated permissions are visible to the hardware.
235 * Return: The old page-table entry prior to clearing the flag, 0 on failure.
237 kvm_pte_t kvm_pgtable_stage2_mkold(struct kvm_pgtable *pgt, u64 addr);
240 * kvm_pgtable_stage2_relax_perms() - Relax the permissions enforced by a page-table entry.
242 * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init().
243 * @addr: Intermediate physical address to identify the page-table entry.
244 * @prot: Additional permissions to grant for the mapping.
246 * The offset of @addr within a page is ignored.
248 * If there is a valid, leaf page-table entry used to translate @addr, then
249 * relax the permissions in that entry according to the read, write and
250 * execute permissions specified by @prot. No permissions are removed, and
251 * TLB invalidation is performed after updating the entry.
253 * Return: 0 on success, negative error code on failure.
255 int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
256 enum kvm_pgtable_prot prot);
259 * kvm_pgtable_stage2_is_young() - Test whether a page-table entry has the access flag set.
261 * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init().
262 * @addr: Intermediate physical address to identify the page-table entry.
264 * The offset of @addr within a page is ignored.
266 * Return: True if the page-table entry has the access flag set, false otherwise.
268 bool kvm_pgtable_stage2_is_young(struct kvm_pgtable *pgt, u64 addr);
271 * kvm_pgtable_stage2_flush() - Clean and invalidate data cache to Point
272 * of Coherency for guest stage-2 address range.
274 * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init().
275 * @addr: Intermediate physical address from which to flush.
276 * @size: Size of the range.
278 * The offset of @addr within a page is ignored and @size is rounded-up to
279 * the next page boundary.
281 * Return: 0 on success, negative error code on failure.
283 int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size);
286 * kvm_pgtable_walk() - Walk a page-table.
287 * @pgt: Page-table structure initialised by kvm_pgtable_*_init().
288 * @addr: Input address for the start of the walk.
289 * @size: Size of the range to walk.
290 * @walker: Walker callback description.
292 * The offset of @addr within a page is ignored and @size is rounded-up to
293 * the next page boundary.
295 * The walker will walk the page-table entries corresponding to the input
296 * address range specified, visiting entries according to the walker flags.
297 * Invalid entries are treated as leaf entries. Leaf entries are reloaded
298 * after invoking the walker callback, allowing the walker to descend into
299 * a newly installed table.
301 * Returning a negative error code from the walker callback function will
302 * terminate the walk immediately with the same error code.
304 * Return: 0 on success, negative error code on failure.
306 int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
307 struct kvm_pgtable_walker *walker);
309 #endif /* __ARM64_KVM_PGTABLE_H__ */