// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stand-alone page-table allocator for hyp stage-1 and guest stage-2.
 * No bombay mix was harmed in the writing of this file.
 *
 * Copyright (C) 2020 Google LLC
 * Author: Will Deacon <will@kernel.org>
 */

#include <linux/bitfield.h>
#include <asm/kvm_pgtable.h>
#include <asm/stage2_pgtable.h>

#define KVM_PTE_VALID                   BIT(0)

#define KVM_PTE_TYPE                    BIT(1)
#define KVM_PTE_TYPE_BLOCK              0
#define KVM_PTE_TYPE_PAGE               1
#define KVM_PTE_TYPE_TABLE              1

#define KVM_PTE_ADDR_MASK               GENMASK(47, PAGE_SHIFT)
#define KVM_PTE_ADDR_51_48              GENMASK(15, 12)

#define KVM_PTE_LEAF_ATTR_LO            GENMASK(11, 2)

#define KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX GENMASK(4, 2)
#define KVM_PTE_LEAF_ATTR_LO_S1_AP      GENMASK(7, 6)
#define KVM_PTE_LEAF_ATTR_LO_S1_AP_RO   3
#define KVM_PTE_LEAF_ATTR_LO_S1_AP_RW   1
#define KVM_PTE_LEAF_ATTR_LO_S1_SH      GENMASK(9, 8)
#define KVM_PTE_LEAF_ATTR_LO_S1_SH_IS   3
#define KVM_PTE_LEAF_ATTR_LO_S1_AF      BIT(10)

#define KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR GENMASK(5, 2)
#define KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R  BIT(6)
#define KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W  BIT(7)
#define KVM_PTE_LEAF_ATTR_LO_S2_SH      GENMASK(9, 8)
#define KVM_PTE_LEAF_ATTR_LO_S2_SH_IS   3
#define KVM_PTE_LEAF_ATTR_LO_S2_AF      BIT(10)

#define KVM_PTE_LEAF_ATTR_HI            GENMASK(63, 51)

#define KVM_PTE_LEAF_ATTR_HI_S1_XN      BIT(54)

#define KVM_PTE_LEAF_ATTR_HI_S2_XN      BIT(54)

#define KVM_PTE_LEAF_ATTR_S2_PERMS      (KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R | \
                                         KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W | \
                                         KVM_PTE_LEAF_ATTR_HI_S2_XN)

#define KVM_PTE_LEAF_ATTR_S2_IGNORED    GENMASK(58, 55)

#define KVM_INVALID_PTE_OWNER_MASK      GENMASK(63, 56)
#define KVM_MAX_OWNER_ID                1

struct kvm_pgtable_walk_data {
        struct kvm_pgtable              *pgt;
        struct kvm_pgtable_walker       *walker;

        u64                             addr;
        u64                             end;
};

static u64 kvm_granule_shift(u32 level)
{
        /* Assumes KVM_PGTABLE_MAX_LEVELS is 4 */
        return ARM64_HW_PGTABLE_LEVEL_SHIFT(level);
}

static u64 kvm_granule_size(u32 level)
{
        return BIT(kvm_granule_shift(level));
}
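
/*
 * Worked example (illustrative, assuming 4KiB pages, i.e. PAGE_SHIFT == 12):
 *
 *      kvm_granule_shift(3) == 12  ->  kvm_granule_size(3) ==   4KiB
 *      kvm_granule_shift(2) == 21  ->  kvm_granule_size(2) ==   2MiB
 *      kvm_granule_shift(1) == 30  ->  kvm_granule_size(1) ==   1GiB
 *      kvm_granule_shift(0) == 39  ->  kvm_granule_size(0) == 512GiB
 *
 * i.e. each level up the table multiplies the granule by PTRS_PER_PTE.
 */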

#define KVM_PHYS_INVALID (-1ULL)

static bool kvm_phys_is_valid(u64 phys)
{
        return phys < BIT(id_aa64mmfr0_parange_to_phys_shift(ID_AA64MMFR0_PARANGE_MAX));
}

static bool kvm_level_supports_block_mapping(u32 level)
{
        /*
         * Reject invalid block mappings and don't bother with 4TB mappings for
         * 52-bit PAs.
         */
        return !(level == 0 || (PAGE_SIZE != SZ_4K && level == 1));
}

static bool kvm_block_mapping_supported(u64 addr, u64 end, u64 phys, u32 level)
{
        u64 granule = kvm_granule_size(level);

        if (!kvm_level_supports_block_mapping(level))
                return false;

        if (granule > (end - addr))
                return false;

        if (kvm_phys_is_valid(phys) && !IS_ALIGNED(phys, granule))
                return false;

        return IS_ALIGNED(addr, granule);
}
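
/*
 * Example (illustrative, 4KiB pages): mapping [0x40000000, 0x40400000) to
 * phys 0x80000000 at level 2 succeeds, as both addresses are 2MiB-aligned
 * and the remaining range covers a whole 2MiB granule. An IPA of 0x40100000
 * instead fails the final IS_ALIGNED() check and must be mapped at level 3.
 */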

static u32 kvm_pgtable_idx(struct kvm_pgtable_walk_data *data, u32 level)
{
        u64 shift = kvm_granule_shift(level);
        u64 mask = BIT(PAGE_SHIFT - 3) - 1;

        return (data->addr >> shift) & mask;
}

static u32 __kvm_pgd_page_idx(struct kvm_pgtable *pgt, u64 addr)
{
        u64 shift = kvm_granule_shift(pgt->start_level - 1); /* May underflow */
        u64 mask = BIT(pgt->ia_bits) - 1;

        return (addr & mask) >> shift;
}

static u32 kvm_pgd_page_idx(struct kvm_pgtable_walk_data *data)
{
        return __kvm_pgd_page_idx(data->pgt, data->addr);
}

static u32 kvm_pgd_pages(u32 ia_bits, u32 start_level)
{
        struct kvm_pgtable pgt = {
                .ia_bits        = ia_bits,
                .start_level    = start_level,
        };

        return __kvm_pgd_page_idx(&pgt, -1ULL) + 1;
}

static bool kvm_pte_valid(kvm_pte_t pte)
{
        return pte & KVM_PTE_VALID;
}

static bool kvm_pte_table(kvm_pte_t pte, u32 level)
{
        if (level == KVM_PGTABLE_MAX_LEVELS - 1)
                return false;

        if (!kvm_pte_valid(pte))
                return false;

        return FIELD_GET(KVM_PTE_TYPE, pte) == KVM_PTE_TYPE_TABLE;
}

static u64 kvm_pte_to_phys(kvm_pte_t pte)
{
        u64 pa = pte & KVM_PTE_ADDR_MASK;

        if (PAGE_SHIFT == 16)
                pa |= FIELD_GET(KVM_PTE_ADDR_51_48, pte) << 48;

        return pa;
}

static kvm_pte_t kvm_phys_to_pte(u64 pa)
{
        kvm_pte_t pte = pa & KVM_PTE_ADDR_MASK;

        if (PAGE_SHIFT == 16)
                pte |= FIELD_PREP(KVM_PTE_ADDR_51_48, pa >> 48);

        return pte;
}
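
/*
 * With 64KiB pages (PAGE_SHIFT == 16), bits [51:48] of a 52-bit PA do not
 * fit in the PTE's [47:16] output address field and are carried in PTE bits
 * [15:12] instead. Illustrative round-trip, for pa == 0x000f000080000000:
 *
 *      pte = kvm_phys_to_pte(pa); // PTE[15:12] = 0xf, PTE[47:16] = PA[47:16]
 *      WARN_ON(kvm_pte_to_phys(pte) != pa);
 */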

static kvm_pte_t *kvm_pte_follow(kvm_pte_t pte, struct kvm_pgtable_mm_ops *mm_ops)
{
        return mm_ops->phys_to_virt(kvm_pte_to_phys(pte));
}

static void kvm_clear_pte(kvm_pte_t *ptep)
{
        WRITE_ONCE(*ptep, 0);
}

static void kvm_set_table_pte(kvm_pte_t *ptep, kvm_pte_t *childp,
                              struct kvm_pgtable_mm_ops *mm_ops)
{
        kvm_pte_t old = *ptep, pte = kvm_phys_to_pte(mm_ops->virt_to_phys(childp));

        pte |= FIELD_PREP(KVM_PTE_TYPE, KVM_PTE_TYPE_TABLE);
        pte |= KVM_PTE_VALID;

        WARN_ON(kvm_pte_valid(old));
        smp_store_release(ptep, pte);
}

static kvm_pte_t kvm_init_valid_leaf_pte(u64 pa, kvm_pte_t attr, u32 level)
{
        kvm_pte_t pte = kvm_phys_to_pte(pa);
        u64 type = (level == KVM_PGTABLE_MAX_LEVELS - 1) ? KVM_PTE_TYPE_PAGE :
                                                           KVM_PTE_TYPE_BLOCK;

        pte |= attr & (KVM_PTE_LEAF_ATTR_LO | KVM_PTE_LEAF_ATTR_HI);
        pte |= FIELD_PREP(KVM_PTE_TYPE, type);
        pte |= KVM_PTE_VALID;

        return pte;
}

static kvm_pte_t kvm_init_invalid_leaf_owner(u8 owner_id)
{
        return FIELD_PREP(KVM_INVALID_PTE_OWNER_MASK, owner_id);
}

static int kvm_pgtable_visitor_cb(struct kvm_pgtable_walk_data *data, u64 addr,
                                  u32 level, kvm_pte_t *ptep,
                                  enum kvm_pgtable_walk_flags flag)
{
        struct kvm_pgtable_walker *walker = data->walker;
        return walker->cb(addr, data->end, level, ptep, flag, walker->arg);
}

static int __kvm_pgtable_walk(struct kvm_pgtable_walk_data *data,
                              kvm_pte_t *pgtable, u32 level);

static inline int __kvm_pgtable_visit(struct kvm_pgtable_walk_data *data,
                                      kvm_pte_t *ptep, u32 level)
{
        int ret = 0;
        u64 addr = data->addr;
        kvm_pte_t *childp, pte = *ptep;
        bool table = kvm_pte_table(pte, level);
        enum kvm_pgtable_walk_flags flags = data->walker->flags;

        if (table && (flags & KVM_PGTABLE_WALK_TABLE_PRE)) {
                ret = kvm_pgtable_visitor_cb(data, addr, level, ptep,
                                             KVM_PGTABLE_WALK_TABLE_PRE);
        }

        if (!table && (flags & KVM_PGTABLE_WALK_LEAF)) {
                ret = kvm_pgtable_visitor_cb(data, addr, level, ptep,
                                             KVM_PGTABLE_WALK_LEAF);
                pte = *ptep;
                table = kvm_pte_table(pte, level);
        }

        if (ret)
                goto out;

        if (!table) {
                data->addr = ALIGN_DOWN(data->addr, kvm_granule_size(level));
                data->addr += kvm_granule_size(level);
                goto out;
        }

        childp = kvm_pte_follow(pte, data->pgt->mm_ops);
        ret = __kvm_pgtable_walk(data, childp, level + 1);
        if (ret)
                goto out;

        if (flags & KVM_PGTABLE_WALK_TABLE_POST) {
                ret = kvm_pgtable_visitor_cb(data, addr, level, ptep,
                                             KVM_PGTABLE_WALK_TABLE_POST);
        }

out:
        return ret;
}

static int __kvm_pgtable_walk(struct kvm_pgtable_walk_data *data,
                              kvm_pte_t *pgtable, u32 level)
{
        u32 idx;
        int ret = 0;

        if (WARN_ON_ONCE(level >= KVM_PGTABLE_MAX_LEVELS))
                return -EINVAL;

        for (idx = kvm_pgtable_idx(data, level); idx < PTRS_PER_PTE; ++idx) {
                kvm_pte_t *ptep = &pgtable[idx];

                if (data->addr >= data->end)
                        break;

                ret = __kvm_pgtable_visit(data, ptep, level);
                if (ret)
                        break;
        }

        return ret;
}

static int _kvm_pgtable_walk(struct kvm_pgtable_walk_data *data)
{
        u32 idx;
        int ret = 0;
        struct kvm_pgtable *pgt = data->pgt;
        u64 limit = BIT(pgt->ia_bits);

        if (data->addr > limit || data->end > limit)
                return -ERANGE;

        if (!pgt->pgd)
                return -EINVAL;

        for (idx = kvm_pgd_page_idx(data); data->addr < data->end; ++idx) {
                kvm_pte_t *ptep = &pgt->pgd[idx * PTRS_PER_PTE];

                ret = __kvm_pgtable_walk(data, ptep, pgt->start_level);
                if (ret)
                        break;
        }

        return ret;
}

int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
                     struct kvm_pgtable_walker *walker)
{
        struct kvm_pgtable_walk_data walk_data = {
                .pgt    = pgt,
                .addr   = ALIGN_DOWN(addr, PAGE_SIZE),
                .end    = PAGE_ALIGN(walk_data.addr + size),
                .walker = walker,
        };

        return _kvm_pgtable_walk(&walk_data);
}
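
/*
 * Minimal usage sketch (illustrative only; 'count_leaves_cb' and 'nr_leaves'
 * are hypothetical): count the valid leaf entries covering a range.
 *
 *      static int count_leaves_cb(u64 addr, u64 end, u32 level,
 *                                 kvm_pte_t *ptep,
 *                                 enum kvm_pgtable_walk_flags flag,
 *                                 void * const arg)
 *      {
 *              if (kvm_pte_valid(*ptep))
 *                      ++*(u64 *)arg;
 *              return 0;
 *      }
 *
 *      u64 nr_leaves = 0;
 *      struct kvm_pgtable_walker walker = {
 *              .cb     = count_leaves_cb,
 *              .flags  = KVM_PGTABLE_WALK_LEAF,
 *              .arg    = &nr_leaves,
 *      };
 *      kvm_pgtable_walk(pgt, addr, size, &walker);
 */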

struct hyp_map_data {
        u64                             phys;
        kvm_pte_t                       attr;
        struct kvm_pgtable_mm_ops       *mm_ops;
};

static int hyp_set_prot_attr(enum kvm_pgtable_prot prot, kvm_pte_t *ptep)
{
        bool device = prot & KVM_PGTABLE_PROT_DEVICE;
        u32 mtype = device ? MT_DEVICE_nGnRE : MT_NORMAL;
        kvm_pte_t attr = FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX, mtype);
        u32 sh = KVM_PTE_LEAF_ATTR_LO_S1_SH_IS;
        u32 ap = (prot & KVM_PGTABLE_PROT_W) ? KVM_PTE_LEAF_ATTR_LO_S1_AP_RW :
                                               KVM_PTE_LEAF_ATTR_LO_S1_AP_RO;

        if (!(prot & KVM_PGTABLE_PROT_R))
                return -EINVAL;

        if (prot & KVM_PGTABLE_PROT_X) {
                if (prot & KVM_PGTABLE_PROT_W)
                        return -EINVAL;

                if (device)
                        return -EINVAL;
        } else {
                attr |= KVM_PTE_LEAF_ATTR_HI_S1_XN;
        }

        attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_AP, ap);
        attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_SH, sh);
        attr |= KVM_PTE_LEAF_ATTR_LO_S1_AF;
        *ptep = attr;

        return 0;
}
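
/*
 * For example (illustrative), KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W yields
 * a normal-memory, inner-shareable, access-flag-set, read-write,
 * execute-never attribute, whereas KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_X
 * yields the read-only executable variant. W+X and executable device
 * mappings are rejected with -EINVAL, enforcing W^X at EL2.
 */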

static bool hyp_map_walker_try_leaf(u64 addr, u64 end, u32 level,
                                    kvm_pte_t *ptep, struct hyp_map_data *data)
{
        kvm_pte_t new, old = *ptep;
        u64 granule = kvm_granule_size(level), phys = data->phys;

        if (!kvm_block_mapping_supported(addr, end, phys, level))
                return false;

        /* Tolerate KVM recreating the exact same mapping */
        new = kvm_init_valid_leaf_pte(phys, data->attr, level);
        if (old != new && !WARN_ON(kvm_pte_valid(old)))
                smp_store_release(ptep, new);

        data->phys += granule;
        return true;
}

static int hyp_map_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
                          enum kvm_pgtable_walk_flags flag, void * const arg)
{
        kvm_pte_t *childp;
        struct hyp_map_data *data = arg;
        struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;

        if (hyp_map_walker_try_leaf(addr, end, level, ptep, arg))
                return 0;

        if (WARN_ON(level == KVM_PGTABLE_MAX_LEVELS - 1))
                return -EINVAL;

        childp = (kvm_pte_t *)mm_ops->zalloc_page(NULL);
        if (!childp)
                return -ENOMEM;

        kvm_set_table_pte(ptep, childp, mm_ops);
        return 0;
}

int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
                        enum kvm_pgtable_prot prot)
{
        int ret;
        struct hyp_map_data map_data = {
                .phys   = ALIGN_DOWN(phys, PAGE_SIZE),
                .mm_ops = pgt->mm_ops,
        };
        struct kvm_pgtable_walker walker = {
                .cb     = hyp_map_walker,
                .flags  = KVM_PGTABLE_WALK_LEAF,
                .arg    = &map_data,
        };

        ret = hyp_set_prot_attr(prot, &map_data.attr);
        if (ret)
                return ret;

        ret = kvm_pgtable_walk(pgt, addr, size, &walker);
        dsb(ishst);
        isb();
        return ret;
}

int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits,
                         struct kvm_pgtable_mm_ops *mm_ops)
{
        u64 levels = ARM64_HW_PGTABLE_LEVELS(va_bits);

        pgt->pgd = (kvm_pte_t *)mm_ops->zalloc_page(NULL);
        if (!pgt->pgd)
                return -ENOMEM;

        pgt->ia_bits            = va_bits;
        pgt->start_level        = KVM_PGTABLE_MAX_LEVELS - levels;
        pgt->mm_ops             = mm_ops;
        pgt->mmu                = NULL;
        return 0;
}

static int hyp_free_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
                           enum kvm_pgtable_walk_flags flag, void * const arg)
{
        struct kvm_pgtable_mm_ops *mm_ops = arg;

        mm_ops->put_page((void *)kvm_pte_follow(*ptep, mm_ops));
        return 0;
}

void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt)
{
        struct kvm_pgtable_walker walker = {
                .cb     = hyp_free_walker,
                .flags  = KVM_PGTABLE_WALK_TABLE_POST,
                .arg    = pgt->mm_ops,
        };

        WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));
        pgt->mm_ops->put_page(pgt->pgd);
        pgt->pgd = NULL;
}

struct stage2_map_data {
        u64                             phys;
        kvm_pte_t                       attr;
        u8                              owner_id;

        kvm_pte_t                       *anchor;
        kvm_pte_t                       *childp;

        struct kvm_s2_mmu               *mmu;
        void                            *memcache;

        struct kvm_pgtable_mm_ops       *mm_ops;
};

u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift)
{
        u64 vtcr = VTCR_EL2_FLAGS;
        u8 lvls;

        vtcr |= kvm_get_parange(mmfr0) << VTCR_EL2_PS_SHIFT;
        vtcr |= VTCR_EL2_T0SZ(phys_shift);
        /*
         * Use a minimum of 2 page-table levels to avoid splitting
         * host PMD huge pages at stage-2.
         */
        lvls = stage2_pgtable_levels(phys_shift);
        if (lvls < 2)
                lvls = 2;
        vtcr |= VTCR_EL2_LVLS_TO_SL0(lvls);

        /*
         * Enable Hardware Access Flag management unconditionally on all
         * CPUs. The feature is RES0 on CPUs without support and must be
         * ignored by those CPUs.
         */
        vtcr |= VTCR_EL2_HA;

        /* Set the vmid bits */
        vtcr |= (get_vmid_bits(mmfr1) == 16) ?
                VTCR_EL2_VS_16BIT :
                VTCR_EL2_VS_8BIT;

        return vtcr;
}
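
/*
 * Worked example (illustrative, 4KiB pages): for phys_shift == 40, the T0SZ
 * field becomes 64 - 40 = 24 and stage2_pgtable_levels(40) selects a 3-level
 * table, which VTCR_EL2_LVLS_TO_SL0() encodes into SL0; a CPU reporting
 * 16-bit VMID support in mmfr1 additionally gets VTCR_EL2_VS_16BIT.
 */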

static bool stage2_has_fwb(struct kvm_pgtable *pgt)
{
        if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
                return false;

        return !(pgt->flags & KVM_PGTABLE_S2_NOFWB);
}

#define KVM_S2_MEMATTR(pgt, attr) PAGE_S2_MEMATTR(attr, stage2_has_fwb(pgt))

static int stage2_set_prot_attr(struct kvm_pgtable *pgt, enum kvm_pgtable_prot prot,
                                kvm_pte_t *ptep)
{
        bool device = prot & KVM_PGTABLE_PROT_DEVICE;
        kvm_pte_t attr = device ? KVM_S2_MEMATTR(pgt, DEVICE_nGnRE) :
                            KVM_S2_MEMATTR(pgt, NORMAL);
        u32 sh = KVM_PTE_LEAF_ATTR_LO_S2_SH_IS;

        if (!(prot & KVM_PGTABLE_PROT_X))
                attr |= KVM_PTE_LEAF_ATTR_HI_S2_XN;
        else if (device)
                return -EINVAL;

        if (prot & KVM_PGTABLE_PROT_R)
                attr |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R;

        if (prot & KVM_PGTABLE_PROT_W)
                attr |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W;

        attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S2_SH, sh);
        attr |= KVM_PTE_LEAF_ATTR_LO_S2_AF;
        *ptep = attr;

        return 0;
}

static bool stage2_pte_needs_update(kvm_pte_t old, kvm_pte_t new)
{
        if (!kvm_pte_valid(old) || !kvm_pte_valid(new))
                return true;

        return ((old ^ new) & (~KVM_PTE_LEAF_ATTR_S2_PERMS));
}
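
/*
 * Illustrative consequence: downgrading a valid RW mapping of the same PA to
 * RO only flips bits within KVM_PTE_LEAF_ATTR_S2_PERMS, so this returns
 * false and stage2_map_walker_try_leaf() backs off with -EAGAIN, leaving
 * permission changes to the dedicated relax/wrprotect paths.
 */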

static bool stage2_pte_is_counted(kvm_pte_t pte)
{
        /*
         * The refcount tracks valid entries as well as invalid entries that
         * encode ownership of a page by an entity other than the page-table
         * owner, whose id is 0.
         */
        return !!pte;
}

static void stage2_put_pte(kvm_pte_t *ptep, struct kvm_s2_mmu *mmu, u64 addr,
                           u32 level, struct kvm_pgtable_mm_ops *mm_ops)
{
        /*
         * Clear the existing PTE, and perform break-before-make with
         * TLB maintenance if it was valid.
         */
        if (kvm_pte_valid(*ptep)) {
                kvm_clear_pte(ptep);
                kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, addr, level);
        }

        mm_ops->put_page(ptep);
}

static bool stage2_pte_cacheable(struct kvm_pgtable *pgt, kvm_pte_t pte)
{
        u64 memattr = pte & KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR;
        return memattr == KVM_S2_MEMATTR(pgt, NORMAL);
}

static bool stage2_pte_executable(kvm_pte_t pte)
{
        return !(pte & KVM_PTE_LEAF_ATTR_HI_S2_XN);
}

static int stage2_map_walker_try_leaf(u64 addr, u64 end, u32 level,
                                      kvm_pte_t *ptep,
                                      struct stage2_map_data *data)
{
        kvm_pte_t new, old = *ptep;
        u64 granule = kvm_granule_size(level), phys = data->phys;
        struct kvm_pgtable *pgt = data->mmu->pgt;
        struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;

        if (!kvm_block_mapping_supported(addr, end, phys, level))
                return -E2BIG;

        if (kvm_phys_is_valid(phys))
                new = kvm_init_valid_leaf_pte(phys, data->attr, level);
        else
                new = kvm_init_invalid_leaf_owner(data->owner_id);

        if (stage2_pte_is_counted(old)) {
                /*
                 * Skip updating the PTE if we are trying to recreate the exact
                 * same mapping or are only changing the access permissions.
                 * Instead, the vCPU will exit the guest once more if still
                 * needed and then go through the permission-relaxation path.
                 */
                if (!stage2_pte_needs_update(old, new))
                        return -EAGAIN;

                stage2_put_pte(ptep, data->mmu, addr, level, mm_ops);
        }

        /* Perform CMOs before installation of the guest stage-2 PTE */
        if (mm_ops->dcache_clean_inval_poc && stage2_pte_cacheable(pgt, new))
                mm_ops->dcache_clean_inval_poc(kvm_pte_follow(new, mm_ops),
                                                granule);

        if (mm_ops->icache_inval_pou && stage2_pte_executable(new))
                mm_ops->icache_inval_pou(kvm_pte_follow(new, mm_ops), granule);

        smp_store_release(ptep, new);
        if (stage2_pte_is_counted(new))
                mm_ops->get_page(ptep);
        if (kvm_phys_is_valid(phys))
                data->phys += granule;
        return 0;
}

static int stage2_map_walk_table_pre(u64 addr, u64 end, u32 level,
                                     kvm_pte_t *ptep,
                                     struct stage2_map_data *data)
{
        if (data->anchor)
                return 0;

        if (!kvm_block_mapping_supported(addr, end, data->phys, level))
                return 0;

        data->childp = kvm_pte_follow(*ptep, data->mm_ops);
        kvm_clear_pte(ptep);

        /*
         * Invalidate the whole stage-2, as we may have numerous leaf
         * entries below us which would otherwise need invalidating
         * individually.
         */
        kvm_call_hyp(__kvm_tlb_flush_vmid, data->mmu);
        data->anchor = ptep;
        return 0;
}

static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
                                struct stage2_map_data *data)
{
        struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;
        kvm_pte_t *childp, pte = *ptep;
        int ret;

        if (data->anchor) {
                if (stage2_pte_is_counted(pte))
                        mm_ops->put_page(ptep);

                return 0;
        }

        ret = stage2_map_walker_try_leaf(addr, end, level, ptep, data);
        if (ret != -E2BIG)
                return ret;

        if (WARN_ON(level == KVM_PGTABLE_MAX_LEVELS - 1))
                return -EINVAL;

        if (!data->memcache)
                return -ENOMEM;

        childp = mm_ops->zalloc_page(data->memcache);
        if (!childp)
                return -ENOMEM;

        /*
         * If we've run into an existing block mapping then replace it with
         * a table. Accesses beyond 'end' that fall within the new table
         * will be mapped lazily.
         */
        if (stage2_pte_is_counted(pte))
                stage2_put_pte(ptep, data->mmu, addr, level, mm_ops);

        kvm_set_table_pte(ptep, childp, mm_ops);
        mm_ops->get_page(ptep);

        return 0;
}

static int stage2_map_walk_table_post(u64 addr, u64 end, u32 level,
                                      kvm_pte_t *ptep,
                                      struct stage2_map_data *data)
{
        struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;
        kvm_pte_t *childp;
        int ret = 0;

        if (!data->anchor)
                return 0;

        if (data->anchor == ptep) {
                childp = data->childp;
                data->anchor = NULL;
                data->childp = NULL;
                ret = stage2_map_walk_leaf(addr, end, level, ptep, data);
        } else {
                childp = kvm_pte_follow(*ptep, mm_ops);
        }

        mm_ops->put_page(childp);
        mm_ops->put_page(ptep);

        return ret;
}

/*
 * This is a little fiddly, as we use all three of the walk flags. The idea
 * is that the TABLE_PRE callback runs for table entries on the way down,
 * looking for table entries which we could conceivably replace with a
 * block entry for this mapping. If it finds one, then it sets the 'anchor'
 * field in 'struct stage2_map_data' to point at the table entry, before
 * clearing the entry to zero and descending into the now detached table.
 *
 * The behaviour of the LEAF callback then depends on whether or not the
 * anchor has been set. If not, then we're not using a block mapping higher
 * up the table and we perform the mapping at the existing leaves instead.
 * If, on the other hand, the anchor _is_ set, then we drop references to
 * all valid leaves so that the pages beneath the anchor can be freed.
 *
 * Finally, the TABLE_POST callback does nothing if the anchor has not
 * been set, but otherwise frees the page-table pages while walking back up
 * the page-table, installing the block entry when it revisits the anchor
 * pointer and clearing the anchor to NULL.
 */
static int stage2_map_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
                             enum kvm_pgtable_walk_flags flag, void * const arg)
{
        struct stage2_map_data *data = arg;

        switch (flag) {
        case KVM_PGTABLE_WALK_TABLE_PRE:
                return stage2_map_walk_table_pre(addr, end, level, ptep, data);
        case KVM_PGTABLE_WALK_LEAF:
                return stage2_map_walk_leaf(addr, end, level, ptep, data);
        case KVM_PGTABLE_WALK_TABLE_POST:
                return stage2_map_walk_table_post(addr, end, level, ptep, data);
        }

        return -EINVAL;
}
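
/*
 * Worked example (illustrative, 4KiB pages): mapping a 2MiB-aligned, 2MiB
 * IPA range that is currently backed by a level-2 table of 4KiB pages.
 * TABLE_PRE notices that a level-2 block would fit, records the table entry
 * in 'anchor', clears it and invalidates the TLBs; the LEAF visits then drop
 * the refcounts of the detached level-3 entries; finally TABLE_POST revisits
 * the anchor, installs the 2MiB block via stage2_map_walk_leaf() and frees
 * the orphaned level-3 table page.
 */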

int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
                           u64 phys, enum kvm_pgtable_prot prot,
                           void *mc)
{
        int ret;
        struct stage2_map_data map_data = {
                .phys           = ALIGN_DOWN(phys, PAGE_SIZE),
                .mmu            = pgt->mmu,
                .memcache       = mc,
                .mm_ops         = pgt->mm_ops,
        };
        struct kvm_pgtable_walker walker = {
                .cb             = stage2_map_walker,
                .flags          = KVM_PGTABLE_WALK_TABLE_PRE |
                                  KVM_PGTABLE_WALK_LEAF |
                                  KVM_PGTABLE_WALK_TABLE_POST,
                .arg            = &map_data,
        };

        if (WARN_ON((pgt->flags & KVM_PGTABLE_S2_IDMAP) && (addr != phys)))
                return -EINVAL;

        ret = stage2_set_prot_attr(pgt, prot, &map_data.attr);
        if (ret)
                return ret;

        ret = kvm_pgtable_walk(pgt, addr, size, &walker);
        dsb(ishst);
        return ret;
}

int kvm_pgtable_stage2_set_owner(struct kvm_pgtable *pgt, u64 addr, u64 size,
                                 void *mc, u8 owner_id)
{
        int ret;
        struct stage2_map_data map_data = {
                .phys           = KVM_PHYS_INVALID,
                .mmu            = pgt->mmu,
                .memcache       = mc,
                .mm_ops         = pgt->mm_ops,
                .owner_id       = owner_id,
        };
        struct kvm_pgtable_walker walker = {
                .cb             = stage2_map_walker,
                .flags          = KVM_PGTABLE_WALK_TABLE_PRE |
                                  KVM_PGTABLE_WALK_LEAF |
                                  KVM_PGTABLE_WALK_TABLE_POST,
                .arg            = &map_data,
        };

        if (owner_id > KVM_MAX_OWNER_ID)
                return -EINVAL;

        ret = kvm_pgtable_walk(pgt, addr, size, &walker);
        return ret;
}

static int stage2_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
                               enum kvm_pgtable_walk_flags flag,
                               void * const arg)
{
        struct kvm_pgtable *pgt = arg;
        struct kvm_s2_mmu *mmu = pgt->mmu;
        struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops;
        kvm_pte_t pte = *ptep, *childp = NULL;
        bool need_flush = false;

        if (!kvm_pte_valid(pte)) {
                if (stage2_pte_is_counted(pte)) {
                        kvm_clear_pte(ptep);
                        mm_ops->put_page(ptep);
                }
                return 0;
        }

        if (kvm_pte_table(pte, level)) {
                childp = kvm_pte_follow(pte, mm_ops);

                if (mm_ops->page_count(childp) != 1)
                        return 0;
        } else if (stage2_pte_cacheable(pgt, pte)) {
                need_flush = !stage2_has_fwb(pgt);
        }

        /*
         * This is similar to the map() path in that we unmap the entire
         * block entry and rely on the remaining portions being faulted
         * back lazily.
         */
        stage2_put_pte(ptep, mmu, addr, level, mm_ops);

        if (need_flush) {
                kvm_pte_t *pte_follow = kvm_pte_follow(pte, mm_ops);

                dcache_clean_inval_poc((unsigned long)pte_follow,
                                    (unsigned long)pte_follow +
                                            kvm_granule_size(level));
        }

        if (childp)
                mm_ops->put_page(childp);

        return 0;
}

int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size)
{
        struct kvm_pgtable_walker walker = {
                .cb     = stage2_unmap_walker,
                .arg    = pgt,
                .flags  = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
        };

        return kvm_pgtable_walk(pgt, addr, size, &walker);
}

struct stage2_attr_data {
        kvm_pte_t                       attr_set;
        kvm_pte_t                       attr_clr;
        kvm_pte_t                       pte;
        u32                             level;
        struct kvm_pgtable_mm_ops       *mm_ops;
};

static int stage2_attr_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
                              enum kvm_pgtable_walk_flags flag,
                              void * const arg)
{
        kvm_pte_t pte = *ptep;
        struct stage2_attr_data *data = arg;
        struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;

        if (!kvm_pte_valid(pte))
                return 0;

        data->level = level;
        data->pte = pte;
        pte &= ~data->attr_clr;
        pte |= data->attr_set;

        /*
         * We may race with the CPU trying to set the access flag here,
         * but worst-case the access flag update gets lost and will be
         * set on the next access instead.
         */
        if (data->pte != pte) {
                /*
                 * Invalidate instruction cache before updating the guest
                 * stage-2 PTE if we are going to add executable permission.
                 */
                if (mm_ops->icache_inval_pou &&
                    stage2_pte_executable(pte) && !stage2_pte_executable(*ptep))
                        mm_ops->icache_inval_pou(kvm_pte_follow(pte, mm_ops),
                                                  kvm_granule_size(level));
                WRITE_ONCE(*ptep, pte);
        }

        return 0;
}

static int stage2_update_leaf_attrs(struct kvm_pgtable *pgt, u64 addr,
                                    u64 size, kvm_pte_t attr_set,
                                    kvm_pte_t attr_clr, kvm_pte_t *orig_pte,
                                    u32 *level)
{
        int ret;
        kvm_pte_t attr_mask = KVM_PTE_LEAF_ATTR_LO | KVM_PTE_LEAF_ATTR_HI;
        struct stage2_attr_data data = {
                .attr_set       = attr_set & attr_mask,
                .attr_clr       = attr_clr & attr_mask,
                .mm_ops         = pgt->mm_ops,
        };
        struct kvm_pgtable_walker walker = {
                .cb             = stage2_attr_walker,
                .arg            = &data,
                .flags          = KVM_PGTABLE_WALK_LEAF,
        };

        ret = kvm_pgtable_walk(pgt, addr, size, &walker);
        if (ret)
                return ret;

        if (orig_pte)
                *orig_pte = data.pte;

        if (level)
                *level = data.level;
        return 0;
}
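
/*
 * The attribute helpers below are thin wrappers around this walk; e.g.
 * (illustrative) write-protecting a single page is just:
 *
 *      stage2_update_leaf_attrs(pgt, addr, PAGE_SIZE, 0,
 *                               KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W, NULL, NULL);
 */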

int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size)
{
        return stage2_update_leaf_attrs(pgt, addr, size, 0,
                                        KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W,
                                        NULL, NULL);
}

kvm_pte_t kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr)
{
        kvm_pte_t pte = 0;
        stage2_update_leaf_attrs(pgt, addr, 1, KVM_PTE_LEAF_ATTR_LO_S2_AF, 0,
                                 &pte, NULL);
        dsb(ishst);
        return pte;
}

kvm_pte_t kvm_pgtable_stage2_mkold(struct kvm_pgtable *pgt, u64 addr)
{
        kvm_pte_t pte = 0;
        stage2_update_leaf_attrs(pgt, addr, 1, 0, KVM_PTE_LEAF_ATTR_LO_S2_AF,
                                 &pte, NULL);
        /*
         * "But where's the TLBI?!", you scream.
         * "Over in the core code", I sigh.
         *
         * See the '->clear_flush_young()' callback on the KVM mmu notifier.
         */
        return pte;
}

bool kvm_pgtable_stage2_is_young(struct kvm_pgtable *pgt, u64 addr)
{
        kvm_pte_t pte = 0;
        stage2_update_leaf_attrs(pgt, addr, 1, 0, 0, &pte, NULL);
        return pte & KVM_PTE_LEAF_ATTR_LO_S2_AF;
}

int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
                                   enum kvm_pgtable_prot prot)
{
        int ret;
        u32 level;
        kvm_pte_t set = 0, clr = 0;

        if (prot & KVM_PGTABLE_PROT_R)
                set |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R;

        if (prot & KVM_PGTABLE_PROT_W)
                set |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W;

        if (prot & KVM_PGTABLE_PROT_X)
                clr |= KVM_PTE_LEAF_ATTR_HI_S2_XN;

        ret = stage2_update_leaf_attrs(pgt, addr, 1, set, clr, NULL, &level);
        if (!ret)
                kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, pgt->mmu, addr, level);
        return ret;
}

static int stage2_flush_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
                               enum kvm_pgtable_walk_flags flag,
                               void * const arg)
{
        struct kvm_pgtable *pgt = arg;
        struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops;
        kvm_pte_t pte = *ptep;
        kvm_pte_t *pte_follow;

        if (!kvm_pte_valid(pte) || !stage2_pte_cacheable(pgt, pte))
                return 0;

        pte_follow = kvm_pte_follow(pte, mm_ops);
        dcache_clean_inval_poc((unsigned long)pte_follow,
                            (unsigned long)pte_follow +
                                    kvm_granule_size(level));
        return 0;
}

int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size)
{
        struct kvm_pgtable_walker walker = {
                .cb     = stage2_flush_walker,
                .flags  = KVM_PGTABLE_WALK_LEAF,
                .arg    = pgt,
        };

        if (stage2_has_fwb(pgt))
                return 0;

        return kvm_pgtable_walk(pgt, addr, size, &walker);
}

int kvm_pgtable_stage2_init_flags(struct kvm_pgtable *pgt, struct kvm_arch *arch,
                                  struct kvm_pgtable_mm_ops *mm_ops,
                                  enum kvm_pgtable_stage2_flags flags)
{
        size_t pgd_sz;
        u64 vtcr = arch->vtcr;
        u32 ia_bits = VTCR_EL2_IPA(vtcr);
        u32 sl0 = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr);
        u32 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0;

        pgd_sz = kvm_pgd_pages(ia_bits, start_level) * PAGE_SIZE;
        pgt->pgd = mm_ops->zalloc_pages_exact(pgd_sz);
        if (!pgt->pgd)
                return -ENOMEM;

        pgt->ia_bits            = ia_bits;
        pgt->start_level        = start_level;
        pgt->mm_ops             = mm_ops;
        pgt->mmu                = &arch->mmu;
        pgt->flags              = flags;

        /* Ensure zeroed PGD pages are visible to the hardware walker */
        dsb(ishst);
        return 0;
}

static int stage2_free_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
                              enum kvm_pgtable_walk_flags flag,
                              void * const arg)
{
        struct kvm_pgtable_mm_ops *mm_ops = arg;
        kvm_pte_t pte = *ptep;

        if (!stage2_pte_is_counted(pte))
                return 0;

        mm_ops->put_page(ptep);

        if (kvm_pte_table(pte, level))
                mm_ops->put_page(kvm_pte_follow(pte, mm_ops));

        return 0;
}

void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)
{
        size_t pgd_sz;
        struct kvm_pgtable_walker walker = {
                .cb     = stage2_free_walker,
                .flags  = KVM_PGTABLE_WALK_LEAF |
                          KVM_PGTABLE_WALK_TABLE_POST,
                .arg    = pgt->mm_ops,
        };

        WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));
        pgd_sz = kvm_pgd_pages(pgt->ia_bits, pgt->start_level) * PAGE_SIZE;
        pgt->mm_ops->free_pages_exact(pgt->pgd, pgd_sz);
        pgt->pgd = NULL;
}

#define KVM_PTE_LEAF_S2_COMPAT_MASK     (KVM_PTE_LEAF_ATTR_S2_PERMS | \
                                         KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR | \
                                         KVM_PTE_LEAF_ATTR_S2_IGNORED)

static int stage2_check_permission_walker(u64 addr, u64 end, u32 level,
                                          kvm_pte_t *ptep,
                                          enum kvm_pgtable_walk_flags flag,
                                          void * const arg)
{
        kvm_pte_t old_attr, pte = *ptep, *new_attr = arg;

        /*
         * Compatible mappings are either invalid and owned by the page-table
         * owner (whose id is 0), or valid with matching permission attributes.
         */
        if (kvm_pte_valid(pte)) {
                old_attr = pte & KVM_PTE_LEAF_S2_COMPAT_MASK;
                if (old_attr != *new_attr)
                        return -EEXIST;
        } else if (pte) {
                return -EEXIST;
        }

        return 0;
}

int kvm_pgtable_stage2_find_range(struct kvm_pgtable *pgt, u64 addr,
                                  enum kvm_pgtable_prot prot,
                                  struct kvm_mem_range *range)
{
        kvm_pte_t attr;
        struct kvm_pgtable_walker check_perm_walker = {
                .cb             = stage2_check_permission_walker,
                .flags          = KVM_PGTABLE_WALK_LEAF,
                .arg            = &attr,
        };
        u64 granule, start, end;
        u32 level;
        int ret;

        ret = stage2_set_prot_attr(pgt, prot, &attr);
        if (ret)
                return ret;
        attr &= KVM_PTE_LEAF_S2_COMPAT_MASK;

        for (level = pgt->start_level; level < KVM_PGTABLE_MAX_LEVELS; level++) {
                granule = kvm_granule_size(level);
                start = ALIGN_DOWN(addr, granule);
                end = start + granule;

                if (!kvm_level_supports_block_mapping(level))
                        continue;

                if (start < range->start || range->end < end)
                        continue;

                /*
                 * Check the presence of existing mappings with incompatible
                 * permissions within the current block range, and try one level
                 * deeper if one is found.
                 */
                ret = kvm_pgtable_walk(pgt, start, granule, &check_perm_walker);
                if (ret != -EEXIST)
                        break;
        }

        if (!ret) {
                range->start = start;
                range->end = end;
        }

        return ret;
}
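
/*
 * Illustrative usage: the caller seeds 'range' with the widest span it can
 * tolerate ('slot_start'/'slot_end' are hypothetical) and gets back the
 * largest aligned block around 'addr' whose existing mappings are compatible
 * with 'prot':
 *
 *      struct kvm_mem_range range = { .start = slot_start, .end = slot_end };
 *      ret = kvm_pgtable_stage2_find_range(pgt, addr, prot, &range);
 */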