KVM: arm/arm64: Keep GICv2 HYP VAs in kvm_vgic_global_state
virt/kvm/arm/mmu.c (linux-2.6-microblaze.git)
1 /*
2  * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3  * Author: Christoffer Dall <c.dall@virtualopensystems.com>
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License, version 2, as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
17  */
18
19 #include <linux/mman.h>
20 #include <linux/kvm_host.h>
21 #include <linux/io.h>
22 #include <linux/hugetlb.h>
23 #include <linux/sched/signal.h>
24 #include <trace/events/kvm.h>
25 #include <asm/pgalloc.h>
26 #include <asm/cacheflush.h>
27 #include <asm/kvm_arm.h>
28 #include <asm/kvm_mmu.h>
29 #include <asm/kvm_mmio.h>
30 #include <asm/kvm_asm.h>
31 #include <asm/kvm_emulate.h>
32 #include <asm/virt.h>
33 #include <asm/system_misc.h>
34
35 #include "trace.h"
36
37 static pgd_t *boot_hyp_pgd;
38 static pgd_t *hyp_pgd;
39 static pgd_t *merged_hyp_pgd;
40 static DEFINE_MUTEX(kvm_hyp_pgd_mutex);
41
42 static unsigned long hyp_idmap_start;
43 static unsigned long hyp_idmap_end;
44 static phys_addr_t hyp_idmap_vector;
45
46 #define S2_PGD_SIZE     (PTRS_PER_S2_PGD * sizeof(pgd_t))
47 #define hyp_pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))
48
49 #define KVM_S2PTE_FLAG_IS_IOMAP         (1UL << 0)
50 #define KVM_S2_FLAG_LOGGING_ACTIVE      (1UL << 1)
51
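/*
 * A memslot has dirty page logging enabled when it has a dirty bitmap
 * allocated and is not read-only; read-only slots can never be written
 * by the guest and therefore never need logging.
 */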
52 static bool memslot_is_logging(struct kvm_memory_slot *memslot)
53 {
54         return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
55 }
56
57 /**
58  * kvm_flush_remote_tlbs() - flush all VM TLB entries for v7/8
59  * @kvm:        pointer to kvm structure.
60  *
61  * Interface to HYP function to flush all VM TLB entries
62  */
63 void kvm_flush_remote_tlbs(struct kvm *kvm)
64 {
65         kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
66 }
67
68 static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
69 {
70         kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
71 }
72
73 /*
74  * D-Cache management functions. They take the page table entries by
75  * value, as they are flushing the cache using the kernel mapping (or
76  * kmap on 32bit).
77  */
78 static void kvm_flush_dcache_pte(pte_t pte)
79 {
80         __kvm_flush_dcache_pte(pte);
81 }
82
83 static void kvm_flush_dcache_pmd(pmd_t pmd)
84 {
85         __kvm_flush_dcache_pmd(pmd);
86 }
87
88 static void kvm_flush_dcache_pud(pud_t pud)
89 {
90         __kvm_flush_dcache_pud(pud);
91 }
92
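/*
 * A pfn without a valid struct page (!pfn_valid()) is treated as device
 * memory: user_mem_abort() maps it with PAGE_S2_DEVICE attributes, and
 * the D-cache maintenance in the unmap/flush paths is skipped for it.
 */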
93 static bool kvm_is_device_pfn(unsigned long pfn)
94 {
95         return !pfn_valid(pfn);
96 }
97
98 /**
99  * stage2_dissolve_pmd() - clear and flush huge PMD entry
100  * @kvm:        pointer to kvm structure.
101  * @addr:       IPA
102  * @pmd:        pmd pointer for IPA
103  *
104  * Function clears a PMD entry and flushes the 1st and 2nd stage TLBs for
105  * @addr. Marks all pages in the range dirty.
106  */
107 static void stage2_dissolve_pmd(struct kvm *kvm, phys_addr_t addr, pmd_t *pmd)
108 {
109         if (!pmd_thp_or_huge(*pmd))
110                 return;
111
112         pmd_clear(pmd);
113         kvm_tlb_flush_vmid_ipa(kvm, addr);
114         put_page(virt_to_page(pmd));
115 }
116
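/*
 * Per-VCPU cache of pre-allocated page table pages.  The cache is topped
 * up with mmu_topup_memory_cache() in a context that may sleep, and pages
 * are later taken with mmu_memory_cache_alloc() while the kvm->mmu_lock
 * spinlock is held, where allocating would not be safe.  The pattern used
 * by kvm_phys_addr_ioremap() and user_mem_abort() below looks roughly like:
 *
 *	ret = mmu_topup_memory_cache(&cache, KVM_MMU_CACHE_MIN_PAGES,
 *				     KVM_NR_MEM_OBJS);
 *	...
 *	spin_lock(&kvm->mmu_lock);
 *	ret = stage2_set_pte(kvm, &cache, addr, &pte, flags);
 *	spin_unlock(&kvm->mmu_lock);
 */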
117 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
118                                   int min, int max)
119 {
120         void *page;
121
122         BUG_ON(max > KVM_NR_MEM_OBJS);
123         if (cache->nobjs >= min)
124                 return 0;
125         while (cache->nobjs < max) {
126                 page = (void *)__get_free_page(PGALLOC_GFP);
127                 if (!page)
128                         return -ENOMEM;
129                 cache->objects[cache->nobjs++] = page;
130         }
131         return 0;
132 }
133
134 static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
135 {
136         while (mc->nobjs)
137                 free_page((unsigned long)mc->objects[--mc->nobjs]);
138 }
139
140 static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
141 {
142         void *p;
143
144         BUG_ON(!mc || !mc->nobjs);
145         p = mc->objects[--mc->nobjs];
146         return p;
147 }
148
149 static void clear_stage2_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
150 {
151         pud_t *pud_table __maybe_unused = stage2_pud_offset(pgd, 0UL);
152         stage2_pgd_clear(pgd);
153         kvm_tlb_flush_vmid_ipa(kvm, addr);
154         stage2_pud_free(pud_table);
155         put_page(virt_to_page(pgd));
156 }
157
158 static void clear_stage2_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
159 {
160         pmd_t *pmd_table __maybe_unused = stage2_pmd_offset(pud, 0);
161         VM_BUG_ON(stage2_pud_huge(*pud));
162         stage2_pud_clear(pud);
163         kvm_tlb_flush_vmid_ipa(kvm, addr);
164         stage2_pmd_free(pmd_table);
165         put_page(virt_to_page(pud));
166 }
167
168 static void clear_stage2_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
169 {
170         pte_t *pte_table = pte_offset_kernel(pmd, 0);
171         VM_BUG_ON(pmd_thp_or_huge(*pmd));
172         pmd_clear(pmd);
173         kvm_tlb_flush_vmid_ipa(kvm, addr);
174         pte_free_kernel(NULL, pte_table);
175         put_page(virt_to_page(pmd));
176 }
177
178 /*
179  * Unmapping vs dcache management:
180  *
181  * If a guest maps certain memory pages as uncached, all writes will
182  * bypass the data cache and go directly to RAM.  However, the CPUs
183  * can still speculate reads (not writes) and fill cache lines with
184  * data.
185  *
186  * Those cache lines will be *clean* cache lines though, so a
187  * clean+invalidate operation is equivalent to an invalidate
188  * operation, because no cache lines are marked dirty.
189  *
190  * Those clean cache lines could be filled prior to an uncached write
191  * by the guest, and the cache coherent IO subsystem would therefore
192  * end up writing old data to disk.
193  *
194  * This is why right after unmapping a page/section and invalidating
195  * the corresponding TLBs, we call kvm_flush_dcache_p*() to make sure
196  * the IO subsystem will never hit in the cache.
197  */
198 static void unmap_stage2_ptes(struct kvm *kvm, pmd_t *pmd,
199                        phys_addr_t addr, phys_addr_t end)
200 {
201         phys_addr_t start_addr = addr;
202         pte_t *pte, *start_pte;
203
204         start_pte = pte = pte_offset_kernel(pmd, addr);
205         do {
206                 if (!pte_none(*pte)) {
207                         pte_t old_pte = *pte;
208
209                         kvm_set_pte(pte, __pte(0));
210                         kvm_tlb_flush_vmid_ipa(kvm, addr);
211
212                         /* No need to invalidate the cache for device mappings */
213                         if (!kvm_is_device_pfn(pte_pfn(old_pte)))
214                                 kvm_flush_dcache_pte(old_pte);
215
216                         put_page(virt_to_page(pte));
217                 }
218         } while (pte++, addr += PAGE_SIZE, addr != end);
219
220         if (stage2_pte_table_empty(start_pte))
221                 clear_stage2_pmd_entry(kvm, pmd, start_addr);
222 }
223
224 static void unmap_stage2_pmds(struct kvm *kvm, pud_t *pud,
225                        phys_addr_t addr, phys_addr_t end)
226 {
227         phys_addr_t next, start_addr = addr;
228         pmd_t *pmd, *start_pmd;
229
230         start_pmd = pmd = stage2_pmd_offset(pud, addr);
231         do {
232                 next = stage2_pmd_addr_end(addr, end);
233                 if (!pmd_none(*pmd)) {
234                         if (pmd_thp_or_huge(*pmd)) {
235                                 pmd_t old_pmd = *pmd;
236
237                                 pmd_clear(pmd);
238                                 kvm_tlb_flush_vmid_ipa(kvm, addr);
239
240                                 kvm_flush_dcache_pmd(old_pmd);
241
242                                 put_page(virt_to_page(pmd));
243                         } else {
244                                 unmap_stage2_ptes(kvm, pmd, addr, next);
245                         }
246                 }
247         } while (pmd++, addr = next, addr != end);
248
249         if (stage2_pmd_table_empty(start_pmd))
250                 clear_stage2_pud_entry(kvm, pud, start_addr);
251 }
252
253 static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd,
254                        phys_addr_t addr, phys_addr_t end)
255 {
256         phys_addr_t next, start_addr = addr;
257         pud_t *pud, *start_pud;
258
259         start_pud = pud = stage2_pud_offset(pgd, addr);
260         do {
261                 next = stage2_pud_addr_end(addr, end);
262                 if (!stage2_pud_none(*pud)) {
263                         if (stage2_pud_huge(*pud)) {
264                                 pud_t old_pud = *pud;
265
266                                 stage2_pud_clear(pud);
267                                 kvm_tlb_flush_vmid_ipa(kvm, addr);
268                                 kvm_flush_dcache_pud(old_pud);
269                                 put_page(virt_to_page(pud));
270                         } else {
271                                 unmap_stage2_pmds(kvm, pud, addr, next);
272                         }
273                 }
274         } while (pud++, addr = next, addr != end);
275
276         if (stage2_pud_table_empty(start_pud))
277                 clear_stage2_pgd_entry(kvm, pgd, start_addr);
278 }
279
280 /**
281  * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
282  * @kvm:   The VM pointer
283  * @start: The intermediate physical base address of the range to unmap
284  * @size:  The size of the area to unmap
285  *
286  * Clear a range of stage-2 mappings, lowering the various ref-counts.  Must
287  * be called while holding mmu_lock (unless for freeing the stage2 pgd before
288  * destroying the VM), otherwise another faulting VCPU may come in and mess
289  * with things behind our backs.
290  */
291 static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
292 {
293         pgd_t *pgd;
294         phys_addr_t addr = start, end = start + size;
295         phys_addr_t next;
296
297         assert_spin_locked(&kvm->mmu_lock);
298         pgd = kvm->arch.pgd + stage2_pgd_index(addr);
299         do {
300                 /*
301                  * Make sure the page table is still active, as another thread
302                  * could have freed the page table while we released
303                  * the lock.
304                  */
305                 if (!READ_ONCE(kvm->arch.pgd))
306                         break;
307                 next = stage2_pgd_addr_end(addr, end);
308                 if (!stage2_pgd_none(*pgd))
309                         unmap_stage2_puds(kvm, pgd, addr, next);
310                 /*
311                  * If the range is too large, release the kvm->mmu_lock
312                  * to prevent starvation and lockup detector warnings.
313                  */
314                 if (next != end)
315                         cond_resched_lock(&kvm->mmu_lock);
316         } while (pgd++, addr = next, addr != end);
317 }
318
319 static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
320                               phys_addr_t addr, phys_addr_t end)
321 {
322         pte_t *pte;
323
324         pte = pte_offset_kernel(pmd, addr);
325         do {
326                 if (!pte_none(*pte) && !kvm_is_device_pfn(pte_pfn(*pte)))
327                         kvm_flush_dcache_pte(*pte);
328         } while (pte++, addr += PAGE_SIZE, addr != end);
329 }
330
331 static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
332                               phys_addr_t addr, phys_addr_t end)
333 {
334         pmd_t *pmd;
335         phys_addr_t next;
336
337         pmd = stage2_pmd_offset(pud, addr);
338         do {
339                 next = stage2_pmd_addr_end(addr, end);
340                 if (!pmd_none(*pmd)) {
341                         if (pmd_thp_or_huge(*pmd))
342                                 kvm_flush_dcache_pmd(*pmd);
343                         else
344                                 stage2_flush_ptes(kvm, pmd, addr, next);
345                 }
346         } while (pmd++, addr = next, addr != end);
347 }
348
349 static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
350                               phys_addr_t addr, phys_addr_t end)
351 {
352         pud_t *pud;
353         phys_addr_t next;
354
355         pud = stage2_pud_offset(pgd, addr);
356         do {
357                 next = stage2_pud_addr_end(addr, end);
358                 if (!stage2_pud_none(*pud)) {
359                         if (stage2_pud_huge(*pud))
360                                 kvm_flush_dcache_pud(*pud);
361                         else
362                                 stage2_flush_pmds(kvm, pud, addr, next);
363                 }
364         } while (pud++, addr = next, addr != end);
365 }
366
367 static void stage2_flush_memslot(struct kvm *kvm,
368                                  struct kvm_memory_slot *memslot)
369 {
370         phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
371         phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
372         phys_addr_t next;
373         pgd_t *pgd;
374
375         pgd = kvm->arch.pgd + stage2_pgd_index(addr);
376         do {
377                 next = stage2_pgd_addr_end(addr, end);
378                 stage2_flush_puds(kvm, pgd, addr, next);
379         } while (pgd++, addr = next, addr != end);
380 }
381
382 /**
383  * stage2_flush_vm - Invalidate cache for pages mapped in stage 2
384  * @kvm: The struct kvm pointer
385  *
386  * Go through the stage 2 page tables and invalidate any cache lines
387  * backing memory already mapped to the VM.
388  */
389 static void stage2_flush_vm(struct kvm *kvm)
390 {
391         struct kvm_memslots *slots;
392         struct kvm_memory_slot *memslot;
393         int idx;
394
395         idx = srcu_read_lock(&kvm->srcu);
396         spin_lock(&kvm->mmu_lock);
397
398         slots = kvm_memslots(kvm);
399         kvm_for_each_memslot(memslot, slots)
400                 stage2_flush_memslot(kvm, memslot);
401
402         spin_unlock(&kvm->mmu_lock);
403         srcu_read_unlock(&kvm->srcu, idx);
404 }
405
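/*
 * The clear_hyp_*_entry() helpers below free a now-empty lower-level table
 * and drop the refcount taken on the containing table page when the entry
 * was populated.  Unlike the stage-2 variants above, no TLB invalidation is
 * done here: HYP mappings are only removed at hyp teardown (see the comment
 * in unmap_hyp_range()).
 */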
406 static void clear_hyp_pgd_entry(pgd_t *pgd)
407 {
408         pud_t *pud_table __maybe_unused = pud_offset(pgd, 0UL);
409         pgd_clear(pgd);
410         pud_free(NULL, pud_table);
411         put_page(virt_to_page(pgd));
412 }
413
414 static void clear_hyp_pud_entry(pud_t *pud)
415 {
416         pmd_t *pmd_table __maybe_unused = pmd_offset(pud, 0);
417         VM_BUG_ON(pud_huge(*pud));
418         pud_clear(pud);
419         pmd_free(NULL, pmd_table);
420         put_page(virt_to_page(pud));
421 }
422
423 static void clear_hyp_pmd_entry(pmd_t *pmd)
424 {
425         pte_t *pte_table = pte_offset_kernel(pmd, 0);
426         VM_BUG_ON(pmd_thp_or_huge(*pmd));
427         pmd_clear(pmd);
428         pte_free_kernel(NULL, pte_table);
429         put_page(virt_to_page(pmd));
430 }
431
432 static void unmap_hyp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
433 {
434         pte_t *pte, *start_pte;
435
436         start_pte = pte = pte_offset_kernel(pmd, addr);
437         do {
438                 if (!pte_none(*pte)) {
439                         kvm_set_pte(pte, __pte(0));
440                         put_page(virt_to_page(pte));
441                 }
442         } while (pte++, addr += PAGE_SIZE, addr != end);
443
444         if (hyp_pte_table_empty(start_pte))
445                 clear_hyp_pmd_entry(pmd);
446 }
447
448 static void unmap_hyp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end)
449 {
450         phys_addr_t next;
451         pmd_t *pmd, *start_pmd;
452
453         start_pmd = pmd = pmd_offset(pud, addr);
454         do {
455                 next = pmd_addr_end(addr, end);
456                 /* Hyp doesn't use huge pmds */
457                 if (!pmd_none(*pmd))
458                         unmap_hyp_ptes(pmd, addr, next);
459         } while (pmd++, addr = next, addr != end);
460
461         if (hyp_pmd_table_empty(start_pmd))
462                 clear_hyp_pud_entry(pud);
463 }
464
465 static void unmap_hyp_puds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end)
466 {
467         phys_addr_t next;
468         pud_t *pud, *start_pud;
469
470         start_pud = pud = pud_offset(pgd, addr);
471         do {
472                 next = pud_addr_end(addr, end);
473                 /* Hyp doesn't use huge puds */
474                 if (!pud_none(*pud))
475                         unmap_hyp_pmds(pud, addr, next);
476         } while (pud++, addr = next, addr != end);
477
478         if (hyp_pud_table_empty(start_pud))
479                 clear_hyp_pgd_entry(pgd);
480 }
481
482 static void unmap_hyp_range(pgd_t *pgdp, phys_addr_t start, u64 size)
483 {
484         pgd_t *pgd;
485         phys_addr_t addr = start, end = start + size;
486         phys_addr_t next;
487
488         /*
489          * We don't unmap anything from HYP, except at the hyp tear down.
490          * Hence, we don't have to invalidate the TLBs here.
491          */
492         pgd = pgdp + pgd_index(addr);
493         do {
494                 next = pgd_addr_end(addr, end);
495                 if (!pgd_none(*pgd))
496                         unmap_hyp_puds(pgd, addr, next);
497         } while (pgd++, addr = next, addr != end);
498 }
499
500 /**
501  * free_hyp_pgds - free Hyp-mode page tables
502  *
503  * Assumes hyp_pgd is a page table used strictly in Hyp-mode and
504  * therefore contains either mappings in the kernel memory area (above
505  * PAGE_OFFSET), or device mappings in the vmalloc range (from
506  * VMALLOC_START to VMALLOC_END).
507  *
508  * boot_hyp_pgd should only map two pages for the init code.
509  */
510 void free_hyp_pgds(void)
511 {
512         mutex_lock(&kvm_hyp_pgd_mutex);
513
514         if (boot_hyp_pgd) {
515                 unmap_hyp_range(boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
516                 free_pages((unsigned long)boot_hyp_pgd, hyp_pgd_order);
517                 boot_hyp_pgd = NULL;
518         }
519
520         if (hyp_pgd) {
521                 unmap_hyp_range(hyp_pgd, hyp_idmap_start, PAGE_SIZE);
522                 unmap_hyp_range(hyp_pgd, kern_hyp_va(PAGE_OFFSET),
523                                 (uintptr_t)high_memory - PAGE_OFFSET);
524                 unmap_hyp_range(hyp_pgd, kern_hyp_va(VMALLOC_START),
525                                 VMALLOC_END - VMALLOC_START);
526
527                 free_pages((unsigned long)hyp_pgd, hyp_pgd_order);
528                 hyp_pgd = NULL;
529         }
530         if (merged_hyp_pgd) {
531                 clear_page(merged_hyp_pgd);
532                 free_page((unsigned long)merged_hyp_pgd);
533                 merged_hyp_pgd = NULL;
534         }
535
536         mutex_unlock(&kvm_hyp_pgd_mutex);
537 }
538
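/*
 * The create_hyp_*_mappings() helpers below build the HYP page tables one
 * level at a time: missing tables are allocated, the parent entry is
 * populated, a refcount is taken on the parent table page, and the updated
 * descriptors are cleaned to the PoC (presumably so the HYP table walker
 * sees them even before caching is fully set up at EL2).
 */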
539 static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
540                                     unsigned long end, unsigned long pfn,
541                                     pgprot_t prot)
542 {
543         pte_t *pte;
544         unsigned long addr;
545
546         addr = start;
547         do {
548                 pte = pte_offset_kernel(pmd, addr);
549                 kvm_set_pte(pte, pfn_pte(pfn, prot));
550                 get_page(virt_to_page(pte));
551                 kvm_flush_dcache_to_poc(pte, sizeof(*pte));
552                 pfn++;
553         } while (addr += PAGE_SIZE, addr != end);
554 }
555
556 static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
557                                    unsigned long end, unsigned long pfn,
558                                    pgprot_t prot)
559 {
560         pmd_t *pmd;
561         pte_t *pte;
562         unsigned long addr, next;
563
564         addr = start;
565         do {
566                 pmd = pmd_offset(pud, addr);
567
568                 BUG_ON(pmd_sect(*pmd));
569
570                 if (pmd_none(*pmd)) {
571                         pte = pte_alloc_one_kernel(NULL, addr);
572                         if (!pte) {
573                                 kvm_err("Cannot allocate Hyp pte\n");
574                                 return -ENOMEM;
575                         }
576                         pmd_populate_kernel(NULL, pmd, pte);
577                         get_page(virt_to_page(pmd));
578                         kvm_flush_dcache_to_poc(pmd, sizeof(*pmd));
579                 }
580
581                 next = pmd_addr_end(addr, end);
582
583                 create_hyp_pte_mappings(pmd, addr, next, pfn, prot);
584                 pfn += (next - addr) >> PAGE_SHIFT;
585         } while (addr = next, addr != end);
586
587         return 0;
588 }
589
590 static int create_hyp_pud_mappings(pgd_t *pgd, unsigned long start,
591                                    unsigned long end, unsigned long pfn,
592                                    pgprot_t prot)
593 {
594         pud_t *pud;
595         pmd_t *pmd;
596         unsigned long addr, next;
597         int ret;
598
599         addr = start;
600         do {
601                 pud = pud_offset(pgd, addr);
602
603                 if (pud_none_or_clear_bad(pud)) {
604                         pmd = pmd_alloc_one(NULL, addr);
605                         if (!pmd) {
606                                 kvm_err("Cannot allocate Hyp pmd\n");
607                                 return -ENOMEM;
608                         }
609                         pud_populate(NULL, pud, pmd);
610                         get_page(virt_to_page(pud));
611                         kvm_flush_dcache_to_poc(pud, sizeof(*pud));
612                 }
613
614                 next = pud_addr_end(addr, end);
615                 ret = create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
616                 if (ret)
617                         return ret;
618                 pfn += (next - addr) >> PAGE_SHIFT;
619         } while (addr = next, addr != end);
620
621         return 0;
622 }
623
624 static int __create_hyp_mappings(pgd_t *pgdp, unsigned long ptrs_per_pgd,
625                                  unsigned long start, unsigned long end,
626                                  unsigned long pfn, pgprot_t prot)
627 {
628         pgd_t *pgd;
629         pud_t *pud;
630         unsigned long addr, next;
631         int err = 0;
632
633         mutex_lock(&kvm_hyp_pgd_mutex);
634         addr = start & PAGE_MASK;
635         end = PAGE_ALIGN(end);
636         do {
637                 pgd = pgdp + ((addr >> PGDIR_SHIFT) & (ptrs_per_pgd - 1));
638
639                 if (pgd_none(*pgd)) {
640                         pud = pud_alloc_one(NULL, addr);
641                         if (!pud) {
642                                 kvm_err("Cannot allocate Hyp pud\n");
643                                 err = -ENOMEM;
644                                 goto out;
645                         }
646                         pgd_populate(NULL, pgd, pud);
647                         get_page(virt_to_page(pgd));
648                         kvm_flush_dcache_to_poc(pgd, sizeof(*pgd));
649                 }
650
651                 next = pgd_addr_end(addr, end);
652                 err = create_hyp_pud_mappings(pgd, addr, next, pfn, prot);
653                 if (err)
654                         goto out;
655                 pfn += (next - addr) >> PAGE_SHIFT;
656         } while (addr = next, addr != end);
657 out:
658         mutex_unlock(&kvm_hyp_pgd_mutex);
659         return err;
660 }
661
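/*
 * Translate a kernel virtual address to a physical address: linear-map
 * addresses go through __pa(), while vmalloc addresses are resolved page
 * by page via vmalloc_to_page().
 */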
662 static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
663 {
664         if (!is_vmalloc_addr(kaddr)) {
665                 BUG_ON(!virt_addr_valid(kaddr));
666                 return __pa(kaddr);
667         } else {
668                 return page_to_phys(vmalloc_to_page(kaddr)) +
669                        offset_in_page(kaddr);
670         }
671 }
672
673 /**
674  * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
675  * @from:       The virtual kernel start address of the range
676  * @to:         The virtual kernel end address of the range (exclusive)
677  * @prot:       The protection to be applied to this range
678  *
679  * The same virtual address as the kernel virtual address is also used
680  * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying
681  * physical pages.
682  */
683 int create_hyp_mappings(void *from, void *to, pgprot_t prot)
684 {
685         phys_addr_t phys_addr;
686         unsigned long virt_addr;
687         unsigned long start = kern_hyp_va((unsigned long)from);
688         unsigned long end = kern_hyp_va((unsigned long)to);
689
690         if (is_kernel_in_hyp_mode())
691                 return 0;
692
693         start = start & PAGE_MASK;
694         end = PAGE_ALIGN(end);
695
696         for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
697                 int err;
698
699                 phys_addr = kvm_kaddr_to_phys(from + virt_addr - start);
700                 err = __create_hyp_mappings(hyp_pgd, PTRS_PER_PGD,
701                                             virt_addr, virt_addr + PAGE_SIZE,
702                                             __phys_to_pfn(phys_addr),
703                                             prot);
704                 if (err)
705                         return err;
706         }
707
708         return 0;
709 }
710
711 /**
712  * create_hyp_io_mappings - Map IO into both kernel and HYP
713  * @phys_addr:  The physical start address which gets mapped
714  * @size:       Size of the region being mapped
715  * @kaddr:      Kernel VA for this mapping
716  * @haddr:      HYP VA for this mapping
717  */
718 int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
719                            void __iomem **kaddr,
720                            void __iomem **haddr)
721 {
722         unsigned long start, end;
723         int ret;
724
725         *kaddr = ioremap(phys_addr, size);
726         if (!*kaddr)
727                 return -ENOMEM;
728
729         if (is_kernel_in_hyp_mode()) {
730                 *haddr = *kaddr;
731                 return 0;
732         }
733
734
735         start = kern_hyp_va((unsigned long)*kaddr);
736         end = kern_hyp_va((unsigned long)*kaddr + size);
737         ret = __create_hyp_mappings(hyp_pgd, PTRS_PER_PGD, start, end,
738                                      __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE);
739
740         if (ret) {
741                 iounmap(*kaddr);
742                 *kaddr = NULL;
743                 return ret;
744         }
745
746         *haddr = (void __iomem *)start;
747         return 0;
748 }
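/*
 * Note that the HYP VA of the mapping is handed back through @haddr, so
 * callers can cache it instead of recomputing it with kern_hyp_va() later;
 * e.g. the GICv2 code presumably keeps these VAs in kvm_vgic_global_state,
 * as the commit subject above suggests.
 */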
749
750 /**
751  * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
752  * @kvm:        The KVM struct pointer for the VM.
753  *
754  * Allocates only the stage-2 HW PGD level table(s) (can support either full
755  * 40-bit input addresses or limited to 32-bit input addresses). Clears the
756  * allocated pages.
757  *
758  * Note we don't need locking here as this is only called when the VM is
759  * created, which can only be done once.
760  */
761 int kvm_alloc_stage2_pgd(struct kvm *kvm)
762 {
763         pgd_t *pgd;
764
765         if (kvm->arch.pgd != NULL) {
766                 kvm_err("kvm_arch already initialized?\n");
767                 return -EINVAL;
768         }
769
770         /* Allocate the HW PGD, making sure that each page gets its own refcount */
771         pgd = alloc_pages_exact(S2_PGD_SIZE, GFP_KERNEL | __GFP_ZERO);
772         if (!pgd)
773                 return -ENOMEM;
774
775         kvm->arch.pgd = pgd;
776         return 0;
777 }
778
779 static void stage2_unmap_memslot(struct kvm *kvm,
780                                  struct kvm_memory_slot *memslot)
781 {
782         hva_t hva = memslot->userspace_addr;
783         phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
784         phys_addr_t size = PAGE_SIZE * memslot->npages;
785         hva_t reg_end = hva + size;
786
787         /*
788          * A memory region could potentially cover multiple VMAs, and any holes
789          * between them, so iterate over all of them to find out if we should
790          * unmap any of them.
791          *
792          *     +--------------------------------------------+
793          * +---------------+----------------+   +----------------+
794          * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
795          * +---------------+----------------+   +----------------+
796          *     |               memory region                |
797          *     +--------------------------------------------+
798          */
799         do {
800                 struct vm_area_struct *vma = find_vma(current->mm, hva);
801                 hva_t vm_start, vm_end;
802
803                 if (!vma || vma->vm_start >= reg_end)
804                         break;
805
806                 /*
807                  * Take the intersection of this VMA with the memory region
808                  */
809                 vm_start = max(hva, vma->vm_start);
810                 vm_end = min(reg_end, vma->vm_end);
811
812                 if (!(vma->vm_flags & VM_PFNMAP)) {
813                         gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
814                         unmap_stage2_range(kvm, gpa, vm_end - vm_start);
815                 }
816                 hva = vm_end;
817         } while (hva < reg_end);
818 }
819
820 /**
821  * stage2_unmap_vm - Unmap Stage-2 RAM mappings
822  * @kvm: The struct kvm pointer
823  *
824  * Go through the memory regions and unmap any regular RAM
825  * backing memory already mapped to the VM.
826  */
827 void stage2_unmap_vm(struct kvm *kvm)
828 {
829         struct kvm_memslots *slots;
830         struct kvm_memory_slot *memslot;
831         int idx;
832
833         idx = srcu_read_lock(&kvm->srcu);
834         down_read(&current->mm->mmap_sem);
835         spin_lock(&kvm->mmu_lock);
836
837         slots = kvm_memslots(kvm);
838         kvm_for_each_memslot(memslot, slots)
839                 stage2_unmap_memslot(kvm, memslot);
840
841         spin_unlock(&kvm->mmu_lock);
842         up_read(&current->mm->mmap_sem);
843         srcu_read_unlock(&kvm->srcu, idx);
844 }
845
846 /**
847  * kvm_free_stage2_pgd - free all stage-2 tables
848  * @kvm:        The KVM struct pointer for the VM.
849  *
850  * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
851  * underlying level-2 and level-3 tables before freeing the actual level-1 table
852  * and setting the struct pointer to NULL.
853  */
854 void kvm_free_stage2_pgd(struct kvm *kvm)
855 {
856         void *pgd = NULL;
857
858         spin_lock(&kvm->mmu_lock);
859         if (kvm->arch.pgd) {
860                 unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
861                 pgd = READ_ONCE(kvm->arch.pgd);
862                 kvm->arch.pgd = NULL;
863         }
864         spin_unlock(&kvm->mmu_lock);
865
866         /* Free the HW pgd, one page at a time */
867         if (pgd)
868                 free_pages_exact(pgd, S2_PGD_SIZE);
869 }
870
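/*
 * stage2_get_pud()/stage2_get_pmd() walk the stage-2 tables down to the
 * requested level, allocating any missing intermediate table from @cache
 * and taking a refcount on the parent table page.  When @cache is NULL the
 * walk is a pure lookup and returns NULL if a level is missing, which is
 * how callers like stage2_is_exec() and handle_access_fault() use it.
 */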
871 static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
872                              phys_addr_t addr)
873 {
874         pgd_t *pgd;
875         pud_t *pud;
876
877         pgd = kvm->arch.pgd + stage2_pgd_index(addr);
878         if (WARN_ON(stage2_pgd_none(*pgd))) {
879                 if (!cache)
880                         return NULL;
881                 pud = mmu_memory_cache_alloc(cache);
882                 stage2_pgd_populate(pgd, pud);
883                 get_page(virt_to_page(pgd));
884         }
885
886         return stage2_pud_offset(pgd, addr);
887 }
888
889 static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
890                              phys_addr_t addr)
891 {
892         pud_t *pud;
893         pmd_t *pmd;
894
895         pud = stage2_get_pud(kvm, cache, addr);
896         if (!pud)
897                 return NULL;
898
899         if (stage2_pud_none(*pud)) {
900                 if (!cache)
901                         return NULL;
902                 pmd = mmu_memory_cache_alloc(cache);
903                 stage2_pud_populate(pud, pmd);
904                 get_page(virt_to_page(pud));
905         }
906
907         return stage2_pmd_offset(pud, addr);
908 }
909
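/*
 * Install a block mapping at PMD level (PMD_SIZE, typically 2MB with 4K
 * pages).  If an entry was already present it is cleared and the TLB
 * invalidated before the new PMD is written; otherwise a refcount is taken
 * on the containing table page.
 */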
910 static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
911                                *cache, phys_addr_t addr, const pmd_t *new_pmd)
912 {
913         pmd_t *pmd, old_pmd;
914
915         pmd = stage2_get_pmd(kvm, cache, addr);
916         VM_BUG_ON(!pmd);
917
918         /*
919          * Mapping in huge pages should only happen through a fault.  If a
920          * page is merged into a transparent huge page, the individual
921          * subpages of that huge page should be unmapped through MMU
922          * notifiers before we get here.
923          *
924  * Merging of CompoundPages is not supported; they should instead be
925  * split first, unmapped, merged, and mapped back in on demand.
926          */
927         VM_BUG_ON(pmd_present(*pmd) && pmd_pfn(*pmd) != pmd_pfn(*new_pmd));
928
929         old_pmd = *pmd;
930         if (pmd_present(old_pmd)) {
931                 pmd_clear(pmd);
932                 kvm_tlb_flush_vmid_ipa(kvm, addr);
933         } else {
934                 get_page(virt_to_page(pmd));
935         }
936
937         kvm_set_pmd(pmd, *new_pmd);
938         return 0;
939 }
940
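/*
 * Report whether the stage-2 mapping covering @addr, at either PMD or PTE
 * level, already has execute permission.  Used on permission faults to
 * preserve the executable attribute when the mapping is rewritten.
 */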
941 static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr)
942 {
943         pmd_t *pmdp;
944         pte_t *ptep;
945
946         pmdp = stage2_get_pmd(kvm, NULL, addr);
947         if (!pmdp || pmd_none(*pmdp) || !pmd_present(*pmdp))
948                 return false;
949
950         if (pmd_thp_or_huge(*pmdp))
951                 return kvm_s2pmd_exec(pmdp);
952
953         ptep = pte_offset_kernel(pmdp, addr);
954         if (!ptep || pte_none(*ptep) || !pte_present(*ptep))
955                 return false;
956
957         return kvm_s2pte_exec(ptep);
958 }
959
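/*
 * Install a page-sized stage-2 mapping.  When KVM_S2_FLAG_LOGGING_ACTIVE
 * is set, a huge PMD covering @addr is first dissolved so that dirty
 * logging can track writes at page granularity.  When
 * KVM_S2PTE_FLAG_IS_IOMAP is set, finding an already present PTE is
 * treated as an error (-EFAULT).  Calls with a NULL @cache (from
 * kvm_set_spte_hva) are ignored for unallocated ranges.
 */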
960 static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
961                           phys_addr_t addr, const pte_t *new_pte,
962                           unsigned long flags)
963 {
964         pmd_t *pmd;
965         pte_t *pte, old_pte;
966         bool iomap = flags & KVM_S2PTE_FLAG_IS_IOMAP;
967         bool logging_active = flags & KVM_S2_FLAG_LOGGING_ACTIVE;
968
969         VM_BUG_ON(logging_active && !cache);
970
971         /* Create stage-2 page table mapping - Levels 0 and 1 */
972         pmd = stage2_get_pmd(kvm, cache, addr);
973         if (!pmd) {
974                 /*
975                  * Ignore calls from kvm_set_spte_hva for unallocated
976                  * address ranges.
977                  */
978                 return 0;
979         }
980
981         /*
982          * While dirty page logging is active, dissolve a huge PMD and then
983          * continue on to allocate a page.
984          */
985         if (logging_active)
986                 stage2_dissolve_pmd(kvm, addr, pmd);
987
988         /* Create stage-2 page mappings - Level 2 */
989         if (pmd_none(*pmd)) {
990                 if (!cache)
991                         return 0; /* ignore calls from kvm_set_spte_hva */
992                 pte = mmu_memory_cache_alloc(cache);
993                 pmd_populate_kernel(NULL, pmd, pte);
994                 get_page(virt_to_page(pmd));
995         }
996
997         pte = pte_offset_kernel(pmd, addr);
998
999         if (iomap && pte_present(*pte))
1000                 return -EFAULT;
1001
1002         /* Create 2nd stage page table mapping - Level 3 */
1003         old_pte = *pte;
1004         if (pte_present(old_pte)) {
1005                 kvm_set_pte(pte, __pte(0));
1006                 kvm_tlb_flush_vmid_ipa(kvm, addr);
1007         } else {
1008                 get_page(virt_to_page(pte));
1009         }
1010
1011         kvm_set_pte(pte, *new_pte);
1012         return 0;
1013 }
1014
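/*
 * Clear the access flag in a stage-2 descriptor, returning whether it was
 * set.  The arch-provided __ptep_test_and_clear_young() is used when
 * available; otherwise the young bit is cleared by hand.
 */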
1015 #ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
1016 static int stage2_ptep_test_and_clear_young(pte_t *pte)
1017 {
1018         if (pte_young(*pte)) {
1019                 *pte = pte_mkold(*pte);
1020                 return 1;
1021         }
1022         return 0;
1023 }
1024 #else
1025 static int stage2_ptep_test_and_clear_young(pte_t *pte)
1026 {
1027         return __ptep_test_and_clear_young(pte);
1028 }
1029 #endif
1030
1031 static int stage2_pmdp_test_and_clear_young(pmd_t *pmd)
1032 {
1033         return stage2_ptep_test_and_clear_young((pte_t *)pmd);
1034 }
1035
1036 /**
1037  * kvm_phys_addr_ioremap - map a device range to guest IPA
1038  *
1039  * @kvm:        The KVM pointer
1040  * @guest_ipa:  The IPA at which to insert the mapping
1041  * @pa:         The physical address of the device
1042  * @size:       The size of the mapping
 * @writable:   Whether the mapping should be writable
1043  */
1044 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
1045                           phys_addr_t pa, unsigned long size, bool writable)
1046 {
1047         phys_addr_t addr, end;
1048         int ret = 0;
1049         unsigned long pfn;
1050         struct kvm_mmu_memory_cache cache = { 0, };
1051
1052         end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
1053         pfn = __phys_to_pfn(pa);
1054
1055         for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
1056                 pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);
1057
1058                 if (writable)
1059                         pte = kvm_s2pte_mkwrite(pte);
1060
1061                 ret = mmu_topup_memory_cache(&cache, KVM_MMU_CACHE_MIN_PAGES,
1062                                                 KVM_NR_MEM_OBJS);
1063                 if (ret)
1064                         goto out;
1065                 spin_lock(&kvm->mmu_lock);
1066                 ret = stage2_set_pte(kvm, &cache, addr, &pte,
1067                                                 KVM_S2PTE_FLAG_IS_IOMAP);
1068                 spin_unlock(&kvm->mmu_lock);
1069                 if (ret)
1070                         goto out;
1071
1072                 pfn++;
1073         }
1074
1075 out:
1076         mmu_free_memory_cache(&cache);
1077         return ret;
1078 }
1079
1080 static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap)
1081 {
1082         kvm_pfn_t pfn = *pfnp;
1083         gfn_t gfn = *ipap >> PAGE_SHIFT;
1084
1085         if (PageTransCompoundMap(pfn_to_page(pfn))) {
1086                 unsigned long mask;
1087                 /*
1088                  * The address we faulted on is backed by a transparent huge
1089                  * page.  However, because we map the compound huge page and
1090                  * not the individual tail page, we need to transfer the
1091                  * refcount to the head page.  We have to be careful that the
1092                  * THP doesn't start to split while we are adjusting the
1093                  * refcounts.
1094                  *
1095                  * We are sure this doesn't happen, because mmu_notifier_retry
1096                  * was successful and we are holding the mmu_lock, so if this
1097                  * THP is trying to split, it will be blocked in the mmu
1098                  * notifier before touching any of the pages, specifically
1099                  * before being able to call __split_huge_page_refcount().
1100                  *
1101                  * We can therefore safely transfer the refcount from PG_tail
1102                  * to PG_head and switch the pfn from a tail page to the head
1103                  * page accordingly.
1104                  */
1105                 mask = PTRS_PER_PMD - 1;
1106                 VM_BUG_ON((gfn & mask) != (pfn & mask));
1107                 if (pfn & mask) {
1108                         *ipap &= PMD_MASK;
1109                         kvm_release_pfn_clean(pfn);
1110                         pfn &= ~mask;
1111                         kvm_get_pfn(pfn);
1112                         *pfnp = pfn;
1113                 }
1114
1115                 return true;
1116         }
1117
1118         return false;
1119 }
1120
1121 static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
1122 {
1123         if (kvm_vcpu_trap_is_iabt(vcpu))
1124                 return false;
1125
1126         return kvm_vcpu_dabt_iswrite(vcpu);
1127 }
1128
1129 /**
1130  * stage2_wp_ptes - write protect PMD range
1131  * @pmd:        pointer to pmd entry
1132  * @addr:       range start address
1133  * @end:        range end address
1134  */
1135 static void stage2_wp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
1136 {
1137         pte_t *pte;
1138
1139         pte = pte_offset_kernel(pmd, addr);
1140         do {
1141                 if (!pte_none(*pte)) {
1142                         if (!kvm_s2pte_readonly(pte))
1143                                 kvm_set_s2pte_readonly(pte);
1144                 }
1145         } while (pte++, addr += PAGE_SIZE, addr != end);
1146 }
1147
1148 /**
1149  * stage2_wp_pmds - write protect PUD range
1150  * @pud:        pointer to pud entry
1151  * @addr:       range start address
1152  * @end:        range end address
1153  */
1154 static void stage2_wp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end)
1155 {
1156         pmd_t *pmd;
1157         phys_addr_t next;
1158
1159         pmd = stage2_pmd_offset(pud, addr);
1160
1161         do {
1162                 next = stage2_pmd_addr_end(addr, end);
1163                 if (!pmd_none(*pmd)) {
1164                         if (pmd_thp_or_huge(*pmd)) {
1165                                 if (!kvm_s2pmd_readonly(pmd))
1166                                         kvm_set_s2pmd_readonly(pmd);
1167                         } else {
1168                                 stage2_wp_ptes(pmd, addr, next);
1169                         }
1170                 }
1171         } while (pmd++, addr = next, addr != end);
1172 }
1173
1174 /**
1175  * stage2_wp_puds - write protect PGD range
1176  * @pgd:       pointer to pgd entry
1177  * @addr:      range start address
1178  * @end:       range end address
1179  *
1180  * Process PUD entries; a huge PUD is not supported and causes a BUG.
1181  */
1182 static void  stage2_wp_puds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end)
1183 {
1184         pud_t *pud;
1185         phys_addr_t next;
1186
1187         pud = stage2_pud_offset(pgd, addr);
1188         do {
1189                 next = stage2_pud_addr_end(addr, end);
1190                 if (!stage2_pud_none(*pud)) {
1191                         /* TODO:PUD not supported, revisit later if supported */
1192                         BUG_ON(stage2_pud_huge(*pud));
1193                         stage2_wp_pmds(pud, addr, next);
1194                 }
1195         } while (pud++, addr = next, addr != end);
1196 }
1197
1198 /**
1199  * stage2_wp_range() - write protect stage2 memory region range
1200  * @kvm:        The KVM pointer
1201  * @addr:       Start address of range
1202  * @end:        End address of range
1203  */
1204 static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
1205 {
1206         pgd_t *pgd;
1207         phys_addr_t next;
1208
1209         pgd = kvm->arch.pgd + stage2_pgd_index(addr);
1210         do {
1211                 /*
1212                  * Release kvm_mmu_lock periodically if the memory region is
1213                  * large. Otherwise, we may see kernel panics with
1214                  * CONFIG_DETECT_HUNG_TASK, CONFIG_LOCKUP_DETECTOR,
1215                  * CONFIG_LOCKDEP. Additionally, holding the lock too long
1216                  * will also starve other vCPUs. We have to also make sure
1217                  * that the page tables are not freed while we released
1218                  * the lock.
1219                  */
1220                 cond_resched_lock(&kvm->mmu_lock);
1221                 if (!READ_ONCE(kvm->arch.pgd))
1222                         break;
1223                 next = stage2_pgd_addr_end(addr, end);
1224                 if (stage2_pgd_present(*pgd))
1225                         stage2_wp_puds(pgd, addr, next);
1226         } while (pgd++, addr = next, addr != end);
1227 }
1228
1229 /**
1230  * kvm_mmu_wp_memory_region() - write protect stage 2 entries for memory slot
1231  * @kvm:        The KVM pointer
1232  * @slot:       The memory slot to write protect
1233  *
1234  * Called to start logging dirty pages when the KVM_MEM_LOG_DIRTY_PAGES
1235  * flag is set on the memory region. After this function returns, all
1236  * present PMDs and PTEs in the memory region are write protected.
1237  * Afterwards the dirty page log can be read.
1238  *
1239  * Acquires kvm_mmu_lock. Called with kvm->slots_lock mutex acquired,
1240  * serializing operations for VM memory regions.
1241  */
1242 void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
1243 {
1244         struct kvm_memslots *slots = kvm_memslots(kvm);
1245         struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
1246         phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;
1247         phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
1248
1249         spin_lock(&kvm->mmu_lock);
1250         stage2_wp_range(kvm, start, end);
1251         spin_unlock(&kvm->mmu_lock);
1252         kvm_flush_remote_tlbs(kvm);
1253 }
1254
1255 /**
1256  * kvm_mmu_write_protect_pt_masked() - write protect dirty pages
1257  * @kvm:        The KVM pointer
1258  * @slot:       The memory slot associated with mask
1259  * @gfn_offset: The gfn offset in memory slot
1260  * @mask:       The mask of dirty pages at offset 'gfn_offset' in this memory
1261  *              slot to be write protected
1262  *
1263  * Walks the bits set in mask and write protects the associated PTEs. The
1264  * caller must acquire kvm_mmu_lock.
1265  */
1266 static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
1267                 struct kvm_memory_slot *slot,
1268                 gfn_t gfn_offset, unsigned long mask)
1269 {
1270         phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
1271         phys_addr_t start = (base_gfn +  __ffs(mask)) << PAGE_SHIFT;
1272         phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;
1273
1274         stage2_wp_range(kvm, start, end);
1275 }
1276
1277 /*
1278  * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
1279  * dirty pages.
1280  *
1281  * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
1282  * enable dirty logging for them.
1283  */
1284 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
1285                 struct kvm_memory_slot *slot,
1286                 gfn_t gfn_offset, unsigned long mask)
1287 {
1288         kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
1289 }
1290
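/*
 * Cache maintenance around faulted-in pages: the D-cache is cleaned so the
 * guest sees up-to-date data even if it runs with its own caches disabled,
 * and the I-cache is invalidated before granting execute permission so that
 * no stale instructions can be fetched.
 */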
1291 static void clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
1292 {
1293         __clean_dcache_guest_page(pfn, size);
1294 }
1295
1296 static void invalidate_icache_guest_page(kvm_pfn_t pfn, unsigned long size)
1297 {
1298         __invalidate_icache_guest_page(pfn, size);
1299 }
1300
1301 static void kvm_send_hwpoison_signal(unsigned long address,
1302                                      struct vm_area_struct *vma)
1303 {
1304         siginfo_t info;
1305
1306         info.si_signo   = SIGBUS;
1307         info.si_errno   = 0;
1308         info.si_code    = BUS_MCEERR_AR;
1309         info.si_addr    = (void __user *)address;
1310
1311         if (is_vm_hugetlb_page(vma))
1312                 info.si_addr_lsb = huge_page_shift(hstate_vma(vma));
1313         else
1314                 info.si_addr_lsb = PAGE_SHIFT;
1315
1316         send_sig_info(SIGBUS, &info, current);
1317 }
1318
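/*
 * user_mem_abort() handles stage-2 translation, permission and access
 * faults on memory that is backed by a memslot.  In outline:
 *
 *   1. resolve the faulting gfn to a host pfn with gfn_to_pfn_prot();
 *   2. decide between a PMD block mapping and a PTE mapping, forcing a
 *      PTE when dirty logging is active or when the memslot's userspace
 *      and IPA alignment rule out block mappings;
 *   3. perform the required D-cache clean / I-cache invalidation;
 *   4. install the mapping under mmu_lock, after checking
 *      mmu_notifier_retry() against the mmu_seq snapshot taken earlier.
 */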
1319 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
1320                           struct kvm_memory_slot *memslot, unsigned long hva,
1321                           unsigned long fault_status)
1322 {
1323         int ret;
1324         bool write_fault, exec_fault, writable, hugetlb = false, force_pte = false;
1325         unsigned long mmu_seq;
1326         gfn_t gfn = fault_ipa >> PAGE_SHIFT;
1327         struct kvm *kvm = vcpu->kvm;
1328         struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
1329         struct vm_area_struct *vma;
1330         kvm_pfn_t pfn;
1331         pgprot_t mem_type = PAGE_S2;
1332         bool logging_active = memslot_is_logging(memslot);
1333         unsigned long flags = 0;
1334
1335         write_fault = kvm_is_write_fault(vcpu);
1336         exec_fault = kvm_vcpu_trap_is_iabt(vcpu);
1337         VM_BUG_ON(write_fault && exec_fault);
1338
1339         if (fault_status == FSC_PERM && !write_fault && !exec_fault) {
1340                 kvm_err("Unexpected L2 read permission error\n");
1341                 return -EFAULT;
1342         }
1343
1344         /* Let's check if we will get back a huge page backed by hugetlbfs */
1345         down_read(&current->mm->mmap_sem);
1346         vma = find_vma_intersection(current->mm, hva, hva + 1);
1347         if (unlikely(!vma)) {
1348                 kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
1349                 up_read(&current->mm->mmap_sem);
1350                 return -EFAULT;
1351         }
1352
1353         if (vma_kernel_pagesize(vma) == PMD_SIZE && !logging_active) {
1354                 hugetlb = true;
1355                 gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
1356         } else {
1357                 /*
1358                  * Pages belonging to memslots that don't have the same
1359                  * alignment for userspace and IPA cannot be mapped using
1360                  * block descriptors even if the pages belong to a THP for
1361                  * the process, because the stage-2 block descriptor will
1362                  * cover more than a single THP and we lose atomicity for
1363                  * unmapping, updates, and splits of the THP or other pages
1364                  * in the stage-2 block range.
1365                  */
1366                 if ((memslot->userspace_addr & ~PMD_MASK) !=
1367                     ((memslot->base_gfn << PAGE_SHIFT) & ~PMD_MASK))
1368                         force_pte = true;
1369         }
1370         up_read(&current->mm->mmap_sem);
1371
1372         /* We need minimum second+third level pages */
1373         ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
1374                                      KVM_NR_MEM_OBJS);
1375         if (ret)
1376                 return ret;
1377
1378         mmu_seq = vcpu->kvm->mmu_notifier_seq;
1379         /*
1380          * Ensure the read of mmu_notifier_seq happens before we call
1381          * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
1382          * the page we just got a reference to getting unmapped before we have
1383          * a chance to grab the mmu_lock, which ensures that if the page gets
1384          * unmapped afterwards, the call to kvm_unmap_hva will take it away
1385          * from us again properly. This smp_rmb() interacts with the smp_wmb()
1386          * in kvm_mmu_notifier_invalidate_<page|range_end>.
1387          */
1388         smp_rmb();
1389
1390         pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
1391         if (pfn == KVM_PFN_ERR_HWPOISON) {
1392                 kvm_send_hwpoison_signal(hva, vma);
1393                 return 0;
1394         }
1395         if (is_error_noslot_pfn(pfn))
1396                 return -EFAULT;
1397
1398         if (kvm_is_device_pfn(pfn)) {
1399                 mem_type = PAGE_S2_DEVICE;
1400                 flags |= KVM_S2PTE_FLAG_IS_IOMAP;
1401         } else if (logging_active) {
1402                 /*
1403                  * Faults on pages in a memslot with logging enabled
1404                  * should not be mapped with huge pages (it introduces churn
1405                  * and performance degradation), so force a pte mapping.
1406                  */
1407                 force_pte = true;
1408                 flags |= KVM_S2_FLAG_LOGGING_ACTIVE;
1409
1410                 /*
1411                  * Only actually map the page as writable if this was a write
1412                  * fault.
1413                  */
1414                 if (!write_fault)
1415                         writable = false;
1416         }
1417
1418         spin_lock(&kvm->mmu_lock);
1419         if (mmu_notifier_retry(kvm, mmu_seq))
1420                 goto out_unlock;
1421
1422         if (!hugetlb && !force_pte)
1423                 hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa);
1424
1425         if (hugetlb) {
1426                 pmd_t new_pmd = pfn_pmd(pfn, mem_type);
1427                 new_pmd = pmd_mkhuge(new_pmd);
1428                 if (writable) {
1429                         new_pmd = kvm_s2pmd_mkwrite(new_pmd);
1430                         kvm_set_pfn_dirty(pfn);
1431                 }
1432
1433                 if (fault_status != FSC_PERM)
1434                         clean_dcache_guest_page(pfn, PMD_SIZE);
1435
1436                 if (exec_fault) {
1437                         new_pmd = kvm_s2pmd_mkexec(new_pmd);
1438                         invalidate_icache_guest_page(pfn, PMD_SIZE);
1439                 } else if (fault_status == FSC_PERM) {
1440                         /* Preserve execute if XN was already cleared */
1441                         if (stage2_is_exec(kvm, fault_ipa))
1442                                 new_pmd = kvm_s2pmd_mkexec(new_pmd);
1443                 }
1444
1445                 ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
1446         } else {
1447                 pte_t new_pte = pfn_pte(pfn, mem_type);
1448
1449                 if (writable) {
1450                         new_pte = kvm_s2pte_mkwrite(new_pte);
1451                         kvm_set_pfn_dirty(pfn);
1452                         mark_page_dirty(kvm, gfn);
1453                 }
1454
1455                 if (fault_status != FSC_PERM)
1456                         clean_dcache_guest_page(pfn, PAGE_SIZE);
1457
1458                 if (exec_fault) {
1459                         new_pte = kvm_s2pte_mkexec(new_pte);
1460                         invalidate_icache_guest_page(pfn, PAGE_SIZE);
1461                 } else if (fault_status == FSC_PERM) {
1462                         /* Preserve execute if XN was already cleared */
1463                         if (stage2_is_exec(kvm, fault_ipa))
1464                                 new_pte = kvm_s2pte_mkexec(new_pte);
1465                 }
1466
1467                 ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, flags);
1468         }
1469
1470 out_unlock:
1471         spin_unlock(&kvm->mmu_lock);
1472         kvm_set_pfn_accessed(pfn);
1473         kvm_release_pfn_clean(pfn);
1474         return ret;
1475 }
1476
1477 /*
1478  * Resolve the access fault by making the page young again.
1479  * Note that because the faulting entry is guaranteed not to be
1480  * cached in the TLB, we don't need to invalidate anything.
1481  * Only the HW Access Flag updates are supported for Stage 2 (no DBM),
1482  * so there is no need for atomic (pte|pmd)_mkyoung operations.
1483  */
1484 static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
1485 {
1486         pmd_t *pmd;
1487         pte_t *pte;
1488         kvm_pfn_t pfn;
1489         bool pfn_valid = false;
1490
1491         trace_kvm_access_fault(fault_ipa);
1492
1493         spin_lock(&vcpu->kvm->mmu_lock);
1494
1495         pmd = stage2_get_pmd(vcpu->kvm, NULL, fault_ipa);
1496         if (!pmd || pmd_none(*pmd))     /* Nothing there */
1497                 goto out;
1498
1499         if (pmd_thp_or_huge(*pmd)) {    /* THP, HugeTLB */
1500                 *pmd = pmd_mkyoung(*pmd);
1501                 pfn = pmd_pfn(*pmd);
1502                 pfn_valid = true;
1503                 goto out;
1504         }
1505
1506         pte = pte_offset_kernel(pmd, fault_ipa);
1507         if (pte_none(*pte))             /* Nothing there either */
1508                 goto out;
1509
1510         *pte = pte_mkyoung(*pte);       /* Just a page... */
1511         pfn = pte_pfn(*pte);
1512         pfn_valid = true;
1513 out:
1514         spin_unlock(&vcpu->kvm->mmu_lock);
1515         if (pfn_valid)
1516                 kvm_set_pfn_accessed(pfn);
1517 }
1518
1519 /**
1520  * kvm_handle_guest_abort - handles all 2nd stage aborts
1521  * @vcpu:       the VCPU pointer
1522  * @run:        the kvm_run structure
1523  *
1524  * Any abort that gets to the host is almost guaranteed to be caused by a
1525  * missing second stage translation table entry, which can mean either that the
1526  * guest simply needs more memory and we must allocate an appropriate page, or
1527  * that the guest tried to access I/O memory, which is emulated by user space.
1528  * The distinction is based on the IPA causing the fault and whether this
1529  * memory region has been registered as standard RAM by user space.
1530  */
1531 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
1532 {
1533         unsigned long fault_status;
1534         phys_addr_t fault_ipa;
1535         struct kvm_memory_slot *memslot;
1536         unsigned long hva;
1537         bool is_iabt, write_fault, writable;
1538         gfn_t gfn;
1539         int ret, idx;
1540
1541         fault_status = kvm_vcpu_trap_get_fault_type(vcpu);
1542
1543         fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
1544         is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
1545
1546         /* Synchronous External Abort? */
1547         if (kvm_vcpu_dabt_isextabt(vcpu)) {
1548                 /*
1549                  * For RAS the host kernel may handle this abort.
1550                  * There is no need to pass the error into the guest.
1551                  */
1552                 if (!handle_guest_sea(fault_ipa, kvm_vcpu_get_hsr(vcpu)))
1553                         return 1;
1554
1555                 if (unlikely(!is_iabt)) {
1556                         kvm_inject_vabt(vcpu);
1557                         return 1;
1558                 }
1559         }
1560
1561         trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
1562                               kvm_vcpu_get_hfar(vcpu), fault_ipa);
1563
1564         /* Check that the stage-2 fault is a translation, access or permission fault */
1565         if (fault_status != FSC_FAULT && fault_status != FSC_PERM &&
1566             fault_status != FSC_ACCESS) {
1567                 kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
1568                         kvm_vcpu_trap_get_class(vcpu),
1569                         (unsigned long)kvm_vcpu_trap_get_fault(vcpu),
1570                         (unsigned long)kvm_vcpu_get_hsr(vcpu));
1571                 return -EFAULT;
1572         }
1573
1574         idx = srcu_read_lock(&vcpu->kvm->srcu);
1575
1576         gfn = fault_ipa >> PAGE_SHIFT;
1577         memslot = gfn_to_memslot(vcpu->kvm, gfn);
1578         hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
1579         write_fault = kvm_is_write_fault(vcpu);
1580         if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
1581                 if (is_iabt) {
1582                         /* Prefetch Abort on I/O address */
1583                         kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
1584                         ret = 1;
1585                         goto out_unlock;
1586                 }
1587
1588                 /*
1589                  * Check for a cache maintenance operation. Since we
1590                  * ended-up here, we know it is outside of any memory
1591                  * slot. But we can't find out if that is for a device,
1592                  * or if the guest is just being stupid. The only thing
1593                  * we know for sure is that this range cannot be cached.
1594                  *
1595                  * So let's assume that the guest is just being
1596                  * cautious, and skip the instruction.
1597                  */
1598                 if (kvm_vcpu_dabt_is_cm(vcpu)) {
1599                         kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
1600                         ret = 1;
1601                         goto out_unlock;
1602                 }
1603
1604                 /*
1605                  * The IPA is reported as [MAX:12], so we need to
1606                  * complement it with the bottom 12 bits from the
1607                  * faulting VA. This is always 12 bits, irrespective
1608                  * of the page size.
1609                  */
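                /*
                 * Illustrative example (hypothetical values): with a
                 * reported IPA page of 0x880000000 and a faulting VA
                 * ending in 0x3c8, the fault_ipa passed to io_mem_abort()
                 * becomes 0x8800003c8.
                 */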
1610                 fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
1611                 ret = io_mem_abort(vcpu, run, fault_ipa);
1612                 goto out_unlock;
1613         }
1614
1615         /* Userspace should not be able to register out-of-bounds IPAs */
1616         VM_BUG_ON(fault_ipa >= KVM_PHYS_SIZE);
1617
1618         if (fault_status == FSC_ACCESS) {
1619                 handle_access_fault(vcpu, fault_ipa);
1620                 ret = 1;
1621                 goto out_unlock;
1622         }
1623
1624         ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
1625         if (ret == 0)
1626                 ret = 1;
1627 out_unlock:
1628         srcu_read_unlock(&vcpu->kvm->srcu, idx);
1629         return ret;
1630 }
1631
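/*
 * handle_hva_to_gpa() - apply a handler to the guest physical ranges
 * backing an HVA range
 *
 * Iterate over every memslot that overlaps the HVA range [start, end),
 * translate each overlapping chunk into a guest physical range and pass
 * it to @handler, OR-ing the handlers' return values together.
 */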
1632 static int handle_hva_to_gpa(struct kvm *kvm,
1633                              unsigned long start,
1634                              unsigned long end,
1635                              int (*handler)(struct kvm *kvm,
1636                                             gpa_t gpa, u64 size,
1637                                             void *data),
1638                              void *data)
1639 {
1640         struct kvm_memslots *slots;
1641         struct kvm_memory_slot *memslot;
1642         int ret = 0;
1643
1644         slots = kvm_memslots(kvm);
1645
1646         /* we only care about the pages that the guest sees */
1647         kvm_for_each_memslot(memslot, slots) {
1648                 unsigned long hva_start, hva_end;
1649                 gfn_t gpa;
1650
1651                 hva_start = max(start, memslot->userspace_addr);
1652                 hva_end = min(end, memslot->userspace_addr +
1653                                         (memslot->npages << PAGE_SHIFT));
1654                 if (hva_start >= hva_end)
1655                         continue;
1656
1657                 gpa = hva_to_gfn_memslot(hva_start, memslot) << PAGE_SHIFT;
1658                 ret |= handler(kvm, gpa, (u64)(hva_end - hva_start), data);
1659         }
1660
1661         return ret;
1662 }
1663
1664 static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
1665 {
1666         unmap_stage2_range(kvm, gpa, size);
1667         return 0;
1668 }
1669
1670 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
1671 {
1672         unsigned long end = hva + PAGE_SIZE;
1673
1674         if (!kvm->arch.pgd)
1675                 return 0;
1676
1677         trace_kvm_unmap_hva(hva);
1678         handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
1679         return 0;
1680 }
1681
1682 int kvm_unmap_hva_range(struct kvm *kvm,
1683                         unsigned long start, unsigned long end)
1684 {
1685         if (!kvm->arch.pgd)
1686                 return 0;
1687
1688         trace_kvm_unmap_hva_range(start, end);
1689         handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
1690         return 0;
1691 }
1692
1693 static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
1694 {
1695         pte_t *pte = (pte_t *)data;
1696
1697         WARN_ON(size != PAGE_SIZE);
1698         /*
1699          * We can always call stage2_set_pte with KVM_S2_FLAG_LOGGING_ACTIVE
1700          * flag clear because MMU notifiers will have unmapped a huge PMD before
1701          * calling ->change_pte() (which in turn calls kvm_set_spte_hva()) and
1702          * therefore stage2_set_pte() never needs to clear out a huge PMD
1703          * through this calling path.
1704          */
1705         stage2_set_pte(kvm, NULL, gpa, pte, 0);
1706         return 0;
1707 }
1708
1709
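/*
 * kvm_set_spte_hva() - MMU notifier ->change_pte() hook
 *
 * Replace the stage-2 mapping of the single page at @hva with a mapping
 * of the new host page described by @pte, using the default PAGE_S2
 * attributes.
 */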
1710 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
1711 {
1712         unsigned long end = hva + PAGE_SIZE;
1713         pte_t stage2_pte;
1714
1715         if (!kvm->arch.pgd)
1716                 return;
1717
1718         trace_kvm_set_spte_hva(hva);
1719         stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2);
1720         handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
1721 }
1722
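/*
 * Test and clear the access flag on the stage-2 entry mapping @gpa, at
 * either PMD (huge page) or PTE granularity. Returns non-zero if the
 * entry was young.
 */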
1723 static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
1724 {
1725         pmd_t *pmd;
1726         pte_t *pte;
1727
1728         WARN_ON(size != PAGE_SIZE && size != PMD_SIZE);
1729         pmd = stage2_get_pmd(kvm, NULL, gpa);
1730         if (!pmd || pmd_none(*pmd))     /* Nothing there */
1731                 return 0;
1732
1733         if (pmd_thp_or_huge(*pmd))      /* THP, HugeTLB */
1734                 return stage2_pmdp_test_and_clear_young(pmd);
1735
1736         pte = pte_offset_kernel(pmd, gpa);
1737         if (pte_none(*pte))
1738                 return 0;
1739
1740         return stage2_ptep_test_and_clear_young(pte);
1741 }
1742
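/*
 * Like kvm_age_hva_handler(), but only reports whether the stage-2 entry
 * is young, without clearing the access flag.
 */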
1743 static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
1744 {
1745         pmd_t *pmd;
1746         pte_t *pte;
1747
1748         WARN_ON(size != PAGE_SIZE && size != PMD_SIZE);
1749         pmd = stage2_get_pmd(kvm, NULL, gpa);
1750         if (!pmd || pmd_none(*pmd))     /* Nothing there */
1751                 return 0;
1752
1753         if (pmd_thp_or_huge(*pmd))              /* THP, HugeTLB */
1754                 return pmd_young(*pmd);
1755
1756         pte = pte_offset_kernel(pmd, gpa);
1757         if (!pte_none(*pte))            /* Just a page... */
1758                 return pte_young(*pte);
1759
1760         return 0;
1761 }
1762
1763 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
1764 {
1765         if (!kvm->arch.pgd)
1766                 return 0;
1767         trace_kvm_age_hva(start, end);
1768         return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
1769 }
1770
1771 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
1772 {
1773         if (!kvm->arch.pgd)
1774                 return 0;
1775         trace_kvm_test_age_hva(hva);
1776         return handle_hva_to_gpa(kvm, hva, hva + PAGE_SIZE, kvm_test_age_hva_handler, NULL);
1777 }
1778
1779 void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
1780 {
1781         mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
1782 }
1783
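/*
 * Return the physical address of the PGD to be installed in HTTBR: the
 * merged PGD when the extended idmap is in use, the regular HYP PGD
 * otherwise.
 */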
1784 phys_addr_t kvm_mmu_get_httbr(void)
1785 {
1786         if (__kvm_cpu_uses_extended_idmap())
1787                 return virt_to_phys(merged_hyp_pgd);
1788         else
1789                 return virt_to_phys(hyp_pgd);
1790 }
1791
1792 phys_addr_t kvm_get_idmap_vector(void)
1793 {
1794         return hyp_idmap_vector;
1795 }
1796
1797 static int kvm_map_idmap_text(pgd_t *pgd)
1798 {
1799         int err;
1800
1801         /* Create the idmap in the boot page tables */
1802         err =   __create_hyp_mappings(pgd, __kvm_idmap_ptrs_per_pgd(),
1803                                       hyp_idmap_start, hyp_idmap_end,
1804                                       __phys_to_pfn(hyp_idmap_start),
1805                                       PAGE_HYP_EXEC);
1806         if (err)
1807                 kvm_err("Failed to idmap %lx-%lx\n",
1808                         hyp_idmap_start, hyp_idmap_end);
1809
1810         return err;
1811 }
1812
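/*
 * kvm_mmu_init() - set up the page tables needed to run in HYP mode
 *
 * Record the physical addresses of the HYP idmap text and init vector,
 * allocate the HYP PGD and map the idmap text executable at HYP. When
 * the CPU uses an extended idmap, also allocate a boot PGD and a merged
 * PGD and combine them via __kvm_extend_hypmap(). Returns 0 on success
 * or a negative error code, freeing any HYP page tables that were
 * allocated on failure.
 */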
1813 int kvm_mmu_init(void)
1814 {
1815         int err;
1816
1817         hyp_idmap_start = kvm_virt_to_phys(__hyp_idmap_text_start);
1818         hyp_idmap_end = kvm_virt_to_phys(__hyp_idmap_text_end);
1819         hyp_idmap_vector = kvm_virt_to_phys(__kvm_hyp_init);
1820
1821         /*
1822          * We rely on the linker script to ensure at build time that the HYP
1823          * init code does not cross a page boundary.
1824          */
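        /*
         * (hyp_idmap_start ^ (hyp_idmap_end - 1)) has a bit set above
         * PAGE_SHIFT exactly when the first and last bytes of the idmap
         * text live in different pages, so masking with PAGE_MASK catches
         * any page-boundary crossing.
         */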
1825         BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK);
1826
1827         kvm_debug("IDMAP page: %lx\n", hyp_idmap_start);
1828         kvm_debug("HYP VA range: %lx:%lx\n",
1829                   kern_hyp_va(PAGE_OFFSET),
1830                   kern_hyp_va((unsigned long)high_memory - 1));
1831
1832         if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) &&
1833             hyp_idmap_start <  kern_hyp_va(~0UL) &&
1834             hyp_idmap_start != (unsigned long)__hyp_idmap_text_start) {
1835                 /*
1836                  * The idmap page intersects with the HYP VA space;
1837                  * it is not safe to continue further.
1838                  */
1839                 kvm_err("IDMAP intersecting with HYP VA, unable to continue\n");
1840                 err = -EINVAL;
1841                 goto out;
1842         }
1843
1844         hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);
1845         if (!hyp_pgd) {
1846                 kvm_err("Hyp mode PGD not allocated\n");
1847                 err = -ENOMEM;
1848                 goto out;
1849         }
1850
1851         if (__kvm_cpu_uses_extended_idmap()) {
1852                 boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
1853                                                          hyp_pgd_order);
1854                 if (!boot_hyp_pgd) {
1855                         kvm_err("Hyp boot PGD not allocated\n");
1856                         err = -ENOMEM;
1857                         goto out;
1858                 }
1859
1860                 err = kvm_map_idmap_text(boot_hyp_pgd);
1861                 if (err)
1862                         goto out;
1863
1864                 merged_hyp_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
1865                 if (!merged_hyp_pgd) {
1866                         kvm_err("Failed to allocate extra HYP pgd\n");
                             err = -ENOMEM;
1867                         goto out;
1868                 }
1869                 __kvm_extend_hypmap(boot_hyp_pgd, hyp_pgd, merged_hyp_pgd,
1870                                     hyp_idmap_start);
1871         } else {
1872                 err = kvm_map_idmap_text(hyp_pgd);
1873                 if (err)
1874                         goto out;
1875         }
1876
1877         return 0;
1878 out:
1879         free_hyp_pgds();
1880         return err;
1881 }
1882
1883 void kvm_arch_commit_memory_region(struct kvm *kvm,
1884                                    const struct kvm_userspace_memory_region *mem,
1885                                    const struct kvm_memory_slot *old,
1886                                    const struct kvm_memory_slot *new,
1887                                    enum kvm_mr_change change)
1888 {
1889         /*
1890          * At this point the memslot has been committed and there is an
1891          * allocated dirty_bitmap[]; dirty pages will be tracked while the
1892          * memory slot is write protected.
1893          */
1894         if (change != KVM_MR_DELETE && mem->flags & KVM_MEM_LOG_DIRTY_PAGES)
1895                 kvm_mmu_wp_memory_region(kvm, mem->slot);
1896 }
1897
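/*
 * kvm_arch_prepare_memory_region() - validate and prepare a memslot update
 *
 * Reject regions that extend beyond the IPA space addressable by the
 * guest. For VM_PFNMAP VMAs (device memory), establish the stage-2
 * mappings up front via kvm_phys_addr_ioremap(); if that fails, any
 * stage-2 mappings already created for the region are unmapped again.
 */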
1898 int kvm_arch_prepare_memory_region(struct kvm *kvm,
1899                                    struct kvm_memory_slot *memslot,
1900                                    const struct kvm_userspace_memory_region *mem,
1901                                    enum kvm_mr_change change)
1902 {
1903         hva_t hva = mem->userspace_addr;
1904         hva_t reg_end = hva + mem->memory_size;
1905         bool writable = !(mem->flags & KVM_MEM_READONLY);
1906         int ret = 0;
1907
1908         if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
1909                         change != KVM_MR_FLAGS_ONLY)
1910                 return 0;
1911
1912         /*
1913          * Prevent userspace from creating a memory region outside of the IPA
1914          * space addressable by the KVM guest.
1915          */
1916         if (memslot->base_gfn + memslot->npages >=
1917             (KVM_PHYS_SIZE >> PAGE_SHIFT))
1918                 return -EFAULT;
1919
1920         down_read(&current->mm->mmap_sem);
1921         /*
1922          * A memory region could potentially cover multiple VMAs, and any holes
1923          * between them, so iterate over all of them to find out if we can map
1924          * any of them right now.
1925          *
1926          *     +--------------------------------------------+
1927          * +---------------+----------------+   +----------------+
1928          * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
1929          * +---------------+----------------+   +----------------+
1930          *     |               memory region                |
1931          *     +--------------------------------------------+
1932          */
1933         do {
1934                 struct vm_area_struct *vma = find_vma(current->mm, hva);
1935                 hva_t vm_start, vm_end;
1936
1937                 if (!vma || vma->vm_start >= reg_end)
1938                         break;
1939
1940                 /*
1941                  * Mapping a read-only VMA is only allowed if the
1942                  * memory region is configured as read-only.
1943                  */
1944                 if (writable && !(vma->vm_flags & VM_WRITE)) {
1945                         ret = -EPERM;
1946                         break;
1947                 }
1948
1949                 /*
1950                  * Take the intersection of this VMA with the memory region
1951                  */
1952                 vm_start = max(hva, vma->vm_start);
1953                 vm_end = min(reg_end, vma->vm_end);
1954
1955                 if (vma->vm_flags & VM_PFNMAP) {
1956                         gpa_t gpa = mem->guest_phys_addr +
1957                                     (vm_start - mem->userspace_addr);
1958                         phys_addr_t pa;
1959
1960                         pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
1961                         pa += vm_start - vma->vm_start;
1962
1963                         /* IO region dirty page logging not allowed */
1964                         if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
1965                                 ret = -EINVAL;
1966                                 goto out;
1967                         }
1968
1969                         ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
1970                                                     vm_end - vm_start,
1971                                                     writable);
1972                         if (ret)
1973                                 break;
1974                 }
1975                 hva = vm_end;
1976         } while (hva < reg_end);
1977
1978         if (change == KVM_MR_FLAGS_ONLY)
1979                 goto out;
1980
1981         spin_lock(&kvm->mmu_lock);
1982         if (ret)
1983                 unmap_stage2_range(kvm, mem->guest_phys_addr, mem->memory_size);
1984         else
1985                 stage2_flush_memslot(kvm, memslot);
1986         spin_unlock(&kvm->mmu_lock);
1987 out:
1988         up_read(&current->mm->mmap_sem);
1989         return ret;
1990 }
1991
1992 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
1993                            struct kvm_memory_slot *dont)
1994 {
1995 }
1996
1997 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
1998                             unsigned long npages)
1999 {
2000         return 0;
2001 }
2002
2003 void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots)
2004 {
2005 }
2006
2007 void kvm_arch_flush_shadow_all(struct kvm *kvm)
2008 {
2009         kvm_free_stage2_pgd(kvm);
2010 }
2011
2012 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
2013                                    struct kvm_memory_slot *slot)
2014 {
2015         gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
2016         phys_addr_t size = slot->npages << PAGE_SHIFT;
2017
2018         spin_lock(&kvm->mmu_lock);
2019         unmap_stage2_range(kvm, gpa, size);
2020         spin_unlock(&kvm->mmu_lock);
2021 }
2022
2023 /*
2024  * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
2025  *
2026  * Main problems:
2027  * - S/W ops are local to a CPU (not broadcast)
2028  * - We have line migration behind our back (speculation)
2029  * - System caches don't support S/W at all (damn!)
2030  *
2031  * In the face of the above, the best we can do is to try and convert
2032  * S/W ops to VA ops. Because the guest is not allowed to infer the
2033  * S/W to PA mapping, it can only use S/W to nuke the whole cache,
2034  * which is a rather good thing for us.
2035  *
2036  * Also, it is only used when turning caches on/off ("The expected
2037  * usage of the cache maintenance instructions that operate by set/way
2038  * is associated with the cache maintenance instructions associated
2039  * with the powerdown and powerup of caches, if this is required by
2040  * the implementation.").
2041  *
2042  * We use the following policy:
2043  *
2044  * - If we trap a S/W operation, we enable VM trapping to detect
2045  *   caches being turned on/off, and do a full clean.
2046  *
2047  * - We flush the caches when they are turned on and when they are turned off.
2048  *
2049  * - Once the caches are enabled, we stop trapping VM ops.
2050  */
2051 void kvm_set_way_flush(struct kvm_vcpu *vcpu)
2052 {
2053         unsigned long hcr = *vcpu_hcr(vcpu);
2054
2055         /*
2056          * If this is the first time we do a S/W operation
2057          * (i.e. HCR_TVM not set), flush the whole of the guest's memory
2058          * and enable VM trapping.
2059          *
2060          * Otherwise, rely on the VM trapping to wait for the MMU +
2061          * Caches to be turned off. At that point, we'll be able to
2062          * clean the caches again.
2063          */
2064         if (!(hcr & HCR_TVM)) {
2065                 trace_kvm_set_way_flush(*vcpu_pc(vcpu),
2066                                         vcpu_has_cache_enabled(vcpu));
2067                 stage2_flush_vm(vcpu->kvm);
2068                 *vcpu_hcr(vcpu) = hcr | HCR_TVM;
2069         }
2070 }
2071
2072 void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
2073 {
2074         bool now_enabled = vcpu_has_cache_enabled(vcpu);
2075
2076         /*
2077          * If we are switching the MMU+caches on, we need to invalidate the
2078          * caches. If we are switching them off, we need to clean the caches.
2079          * Clean + invalidate does the trick always.
2080          */
2081         if (now_enabled != was_enabled)
2082                 stage2_flush_vm(vcpu->kvm);
2083
2084         /* Caches are now on, stop trapping VM ops (until a S/W op) */
2085         if (now_enabled)
2086                 *vcpu_hcr(vcpu) &= ~HCR_TVM;
2087
2088         trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled);
2089 }