KVM: arm64: Convert kvm_phys_addr_ioremap() to generic page-table API
author	Will Deacon <will@kernel.org>
Fri, 11 Sep 2020 13:25:15 +0000 (14:25 +0100)
committer	Marc Zyngier <maz@kernel.org>
Fri, 11 Sep 2020 14:51:13 +0000 (15:51 +0100)
Convert kvm_phys_addr_ioremap() to use kvm_pgtable_stage2_map() instead
of stage2_set_pte().

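For illustration, the new call shape boils down to the following (a
minimal sketch mirroring the loop body in the hunk below; 'kvm', 'addr',
'pa', 'writable' and a topped-up 'cache' are assumed to be set up by the
caller, as kvm_phys_addr_ioremap() does):

	/*
	 * Sketch: map one page of device memory at stage-2 via the
	 * generic page-table API instead of stage2_set_pte().
	 */
	struct kvm_pgtable *pgt = kvm->arch.mmu.pgt;
	enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_DEVICE |
				     KVM_PGTABLE_PROT_R |
				     (writable ? KVM_PGTABLE_PROT_W : 0);
	int ret;

	spin_lock(&kvm->mmu_lock);
	ret = kvm_pgtable_stage2_map(pgt, addr, PAGE_SIZE, pa, prot, &cache);
	spin_unlock(&kvm->mmu_lock);
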
Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Reviewed-by: Gavin Shan <gshan@redhat.com>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Quentin Perret <qperret@google.com>
Link: https://lore.kernel.org/r/20200911132529.19844-8-will@kernel.org
arch/arm64/kvm/mmu.c

index 4607e9c..32e93f4 100644
@@ -1154,35 +1154,33 @@ static int stage2_pudp_test_and_clear_young(pud_t *pud)
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
                          phys_addr_t pa, unsigned long size, bool writable)
 {
-       phys_addr_t addr, end;
+       phys_addr_t addr;
        int ret = 0;
-       unsigned long pfn;
        struct kvm_mmu_memory_cache cache = { 0, __GFP_ZERO, NULL, };
+       struct kvm_pgtable *pgt = kvm->arch.mmu.pgt;
+       enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_DEVICE |
+                                    KVM_PGTABLE_PROT_R |
+                                    (writable ? KVM_PGTABLE_PROT_W : 0);
 
-       end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
-       pfn = __phys_to_pfn(pa);
-
-       for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
-               pte_t pte = kvm_pfn_pte(pfn, PAGE_S2_DEVICE);
-
-               if (writable)
-                       pte = kvm_s2pte_mkwrite(pte);
+       size += offset_in_page(guest_ipa);
+       guest_ipa &= PAGE_MASK;
 
+       for (addr = guest_ipa; addr < guest_ipa + size; addr += PAGE_SIZE) {
                ret = kvm_mmu_topup_memory_cache(&cache,
                                                 kvm_mmu_cache_min_pages(kvm));
                if (ret)
-                       goto out;
+                       break;
+
                spin_lock(&kvm->mmu_lock);
-               ret = stage2_set_pte(&kvm->arch.mmu, &cache, addr, &pte,
-                                    KVM_S2PTE_FLAG_IS_IOMAP);
+               ret = kvm_pgtable_stage2_map(pgt, addr, PAGE_SIZE, pa, prot,
+                                            &cache);
                spin_unlock(&kvm->mmu_lock);
                if (ret)
-                       goto out;
+                       break;
 
-               pfn++;
+               pa += PAGE_SIZE;
        }
 
-out:
        kvm_mmu_free_memory_cache(&cache);
        return ret;
 }