KVM: guest_memfd: let kvm_gmem_populate() operate only on private gfns
author: Paolo Bonzini <pbonzini@redhat.com>
Thu, 11 Jul 2024 22:27:55 +0000 (18:27 -0400)
committer: Paolo Bonzini <pbonzini@redhat.com>
Fri, 26 Jul 2024 18:46:15 +0000 (14:46 -0400)
This check is currently performed by sev_gmem_post_populate(), but it
applies to all callers of kvm_gmem_populate(): the point of the function
is that the memory is being encrypted and some work has to be done
on all the gfns in order to encrypt them.

Therefore, check the KVM_MEMORY_ATTRIBUTE_PRIVATE attribute prior
to invoking the callback, and stop the operation if a shared page
is encountered.  Because CONFIG_KVM_PRIVATE_MEM in principle does
not require attributes, this makes kvm_gmem_populate() depend on
CONFIG_KVM_GENERIC_PRIVATE_MEM (which does require them).

Reviewed-by: Michael Roth <michael.roth@amd.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/svm/sev.c
include/linux/kvm_host.h
virt/kvm/guest_memfd.c

index 752d2ff..532df12 100644 (file)
@@ -2279,13 +2279,6 @@ static int sev_gmem_post_populate(struct kvm *kvm, gfn_t gfn_start, kvm_pfn_t pf
                bool assigned;
                int level;
 
-               if (!kvm_mem_is_private(kvm, gfn)) {
-                       pr_debug("%s: Failed to ensure GFN 0x%llx has private memory attribute set\n",
-                                __func__, gfn);
-                       ret = -EINVAL;
-                       goto err;
-               }
-
                ret = snp_lookup_rmpentry((u64)pfn + i, &assigned, &level);
                if (ret || assigned) {
                        pr_debug("%s: Failed to ensure GFN 0x%llx RMP entry is initial shared state, ret: %d assigned: %d\n",
index c223b97..79a6b1a 100644 (file)
@@ -2449,6 +2449,7 @@ static inline int kvm_gmem_get_pfn(struct kvm *kvm,
 int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_order);
 #endif
 
+#ifdef CONFIG_KVM_GENERIC_PRIVATE_MEM
 /**
  * kvm_gmem_populate() - Populate/prepare a GPA range with guest data
  *
@@ -2475,6 +2476,7 @@ typedef int (*kvm_gmem_populate_cb)(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
 
 long kvm_gmem_populate(struct kvm *kvm, gfn_t gfn, void __user *src, long npages,
                       kvm_gmem_populate_cb post_populate, void *opaque);
+#endif
 
 #ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE
 void kvm_arch_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end);
index 319ec49..95e338a 100644 (file)
@@ -612,6 +612,7 @@ out:
 }
 EXPORT_SYMBOL_GPL(kvm_gmem_get_pfn);
 
+#ifdef CONFIG_KVM_GENERIC_PRIVATE_MEM
 long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long npages,
                       kvm_gmem_populate_cb post_populate, void *opaque)
 {
@@ -665,11 +666,21 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
                    (npages - i) < (1 << max_order))
                        max_order = 0;
 
+               ret = -EINVAL;
+               while (!kvm_range_has_memory_attributes(kvm, gfn, gfn + (1 << max_order),
+                                                       KVM_MEMORY_ATTRIBUTE_PRIVATE,
+                                                       KVM_MEMORY_ATTRIBUTE_PRIVATE)) {
+                       if (!max_order)
+                               goto put_folio_and_exit;
+                       max_order--;
+               }
+
                p = src ? src + i * PAGE_SIZE : NULL;
                ret = post_populate(kvm, gfn, pfn, p, max_order, opaque);
                if (!ret)
                        folio_mark_uptodate(folio);
 
+put_folio_and_exit:
                folio_put(folio);
                if (ret)
                        break;
@@ -681,3 +692,4 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
        return ret && !i ? ret : i;
 }
 EXPORT_SYMBOL_GPL(kvm_gmem_populate);
+#endif