Merge tag 'kvm-ppc-next-5.9-1' of git://git.kernel.org/pub/scm/linux/kernel/git/paulu...
author Paolo Bonzini <pbonzini@redhat.com>
Sun, 9 Aug 2020 17:24:02 +0000 (13:24 -0400)
committer Paolo Bonzini <pbonzini@redhat.com>
Sun, 9 Aug 2020 17:24:02 +0000 (13:24 -0400)
PPC KVM update for 5.9

- Improvements and bug fixes for secure VM support, reducing startup
  time and adding memory hotplug support.
- Locking fixes in nested KVM code
- Increase number of guests supported by HV KVM to 4094
- Preliminary POWER10 support

arch/powerpc/include/asm/reg.h
arch/powerpc/kvm/book3s_64_mmu_radix.c
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_hv_uvmem.c
arch/powerpc/kvm/book3s_interrupts.S
arch/powerpc/kvm/book3s_pr.c
arch/powerpc/kvm/powerpc.c

Simple merge
Simple merge
diff --cc arch/powerpc/kvm/book3s_hv_uvmem.c
@@@ -253,14 -496,94 +496,95 @@@ unsigned long kvmppc_h_svm_init_start(s
        return ret;
  }
  
- unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
+ /*
+  * Provision a new page on HV side and copy over the contents
+  * from secure memory using UV_PAGE_OUT uvcall.
+  * Caller must hold kvm->arch.uvmem_lock.
+  */
+ static int __kvmppc_svm_page_out(struct vm_area_struct *vma,
+               unsigned long start,
+               unsigned long end, unsigned long page_shift,
+               struct kvm *kvm, unsigned long gpa)
  {
-       if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
-               return H_UNSUPPORTED;
+       unsigned long src_pfn, dst_pfn = 0;
+       struct migrate_vma mig;
+       struct page *dpage, *spage;
+       struct kvmppc_uvmem_page_pvt *pvt;
+       unsigned long pfn;
+       int ret = U_SUCCESS;
  
-       kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_DONE;
-       pr_info("LPID %d went secure\n", kvm->arch.lpid);
-       return H_SUCCESS;
+       memset(&mig, 0, sizeof(mig));
+       mig.vma = vma;
+       mig.start = start;
+       mig.end = end;
+       mig.src = &src_pfn;
+       mig.dst = &dst_pfn;
 -      mig.src_owner = &kvmppc_uvmem_pgmap;
++      mig.pgmap_owner = &kvmppc_uvmem_pgmap;
++      mig.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
+       /* The requested page is already paged-out, nothing to do */
+       if (!kvmppc_gfn_is_uvmem_pfn(gpa >> page_shift, kvm, NULL))
+               return ret;
+       ret = migrate_vma_setup(&mig);
+       if (ret)
+               return -1;
+       spage = migrate_pfn_to_page(*mig.src);
+       if (!spage || !(*mig.src & MIGRATE_PFN_MIGRATE))
+               goto out_finalize;
+       if (!is_zone_device_page(spage))
+               goto out_finalize;
+       dpage = alloc_page_vma(GFP_HIGHUSER, vma, start);
+       if (!dpage) {
+               ret = -1;
+               goto out_finalize;
+       }
+       lock_page(dpage);
+       pvt = spage->zone_device_data;
+       pfn = page_to_pfn(dpage);
+       /*
+        * This function is used in two cases:
+        * - When HV touches a secure page, for which we do UV_PAGE_OUT
+        * - When a secure page is converted to shared page, we *get*
+        *   the page to essentially unmap the device page. In this
+        *   case we skip page-out.
+        */
+       if (!pvt->skip_page_out)
+               ret = uv_page_out(kvm->arch.lpid, pfn << page_shift,
+                                 gpa, 0, page_shift);
+       if (ret == U_SUCCESS)
+               *mig.dst = migrate_pfn(pfn) | MIGRATE_PFN_LOCKED;
+       else {
+               unlock_page(dpage);
+               __free_page(dpage);
+               goto out_finalize;
+       }
+       migrate_vma_pages(&mig);
+ 
+ out_finalize:
+       migrate_vma_finalize(&mig);
+       return ret;
+ }
+ 
+ static inline int kvmppc_svm_page_out(struct vm_area_struct *vma,
+                                     unsigned long start, unsigned long end,
+                                     unsigned long page_shift,
+                                     struct kvm *kvm, unsigned long gpa)
+ {
+       int ret;
+ 
+       mutex_lock(&kvm->arch.uvmem_lock);
+       ret = __kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa);
+       mutex_unlock(&kvm->arch.uvmem_lock);
+       return ret;
  }
  
  /*
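
For readers following the new __kvmppc_svm_page_out() above, here is a
minimal sketch of the migrate_vma setup/pages/finalize pattern it uses,
reflecting the 5.9 rename of src_owner to pgmap_owner and the new
selection flags. It is illustrative only: demo_page_out() is a
hypothetical name, error handling is trimmed, and the UV_PAGE_OUT copy
step is reduced to a comment.

/*
 * Hypothetical sketch of the migrate_vma pattern used above.
 * Not the real KVM code: demo_page_out() is invented, and the
 * UV_PAGE_OUT copy step is reduced to a comment.
 */
static int demo_page_out(struct vm_area_struct *vma, unsigned long addr)
{
	unsigned long src_pfn = 0, dst_pfn = 0;
	struct migrate_vma mig = {
		.vma		= vma,
		.start		= addr,
		.end		= addr + PAGE_SIZE,
		.src		= &src_pfn,
		.dst		= &dst_pfn,
		.pgmap_owner	= &kvmppc_uvmem_pgmap,
		.flags		= MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
	};
	struct page *dpage;

	if (migrate_vma_setup(&mig))		/* collect and isolate src page */
		return -1;
	if (!(src_pfn & MIGRATE_PFN_MIGRATE))	/* nothing was collected */
		goto out;
	dpage = alloc_page_vma(GFP_HIGHUSER, vma, addr);
	if (!dpage)
		goto out;
	lock_page(dpage);
	/* ... copy the device-private contents into dpage here ... */
	dst_pfn = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
	migrate_vma_pages(&mig);		/* switch the mappings over */
out:
	migrate_vma_finalize(&mig);		/* release isolated pages */
	return 0;
}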
@@@ -400,20 -744,7 +745,8 @@@ static int kvmppc_svm_page_in(struct vm
        mig.end = end;
        mig.src = &src_pfn;
        mig.dst = &dst_pfn;
 +      mig.flags = MIGRATE_VMA_SELECT_SYSTEM;
  
-       /*
-        * We come here with mmap_lock write lock held just for
-        * ksm_madvise(), otherwise we only need read mmap_lock.
-        * Hence downgrade to read lock once ksm_madvise() is done.
-        */
-       ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
-                         MADV_UNMERGEABLE, &vma->vm_flags);
-       mmap_write_downgrade(kvm->mm);
-       *downgrade = true;
-       if (ret)
-               return ret;
        ret = migrate_vma_setup(&mig);
        if (ret)
                return ret;
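
The hunk above drops the write-lock/downgrade dance from
kvmppc_svm_page_in(): ksm_madvise() is the only step that needs the mmap
write lock, and after this series it is handled outside this function,
so the page-in path can run under the read lock alone. For reference, a
generic sketch of the pattern the removed comment described; the helper
name demo_unmerge_then_migrate() is hypothetical.

/*
 * Hypothetical sketch of the removed locking pattern: ksm_madvise()
 * requires the mmap write lock, while the migration itself only needs
 * it for reading, so the lock is downgraded in between.
 */
static int demo_unmerge_then_migrate(struct kvm *kvm,
				     struct vm_area_struct *vma)
{
	int ret;

	mmap_write_lock(kvm->mm);
	ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
			  MADV_UNMERGEABLE, &vma->vm_flags);
	mmap_write_downgrade(kvm->mm);	/* write lock -> read lock */
	if (!ret) {
		/* ... run the migrate_vma sequence under the read lock ... */
	}
	mmap_read_unlock(kvm->mm);
	return ret;
}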
Simple merge
diff --cc arch/powerpc/kvm/book3s_pr.c
@@@ -1826,8 -1826,10 +1826,7 @@@ static void kvmppc_core_vcpu_free_pr(st
  
  static int kvmppc_vcpu_run_pr(struct kvm_vcpu *vcpu)
  {
-       struct kvm_run *run = vcpu->run;
        int ret;
 -#ifdef CONFIG_ALTIVEC
 -      unsigned long uninitialized_var(vrsave);
 -#endif
  
        /* Check if we can run the vcpu at all */
        if (!vcpu->arch.sane) {
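
The kvmppc_vcpu_run_pr() hunk above drops an uninitialized_var()
annotation along with the unused kvm_run local. uninitialized_var() was
removed tree-wide around this release because it silences
-Wmaybe-uninitialized without fixing anything; in this function the
variable was deleted outright, but where a variable survives the
idiomatic replacement is an explicit initializer, as in this sketch:

#ifdef CONFIG_ALTIVEC
	/* Old idiom (removed): hides the warning without fixing it. */
	unsigned long uninitialized_var(vrsave);

	/* Preferred: initialize explicitly, or restructure the code so
	 * that every path assigns the variable before it is read. */
	unsigned long vrsave = 0;
#endif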
Simple merge