Merge tag 'mm-stable-2022-08-03' of git://git.kernel.org/pub/scm/linux/kernel/git...
[linux-2.6-microblaze.git] / mm / migrate_device.c
index 5dd97c3..27fb37d 100644 (file)
@@ -148,15 +148,21 @@ again:
                        if (is_writable_device_private_entry(entry))
                                mpfn |= MIGRATE_PFN_WRITE;
                } else {
-                       if (!(migrate->flags & MIGRATE_VMA_SELECT_SYSTEM))
-                               goto next;
                        pfn = pte_pfn(pte);
-                       if (is_zero_pfn(pfn)) {
+                       if (is_zero_pfn(pfn) &&
+                           (migrate->flags & MIGRATE_VMA_SELECT_SYSTEM)) {
                                mpfn = MIGRATE_PFN_MIGRATE;
                                migrate->cpages++;
                                goto next;
                        }
                        page = vm_normal_page(migrate->vma, addr, pte);
+                       if (page && !is_zone_device_page(page) &&
+                           !(migrate->flags & MIGRATE_VMA_SELECT_SYSTEM))
+                               goto next;
+                       else if (page && is_device_coherent_page(page) &&
+                           (!(migrate->flags & MIGRATE_VMA_SELECT_DEVICE_COHERENT) ||
+                            page->pgmap->owner != migrate->pgmap_owner))
+                               goto next;
                        mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
                        mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
                }
@@ -518,7 +524,7 @@ EXPORT_SYMBOL(migrate_vma_setup);
  *     handle_pte_fault()
  *       do_anonymous_page()
  * to map in an anonymous zero page but the struct page will be a ZONE_DEVICE
- * private page.
+ * private or coherent page.
  */
 static void migrate_vma_insert_page(struct migrate_vma *migrate,
                                    unsigned long addr,
@@ -594,11 +600,8 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
                                                page_to_pfn(page));
                entry = swp_entry_to_pte(swp_entry);
        } else {
-               /*
-                * For now we only support migrating to un-addressable device
-                * memory.
-                */
-               if (is_zone_device_page(page)) {
+               if (is_zone_device_page(page) &&
+                   !is_device_coherent_page(page)) {
                        pr_warn_once("Unsupported ZONE_DEVICE page type.\n");
                        goto abort;
                }
@@ -683,6 +686,12 @@ void migrate_vma_pages(struct migrate_vma *migrate)
                }
 
                if (!page) {
+                       /*
+                        * The only time there is no vma is when called from
+                        * migrate_device_coherent_page(). However this isn't
+                        * called if the page could not be unmapped.
+                        */
+                       VM_BUG_ON(!migrate->vma);
                        if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE))
                                continue;
                        if (!notified) {
@@ -701,10 +710,11 @@ void migrate_vma_pages(struct migrate_vma *migrate)
 
                mapping = page_mapping(page);
 
-               if (is_device_private_page(newpage)) {
+               if (is_device_private_page(newpage) ||
+                   is_device_coherent_page(newpage)) {
                        /*
-                        * For now only support private anonymous when migrating
-                        * to un-addressable device memory.
+                        * For now only support anonymous memory migrating to
+                        * device private or coherent memory.
                         */
                        if (mapping) {
                                migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
@@ -791,3 +801,49 @@ void migrate_vma_finalize(struct migrate_vma *migrate)
        }
 }
 EXPORT_SYMBOL(migrate_vma_finalize);
+
+/*
+ * Migrate a device coherent page back to normal memory. The caller should have
+ * a reference on page which will be copied to the new page if migration is
+ * successful or dropped on failure.
+ *
+ * Returns 0 on success or -EBUSY if the page could not be unmapped or the
+ * migration did not complete (e.g. destination allocation failed).
+ */
+int migrate_device_coherent_page(struct page *page)
+{
+       unsigned long src_pfn, dst_pfn = 0;
+       struct page *dpage;
+       /*
+        * Use a designated initializer so the fields this path does not set
+        * explicitly (start, end, flags, pgmap_owner, ...) are zeroed rather
+        * than left as stack garbage before being handed to the
+        * migrate_vma_* helpers.
+        */
+       struct migrate_vma args = {
+               .vma = NULL,
+               .src = &src_pfn,
+               .dst = &dst_pfn,
+               .cpages = 1,
+               .npages = 1,
+       };
+
+       WARN_ON_ONCE(PageCompound(page));
+
+       lock_page(page);
+       src_pfn = migrate_pfn(page_to_pfn(page)) | MIGRATE_PFN_MIGRATE;
+
+       /*
+        * We don't have a VMA and don't need to walk the page tables to find
+        * the source page. So call migrate_vma_unmap() directly to unmap the
+        * page as migrate_vma_setup() will fail if args.vma == NULL.
+        */
+       migrate_vma_unmap(&args);
+       if (!(src_pfn & MIGRATE_PFN_MIGRATE))
+               /* Unmap failed; the migrate core already restored the page. */
+               return -EBUSY;
+
+       dpage = alloc_page(GFP_USER | __GFP_NOWARN);
+       if (dpage) {
+               lock_page(dpage);
+               dst_pfn = migrate_pfn(page_to_pfn(dpage));
+       }
+
+       /*
+        * If dpage allocation failed, dst_pfn stays 0 and migrate_vma_pages()
+        * clears MIGRATE_PFN_MIGRATE in src_pfn, so copy_highpage() below is
+        * never reached with a NULL destination.
+        */
+       migrate_vma_pages(&args);
+       if (src_pfn & MIGRATE_PFN_MIGRATE)
+               copy_highpage(dpage, page);
+       migrate_vma_finalize(&args);
+
+       if (src_pfn & MIGRATE_PFN_MIGRATE)
+               return 0;
+       return -EBUSY;
+}