diff --git a/mm/gup.c b/mm/gup.c
index e2a39e3..7328251 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -134,7 +134,7 @@ struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags)
                 * path.
                 */
                if (unlikely((flags & FOLL_LONGTERM) &&
-                            !is_pinnable_page(page)))
+                            !is_longterm_pinnable_page(page)))
                        return NULL;
 
                /*
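
The rename makes the intent explicit: only pages that can stay resident for an unbounded time are eligible for FOLL_LONGTERM. As a rough sketch of the kind of checks such a predicate performs (simplified, not the exact include/linux/mm.h definition; demo_longterm_pinnable() is a hypothetical name, and helpers such as is_migrate_cma_page() and is_zone_movable_page() are assumed from this era of the tree):

	static inline bool demo_longterm_pinnable(struct page *page)
	{
		/* The shared zero page never moves, so it may always be pinned. */
		if (is_zero_pfn(page_to_pfn(page)))
			return true;
		/*
		 * CMA and ZONE_MOVABLE memory must stay migratable; a longterm
		 * pin would defeat compaction and memory hot-unplug. (The full
		 * check also rejects device coherent pages, handled further
		 * down in this diff.)
		 */
		if (is_migrate_cma_page(page) || is_zone_movable_page(page))
			return false;
		return true;
	}
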
@@ -953,6 +953,25 @@ static int faultin_page(struct vm_area_struct *vma,
        }
 
        ret = handle_mm_fault(vma, address, fault_flags, NULL);
+
+       if (ret & VM_FAULT_COMPLETED) {
+               /*
+                * With FAULT_FLAG_RETRY_NOWAIT we'll never release the
+                * mmap lock in the page fault handler. Sanity check this.
+                */
+               WARN_ON_ONCE(fault_flags & FAULT_FLAG_RETRY_NOWAIT);
+               if (locked)
+                       *locked = 0;
+               /*
+                * We should do the same as VM_FAULT_RETRY, but let's not
+                * return -EBUSY since that would not reflect what actually
+                * happened: we've just fully completed a page fault, with
+                * the mmap lock released.  Use -EAGAIN to show that we want
+                * to take the mmap lock _again_.
+                */
+               return -EAGAIN;
+       }
+
        if (ret & VM_FAULT_ERROR) {
                int err = vm_fault_to_errno(ret, *flags);
 
@@ -1179,6 +1198,7 @@ retry:
                        case 0:
                                goto retry;
                        case -EBUSY:
+                       case -EAGAIN:
                                ret = 0;
                                fallthrough;
                        case -EFAULT:
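
Together, the faultin_page() and __get_user_pages() hunks above form a small protocol: handle_mm_fault() reports VM_FAULT_COMPLETED, faultin_page() maps that to -EAGAIN with *locked cleared, and the switch above folds -EAGAIN into the "return 0 with the lock dropped" path so the outer loop retries. A hedged sketch of that outer retry, simplified from the __get_user_pages_locked() pattern (demo_gup_with_retry() is a hypothetical name; a real caller also advances start/nr_pages by the pages already returned):

	static long demo_gup_with_retry(struct mm_struct *mm, unsigned long start,
					unsigned long nr_pages, struct page **pages)
	{
		int locked = 1;
		long ret;

		mmap_read_lock(mm);
		for (;;) {
			ret = __get_user_pages(mm, start, nr_pages, FOLL_TOUCH,
					       pages, NULL, &locked);
			if (ret || locked)
				break;
			/*
			 * locked == 0 with ret == 0 covers both VM_FAULT_RETRY
			 * (-EBUSY) and the new VM_FAULT_COMPLETED (-EAGAIN):
			 * the fault path dropped the mmap lock, so take it
			 * again and resume the walk.
			 */
			mmap_read_lock(mm);
			locked = 1;
		}
		if (locked)
			mmap_read_unlock(mm);
		return ret;
	}
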
@@ -1305,6 +1325,18 @@ retry:
                return -EINTR;
 
        ret = handle_mm_fault(vma, address, fault_flags, NULL);
+
+       if (ret & VM_FAULT_COMPLETED) {
+               /*
+                * NOTE: it's a pity that we need to retake the lock here
+                * to pair with the unlock() in the callers. Ideally we
+                * could tell the callers so they do not need to unlock.
+                */
+               mmap_read_lock(mm);
+               *unlocked = true;
+               return 0;
+       }
+
        if (ret & VM_FAULT_ERROR) {
                int err = vm_fault_to_errno(ret, 0);
 
@@ -1370,7 +1402,7 @@ static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
                        /* VM_FAULT_RETRY couldn't trigger, bypass */
                        return ret;
 
-               /* VM_FAULT_RETRY cannot return errors */
+               /* VM_FAULT_RETRY or VM_FAULT_COMPLETED cannot return errors */
                if (!*locked) {
                        BUG_ON(ret < 0);
                        BUG_ON(ret >= nr_pages);
@@ -1674,7 +1706,7 @@ static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start,
                        goto finish_or_fault;
 
                if (pages) {
-                       pages[i] = virt_to_page(start);
+                       pages[i] = virt_to_page((void *)start);
                        if (pages[i])
                                get_page(pages[i]);
                }
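
This cast is part of a tree-wide cleanup teaching virt_to_page() to take a real pointer instead of relying on the macro implicitly accepting an unsigned long, which warns on architectures where the conversion is explicit. A minimal illustration (the buffer is hypothetical; error handling elided):

	void *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);	/* hypothetical buffer */
	unsigned long addr = (unsigned long)buf;
	struct page *pg;

	pg = virt_to_page(buf);			/* fine: already a pointer */
	pg = virt_to_page((void *)addr);	/* integer addresses need a cast */
	kfree(buf);
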
@@ -1883,7 +1915,7 @@ static long check_and_migrate_movable_pages(unsigned long nr_pages,
        unsigned long isolation_error_count = 0, i;
        struct folio *prev_folio = NULL;
        LIST_HEAD(movable_page_list);
-       bool drain_allow = true;
+       bool drain_allow = true, coherent_pages = false;
        int ret = 0;
 
        for (i = 0; i < nr_pages; i++) {
@@ -1893,14 +1925,43 @@ static long check_and_migrate_movable_pages(unsigned long nr_pages,
                        continue;
                prev_folio = folio;
 
-               if (folio_is_pinnable(folio))
+               /*
+                * Device coherent pages are managed by a driver and should not
+                * be pinned indefinitely, as that prevents the driver from
+                * moving the page. So when trying to pin with FOLL_LONGTERM,
+                * instead try to migrate the page out of device memory.
+                */
+               if (folio_is_device_coherent(folio)) {
+                       /*
+                        * We always want a new GUP lookup with device coherent
+                        * pages.
+                        */
+                       pages[i] = NULL;
+                       coherent_pages = true;
+
+                       /*
+                        * Migration will fail if the page is pinned, so convert
+                        * the pin on the source page to a normal reference.
+                        */
+                       if (gup_flags & FOLL_PIN) {
+                               get_page(&folio->page);
+                               unpin_user_page(&folio->page);
+                       }
+
+                       ret = migrate_device_coherent_page(&folio->page);
+                       if (ret)
+                               goto unpin_pages;
+
                        continue;
+               }
 
+               if (folio_is_longterm_pinnable(folio))
+                       continue;
                /*
                 * Try to move out any movable page before pinning the range.
                 */
                if (folio_test_hugetlb(folio)) {
-                       if (!isolate_huge_page(&folio->page,
+                       if (isolate_hugetlb(&folio->page,
                                                &movable_page_list))
                                isolation_error_count++;
                        continue;
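
Clearing pages[i] and setting coherent_pages forces the unpin path below even when migration succeeds, because the caller has to redo the GUP walk to find the pages at their new system-memory locations. Roughly, the FOLL_LONGTERM caller loops like this (simplified from the __gup_longterm_locked() pattern; treat the details as approximate):

	long rc;

	do {
		rc = __get_user_pages_locked(mm, start, nr_pages, pages,
					     vmas, NULL, gup_flags);
		if (rc <= 0)
			break;
		/*
		 * Returns nr_pages when every page is longterm-pinnable,
		 * 0 after migrating and unpinning (so the walk is retried),
		 * or a negative errno on failure.
		 */
		rc = check_and_migrate_movable_pages(rc, pages, gup_flags);
	} while (!rc);
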
@@ -1921,7 +1982,8 @@ static long check_and_migrate_movable_pages(unsigned long nr_pages,
                                    folio_nr_pages(folio));
        }
 
-       if (!list_empty(&movable_page_list) || isolation_error_count)
+       if (!list_empty(&movable_page_list) || isolation_error_count ||
+           coherent_pages)
                goto unpin_pages;
 
        /*
@@ -1931,10 +1993,16 @@ static long check_and_migrate_movable_pages(unsigned long nr_pages,
        return nr_pages;
 
 unpin_pages:
-       if (gup_flags & FOLL_PIN) {
-               unpin_user_pages(pages, nr_pages);
-       } else {
-               for (i = 0; i < nr_pages; i++)
+       /*
+        * pages[i] might be NULL if any device coherent pages were found.
+        */
+       for (i = 0; i < nr_pages; i++) {
+               if (!pages[i])
+                       continue;
+
+               if (gup_flags & FOLL_PIN)
+                       unpin_user_page(pages[i]);
+               else
                        put_page(pages[i]);
        }
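
The loop now picks the release primitive per page because the two reference flavours must never be mixed: pins taken with FOLL_PIN (pin_user_pages*()) are dropped with unpin_user_page(), while plain get_user_pages*() references are dropped with put_page(); mixing them corrupts the pin accounting. A short usage sketch (addr is a hypothetical user address; error handling elided):

	struct page *page;

	if (pin_user_pages_fast(addr, 1, FOLL_WRITE, &page) == 1)
		unpin_user_page(page);	/* pairs with the FOLL_PIN pin */

	if (get_user_pages_fast(addr, 1, FOLL_WRITE, &page) == 1)
		put_page(page);		/* pairs with the plain reference */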