index b947179..886d614 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -92,10 +92,17 @@ static inline struct page *try_get_compound_head(struct page *page, int refs)
        return head;
 }
 
-/*
+/**
  * try_grab_compound_head() - attempt to elevate a page's refcount, by a
  * flags-dependent amount.
  *
+ * Even though the name includes "compound_head", this function is still
+ * appropriate for callers that have a non-compound @page to get.
+ *
+ * @page:  pointer to page to be grabbed
+ * @refs:  the value to (effectively) add to the page's refcount
+ * @flags: gup flags: these are the FOLL_* flag values.
+ *
  * "grab" names in this file mean, "look at flags to decide whether to use
  * FOLL_PIN or FOLL_GET behavior, when incrementing the page's refcount".
  *
@@ -103,22 +110,26 @@ static inline struct page *try_get_compound_head(struct page *page, int refs)
  * same time. (That's true throughout the get_user_pages*() and
  * pin_user_pages*() APIs.) Cases:
  *
- *    FOLL_GET: page's refcount will be incremented by 1.
- *    FOLL_PIN: page's refcount will be incremented by GUP_PIN_COUNTING_BIAS.
+ *    FOLL_GET: page's refcount will be incremented by @refs.
+ *
+ *    FOLL_PIN on compound pages that are > two pages long: page's refcount will
+ *    be incremented by @refs, and page[2].hpage_pinned_refcount will be
+ *    incremented by @refs.
+ *
+ *    FOLL_PIN on normal pages, or compound pages that are two pages long:
+ *    page's refcount will be incremented by @refs * GUP_PIN_COUNTING_BIAS.
  *
  * Return: head page (with refcount appropriately incremented) for success, or
  * NULL upon failure. If neither FOLL_GET nor FOLL_PIN was set, that's
  * considered failure, and furthermore, a likely bug in the caller, so a warning
  * is also emitted.
  */
-__maybe_unused struct page *try_grab_compound_head(struct page *page,
-                                                  int refs, unsigned int flags)
+struct page *try_grab_compound_head(struct page *page,
+                                   int refs, unsigned int flags)
 {
        if (flags & FOLL_GET)
                return try_get_compound_head(page, refs);
        else if (flags & FOLL_PIN) {
-               int orig_refs = refs;
-
                /*
                 * Can't do FOLL_LONGTERM + FOLL_PIN gup fast path if not in a
                 * right zone, so fail and let the caller fall back to the slow
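For @refs == 1, the kernel-doc above works out as follows. A minimal sketch
(hypothetical helper, not part of this patch), assuming GUP_PIN_COUNTING_BIAS
is 1024 (1U << 10), its value in include/linux/mm.h:

static bool pin_arithmetic_sketch(struct page *page)
{
	/*
	 * FOLL_GET:                        page_ref_count(head) += 1
	 * FOLL_PIN, normal/order-1 page:   page_ref_count(head) += 1024
	 * FOLL_PIN, order > 1 compound:    page_ref_count(head) += 1, and
	 *                                  page[2].hpage_pinned_refcount += 1
	 */
	struct page *head = try_grab_compound_head(page, 1, FOLL_PIN);

	if (!head)
		return false;

	/* In either FOLL_PIN case, the page now reports as DMA-pinned: */
	return page_maybe_dma_pinned(head);
}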
@@ -143,6 +154,8 @@ __maybe_unused struct page *try_grab_compound_head(struct page *page,
                 *
                 * However, be sure to *also* increment the normal page refcount
                 * field at least once, so that the page really is pinned.
+                * That's why the refcount from the earlier
+                * try_get_compound_head() is left intact.
                 */
                if (hpage_pincount_available(page))
                        hpage_pincount_add(page, refs);
@@ -150,7 +163,7 @@ __maybe_unused struct page *try_grab_compound_head(struct page *page,
                        page_ref_add(page, refs * (GUP_PIN_COUNTING_BIAS - 1));
 
                mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_ACQUIRED,
-                                   orig_refs);
+                                   refs);
 
                return page;
        }
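The release side must undo exactly this arithmetic. A simplified sketch of
what put_compound_head() in this file does (hypothetical name here; the real
code routes the final drop through put_page_refs() so that a last reference
frees the page):

static void unpin_arithmetic_sketch(struct page *head, int refs)
{
	mod_node_page_state(page_pgdat(head), NR_FOLL_PIN_RELEASED, refs);

	if (hpage_pincount_available(head))
		hpage_pincount_sub(head, refs);	/* back out the exact count */
	else
		refs *= GUP_PIN_COUNTING_BIAS;	/* back out the biased count */

	page_ref_sub(head, refs);	/* simplified: see put_page_refs() */
}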
@@ -186,10 +199,8 @@ static void put_compound_head(struct page *page, int refs, unsigned int flags)
  * @flags:   gup flags: these are the FOLL_* flag values.
  *
  * Either FOLL_PIN or FOLL_GET (or neither) may be set, but not both at the same
- * time. Cases:
- *
- *    FOLL_GET: page's refcount will be incremented by 1.
- *    FOLL_PIN: page's refcount will be incremented by GUP_PIN_COUNTING_BIAS.
+ * time. Cases: please see the try_grab_compound_head() documentation, with
+ * "refs=1".
  *
  * Return: true for success, or if no action was required (if neither FOLL_PIN
  * nor FOLL_GET was set, nothing is done). False for failure: FOLL_GET or
@@ -197,35 +208,10 @@ static void put_compound_head(struct page *page, int refs, unsigned int flags)
  */
 bool __must_check try_grab_page(struct page *page, unsigned int flags)
 {
-       WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == (FOLL_GET | FOLL_PIN));
-
-       if (flags & FOLL_GET)
-               return try_get_page(page);
-       else if (flags & FOLL_PIN) {
-               int refs = 1;
-
-               page = compound_head(page);
-
-               if (WARN_ON_ONCE(page_ref_count(page) <= 0))
-                       return false;
-
-               if (hpage_pincount_available(page))
-                       hpage_pincount_add(page, 1);
-               else
-                       refs = GUP_PIN_COUNTING_BIAS;
-
-               /*
-                * Similar to try_grab_compound_head(): even if using the
-                * hpage_pincount_add/_sub() routines, be sure to
-                * *also* increment the normal page refcount field at least
-                * once, so that the page really is pinned.
-                */
-               page_ref_add(page, refs);
+       if (!(flags & (FOLL_GET | FOLL_PIN)))
+               return true;
 
-               mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_ACQUIRED, 1);
-       }
-
-       return true;
+       return try_grab_compound_head(page, 1, flags);
 }
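With this, try_grab_page() is simply the @refs == 1 case of the compound
helper. From outside mm/, the same FOLL_PIN semantics are reached via the
pin_user_pages*() APIs; a minimal, hypothetical driver-style usage:

static int pin_one_user_page(unsigned long uaddr)
{
	struct page *page;
	int ret;

	ret = pin_user_pages_fast(uaddr & PAGE_MASK, 1, FOLL_WRITE, &page);
	if (ret != 1)
		return ret < 0 ? ret : -EFAULT;

	/* ... set up DMA to/from the page here ... */

	/* Undoes the GUP_PIN_COUNTING_BIAS bookkeeping shown above. */
	unpin_user_page(page);
	return 0;
}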
 
 /**
@@ -1151,7 +1137,6 @@ static long __get_user_pages(struct mm_struct *mm,
                                         * We must stop here.
                                         */
                                        BUG_ON(gup_flags & FOLL_NOWAIT);
-                                       BUG_ON(ret != 0);
                                        goto out;
                                }
                                continue;
@@ -1276,7 +1261,7 @@ int fixup_user_fault(struct mm_struct *mm,
                     bool *unlocked)
 {
        struct vm_area_struct *vma;
-       vm_fault_t ret, major = 0;
+       vm_fault_t ret;
 
        address = untagged_addr(address);
 
@@ -1296,7 +1281,6 @@ retry:
                return -EINTR;
 
        ret = handle_mm_fault(vma, address, fault_flags, NULL);
-       major |= ret & VM_FAULT_MAJOR;
        if (ret & VM_FAULT_ERROR) {
                int err = vm_fault_to_errno(ret, 0);
 
@@ -1475,8 +1459,8 @@ long populate_vma_page_range(struct vm_area_struct *vma,
        unsigned long nr_pages = (end - start) / PAGE_SIZE;
        int gup_flags;
 
-       VM_BUG_ON(start & ~PAGE_MASK);
-       VM_BUG_ON(end   & ~PAGE_MASK);
+       VM_BUG_ON(!PAGE_ALIGNED(start));
+       VM_BUG_ON(!PAGE_ALIGNED(end));
        VM_BUG_ON_VMA(start < vma->vm_start, vma);
        VM_BUG_ON_VMA(end   > vma->vm_end, vma);
        mmap_assert_locked(mm);
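The replacement is behavior-neutral: PAGE_ALIGNED() in include/linux/mm.h is
the same mask test, just named for what it checks:

#define PAGE_ALIGNED(addr)	IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)

so !PAGE_ALIGNED(start) is equivalent to (start & ~PAGE_MASK) != 0.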
@@ -1775,7 +1759,7 @@ static long check_and_migrate_movable_pages(unsigned long nr_pages,
        if (!list_empty(&movable_page_list)) {
                ret = migrate_pages(&movable_page_list, alloc_migration_target,
                                    NULL, (unsigned long)&mtc, MIGRATE_SYNC,
-                                   MR_LONGTERM_PIN);
+                                   MR_LONGTERM_PIN, NULL);
                if (ret && !list_empty(&movable_page_list))
                        putback_movable_pages(&movable_page_list);
        }
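The added NULL matches migrate_pages() growing an optional out-parameter for
the precise number of pages migrated; the 5.15-era prototype in
include/linux/migrate.h is, approximately:

int migrate_pages(struct list_head *from, new_page_t get_new_page,
		  free_page_t put_new_page, unsigned long private,
		  enum migrate_mode mode, int reason,
		  unsigned int *ret_succeeded);

gup.c passes NULL because it only cares whether anything failed to migrate.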
@@ -2244,6 +2228,7 @@ static int __gup_device_huge(unsigned long pfn, unsigned long addr,
 {
        int nr_start = *nr;
        struct dev_pagemap *pgmap = NULL;
+       int ret = 1;
 
        do {
                struct page *page = pfn_to_page(pfn);
@@ -2251,21 +2236,22 @@ static int __gup_device_huge(unsigned long pfn, unsigned long addr,
                pgmap = get_dev_pagemap(pfn, pgmap);
                if (unlikely(!pgmap)) {
                        undo_dev_pagemap(nr, nr_start, flags, pages);
-                       return 0;
+                       ret = 0;
+                       break;
                }
                SetPageReferenced(page);
                pages[*nr] = page;
                if (unlikely(!try_grab_page(page, flags))) {
                        undo_dev_pagemap(nr, nr_start, flags, pages);
-                       return 0;
+                       ret = 0;
+                       break;
                }
                (*nr)++;
                pfn++;
        } while (addr += PAGE_SIZE, addr != end);
 
-       if (pgmap)
-               put_dev_pagemap(pgmap);
-       return 1;
+       put_dev_pagemap(pgmap);
+       return ret;
 }
 
 static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
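The __gup_device_huge() rewrite works because get_dev_pagemap() caches and
reuses @pgmap across iterations, so at most one reference is outstanding when
the loop exits, on any path, and put_dev_pagemap() accepts NULL; the earlier
early returns leaked that cached reference. The shape of the pattern, as a
hypothetical sketch:

static int walk_dev_pfns_sketch(unsigned long pfn, unsigned long nr)
{
	struct dev_pagemap *pgmap = NULL;
	int ret = 1;
	unsigned long i;

	for (i = 0; i < nr; i++, pfn++) {
		pgmap = get_dev_pagemap(pfn, pgmap);	/* reuses cached pgmap */
		if (unlikely(!pgmap)) {
			ret = 0;
			break;	/* not "return": the put below must still run */
		}
	}
	put_dev_pagemap(pgmap);	/* NULL-safe; drops the one cached reference */
	return ret;
}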