diff --git a/mm/gup.c b/mm/gup.c
index 7328251..df83182 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -18,6 +18,7 @@
 #include <linux/migrate.h>
 #include <linux/mm_inline.h>
 #include <linux/sched/mm.h>
+#include <linux/shmem_fs.h>
 
 #include <asm/mmu_context.h>
 #include <asm/tlbflush.h>
@@ -51,7 +52,8 @@ static inline void sanity_check_pinned_pages(struct page **pages,
                struct page *page = *pages;
                struct folio *folio = page_folio(page);
 
-               if (!folio_test_anon(folio))
+               if (is_zero_page(page) ||
+                   !folio_test_anon(folio))
                        continue;
                if (!folio_test_large(folio) || folio_test_hugetlb(folio))
                        VM_BUG_ON_PAGE(!PageAnonExclusive(&folio->page), page);
@@ -111,7 +113,7 @@ retry:
  *    FOLL_GET: folio's refcount will be incremented by @refs.
  *
  *    FOLL_PIN on large folios: folio's refcount will be incremented by
- *    @refs, and its compound_pincount will be incremented by @refs.
+ *    @refs, and its pincount will be incremented by @refs.
  *
  *    FOLL_PIN on single-page folios: folio's refcount will be incremented by
  *    @refs * GUP_PIN_COUNTING_BIAS.
@@ -123,56 +125,75 @@ retry:
  */
 struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags)
 {
+       struct folio *folio;
+
+       if (WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == 0))
+               return NULL;
+
+       if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)))
+               return NULL;
+
        if (flags & FOLL_GET)
                return try_get_folio(page, refs);
-       else if (flags & FOLL_PIN) {
-               struct folio *folio;
 
-               /*
-                * Can't do FOLL_LONGTERM + FOLL_PIN gup fast path if not in a
-                * right zone, so fail and let the caller fall back to the slow
-                * path.
-                */
-               if (unlikely((flags & FOLL_LONGTERM) &&
-                            !is_longterm_pinnable_page(page)))
-                       return NULL;
+       /* FOLL_PIN is set */
 
-               /*
-                * CAUTION: Don't use compound_head() on the page before this
-                * point, the result won't be stable.
-                */
-               folio = try_get_folio(page, refs);
-               if (!folio)
-                       return NULL;
+       /*
+        * Don't take a pin on the zero page - it's not going anywhere
+        * and it is used in a *lot* of places.
+        */
+       if (is_zero_page(page))
+               return page_folio(page);
 
-               /*
-                * When pinning a large folio, use an exact count to track it.
-                *
-                * However, be sure to *also* increment the normal folio
-                * refcount field at least once, so that the folio really
-                * is pinned.  That's why the refcount from the earlier
-                * try_get_folio() is left intact.
-                */
-               if (folio_test_large(folio))
-                       atomic_add(refs, folio_pincount_ptr(folio));
-               else
-                       folio_ref_add(folio,
-                                       refs * (GUP_PIN_COUNTING_BIAS - 1));
-               node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs);
+       folio = try_get_folio(page, refs);
+       if (!folio)
+               return NULL;
 
-               return folio;
+       /*
+        * Can't do FOLL_LONGTERM + FOLL_PIN gup fast path if the folio is not
+        * in the right zone, so fail and let the caller fall back to the slow
+        * path.
+        */
+       if (unlikely((flags & FOLL_LONGTERM) &&
+                    !folio_is_longterm_pinnable(folio))) {
+               if (!put_devmap_managed_page_refs(&folio->page, refs))
+                       folio_put_refs(folio, refs);
+               return NULL;
        }
 
-       WARN_ON_ONCE(1);
-       return NULL;
+       /*
+        * When pinning a large folio, use an exact count to track it.
+        *
+        * However, be sure to *also* increment the normal folio
+        * refcount field at least once, so that the folio really
+        * is pinned.  That's why the refcount from the earlier
+        * try_get_folio() is left intact.
+        */
+       if (folio_test_large(folio))
+               atomic_add(refs, &folio->_pincount);
+       else
+               folio_ref_add(folio,
+                               refs * (GUP_PIN_COUNTING_BIAS - 1));
+       /*
+        * Adjust the pincount before re-checking the PTE for changes.
+        * This is essentially a smp_mb() and is paired with a memory
+        * barrier in folio_try_share_anon_rmap_*().
+        */
+       smp_mb__after_atomic();
+
+       node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs);
+
+       return folio;
 }
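
The FOLL_PIN bookkeeping above reduces to simple arithmetic: a small folio folds
each pin into the refcount via GUP_PIN_COUNTING_BIAS, while a large folio keeps
an exact _pincount next to one extra reference per pin. The stand-alone
user-space sketch below is an editor's illustration, not kernel code and not
part of this patch; it assumes GUP_PIN_COUNTING_BIAS is 1024 (1 << 10), as in
include/linux/mm.h, and the struct and function names are invented for the
model. The final check mirrors the folio_maybe_dma_pinned() heuristic built on
top of this scheme.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define GUP_PIN_COUNTING_BIAS 1024      /* 1 << 10 in include/linux/mm.h */

struct folio_model {
        bool large;             /* folio_test_large() */
        int refcount;           /* folio_ref_count() */
        int pincount;           /* folio->_pincount, large folios only */
};

/* Mirrors the FOLL_PIN path of try_grab_folio() above. */
static void model_pin(struct folio_model *f, int refs)
{
        f->refcount += refs;                    /* try_get_folio() */
        if (f->large)
                f->pincount += refs;
        else
                f->refcount += refs * (GUP_PIN_COUNTING_BIAS - 1);
}

/* Mirrors folio_maybe_dma_pinned(): a heuristic, not an exact count. */
static bool model_maybe_pinned(const struct folio_model *f)
{
        if (f->large)
                return f->pincount > 0;
        return f->refcount >= GUP_PIN_COUNTING_BIAS;
}

int main(void)
{
        struct folio_model small = { .large = false, .refcount = 1 };
        struct folio_model big   = { .large = true,  .refcount = 1 };

        model_pin(&small, 1);   /* refcount: 1 + 1024 = 1025 */
        model_pin(&big, 3);     /* refcount: 1 + 3 = 4, pincount: 3 */

        assert(small.refcount == 1 + GUP_PIN_COUNTING_BIAS);
        assert(big.refcount == 4 && big.pincount == 3);
        assert(model_maybe_pinned(&small) && model_maybe_pinned(&big));
        printf("pin accounting model OK\n");
        return 0;
}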
 
 static void gup_put_folio(struct folio *folio, int refs, unsigned int flags)
 {
        if (flags & FOLL_PIN) {
+               if (is_zero_folio(folio))
+                       return;
                node_stat_mod_folio(folio, NR_FOLL_PIN_RELEASED, refs);
                if (folio_test_large(folio))
-                       atomic_sub(refs, folio_pincount_ptr(folio));
+                       atomic_sub(refs, &folio->_pincount);
                else
                        refs *= GUP_PIN_COUNTING_BIAS;
        }
@@ -195,21 +216,32 @@ static void gup_put_folio(struct folio *folio, int refs, unsigned int flags)
  * time. Cases: please see the try_grab_folio() documentation, with
  * "refs=1".
  *
- * Return: true for success, or if no action was required (if neither FOLL_PIN
- * nor FOLL_GET was set, nothing is done). False for failure: FOLL_GET or
- * FOLL_PIN was set, but the page could not be grabbed.
+ * Return: 0 for success, or if no action was required (if neither FOLL_PIN
+ * nor FOLL_GET was set, nothing is done). A negative error code for failure:
+ *
+ *   -ENOMEM           FOLL_GET or FOLL_PIN was set, but the page could not
+ *                     be grabbed.
+ *
+ *   -EREMOTEIO        The page is a PCI P2PDMA page and FOLL_PCI_P2PDMA was
+ *                     not set.
  */
-bool __must_check try_grab_page(struct page *page, unsigned int flags)
+int __must_check try_grab_page(struct page *page, unsigned int flags)
 {
        struct folio *folio = page_folio(page);
 
-       WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == (FOLL_GET | FOLL_PIN));
        if (WARN_ON_ONCE(folio_ref_count(folio) <= 0))
-               return false;
+               return -ENOMEM;
+
+       if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)))
+               return -EREMOTEIO;
 
        if (flags & FOLL_GET)
                folio_ref_inc(folio);
        else if (flags & FOLL_PIN) {
+               /*
+                * Don't take a pin on the zero page - it's not going anywhere
+                * and it is used in a *lot* of places.
+                */
+               if (is_zero_page(page))
+                       return 0;
+
                /*
                 * Similar to try_grab_folio(): be sure to *also*
                 * increment the normal page refcount field at least once,
@@ -217,7 +249,7 @@ bool __must_check try_grab_page(struct page *page, unsigned int flags)
                 */
                if (folio_test_large(folio)) {
                        folio_ref_add(folio, 1);
-                       atomic_add(1, folio_pincount_ptr(folio));
+                       atomic_add(1, &folio->_pincount);
                } else {
                        folio_ref_add(folio, GUP_PIN_COUNTING_BIAS);
                }
@@ -225,7 +257,7 @@ bool __must_check try_grab_page(struct page *page, unsigned int flags)
                node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, 1);
        }
 
-       return true;
+       return 0;
 }
 
 /**
@@ -244,6 +276,33 @@ void unpin_user_page(struct page *page)
 }
 EXPORT_SYMBOL(unpin_user_page);
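
For context, this is roughly how a caller is expected to drive the pin/unpin
machinery that try_grab_folio() and gup_put_folio() implement. The sketch below
is a hypothetical driver helper and not part of this patch (example_pin_user_buffer()
is an invented name, and the snippet only builds in a kernel context);
pin_user_pages_fast() and unpin_user_pages_dirty_lock() are the real exported
entry points, shown here with trimmed error handling.

/* Hypothetical caller sketch; needs <linux/mm.h> and <linux/slab.h>. */
#include <linux/mm.h>
#include <linux/slab.h>

static int example_pin_user_buffer(unsigned long uaddr, int npages)
{
        struct page **pages;
        int pinned;

        pages = kmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        /* Each page's refcount is biased (or its _pincount bumped) here. */
        pinned = pin_user_pages_fast(uaddr, npages,
                                     FOLL_WRITE | FOLL_LONGTERM, pages);
        if (pinned < 0) {
                kfree(pages);
                return pinned;
        }

        /* ... DMA to/from the pinned pages ... */

        /* Mark the pages dirty and drop the pins taken above. */
        unpin_user_pages_dirty_lock(pages, pinned, true);
        kfree(pages);
        return 0;
}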
 
+/**
+ * folio_add_pin - Try to get an additional pin on a pinned folio
+ * @folio: The folio to be pinned
+ *
+ * Get an additional pin on a folio we already have a pin on.  Makes no change
+ * if the folio is a zero_page.
+ */
+void folio_add_pin(struct folio *folio)
+{
+       if (is_zero_folio(folio))
+               return;
+
+       /*
+        * Similar to try_grab_folio(): be sure to *also* increment the normal
+        * page refcount field at least once, so that the page really is
+        * pinned.
+        */
+       if (folio_test_large(folio)) {
+               WARN_ON_ONCE(atomic_read(&folio->_pincount) < 1);
+               folio_ref_inc(folio);
+               atomic_inc(&folio->_pincount);
+       } else {
+               WARN_ON_ONCE(folio_ref_count(folio) < GUP_PIN_COUNTING_BIAS);
+               folio_ref_add(folio, GUP_PIN_COUNTING_BIAS);
+       }
+}
+
 static inline struct folio *gup_folio_range_next(struct page *start,
                unsigned long npages, unsigned long i, unsigned int *ntails)
 {
@@ -462,13 +521,14 @@ static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
                pte_t *pte, unsigned int flags)
 {
        if (flags & FOLL_TOUCH) {
-               pte_t entry = *pte;
+               pte_t orig_entry = ptep_get(pte);
+               pte_t entry = orig_entry;
 
                if (flags & FOLL_WRITE)
                        entry = pte_mkdirty(entry);
                entry = pte_mkyoung(entry);
 
-               if (!pte_same(*pte, entry)) {
+               if (!pte_same(orig_entry, entry)) {
                        set_pte_at(vma->vm_mm, address, pte, entry);
                        update_mmu_cache(vma, address, pte);
                }
@@ -478,14 +538,42 @@ static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
        return -EEXIST;
 }
 
-/*
- * FOLL_FORCE can write to even unwritable pte's, but only
- * after we've gone through a COW cycle and they are dirty.
- */
-static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
+/* FOLL_FORCE can write to even unwritable PTEs in COW mappings. */
+static inline bool can_follow_write_pte(pte_t pte, struct page *page,
+                                       struct vm_area_struct *vma,
+                                       unsigned int flags)
 {
-       return pte_write(pte) ||
-               ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
+       /* If the pte is writable, we can write to the page. */
+       if (pte_write(pte))
+               return true;
+
+       /* Maybe FOLL_FORCE is set to override it? */
+       if (!(flags & FOLL_FORCE))
+               return false;
+
+       /* But FOLL_FORCE has no effect on shared mappings */
+       if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
+               return false;
+
+       /* ... or read-only private ones */
+       if (!(vma->vm_flags & VM_MAYWRITE))
+               return false;
+
+       /* ... or already writable ones that just need to take a write fault */
+       if (vma->vm_flags & VM_WRITE)
+               return false;
+
+       /*
+        * See can_change_pte_writable(): we broke COW and could map the page
+        * writable if we have an exclusive anonymous page ...
+        */
+       if (!page || !PageAnon(page) || !PageAnonExclusive(page))
+               return false;
+
+       /* ... and a write-fault isn't required for other reasons. */
+       if (vma_soft_dirty_enabled(vma) && !pte_soft_dirty(pte))
+               return false;
+       return !userfaultfd_pte_wp(vma, pte);
 }
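
The chain of checks above is a pure function of the PTE and VMA state, which
makes it easy to tabulate. The stand-alone sketch below is an editor's
illustration, not kernel code (the struct and model_* names are invented); it
mirrors the same decision order and walks the classic use case: a ptrace-style
FOLL_FORCE write into a private, read-only but VM_MAYWRITE mapping whose page
has already been COW-broken and is exclusively anonymous.

#include <stdbool.h>
#include <stdio.h>

struct state {
        bool pte_write, foll_force;
        bool vm_shared, vm_maywrite, vm_write;
        bool anon_exclusive;            /* exclusive anonymous page */
        bool needs_soft_dirty_fault;    /* soft-dirty tracking wants a fault */
        bool uffd_wp;                   /* userfaultfd write-protected */
};

/* Same decision order as can_follow_write_pte() above. */
static bool model_can_follow_write(const struct state *s)
{
        if (s->pte_write)
                return true;
        if (!s->foll_force)
                return false;
        if (s->vm_shared)               /* FOLL_FORCE ignored on shared mappings */
                return false;
        if (!s->vm_maywrite)            /* ... and on read-only private ones */
                return false;
        if (s->vm_write)                /* writable VMA: take a write fault instead */
                return false;
        if (!s->anon_exclusive)         /* COW must already have been broken */
                return false;
        if (s->needs_soft_dirty_fault)
                return false;
        return !s->uffd_wp;
}

int main(void)
{
        /* ptrace-style poke: private, !VM_WRITE but VM_MAYWRITE, COW broken. */
        struct state poke = {
                .foll_force = true, .vm_maywrite = true, .anon_exclusive = true,
        };
        /* FOLL_FORCE write into a shared mapping: always refused. */
        struct state shared = { .foll_force = true, .vm_shared = true };

        printf("ptrace poke allowed: %d\n", model_can_follow_write(&poke));
        printf("shared mapping allowed: %d\n", model_can_follow_write(&shared));
        return 0;
}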
 
 static struct page *follow_page_pte(struct vm_area_struct *vma,
@@ -502,38 +590,28 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
        if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
                         (FOLL_PIN | FOLL_GET)))
                return ERR_PTR(-EINVAL);
-retry:
-       if (unlikely(pmd_bad(*pmd)))
-               return no_page_table(vma, flags);
 
        ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
-       pte = *ptep;
-       if (!pte_present(pte)) {
-               swp_entry_t entry;
-               /*
-                * KSM's break_ksm() relies upon recognizing a ksm page
-                * even while it is being migrated, so for that case we
-                * need migration_entry_wait().
-                */
-               if (likely(!(flags & FOLL_MIGRATION)))
-                       goto no_page;
-               if (pte_none(pte))
-                       goto no_page;
-               entry = pte_to_swp_entry(pte);
-               if (!is_migration_entry(entry))
-                       goto no_page;
-               pte_unmap_unlock(ptep, ptl);
-               migration_entry_wait(mm, pmd, address);
-               goto retry;
-       }
-       if ((flags & FOLL_NUMA) && pte_protnone(pte))
+       if (!ptep)
+               return no_page_table(vma, flags);
+       pte = ptep_get(ptep);
+       if (!pte_present(pte))
+               goto no_page;
+       if (pte_protnone(pte) && !gup_can_follow_protnone(vma, flags))
                goto no_page;
-       if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
-               pte_unmap_unlock(ptep, ptl);
-               return NULL;
-       }
 
        page = vm_normal_page(vma, address, pte);
+
+       /*
+        * We only care about anon pages in can_follow_write_pte() and don't
+        * have to worry about pte_devmap() because they are never anon.
+        */
+       if ((flags & FOLL_WRITE) &&
+           !can_follow_write_pte(pte, page, vma, flags)) {
+               page = NULL;
+               goto out;
+       }
+
        if (!page && pte_devmap(pte) && (flags & (FOLL_GET | FOLL_PIN))) {
                /*
                 * Only return device mapping pages in the FOLL_GET or FOLL_PIN
@@ -561,7 +639,7 @@ retry:
                }
        }
 
-       if (!pte_write(pte) && gup_must_unshare(flags, page)) {
+       if (!pte_write(pte) && gup_must_unshare(vma, flags, page)) {
                page = ERR_PTR(-EMLINK);
                goto out;
        }
@@ -570,10 +648,12 @@ retry:
                       !PageAnonExclusive(page), page);
 
        /* try_grab_page() does nothing unless FOLL_GET or FOLL_PIN is set. */
-       if (unlikely(!try_grab_page(page, flags))) {
-               page = ERR_PTR(-ENOMEM);
+       ret = try_grab_page(page, flags);
+       if (unlikely(ret)) {
+               page = ERR_PTR(ret);
                goto out;
        }
+
        /*
         * We need to make the page accessible if and only if we are going
         * to access its content (the FOLL_PIN case).  Please see
@@ -619,95 +699,39 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
        struct mm_struct *mm = vma->vm_mm;
 
        pmd = pmd_offset(pudp, address);
-       /*
-        * The READ_ONCE() will stabilize the pmdval in a register or
-        * on the stack so that it will stop changing under the code.
-        */
-       pmdval = READ_ONCE(*pmd);
+       pmdval = pmdp_get_lockless(pmd);
        if (pmd_none(pmdval))
                return no_page_table(vma, flags);
-       if (pmd_huge(pmdval) && is_vm_hugetlb_page(vma)) {
-               page = follow_huge_pmd(mm, address, pmd, flags);
-               if (page)
-                       return page;
-               return no_page_table(vma, flags);
-       }
-       if (is_hugepd(__hugepd(pmd_val(pmdval)))) {
-               page = follow_huge_pd(vma, address,
-                                     __hugepd(pmd_val(pmdval)), flags,
-                                     PMD_SHIFT);
-               if (page)
-                       return page;
+       if (!pmd_present(pmdval))
                return no_page_table(vma, flags);
-       }
-retry:
-       if (!pmd_present(pmdval)) {
-               /*
-                * Should never reach here, if thp migration is not supported;
-                * Otherwise, it must be a thp migration entry.
-                */
-               VM_BUG_ON(!thp_migration_supported() ||
-                                 !is_pmd_migration_entry(pmdval));
-
-               if (likely(!(flags & FOLL_MIGRATION)))
-                       return no_page_table(vma, flags);
-
-               pmd_migration_entry_wait(mm, pmd);
-               pmdval = READ_ONCE(*pmd);
-               /*
-                * MADV_DONTNEED may convert the pmd to null because
-                * mmap_lock is held in read mode
-                */
-               if (pmd_none(pmdval))
-                       return no_page_table(vma, flags);
-               goto retry;
-       }
        if (pmd_devmap(pmdval)) {
                ptl = pmd_lock(mm, pmd);
                page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
                spin_unlock(ptl);
                if (page)
                        return page;
+               return no_page_table(vma, flags);
        }
        if (likely(!pmd_trans_huge(pmdval)))
                return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
 
-       if ((flags & FOLL_NUMA) && pmd_protnone(pmdval))
+       if (pmd_protnone(pmdval) && !gup_can_follow_protnone(vma, flags))
                return no_page_table(vma, flags);
 
-retry_locked:
        ptl = pmd_lock(mm, pmd);
-       if (unlikely(pmd_none(*pmd))) {
-               spin_unlock(ptl);
-               return no_page_table(vma, flags);
-       }
        if (unlikely(!pmd_present(*pmd))) {
                spin_unlock(ptl);
-               if (likely(!(flags & FOLL_MIGRATION)))
-                       return no_page_table(vma, flags);
-               pmd_migration_entry_wait(mm, pmd);
-               goto retry_locked;
+               return no_page_table(vma, flags);
        }
        if (unlikely(!pmd_trans_huge(*pmd))) {
                spin_unlock(ptl);
                return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
        }
        if (flags & FOLL_SPLIT_PMD) {
-               int ret;
-               page = pmd_page(*pmd);
-               if (is_huge_zero_page(page)) {
-                       spin_unlock(ptl);
-                       ret = 0;
-                       split_huge_pmd(vma, pmd, address);
-                       if (pmd_trans_unstable(pmd))
-                               ret = -EBUSY;
-               } else {
-                       spin_unlock(ptl);
-                       split_huge_pmd(vma, pmd, address);
-                       ret = pte_alloc(mm, pmd) ? -ENOMEM : 0;
-               }
-
-               return ret ? ERR_PTR(ret) :
+               spin_unlock(ptl);
+               split_huge_pmd(vma, pmd, address);
+               /* If pmd was left empty, stuff a page table in there quickly */
+               return pte_alloc(mm, pmd) ? ERR_PTR(-ENOMEM) :
                        follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
        }
        page = follow_trans_huge_pmd(vma, address, pmd, flags);
@@ -729,26 +753,13 @@ static struct page *follow_pud_mask(struct vm_area_struct *vma,
        pud = pud_offset(p4dp, address);
        if (pud_none(*pud))
                return no_page_table(vma, flags);
-       if (pud_huge(*pud) && is_vm_hugetlb_page(vma)) {
-               page = follow_huge_pud(mm, address, pud, flags);
-               if (page)
-                       return page;
-               return no_page_table(vma, flags);
-       }
-       if (is_hugepd(__hugepd(pud_val(*pud)))) {
-               page = follow_huge_pd(vma, address,
-                                     __hugepd(pud_val(*pud)), flags,
-                                     PUD_SHIFT);
-               if (page)
-                       return page;
-               return no_page_table(vma, flags);
-       }
        if (pud_devmap(*pud)) {
                ptl = pud_lock(mm, pud);
                page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap);
                spin_unlock(ptl);
                if (page)
                        return page;
+               return no_page_table(vma, flags);
        }
        if (unlikely(pud_bad(*pud)))
                return no_page_table(vma, flags);
@@ -762,7 +773,6 @@ static struct page *follow_p4d_mask(struct vm_area_struct *vma,
                                    struct follow_page_context *ctx)
 {
        p4d_t *p4d;
-       struct page *page;
 
        p4d = p4d_offset(pgdp, address);
        if (p4d_none(*p4d))
@@ -771,14 +781,6 @@ static struct page *follow_p4d_mask(struct vm_area_struct *vma,
        if (unlikely(p4d_bad(*p4d)))
                return no_page_table(vma, flags);
 
-       if (is_hugepd(__hugepd(p4d_val(*p4d)))) {
-               page = follow_huge_pd(vma, address,
-                                     __hugepd(p4d_val(*p4d)), flags,
-                                     P4D_SHIFT);
-               if (page)
-                       return page;
-               return no_page_table(vma, flags);
-       }
        return follow_pud_mask(vma, address, p4d, flags, ctx);
 }
 
@@ -811,38 +813,24 @@ static struct page *follow_page_mask(struct vm_area_struct *vma,
                              struct follow_page_context *ctx)
 {
        pgd_t *pgd;
-       struct page *page;
        struct mm_struct *mm = vma->vm_mm;
 
        ctx->page_mask = 0;
 
-       /* make this handle hugepd */
-       page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
-       if (!IS_ERR(page)) {
-               WARN_ON_ONCE(flags & (FOLL_GET | FOLL_PIN));
-               return page;
-       }
+       /*
+        * Call hugetlb_follow_page_mask for hugetlb vmas as it will use
+        * special hugetlb page table walking code.  This eliminates the
+        * need to check for hugetlb entries in the general walking code.
+        */
+       if (is_vm_hugetlb_page(vma))
+               return hugetlb_follow_page_mask(vma, address, flags,
+                                               &ctx->page_mask);
 
        pgd = pgd_offset(mm, address);
 
        if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
                return no_page_table(vma, flags);
 
-       if (pgd_huge(*pgd)) {
-               page = follow_huge_pgd(mm, address, pgd, flags);
-               if (page)
-                       return page;
-               return no_page_table(vma, flags);
-       }
-       if (is_hugepd(__hugepd(pgd_val(*pgd)))) {
-               page = follow_huge_pd(vma, address,
-                                     __hugepd(pgd_val(*pgd)), flags,
-                                     PGDIR_SHIFT);
-               if (page)
-                       return page;
-               return no_page_table(vma, flags);
-       }
-
        return follow_p4d_mask(vma, address, pgd, flags, ctx);
 }
 
@@ -855,9 +843,13 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
        if (vma_is_secretmem(vma))
                return NULL;
 
-       if (foll_flags & FOLL_PIN)
+       if (WARN_ON_ONCE(foll_flags & FOLL_PIN))
                return NULL;
 
+       /*
+        * We never set FOLL_HONOR_NUMA_FAULT because callers don't expect
+        * to fail on PROT_NONE-mapped pages.
+        */
        page = follow_page_mask(vma, address, foll_flags, &ctx);
        if (ctx.pgmap)
                put_dev_pagemap(ctx.pgmap);
@@ -873,6 +865,7 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address,
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
+       pte_t entry;
        int ret = -EFAULT;
 
        /* user gate pages are read-only */
@@ -893,23 +886,24 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address,
        pmd = pmd_offset(pud, address);
        if (!pmd_present(*pmd))
                return -EFAULT;
-       VM_BUG_ON(pmd_trans_huge(*pmd));
        pte = pte_offset_map(pmd, address);
-       if (pte_none(*pte))
+       if (!pte)
+               return -EFAULT;
+       entry = ptep_get(pte);
+       if (pte_none(entry))
                goto unmap;
        *vma = get_gate_vma(mm);
        if (!page)
                goto out;
-       *page = vm_normal_page(*vma, address, *pte);
+       *page = vm_normal_page(*vma, address, entry);
        if (!*page) {
-               if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
+               if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(entry)))
                        goto unmap;
-               *page = pte_page(*pte);
+               *page = pte_page(entry);
        }
-       if (unlikely(!try_grab_page(*page, gup_flags))) {
-               ret = -ENOMEM;
+       ret = try_grab_page(*page, gup_flags);
+       if (unlikely(ret))
                goto unmap;
-       }
 out:
        ret = 0;
 unmap:
@@ -918,9 +912,9 @@ unmap:
 }
 
 /*
- * mmap_lock must be held on entry.  If @locked != NULL and *@flags
- * does not include FOLL_NOWAIT, the mmap_lock may be released.  If it
- * is, *@locked will be set to 0 and -EBUSY returned.
+ * mmap_lock must be held on entry.  If @flags has FOLL_UNLOCKABLE but not
+ * FOLL_NOWAIT, the mmap_lock may be released.  If it is, *@locked will be set
+ * to 0 and -EBUSY returned.
  */
 static int faultin_page(struct vm_area_struct *vma,
                unsigned long address, unsigned int *flags, bool unshare,
@@ -935,8 +929,17 @@ static int faultin_page(struct vm_area_struct *vma,
                fault_flags |= FAULT_FLAG_WRITE;
        if (*flags & FOLL_REMOTE)
                fault_flags |= FAULT_FLAG_REMOTE;
-       if (locked)
+       if (*flags & FOLL_UNLOCKABLE) {
                fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+               /*
+                * FAULT_FLAG_INTERRUPTIBLE is opt-in. GUP callers must set
+                * FOLL_INTERRUPTIBLE to enable FAULT_FLAG_INTERRUPTIBLE.
+                * That's because some callers may not be prepared to
+                * handle early exits caused by non-fatal signals.
+                */
+               if (*flags & FOLL_INTERRUPTIBLE)
+                       fault_flags |= FAULT_FLAG_INTERRUPTIBLE;
+       }
        if (*flags & FOLL_NOWAIT)
                fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
        if (*flags & FOLL_TRIED) {
@@ -960,8 +963,8 @@ static int faultin_page(struct vm_area_struct *vma,
                 * mmap lock in the page fault handler. Sanity check this.
                 */
                WARN_ON_ONCE(fault_flags & FAULT_FLAG_RETRY_NOWAIT);
-               if (locked)
-                       *locked = 0;
+               *locked = 0;
+
                /*
                 * We should do the same as VM_FAULT_RETRY, but let's not
                 * return -EBUSY since that's not reflecting the reality of
@@ -981,23 +984,49 @@ static int faultin_page(struct vm_area_struct *vma,
        }
 
        if (ret & VM_FAULT_RETRY) {
-               if (locked && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
+               if (!(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
                        *locked = 0;
                return -EBUSY;
        }
 
+       return 0;
+}
+
+/*
+ * Writing to file-backed mappings which require folio dirty tracking using GUP
+ * is a fundamentally broken operation, as kernel write access to GUP mappings
+ * does not adhere to the semantics expected by a file system.
+ *
+ * Consider the following scenario:-
+ *
+ * 1. A folio is written to via GUP which write-faults the memory, notifying
+ *    the file system and dirtying the folio.
+ * 2. Later, writeback is triggered, resulting in the folio being cleaned and
+ *    the PTE being marked read-only.
+ * 3. The GUP caller writes to the folio, as it is mapped read/write via the
+ *    direct mapping.
+ * 4. The GUP caller, now done with the page, unpins it and sets it dirty
+ *    (though it does not have to).
+ *
+ * This results in both data being written to a folio without writenotify, and
+ * the folio being dirtied unexpectedly (if the caller decides to do so).
+ */
+static bool writable_file_mapping_allowed(struct vm_area_struct *vma,
+                                         unsigned long gup_flags)
+{
+       /*
+        * If we aren't pinning then no problematic write can occur. A long term
+        * pin is the most egregious case so this is the case we disallow.
+        */
+       if ((gup_flags & (FOLL_PIN | FOLL_LONGTERM)) !=
+           (FOLL_PIN | FOLL_LONGTERM))
+               return true;
+
        /*
-        * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when
-        * necessary, even if maybe_mkwrite decided not to set pte_write. We
-        * can thus safely do subsequent page lookups as if they were reads.
-        * But only do so when looping for pte_write is futile: in some cases
-        * userspace may also be wanting to write to the gotten user page,
-        * which a read fault here might prevent (a readonly page might get
-        * reCOWed by userspace write).
+        * If the VMA does not require dirty tracking then no problematic write
+        * can occur either.
         */
-       if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
-               *flags |= FOLL_COW;
-       return 0;
+       return !vma_needs_dirty_tracking(vma);
 }
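
Note the mask test above: only requests carrying *both* FOLL_PIN and
FOLL_LONGTERM are refused on dirty-tracked file mappings; FOLL_GET and
short-term FOLL_PIN users are unaffected. A tiny stand-alone check of that
idiom follows (editor's illustration, not part of this patch; the X_FOLL_*
values are arbitrary stand-ins, not the kernel's flag definitions).

#include <assert.h>

#define X_FOLL_PIN      0x1u    /* arbitrary stand-in values */
#define X_FOLL_LONGTERM 0x2u

static int dirty_tracking_restricted(unsigned int gup_flags)
{
        return (gup_flags & (X_FOLL_PIN | X_FOLL_LONGTERM)) ==
               (X_FOLL_PIN | X_FOLL_LONGTERM);
}

int main(void)
{
        assert(!dirty_tracking_restricted(0));
        assert(!dirty_tracking_restricted(X_FOLL_PIN));         /* short-term pin */
        assert(!dirty_tracking_restricted(X_FOLL_LONGTERM));    /* no pin at all */
        assert(dirty_tracking_restricted(X_FOLL_PIN | X_FOLL_LONGTERM));
        return 0;
}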
 
 static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
@@ -1005,11 +1034,12 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
        vm_flags_t vm_flags = vma->vm_flags;
        int write = (gup_flags & FOLL_WRITE);
        int foreign = (gup_flags & FOLL_REMOTE);
+       bool vma_anon = vma_is_anonymous(vma);
 
        if (vm_flags & (VM_IO | VM_PFNMAP))
                return -EFAULT;
 
-       if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
+       if ((gup_flags & FOLL_ANON) && !vma_anon)
                return -EFAULT;
 
        if ((gup_flags & FOLL_LONGTERM) && vma_is_fsdax(vma))
@@ -1019,9 +1049,16 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
                return -EFAULT;
 
        if (write) {
-               if (!(vm_flags & VM_WRITE)) {
+               if (!vma_anon &&
+                   !writable_file_mapping_allowed(vma, gup_flags))
+                       return -EFAULT;
+
+               if (!(vm_flags & VM_WRITE) || (vm_flags & VM_SHADOW_STACK)) {
                        if (!(gup_flags & FOLL_FORCE))
                                return -EFAULT;
+                       /* hugetlb does not support FOLL_FORCE|FOLL_WRITE. */
+                       if (is_vm_hugetlb_page(vma))
+                               return -EFAULT;
                        /*
                         * We used to let the write,force case do COW in a
                         * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
@@ -1053,6 +1090,45 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
        return 0;
 }
 
+/*
+ * This is "vma_lookup()", but with a warning if we would have
+ * historically expanded the stack in the GUP code.
+ */
+static struct vm_area_struct *gup_vma_lookup(struct mm_struct *mm,
+        unsigned long addr)
+{
+#ifdef CONFIG_STACK_GROWSUP
+       return vma_lookup(mm, addr);
+#else
+       static volatile unsigned long next_warn;
+       struct vm_area_struct *vma;
+       unsigned long now, next;
+
+       vma = find_vma(mm, addr);
+       if (!vma || (addr >= vma->vm_start))
+               return vma;
+
+       /* Only warn for half-way relevant accesses */
+       if (!(vma->vm_flags & VM_GROWSDOWN))
+               return NULL;
+       if (vma->vm_start - addr > 65536)
+               return NULL;
+
+       /* Let's not warn more than once an hour.. */
+       now = jiffies; next = next_warn;
+       if (next && time_before(now, next))
+               return NULL;
+       next_warn = now + 60*60*HZ;
+
+       /* Let people know things may have changed. */
+       pr_warn("GUP no longer grows the stack in %s (%d): %lx-%lx (%lx)\n",
+               current->comm, task_pid_nr(current),
+               vma->vm_start, vma->vm_end, addr);
+       dump_stack();
+       return NULL;
+#endif
+}
+
 /**
  * __get_user_pages() - pin user pages in memory
  * @mm:                mm_struct of target mm
@@ -1062,8 +1138,6 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
  * @pages:     array that receives pointers to the pages pinned.
  *             Should be at least nr_pages long. Or NULL, if caller
  *             only intends to ensure the pages are faulted in.
- * @vmas:      array of pointers to vmas corresponding to each page.
- *             Or NULL if the caller does not require them.
  * @locked:     whether we're still with the mmap_lock held
  *
  * Returns either number of pages pinned (which may be less than the
@@ -1077,8 +1151,6 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
  *
  * The caller is responsible for releasing returned @pages, via put_page().
  *
- * @vmas are valid only as long as mmap_lock is held.
- *
  * Must be called with mmap_lock held.  It may be released.  See below.
  *
  * __get_user_pages walks a process's page tables and takes a reference to
@@ -1089,7 +1161,7 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
  * This does not guarantee that the page exists in the user mappings when
  * __get_user_pages returns, and there may even be a completely different
  * page there in some cases (eg. if mmapped pagecache has been invalidated
- * and subsequently re faulted). However it does guarantee that the page
+ * and subsequently re-faulted). However it does guarantee that the page
  * won't be freed completely. And mostly callers simply care that the page
  * contains data that was valid *at some point in time*. Typically, an IO
  * or similar operation cannot guarantee anything stronger anyway because
@@ -1100,14 +1172,12 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
  * appropriate) must be called after the page is finished with, and
  * before put_page is called.
  *
- * If @locked != NULL, *@locked will be set to 0 when mmap_lock is
- * released by an up_read().  That can happen if @gup_flags does not
- * have FOLL_NOWAIT.
+ * If FOLL_UNLOCKABLE is set without FOLL_NOWAIT then the mmap_lock may
+ * be released. If this happens *@locked will be set to 0 on return.
  *
- * A caller using such a combination of @locked and @gup_flags
- * must therefore hold the mmap_lock for reading only, and recognize
- * when it's been released.  Otherwise, it must be held for either
- * reading or writing and will not be released.
+ * A caller using such a combination of @gup_flags must therefore hold the
+ * mmap_lock for reading only, and recognize when it's been released. Otherwise,
+ * it must be held for either reading or writing and will not be released.
  *
  * In most cases, get_user_pages or get_user_pages_fast should be used
  * instead of __get_user_pages. __get_user_pages should be used only if
@@ -1116,7 +1186,7 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
 static long __get_user_pages(struct mm_struct *mm,
                unsigned long start, unsigned long nr_pages,
                unsigned int gup_flags, struct page **pages,
-               struct vm_area_struct **vmas, int *locked)
+               int *locked)
 {
        long ret = 0, i = 0;
        struct vm_area_struct *vma = NULL;
@@ -1125,18 +1195,10 @@ static long __get_user_pages(struct mm_struct *mm,
        if (!nr_pages)
                return 0;
 
-       start = untagged_addr(start);
+       start = untagged_addr_remote(mm, start);
 
        VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN)));
 
-       /*
-        * If FOLL_FORCE is set then do not force a full fault as the hinting
-        * fault information is unrelated to the reference behaviour of a task
-        * using the address space
-        */
-       if (!(gup_flags & FOLL_FORCE))
-               gup_flags |= FOLL_NUMA;
-
        do {
                struct page *page;
                unsigned int foll_flags = gup_flags;
@@ -1144,11 +1206,11 @@ static long __get_user_pages(struct mm_struct *mm,
 
                /* first iteration or cross vma bound */
                if (!vma || start >= vma->vm_end) {
-                       vma = find_extend_vma(mm, start);
+                       vma = gup_vma_lookup(mm, start);
                        if (!vma && in_gate_area(mm, start)) {
                                ret = get_gate_page(mm, start & PAGE_MASK,
                                                gup_flags, &vma,
-                                               pages ? &pages[i] : NULL);
+                                               pages ? &page : NULL);
                                if (ret)
                                        goto out;
                                ctx.page_mask = 0;
@@ -1162,22 +1224,6 @@ static long __get_user_pages(struct mm_struct *mm,
                        ret = check_vma_flags(vma, gup_flags);
                        if (ret)
                                goto out;
-
-                       if (is_vm_hugetlb_page(vma)) {
-                               i = follow_hugetlb_page(mm, vma, pages, vmas,
-                                               &start, &nr_pages, i,
-                                               gup_flags, locked);
-                               if (locked && *locked == 0) {
-                                       /*
-                                        * We've got a VM_FAULT_RETRY
-                                        * and we've lost mmap_lock.
-                                        * We must stop here.
-                                        */
-                                       BUG_ON(gup_flags & FOLL_NOWAIT);
-                                       goto out;
-                               }
-                               continue;
-                       }
                }
 retry:
                /*
@@ -1218,26 +1264,58 @@ retry:
                                ret = PTR_ERR(page);
                                goto out;
                        }
-
-                       goto next_page;
                } else if (IS_ERR(page)) {
                        ret = PTR_ERR(page);
                        goto out;
                }
-               if (pages) {
-                       pages[i] = page;
-                       flush_anon_page(vma, page, start);
-                       flush_dcache_page(page);
-                       ctx.page_mask = 0;
-               }
 next_page:
-               if (vmas) {
-                       vmas[i] = vma;
-                       ctx.page_mask = 0;
-               }
                page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask);
                if (page_increm > nr_pages)
                        page_increm = nr_pages;
+
+               if (pages) {
+                       struct page *subpage;
+                       unsigned int j;
+
+                       /*
+                        * This must be a large folio (and doesn't need to
+                        * be the whole folio; it can be part of it), so do
+                        * the refcount work for all the subpages too.
+                        *
+                        * NOTE: here the page may not be the head page
+                        * e.g. when start addr is not thp-size aligned.
+                        * try_grab_folio() should have taken care of tail
+                        * pages.
+                        */
+                       if (page_increm > 1) {
+                               struct folio *folio;
+
+                               /*
+                                * Since we already hold refcount on the
+                                * large folio, this should never fail.
+                                */
+                               folio = try_grab_folio(page, page_increm - 1,
+                                                      foll_flags);
+                               if (WARN_ON_ONCE(!folio)) {
+                                       /*
+                                        * Release the 1st page ref if the
+                                        * folio is problematic, fail hard.
+                                        */
+                                       gup_put_folio(page_folio(page), 1,
+                                                     foll_flags);
+                                       ret = -EFAULT;
+                                       goto out;
+                               }
+                       }
+
+                       for (j = 0; j < page_increm; j++) {
+                               subpage = nth_page(page, j);
+                               pages[i + j] = subpage;
+                               flush_anon_page(vma, subpage, start + j * PAGE_SIZE);
+                               flush_dcache_page(subpage);
+                       }
+               }
+
                i += page_increm;
                start += page_increm * PAGE_SIZE;
                nr_pages -= page_increm;
@@ -1307,14 +1385,14 @@ int fixup_user_fault(struct mm_struct *mm,
        struct vm_area_struct *vma;
        vm_fault_t ret;
 
-       address = untagged_addr(address);
+       address = untagged_addr_remote(mm, address);
 
        if (unlocked)
                fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
 retry:
-       vma = find_extend_vma(mm, address);
-       if (!vma || address < vma->vm_start)
+       vma = gup_vma_lookup(mm, address);
+       if (!vma)
                return -EFAULT;
 
        if (!vma_permits_fault(vma, fault_flags))
@@ -1357,26 +1435,59 @@ retry:
 EXPORT_SYMBOL_GPL(fixup_user_fault);
 
 /*
- * Please note that this function, unlike __get_user_pages will not
- * return 0 for nr_pages > 0 without FOLL_NOWAIT
+ * GUP always responds to fatal signals.  When FOLL_INTERRUPTIBLE is
+ * specified, it'll also respond to generic signals.  The caller of GUP
+ * that has FOLL_INTERRUPTIBLE should take care of the GUP interruption.
+ */
+static bool gup_signal_pending(unsigned int flags)
+{
+       if (fatal_signal_pending(current))
+               return true;
+
+       if (!(flags & FOLL_INTERRUPTIBLE))
+               return false;
+
+       return signal_pending(current);
+}
+
+/*
+ * Locking: (*locked == 1) means that the mmap_lock has already been acquired by
+ * the caller. This function may drop the mmap_lock. If it does so, then it will
+ * set (*locked = 0).
+ *
+ * (*locked == 0) means that the caller expects this function to acquire and
+ * drop the mmap_lock. Therefore, the value of *locked will still be zero when
+ * the function returns, even though it may have changed temporarily during
+ * function execution.
+ *
+ * Please note that this function, unlike __get_user_pages(), will not return 0
+ * for nr_pages > 0, unless FOLL_NOWAIT is used.
  */
 static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
                                                unsigned long start,
                                                unsigned long nr_pages,
                                                struct page **pages,
-                                               struct vm_area_struct **vmas,
                                                int *locked,
                                                unsigned int flags)
 {
        long ret, pages_done;
-       bool lock_dropped;
+       bool must_unlock = false;
 
-       if (locked) {
-               /* if VM_FAULT_RETRY can be returned, vmas become invalid */
-               BUG_ON(vmas);
-               /* check caller initialized locked */
-               BUG_ON(*locked != 1);
+       if (!nr_pages)
+               return 0;
+
+       /*
+        * The internal caller expects GUP to manage the lock internally and the
+        * lock must be released when this returns.
+        */
+       if (!*locked) {
+               if (mmap_read_lock_killable(mm))
+                       return -EAGAIN;
+               must_unlock = true;
+               *locked = 1;
-       }
+       } else {
+               mmap_assert_locked(mm);
+       }
 
        if (flags & FOLL_PIN)
                mm_set_has_pinned_flag(&mm->flags);
@@ -1394,13 +1505,14 @@ static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
                flags |= FOLL_GET;
 
        pages_done = 0;
-       lock_dropped = false;
        for (;;) {
                ret = __get_user_pages(mm, start, nr_pages, flags, pages,
-                                      vmas, locked);
-               if (!locked)
+                                      locked);
+               if (!(flags & FOLL_UNLOCKABLE)) {
                        /* VM_FAULT_RETRY couldn't trigger, bypass */
-                       return ret;
+                       pages_done = ret;
+                       break;
+               }
 
                /* VM_FAULT_RETRY or VM_FAULT_COMPLETED cannot return errors */
                if (!*locked) {
@@ -1430,18 +1542,20 @@ static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
                if (likely(pages))
                        pages += ret;
                start += ret << PAGE_SHIFT;
-               lock_dropped = true;
+
+               /* The lock was temporarily dropped, so we must unlock later */
+               must_unlock = true;
 
 retry:
                /*
                 * Repeat on the address that fired VM_FAULT_RETRY
                 * with both FAULT_FLAG_ALLOW_RETRY and
                 * FAULT_FLAG_TRIED.  Note that GUP can be interrupted
-                * by fatal signals, so we need to check it before we
+                * by fatal signals or even common signals, depending on
+                * the caller's request. So we need to check it before we
                 * start trying again otherwise it can loop forever.
                 */
-
-               if (fatal_signal_pending(current)) {
+               if (gup_signal_pending(flags)) {
                        if (!pages_done)
                                pages_done = -EINTR;
                        break;
@@ -1457,7 +1571,7 @@ retry:
 
                *locked = 1;
                ret = __get_user_pages(mm, start, 1, flags | FOLL_TRIED,
-                                      pages, NULL, locked);
+                                      pages, locked);
                if (!*locked) {
                        /* Continue to retry until we succeeded */
                        BUG_ON(ret != 0);
@@ -1477,14 +1591,23 @@ retry:
                        pages++;
                start += PAGE_SIZE;
        }
-       if (lock_dropped && *locked) {
+       if (must_unlock && *locked) {
                /*
-                * We must let the caller know we temporarily dropped the lock
-                * and so the critical section protected by it was lost.
+                * We either temporarily dropped the lock, or the caller
+                * requested that we both acquire and drop the lock. Either way,
+                * we must now unlock, and notify the caller of that state.
                 */
                mmap_read_unlock(mm);
                *locked = 0;
        }
+
+       /*
+        * Failing to pin anything implies something has gone wrong (except when
+        * FOLL_NOWAIT is specified).
+        */
+       if (WARN_ON_ONCE(pages_done == 0 && !(flags & FOLL_NOWAIT)))
+               return -EFAULT;
+
        return pages_done;
 }
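
The *locked protocol implemented above has two calling conventions: the caller
either holds the mmap_lock itself (*locked == 1) or asks GUP to take and drop
it (*locked == 0), and in either case a temporary drop must be reported back as
*locked == 0. The stand-alone sketch below is an editor's illustration, not
kernel code and not part of this patch (all names are invented); it models the
must_unlock bookkeeping for the three possible outcomes.

#include <assert.h>
#include <stdbool.h>

static bool lock_held;          /* stand-in for the mmap_lock */

/* Same must_unlock bookkeeping as __get_user_pages_locked() above. */
static void model_gup_locked(int *locked, bool retry_drops_lock)
{
        bool must_unlock = false;

        if (!*locked) {                 /* GUP-managed: take the lock itself */
                lock_held = true;
                must_unlock = true;
                *locked = 1;
        }

        /* ... fault pages in; a retry may drop and re-take the lock ... */
        if (retry_drops_lock) {
                lock_held = false;      /* temporarily dropped ... */
                lock_held = true;       /* ... and re-taken before continuing */
                must_unlock = true;     /* caller must be told on return */
        }

        if (must_unlock && *locked) {
                lock_held = false;
                *locked = 0;
        }
}

int main(void)
{
        int locked;

        /* Caller-held lock, no retry: lock state is left untouched. */
        lock_held = true; locked = 1;
        model_gup_locked(&locked, false);
        assert(lock_held && locked == 1);

        /* Caller-held lock, but a retry dropped it: reported as *locked == 0. */
        lock_held = true; locked = 1;
        model_gup_locked(&locked, true);
        assert(!lock_held && locked == 0);

        /* GUP-managed (*locked == 0): taken and dropped internally. */
        lock_held = false; locked = 0;
        model_gup_locked(&locked, false);
        assert(!lock_held && locked == 0);
        return 0;
}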
 
@@ -1513,6 +1636,7 @@ long populate_vma_page_range(struct vm_area_struct *vma,
 {
        struct mm_struct *mm = vma->vm_mm;
        unsigned long nr_pages = (end - start) / PAGE_SIZE;
+       int local_locked = 1;
        int gup_flags;
        long ret;
 
@@ -1545,12 +1669,15 @@ long populate_vma_page_range(struct vm_area_struct *vma,
        if (vma_is_accessible(vma))
                gup_flags |= FOLL_FORCE;
 
+       if (locked)
+               gup_flags |= FOLL_UNLOCKABLE;
+
        /*
         * We made sure addr is within a VMA, so the following will
         * not result in a stack expansion that recurses back here.
         */
        ret = __get_user_pages(mm, start, nr_pages, gup_flags,
-                               NULL, NULL, locked);
+                              NULL, locked ? locked : &local_locked);
        lru_add_drain();
        return ret;
 }
@@ -1571,12 +1698,7 @@ long populate_vma_page_range(struct vm_area_struct *vma,
  * code on error (see __get_user_pages()).
  *
  * vma->vm_mm->mmap_lock must be held. The range must be page-aligned and
- * covered by the VMA.
- *
- * If @locked is NULL, it may be held for read or write and will be unperturbed.
- *
- * If @locked is non-NULL, it must held for read only and may be released.  If
- * it's released, *@locked will be set to 0.
+ * covered by the VMA. If it's released, *@locked will be set to 0.
  */
 long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start,
                            unsigned long end, bool write, int *locked)
@@ -1601,7 +1723,7 @@ long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start,
         *                a poisoned page.
         * !FOLL_FORCE: Require proper access permissions.
         */
-       gup_flags = FOLL_TOUCH | FOLL_HWPOISON;
+       gup_flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_UNLOCKABLE;
        if (write)
                gup_flags |= FOLL_WRITE;
 
@@ -1613,7 +1735,7 @@ long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start,
                return -EINVAL;
 
        ret = __get_user_pages(mm, start, nr_pages, gup_flags,
-                               NULL, NULL, locked);
+                              NULL, locked);
        lru_add_drain();
        return ret;
 }
@@ -1643,10 +1765,11 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
                if (!locked) {
                        locked = 1;
                        mmap_read_lock(mm);
-                       vma = find_vma(mm, nstart);
+                       vma = find_vma_intersection(mm, nstart, end);
                } else if (nstart >= vma->vm_end)
-                       vma = vma->vm_next;
-               if (!vma || vma->vm_start >= end)
+                       vma = find_vma_intersection(mm, vma->vm_end, end);
+
+               if (!vma)
                        break;
                /*
                 * Set [nstart; nend) to intersection of desired address
@@ -1680,13 +1803,27 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
 #else /* CONFIG_MMU */
 static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start,
                unsigned long nr_pages, struct page **pages,
-               struct vm_area_struct **vmas, int *locked,
-               unsigned int foll_flags)
+               int *locked, unsigned int foll_flags)
 {
        struct vm_area_struct *vma;
+       bool must_unlock = false;
        unsigned long vm_flags;
        long i;
 
+       if (!nr_pages)
+               return 0;
+
+       /*
+        * The internal caller expects GUP to manage the lock internally and the
+        * lock must be released when this returns.
+        */
+       if (!*locked) {
+               if (mmap_read_lock_killable(mm))
+                       return -EAGAIN;
+               must_unlock = true;
+               *locked = 1;
+       }
+
        /* calculate required read or write permissions.
         * If FOLL_FORCE is set, we only require the "MAY" flags.
         */
@@ -1698,26 +1835,27 @@ static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start,
        for (i = 0; i < nr_pages; i++) {
                vma = find_vma(mm, start);
                if (!vma)
-                       goto finish_or_fault;
+                       break;
 
                /* protect what we can, including chardevs */
                if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
                    !(vm_flags & vma->vm_flags))
-                       goto finish_or_fault;
+                       break;
 
                if (pages) {
                        pages[i] = virt_to_page((void *)start);
                        if (pages[i])
                                get_page(pages[i]);
                }
-               if (vmas)
-                       vmas[i] = vma;
+
                start = (start + PAGE_SIZE) & PAGE_MASK;
        }
 
-       return i;
+       if (must_unlock && *locked) {
+               mmap_read_unlock(mm);
+               *locked = 0;
+       }
 
-finish_or_fault:
        return i ? : -EFAULT;
 }
 #endif /* !CONFIG_MMU */
@@ -1886,37 +2024,28 @@ EXPORT_SYMBOL(fault_in_readable);
 #ifdef CONFIG_ELF_CORE
 struct page *get_dump_page(unsigned long addr)
 {
-       struct mm_struct *mm = current->mm;
        struct page *page;
-       int locked = 1;
+       int locked = 0;
        int ret;
 
-       if (mmap_read_lock_killable(mm))
-               return NULL;
-       ret = __get_user_pages_locked(mm, addr, 1, &page, NULL, &locked,
+       ret = __get_user_pages_locked(current->mm, addr, 1, &page, &locked,
                                      FOLL_FORCE | FOLL_DUMP | FOLL_GET);
-       if (locked)
-               mmap_read_unlock(mm);
        return (ret == 1) ? page : NULL;
 }
 #endif /* CONFIG_ELF_CORE */
 
 #ifdef CONFIG_MIGRATION
 /*
- * Check whether all pages are pinnable, if so return number of pages.  If some
- * pages are not pinnable, migrate them, and unpin all pages. Return zero if
- * pages were migrated, or if some pages were not successfully isolated.
- * Return negative error if migration fails.
+ * Returns the number of collected pages. Return value is always >= 0.
  */
-static long check_and_migrate_movable_pages(unsigned long nr_pages,
-                                           struct page **pages,
-                                           unsigned int gup_flags)
+static unsigned long collect_longterm_unpinnable_pages(
+                                       struct list_head *movable_page_list,
+                                       unsigned long nr_pages,
+                                       struct page **pages)
 {
-       unsigned long isolation_error_count = 0, i;
+       unsigned long i, collected = 0;
        struct folio *prev_folio = NULL;
-       LIST_HEAD(movable_page_list);
-       bool drain_allow = true, coherent_pages = false;
-       int ret = 0;
+       bool drain_allow = true;
 
        for (i = 0; i < nr_pages; i++) {
                struct folio *folio = page_folio(pages[i]);
@@ -1925,45 +2054,16 @@ static long check_and_migrate_movable_pages(unsigned long nr_pages,
                        continue;
                prev_folio = folio;
 
-               /*
-                * Device coherent pages are managed by a driver and should not
-                * be pinned indefinitely as it prevents the driver moving the
-                * page. So when trying to pin with FOLL_LONGTERM instead try
-                * to migrate the page out of device memory.
-                */
-               if (folio_is_device_coherent(folio)) {
-                       /*
-                        * We always want a new GUP lookup with device coherent
-                        * pages.
-                        */
-                       pages[i] = 0;
-                       coherent_pages = true;
-
-                       /*
-                        * Migration will fail if the page is pinned, so convert
-                        * the pin on the source page to a normal reference.
-                        */
-                       if (gup_flags & FOLL_PIN) {
-                               get_page(&folio->page);
-                               unpin_user_page(&folio->page);
-                       }
+               if (folio_is_longterm_pinnable(folio))
+                       continue;
 
-                       ret = migrate_device_coherent_page(&folio->page);
-                       if (ret)
-                               goto unpin_pages;
+               collected++;
 
+               if (folio_is_device_coherent(folio))
                        continue;
-               }
 
-               if (folio_is_longterm_pinnable(folio))
-                       continue;
-               /*
-                * Try to move out any movable page before pinning the range.
-                */
                if (folio_test_hugetlb(folio)) {
-                       if (isolate_hugetlb(&folio->page,
-                                               &movable_page_list))
-                               isolation_error_count++;
+                       isolate_hugetlb(folio, movable_page_list);
                        continue;
                }
 
@@ -1972,63 +2072,124 @@ static long check_and_migrate_movable_pages(unsigned long nr_pages,
                        drain_allow = false;
                }
 
-               if (folio_isolate_lru(folio)) {
-                       isolation_error_count++;
+               if (!folio_isolate_lru(folio))
                        continue;
-               }
-               list_add_tail(&folio->lru, &movable_page_list);
+
+               list_add_tail(&folio->lru, movable_page_list);
                node_stat_mod_folio(folio,
                                    NR_ISOLATED_ANON + folio_is_file_lru(folio),
                                    folio_nr_pages(folio));
        }
 
-       if (!list_empty(&movable_page_list) || isolation_error_count ||
-           coherent_pages)
-               goto unpin_pages;
+       return collected;
+}
 
-       /*
-        * If list is empty, and no isolation errors, means that all pages are
-        * in the correct zone.
-        */
-       return nr_pages;
+/*
+ * Unpins all pages, and migrates device coherent pages and the pages on
+ * movable_page_list. Returns -EAGAIN if all pages were successfully migrated,
+ * or -errno on failure (or partial success).
+ */
+static int migrate_longterm_unpinnable_pages(
+                                       struct list_head *movable_page_list,
+                                       unsigned long nr_pages,
+                                       struct page **pages)
+{
+       int ret;
+       unsigned long i;
 
-unpin_pages:
-       /*
-        * pages[i] might be NULL if any device coherent pages were found.
-        */
        for (i = 0; i < nr_pages; i++) {
-               if (!pages[i])
+               struct folio *folio = page_folio(pages[i]);
+
+               if (folio_is_device_coherent(folio)) {
+                       /*
+                        * Migration will fail if the page is pinned, so convert
+                        * the pin on the source page to a normal reference.
+                        */
+                       pages[i] = NULL;
+                       folio_get(folio);
+                       gup_put_folio(folio, 1, FOLL_PIN);
+
+                       if (migrate_device_coherent_page(&folio->page)) {
+                               ret = -EBUSY;
+                               goto err;
+                       }
+
                        continue;
+               }
 
-               if (gup_flags & FOLL_PIN)
-                       unpin_user_page(pages[i]);
-               else
-                       put_page(pages[i]);
+               /*
+                * We can't migrate pages with unexpected references, so drop
+                * the reference obtained by __get_user_pages_locked().
+                * Pages being migrated were already added to movable_page_list
+                * after folio_isolate_lru(), which takes a reference, so the
+                * page won't be freed while it is migrating.
+                */
+               unpin_user_page(pages[i]);
+               pages[i] = NULL;
        }
 
-       if (!list_empty(&movable_page_list)) {
+       if (!list_empty(movable_page_list)) {
                struct migration_target_control mtc = {
                        .nid = NUMA_NO_NODE,
                        .gfp_mask = GFP_USER | __GFP_NOWARN,
                };
 
-               ret = migrate_pages(&movable_page_list, alloc_migration_target,
-                                   NULL, (unsigned long)&mtc, MIGRATE_SYNC,
-                                   MR_LONGTERM_PIN, NULL);
-               if (ret > 0) /* number of pages not migrated */
+               if (migrate_pages(movable_page_list, alloc_migration_target,
+                                 NULL, (unsigned long)&mtc, MIGRATE_SYNC,
+                                 MR_LONGTERM_PIN, NULL)) {
                        ret = -ENOMEM;
+                       goto err;
+               }
        }
 
-       if (ret && !list_empty(&movable_page_list))
-               putback_movable_pages(&movable_page_list);
+       putback_movable_pages(movable_page_list);
+
+       return -EAGAIN;
+
+err:
+       for (i = 0; i < nr_pages; i++)
+               if (pages[i])
+                       unpin_user_page(pages[i]);
+       putback_movable_pages(movable_page_list);
+
        return ret;
 }
+
+/*
+ * Check whether all pages are *allowed* to be pinned. Rather confusingly, all
+ * pages in the range are required to be pinned via FOLL_PIN before calling
+ * this routine.
+ *
+ * If any pages in the range are not allowed to be pinned, then this routine
+ * will migrate those pages away, unpin all the pages in the range and return
+ * -EAGAIN. The caller should re-pin the entire range with FOLL_PIN and then
+ * call this routine again.
+ *
+ * If an error other than -EAGAIN occurs, this indicates a migration failure.
+ * The caller should give up, and propagate the error back up the call stack.
+ *
+ * If everything is OK and all pages in the range are allowed to be pinned, then
+ * this routine leaves all pages pinned and returns zero for success.
+ */
+static long check_and_migrate_movable_pages(unsigned long nr_pages,
+                                           struct page **pages)
+{
+       unsigned long collected;
+       LIST_HEAD(movable_page_list);
+
+       collected = collect_longterm_unpinnable_pages(&movable_page_list,
+                                               nr_pages, pages);
+       if (!collected)
+               return 0;
+
+       return migrate_longterm_unpinnable_pages(&movable_page_list, nr_pages,
+                                               pages);
+}
 #else
 static long check_and_migrate_movable_pages(unsigned long nr_pages,
-                                           struct page **pages,
-                                           unsigned int gup_flags)
+                                           struct page **pages)
 {
-       return nr_pages;
+       return 0;
 }
 #endif /* CONFIG_MIGRATION */
 
@@ -2040,77 +2201,84 @@ static long __gup_longterm_locked(struct mm_struct *mm,
                                  unsigned long start,
                                  unsigned long nr_pages,
                                  struct page **pages,
-                                 struct vm_area_struct **vmas,
+                                 int *locked,
                                  unsigned int gup_flags)
 {
        unsigned int flags;
-       long rc;
+       long rc, nr_pinned_pages;
 
        if (!(gup_flags & FOLL_LONGTERM))
-               return __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
-                                              NULL, gup_flags);
+               return __get_user_pages_locked(mm, start, nr_pages, pages,
+                                              locked, gup_flags);
+
        flags = memalloc_pin_save();
        do {
-               rc = __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
-                                            NULL, gup_flags);
-               if (rc <= 0)
+               nr_pinned_pages = __get_user_pages_locked(mm, start, nr_pages,
+                                                         pages, locked,
+                                                         gup_flags);
+               if (nr_pinned_pages <= 0) {
+                       rc = nr_pinned_pages;
                        break;
-               rc = check_and_migrate_movable_pages(rc, pages, gup_flags);
-       } while (!rc);
-       memalloc_pin_restore(flags);
+               }
 
-       return rc;
+               /* FOLL_LONGTERM implies FOLL_PIN */
+               rc = check_and_migrate_movable_pages(nr_pinned_pages, pages);
+       } while (rc == -EAGAIN);
+       memalloc_pin_restore(flags);
+       return rc ? rc : nr_pinned_pages;
 }
 
-static bool is_valid_gup_flags(unsigned int gup_flags)
+/*
+ * Check that the given flags are valid for the exported gup/pup interface, and
+ * update them with the required flags that the caller must have set.
+ */
+static bool is_valid_gup_args(struct page **pages, int *locked,
+                             unsigned int *gup_flags_p, unsigned int to_set)
 {
+       unsigned int gup_flags = *gup_flags_p;
+
        /*
-        * FOLL_PIN must only be set internally by the pin_user_pages*() APIs,
-        * never directly by the caller, so enforce that with an assertion:
-        */
-       if (WARN_ON_ONCE(gup_flags & FOLL_PIN))
-               return false;
-       /*
-        * FOLL_PIN is a prerequisite to FOLL_LONGTERM. Another way of saying
-        * that is, FOLL_LONGTERM is a specific case, more restrictive case of
-        * FOLL_PIN.
+        * These flags are not allowed to be specified externally to the gup
+        * interfaces:
+        * - FOLL_TOUCH/FOLL_PIN/FOLL_TRIED/FOLL_FAST_ONLY are internal only
+        * - FOLL_REMOTE is internal only and used on follow_page()
+        * - FOLL_UNLOCKABLE is internal only and used if locked is !NULL
         */
-       if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
+       if (WARN_ON_ONCE(gup_flags & INTERNAL_GUP_FLAGS))
                return false;
 
-       return true;
-}
+       gup_flags |= to_set;
+       if (locked) {
+               /* At the external interface locked must be set */
+               if (WARN_ON_ONCE(*locked != 1))
+                       return false;
 
-#ifdef CONFIG_MMU
-static long __get_user_pages_remote(struct mm_struct *mm,
-                                   unsigned long start, unsigned long nr_pages,
-                                   unsigned int gup_flags, struct page **pages,
-                                   struct vm_area_struct **vmas, int *locked)
-{
-       /*
-        * Parts of FOLL_LONGTERM behavior are incompatible with
-        * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
-        * vmas. However, this only comes up if locked is set, and there are
-        * callers that do request FOLL_LONGTERM, but do not set locked. So,
-        * allow what we can.
-        */
-       if (gup_flags & FOLL_LONGTERM) {
-               if (WARN_ON_ONCE(locked))
-                       return -EINVAL;
-               /*
-                * This will check the vmas (even if our vmas arg is NULL)
-                * and return -ENOTSUPP if DAX isn't allowed in this case:
-                */
-               return __gup_longterm_locked(mm, start, nr_pages, pages,
-                                            vmas, gup_flags | FOLL_TOUCH |
-                                            FOLL_REMOTE);
+               gup_flags |= FOLL_UNLOCKABLE;
        }
 
-       return __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
-                                      locked,
-                                      gup_flags | FOLL_TOUCH | FOLL_REMOTE);
+       /* FOLL_GET and FOLL_PIN are mutually exclusive. */
+       if (WARN_ON_ONCE((gup_flags & (FOLL_PIN | FOLL_GET)) ==
+                        (FOLL_PIN | FOLL_GET)))
+               return false;
+
+       /* LONGTERM can only be specified when pinning */
+       if (WARN_ON_ONCE(!(gup_flags & FOLL_PIN) && (gup_flags & FOLL_LONGTERM)))
+               return false;
+
+       /* Pages input must be given if using GET/PIN */
+       if (WARN_ON_ONCE((gup_flags & (FOLL_GET | FOLL_PIN)) && !pages))
+               return false;
+
+       /* We want to allow the pgmap to be hot-unplugged at all times */
+       if (WARN_ON_ONCE((gup_flags & FOLL_LONGTERM) &&
+                        (gup_flags & FOLL_PCI_P2PDMA)))
+               return false;
+
+       *gup_flags_p = gup_flags;
+       return true;
 }
 
+#ifdef CONFIG_MMU
 /**
  * get_user_pages_remote() - pin user pages in memory
  * @mm:                mm_struct of target mm
@@ -2120,8 +2288,6 @@ static long __get_user_pages_remote(struct mm_struct *mm,
  * @pages:     array that receives pointers to the pages pinned.
  *             Should be at least nr_pages long. Or NULL, if caller
  *             only intends to ensure the pages are faulted in.
- * @vmas:      array of pointers to vmas corresponding to each page.
- *             Or NULL if the caller does not require them.
  * @locked:    pointer to lock flag indicating whether lock is held and
  *             subsequently whether VM_FAULT_RETRY functionality can be
  *             utilised. Lock must initially be held.
@@ -2136,8 +2302,6 @@ static long __get_user_pages_remote(struct mm_struct *mm,
  *
  * The caller is responsible for releasing returned @pages, via put_page().
  *
- * @vmas are valid only as long as mmap_lock is held.
- *
  * Must be called with mmap_lock held for read or write.
  *
  * get_user_pages_remote walks a process's page tables and takes a reference
@@ -2148,7 +2312,7 @@ static long __get_user_pages_remote(struct mm_struct *mm,
  * This does not guarantee that the page exists in the user mappings when
  * get_user_pages_remote returns, and there may even be a completely different
  * page there in some cases (eg. if mmapped pagecache has been invalidated
- * and subsequently re faulted). However it does guarantee that the page
+ * and subsequently re-faulted). However it does guarantee that the page
  * won't be freed completely. And mostly callers simply care that the page
  * contains data that was valid *at some point in time*. Typically, an IO
  * or similar operation cannot guarantee anything stronger anyway because
@@ -2174,13 +2338,17 @@ static long __get_user_pages_remote(struct mm_struct *mm,
 long get_user_pages_remote(struct mm_struct *mm,
                unsigned long start, unsigned long nr_pages,
                unsigned int gup_flags, struct page **pages,
-               struct vm_area_struct **vmas, int *locked)
+               int *locked)
 {
-       if (!is_valid_gup_flags(gup_flags))
+       int local_locked = 1;
+
+       if (!is_valid_gup_args(pages, locked, &gup_flags,
+                              FOLL_TOUCH | FOLL_REMOTE))
                return -EINVAL;
 
-       return __get_user_pages_remote(mm, start, nr_pages, gup_flags,
-                                      pages, vmas, locked);
+       return __get_user_pages_locked(mm, start, nr_pages, pages,
+                                      locked ? locked : &local_locked,
+                                      gup_flags);
 }
 EXPORT_SYMBOL(get_user_pages_remote);
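A hedged usage sketch (not part of this patch) of the new six-argument signature; the caller, mm and page variables are assumptions of the example. The key point is that @locked reports whether GUP dropped mmap_lock on the caller's behalf:

static long example_remote_gup(struct mm_struct *mm, unsigned long addr,
                               struct page **page)
{
        int locked = 1;
        long ret;

        mmap_read_lock(mm);
        ret = get_user_pages_remote(mm, addr, 1, FOLL_WRITE, page, &locked);
        if (locked)
                mmap_read_unlock(mm);   /* otherwise GUP already dropped it */

        if (ret != 1)
                return ret < 0 ? ret : -EFAULT;

        /* ... use *page ..., then release the reference taken by GUP: */
        put_page(*page);
        return 0;
}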
 
@@ -2188,15 +2356,7 @@ EXPORT_SYMBOL(get_user_pages_remote);
 long get_user_pages_remote(struct mm_struct *mm,
                           unsigned long start, unsigned long nr_pages,
                           unsigned int gup_flags, struct page **pages,
-                          struct vm_area_struct **vmas, int *locked)
-{
-       return 0;
-}
-
-static long __get_user_pages_remote(struct mm_struct *mm,
-                                   unsigned long start, unsigned long nr_pages,
-                                   unsigned int gup_flags, struct page **pages,
-                                   struct vm_area_struct **vmas, int *locked)
+                          int *locked)
 {
        return 0;
 }
@@ -2210,8 +2370,6 @@ static long __get_user_pages_remote(struct mm_struct *mm,
  * @pages:      array that receives pointers to the pages pinned.
  *              Should be at least nr_pages long. Or NULL, if caller
  *              only intends to ensure the pages are faulted in.
- * @vmas:       array of pointers to vmas corresponding to each page.
- *              Or NULL if the caller does not require them.
  *
  * This is the same as get_user_pages_remote(), just with a less-flexible
  * calling convention where we assume that the mm being operated on belongs to
@@ -2219,14 +2377,15 @@ static long __get_user_pages_remote(struct mm_struct *mm,
  * obviously don't pass FOLL_REMOTE in here.
  */
 long get_user_pages(unsigned long start, unsigned long nr_pages,
-               unsigned int gup_flags, struct page **pages,
-               struct vm_area_struct **vmas)
+                   unsigned int gup_flags, struct page **pages)
 {
-       if (!is_valid_gup_flags(gup_flags))
+       int locked = 1;
+
+       if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_TOUCH))
                return -EINVAL;
 
-       return __gup_longterm_locked(current->mm, start, nr_pages,
-                                    pages, vmas, gup_flags | FOLL_TOUCH);
+       return __get_user_pages_locked(current->mm, start, nr_pages, pages,
+                                      &locked, gup_flags);
 }
 EXPORT_SYMBOL(get_user_pages);
 
@@ -2248,25 +2407,14 @@ EXPORT_SYMBOL(get_user_pages);
 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
                             struct page **pages, unsigned int gup_flags)
 {
-       struct mm_struct *mm = current->mm;
-       int locked = 1;
-       long ret;
+       int locked = 0;
 
-       /*
-        * FIXME: Current FOLL_LONGTERM behavior is incompatible with
-        * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
-        * vmas.  As there are no users of this flag in this call we simply
-        * disallow this option for now.
-        */
-       if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
+       if (!is_valid_gup_args(pages, NULL, &gup_flags,
+                              FOLL_TOUCH | FOLL_UNLOCKABLE))
                return -EINVAL;
 
-       mmap_read_lock(mm);
-       ret = __get_user_pages_locked(mm, start, nr_pages, pages, NULL,
-                                     &locked, gup_flags | FOLL_TOUCH);
-       if (locked)
-               mmap_read_unlock(mm);
-       return ret;
+       return __get_user_pages_locked(current->mm, start, nr_pages, pages,
+                                      &locked, gup_flags);
 }
 EXPORT_SYMBOL(get_user_pages_unlocked);
 
@@ -2305,6 +2453,82 @@ EXPORT_SYMBOL(get_user_pages_unlocked);
  */
 #ifdef CONFIG_HAVE_FAST_GUP
 
+/*
+ * Used in the GUP-fast path to determine whether a pin is permitted for a
+ * specific folio.
+ *
+ * This call assumes the caller has pinned the folio, that the lowest page table
+ * level still points to this folio, and that interrupts have been disabled.
+ *
+ * Writing to pinned file-backed dirty-tracked folios is inherently problematic
+ * (see the comment describing the writable_file_mapping_allowed() function).
+ * We therefore try to avoid the most egregious case of a long-term mapping
+ * doing so.
+ *
+ * This function cannot be as thorough as that one because the VMA is not
+ * available in the fast path, so instead we whitelist known-good cases and,
+ * if in doubt, fall back to the slow path.
+ */
+static bool folio_fast_pin_allowed(struct folio *folio, unsigned int flags)
+{
+       struct address_space *mapping;
+       unsigned long mapping_flags;
+
+       /*
+        * If we aren't pinning then no problematic write can occur. A long-term
+        * pin is the most egregious case, so this is the one we disallow.
+        */
+       if ((flags & (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE)) !=
+           (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE))
+               return true;
+
+       /* The folio is pinned, so we can safely access folio fields. */
+
+       if (WARN_ON_ONCE(folio_test_slab(folio)))
+               return false;
+
+       /* hugetlb mappings do not require dirty-tracking. */
+       if (folio_test_hugetlb(folio))
+               return true;
+
+       /*
+        * GUP-fast disables IRQs. When IRQs are disabled, RCU grace periods
+        * cannot proceed, which means no actions performed under RCU can
+        * proceed either.
+        *
+        * inodes and thus their mappings are freed under RCU, which means the
+        * mapping cannot be freed beneath us and thus we can safely dereference
+        * it.
+        */
+       lockdep_assert_irqs_disabled();
+
+       /*
+        * However, there may be operations which _alter_ the mapping, so ensure
+        * we read it once and only once.
+        */
+       mapping = READ_ONCE(folio->mapping);
+
+       /*
+        * The mapping may have been truncated; in any case we cannot determine
+        * whether this mapping is safe - fall back to the slow path to decide
+        * how to proceed.
+        */
+       if (!mapping)
+               return false;
+
+       /* Anonymous folios pose no problem. */
+       mapping_flags = (unsigned long)mapping & PAGE_MAPPING_FLAGS;
+       if (mapping_flags)
+               return mapping_flags & PAGE_MAPPING_ANON;
+
+       /*
+        * At this point, we know the mapping is non-null and points to an
+        * address_space object. The only remaining whitelisted file system is
+        * shmem.
+        */
+       return shmem_mapping(mapping);
+}
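To summarize the whitelist above (an editorial note, not part of this patch), the outcomes for a FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE request on the fast path are roughly:

        anonymous folio                  -> allowed on the fast path
        hugetlb folio                    -> allowed (no dirty tracking needed)
        shmem/tmpfs-backed folio         -> allowed
        any other file-backed folio      -> fall back to the slow path
        mapping already truncated (NULL) -> fall back to the slow path

Requests that are not long-term writable pins are always allowed here, since the dirty-tracking concern does not apply to them.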
+
 static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start,
                                            unsigned int flags,
                                            struct page **pages)
@@ -2321,22 +2545,47 @@ static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start,
 }
 
 #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
-static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
-                        unsigned int flags, struct page **pages, int *nr)
+/*
+ * Fast-gup relies on pte change detection to avoid concurrent pgtable
+ * operations.
+ *
+ * To pin the page, fast-gup needs to do the following, in order:
+ * (1) pin the page (by prefetching pte), then (2) check pte not changed.
+ *
+ * For the rest of pgtable operations where pgtable updates can be racy
+ * with fast-gup, we need to do (1) clear pte, then (2) check whether page
+ * is pinned.
+ *
+ * The above works for all pte-level operations, including THP split.
+ *
+ * For THP collapse, it's a bit more complicated because fast-gup may be
+ * walking a pgtable page that is being freed (pte is still valid but pmd
+ * can be cleared already).  To avoid a race in that situation, we need to
+ * also check the pmd here to make sure it doesn't change (this corresponds to
+ * pmdp_collapse_flush() in the THP collapse code path).
+ */
+static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
+                        unsigned long end, unsigned int flags,
+                        struct page **pages, int *nr)
 {
        struct dev_pagemap *pgmap = NULL;
        int nr_start = *nr, ret = 0;
        pte_t *ptep, *ptem;
 
        ptem = ptep = pte_offset_map(&pmd, addr);
+       if (!ptep)
+               return 0;
        do {
                pte_t pte = ptep_get_lockless(ptep);
                struct page *page;
                struct folio *folio;
 
                /*
-                * Similar to the PMD case below, NUMA hinting must take slow
-                * path using the pte_protnone check.
+                * Always fall back to ordinary GUP on PROT_NONE-mapped pages:
+                * pte_access_permitted() should reject these pages either way;
+                * otherwise, GUP-fast might succeed in cases where ordinary
+                * GUP would fail due to VMA access permissions.
                 */
                if (pte_protnone(pte))
                        goto pte_unmap;
@@ -2363,17 +2612,23 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
                if (!folio)
                        goto pte_unmap;
 
-               if (unlikely(page_is_secretmem(page))) {
+               if (unlikely(folio_is_secretmem(folio))) {
+                       gup_put_folio(folio, 1, flags);
+                       goto pte_unmap;
+               }
+
+               if (unlikely(pmd_val(pmd) != pmd_val(*pmdp)) ||
+                   unlikely(pte_val(pte) != pte_val(ptep_get(ptep)))) {
                        gup_put_folio(folio, 1, flags);
                        goto pte_unmap;
                }
 
-               if (unlikely(pte_val(pte) != pte_val(*ptep))) {
+               if (!folio_fast_pin_allowed(folio, flags)) {
                        gup_put_folio(folio, 1, flags);
                        goto pte_unmap;
                }
 
-               if (!pte_write(pte) && gup_must_unshare(flags, page)) {
+               if (!pte_write(pte) && gup_must_unshare(NULL, flags, page)) {
                        gup_put_folio(folio, 1, flags);
                        goto pte_unmap;
                }
@@ -2415,8 +2670,9 @@ pte_unmap:
  * get_user_pages_fast_only implementation that can pin pages. Thus it's still
  * useful to have gup_huge_pmd even if we can't operate on ptes.
  */
-static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
-                        unsigned int flags, struct page **pages, int *nr)
+static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
+                        unsigned long end, unsigned int flags,
+                        struct page **pages, int *nr)
 {
        return 0;
 }
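The ordering comment above gup_pte_range() also describes the rule that page-table updaters must follow: clear the entry first, then test for pins. A minimal, hypothetical sketch of that counterpart (not part of this patch; the helper name, locking context and TLB-flush placement are assumptions of the example):

/* Caller is assumed to hold the pte lock covering @ptep. */
static bool example_clear_pte_then_check_pinned(struct vm_area_struct *vma,
                                                unsigned long addr, pte_t *ptep,
                                                struct folio *folio)
{
        pte_t pte = ptep_get_and_clear(vma->vm_mm, addr, ptep); /* (1) clear pte */

        flush_tlb_page(vma, addr);

        if (folio_maybe_dma_pinned(folio)) {                    /* (2) check pin */
                /* A concurrent fast-gup pin won the race: restore and back off. */
                set_pte_at(vma->vm_mm, addr, ptep, pte);
                return false;
        }
        return true;
}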
@@ -2438,9 +2694,15 @@ static int __gup_device_huge(unsigned long pfn, unsigned long addr,
                        undo_dev_pagemap(nr, nr_start, flags, pages);
                        break;
                }
+
+               if (!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)) {
+                       undo_dev_pagemap(nr, nr_start, flags, pages);
+                       break;
+               }
+
                SetPageReferenced(page);
                pages[*nr] = page;
-               if (unlikely(!try_grab_page(page, flags))) {
+               if (unlikely(try_grab_page(page, flags))) {
                        undo_dev_pagemap(nr, nr_start, flags, pages);
                        break;
                }
@@ -2553,12 +2815,17 @@ static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
        if (!folio)
                return 0;
 
-       if (unlikely(pte_val(pte) != pte_val(*ptep))) {
+       if (unlikely(pte_val(pte) != pte_val(ptep_get(ptep)))) {
                gup_put_folio(folio, refs, flags);
                return 0;
        }
 
-       if (!pte_write(pte) && gup_must_unshare(flags, &folio->page)) {
+       if (!folio_fast_pin_allowed(folio, flags)) {
+               gup_put_folio(folio, refs, flags);
+               return 0;
+       }
+
+       if (!pte_write(pte) && gup_must_unshare(NULL, flags, &folio->page)) {
                gup_put_folio(folio, refs, flags);
                return 0;
        }
@@ -2624,7 +2891,11 @@ static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
                return 0;
        }
 
-       if (!pmd_write(orig) && gup_must_unshare(flags, &folio->page)) {
+       if (!folio_fast_pin_allowed(folio, flags)) {
+               gup_put_folio(folio, refs, flags);
+               return 0;
+       }
+       if (!pmd_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) {
                gup_put_folio(folio, refs, flags);
                return 0;
        }
@@ -2664,7 +2935,12 @@ static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
                return 0;
        }
 
-       if (!pud_write(orig) && gup_must_unshare(flags, &folio->page)) {
+       if (!folio_fast_pin_allowed(folio, flags)) {
+               gup_put_folio(folio, refs, flags);
+               return 0;
+       }
+
+       if (!pud_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) {
                gup_put_folio(folio, refs, flags);
                return 0;
        }
@@ -2699,6 +2975,16 @@ static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
                return 0;
        }
 
+       if (!pgd_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) {
+               gup_put_folio(folio, refs, flags);
+               return 0;
+       }
+
+       if (!folio_fast_pin_allowed(folio, flags)) {
+               gup_put_folio(folio, refs, flags);
+               return 0;
+       }
+
        *nr += refs;
        folio_set_referenced(folio);
        return 1;
@@ -2712,7 +2998,7 @@ static int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, unsigned lo
 
        pmdp = pmd_offset_lockless(pudp, pud, addr);
        do {
-               pmd_t pmd = READ_ONCE(*pmdp);
+               pmd_t pmd = pmdp_get_lockless(pmdp);
 
                next = pmd_addr_end(addr, end);
                if (!pmd_present(pmd))
@@ -2720,11 +3006,7 @@ static int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, unsigned lo
 
                if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) ||
                             pmd_devmap(pmd))) {
-                       /*
-                        * NUMA hinting faults need to be handled in the GUP
-                        * slowpath for accounting purposes and so that they
-                        * can be serialised against THP migration.
-                        */
+                       /* See gup_pte_range() */
                        if (pmd_protnone(pmd))
                                return 0;
 
@@ -2740,7 +3022,7 @@ static int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, unsigned lo
                        if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
                                         PMD_SHIFT, next, flags, pages, nr))
                                return 0;
-               } else if (!gup_pte_range(pmd, addr, next, flags, pages, nr))
+               } else if (!gup_pte_range(pmd, pmdp, addr, next, flags, pages, nr))
                        return 0;
        } while (pmdp++, addr = next, addr != end);
 
@@ -2760,7 +3042,7 @@ static int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr, unsigned lo
                next = pud_addr_end(addr, end);
                if (unlikely(!pud_present(pud)))
                        return 0;
-               if (unlikely(pud_huge(pud))) {
+               if (unlikely(pud_huge(pud) || pud_devmap(pud))) {
                        if (!gup_huge_pud(pud, pudp, addr, next, flags,
                                          pages, nr))
                                return 0;
@@ -2843,29 +3125,6 @@ static bool gup_fast_permitted(unsigned long start, unsigned long end)
 }
 #endif
 
-static int __gup_longterm_unlocked(unsigned long start, int nr_pages,
-                                  unsigned int gup_flags, struct page **pages)
-{
-       int ret;
-
-       /*
-        * FIXME: FOLL_LONGTERM does not work with
-        * get_user_pages_unlocked() (see comments in that function)
-        */
-       if (gup_flags & FOLL_LONGTERM) {
-               mmap_read_lock(current->mm);
-               ret = __gup_longterm_locked(current->mm,
-                                           start, nr_pages,
-                                           pages, NULL, gup_flags);
-               mmap_read_unlock(current->mm);
-       } else {
-               ret = get_user_pages_unlocked(start, nr_pages,
-                                             pages, gup_flags);
-       }
-
-       return ret;
-}
-
 static unsigned long lockless_pages_from_mm(unsigned long start,
                                            unsigned long end,
                                            unsigned int gup_flags,
@@ -2922,11 +3181,13 @@ static int internal_get_user_pages_fast(unsigned long start,
 {
        unsigned long len, end;
        unsigned long nr_pinned;
+       int locked = 0;
        int ret;
 
        if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM |
                                       FOLL_FORCE | FOLL_PIN | FOLL_GET |
-                                      FOLL_FAST_ONLY | FOLL_NOFAULT)))
+                                      FOLL_FAST_ONLY | FOLL_NOFAULT |
+                                      FOLL_PCI_P2PDMA | FOLL_HONOR_NUMA_FAULT)))
                return -EINVAL;
 
        if (gup_flags & FOLL_PIN)
@@ -2938,7 +3199,9 @@ static int internal_get_user_pages_fast(unsigned long start,
        start = untagged_addr(start) & PAGE_MASK;
        len = nr_pages << PAGE_SHIFT;
        if (check_add_overflow(start, len, &end))
-               return 0;
+               return -EOVERFLOW;
+       if (end > TASK_SIZE_MAX)
+               return -EFAULT;
        if (unlikely(!access_ok((void __user *)start, len)))
                return -EFAULT;
 
@@ -2949,8 +3212,9 @@ static int internal_get_user_pages_fast(unsigned long start,
        /* Slow path: try to get the remaining pages with get_user_pages */
        start += nr_pinned << PAGE_SHIFT;
        pages += nr_pinned;
-       ret = __gup_longterm_unlocked(start, nr_pages - nr_pinned, gup_flags,
-                                     pages);
+       ret = __gup_longterm_locked(current->mm, start, nr_pages - nr_pinned,
+                                   pages, &locked,
+                                   gup_flags | FOLL_TOUCH | FOLL_UNLOCKABLE);
        if (ret < 0) {
                /*
                 * The caller has to unpin the pages we already pinned so
@@ -2973,8 +3237,6 @@ static int internal_get_user_pages_fast(unsigned long start,
  *
  * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
  * the regular GUP.
- * Note a difference with get_user_pages_fast: this always returns the
- * number of pages pinned, 0 if no pages were pinned.
  *
  * If the architecture does not support this function, simply return with no
  * pages pinned.
@@ -2986,7 +3248,6 @@ static int internal_get_user_pages_fast(unsigned long start,
 int get_user_pages_fast_only(unsigned long start, int nr_pages,
                             unsigned int gup_flags, struct page **pages)
 {
-       int nr_pinned;
        /*
         * Internally (within mm/gup.c), gup fast variants must set FOLL_GET,
         * because gup fast is always a "pin with a +1 page refcount" request.
@@ -2994,21 +3255,11 @@ int get_user_pages_fast_only(unsigned long start, int nr_pages,
         * FOLL_FAST_ONLY is required in order to match the API description of
         * this routine: no fall back to regular ("slow") GUP.
         */
-       gup_flags |= FOLL_GET | FOLL_FAST_ONLY;
-
-       nr_pinned = internal_get_user_pages_fast(start, nr_pages, gup_flags,
-                                                pages);
-
-       /*
-        * As specified in the API description above, this routine is not
-        * allowed to return negative values. However, the common core
-        * routine internal_get_user_pages_fast() *can* return -errno.
-        * Therefore, correct for that here:
-        */
-       if (nr_pinned < 0)
-               nr_pinned = 0;
+       if (!is_valid_gup_args(pages, NULL, &gup_flags,
+                              FOLL_GET | FOLL_FAST_ONLY))
+               return -EINVAL;
 
-       return nr_pinned;
+       return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
 }
 EXPORT_SYMBOL_GPL(get_user_pages_fast_only);
 
@@ -3031,16 +3282,14 @@ EXPORT_SYMBOL_GPL(get_user_pages_fast_only);
 int get_user_pages_fast(unsigned long start, int nr_pages,
                        unsigned int gup_flags, struct page **pages)
 {
-       if (!is_valid_gup_flags(gup_flags))
-               return -EINVAL;
-
        /*
         * The caller may or may not have explicitly set FOLL_GET; either way is
         * OK. However, internally (within mm/gup.c), gup fast variants must set
         * FOLL_GET, because gup fast is always a "pin with a +1 page refcount"
         * request.
         */
-       gup_flags |= FOLL_GET;
+       if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_GET))
+               return -EINVAL;
        return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
 }
 EXPORT_SYMBOL_GPL(get_user_pages_fast);
@@ -3060,61 +3309,19 @@ EXPORT_SYMBOL_GPL(get_user_pages_fast);
  *
  * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
  * see Documentation/core-api/pin_user_pages.rst for further details.
+ *
+ * Note that if a zero_page is amongst the returned pages, it will not have
+ * pins in it and unpin_user_page() will not remove pins from it.
  */
 int pin_user_pages_fast(unsigned long start, int nr_pages,
                        unsigned int gup_flags, struct page **pages)
 {
-       /* FOLL_GET and FOLL_PIN are mutually exclusive. */
-       if (WARN_ON_ONCE(gup_flags & FOLL_GET))
-               return -EINVAL;
-
-       if (WARN_ON_ONCE(!pages))
+       if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN))
                return -EINVAL;
-
-       gup_flags |= FOLL_PIN;
        return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
 }
 EXPORT_SYMBOL_GPL(pin_user_pages_fast);
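A short usage sketch (not part of this patch); the user address and page array are assumptions of the example:

static long example_fast_pin(unsigned long uaddr, struct page **pages)
{
        long pinned;

        pinned = pin_user_pages_fast(uaddr, 2, FOLL_WRITE, pages);
        if (pinned < 0)
                return pinned;          /* -errno; nothing was pinned */

        /* ... access the pinned pages ... */

        unpin_user_pages(pages, pinned);
        return 0;
}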
 
-/*
- * This is the FOLL_PIN equivalent of get_user_pages_fast_only(). Behavior
- * is the same, except that this one sets FOLL_PIN instead of FOLL_GET.
- *
- * The API rules are the same, too: no negative values may be returned.
- */
-int pin_user_pages_fast_only(unsigned long start, int nr_pages,
-                            unsigned int gup_flags, struct page **pages)
-{
-       int nr_pinned;
-
-       /*
-        * FOLL_GET and FOLL_PIN are mutually exclusive. Note that the API
-        * rules require returning 0, rather than -errno:
-        */
-       if (WARN_ON_ONCE(gup_flags & FOLL_GET))
-               return 0;
-
-       if (WARN_ON_ONCE(!pages))
-               return 0;
-       /*
-        * FOLL_FAST_ONLY is required in order to match the API description of
-        * this routine: no fall back to regular ("slow") GUP.
-        */
-       gup_flags |= (FOLL_PIN | FOLL_FAST_ONLY);
-       nr_pinned = internal_get_user_pages_fast(start, nr_pages, gup_flags,
-                                                pages);
-       /*
-        * This routine is not allowed to return negative values. However,
-        * internal_get_user_pages_fast() *can* return -errno. Therefore,
-        * correct for that here:
-        */
-       if (nr_pinned < 0)
-               nr_pinned = 0;
-
-       return nr_pinned;
-}
-EXPORT_SYMBOL_GPL(pin_user_pages_fast_only);
-
 /**
  * pin_user_pages_remote() - pin pages of a remote process
  *
@@ -3124,8 +3331,6 @@ EXPORT_SYMBOL_GPL(pin_user_pages_fast_only);
  * @gup_flags: flags modifying lookup behaviour
  * @pages:     array that receives pointers to the pages pinned.
  *             Should be at least nr_pages long.
- * @vmas:      array of pointers to vmas corresponding to each page.
- *             Or NULL if the caller does not require them.
  * @locked:    pointer to lock flag indicating whether lock is held and
  *             subsequently whether VM_FAULT_RETRY functionality can be
  *             utilised. Lock must initially be held.
@@ -3136,22 +3341,23 @@ EXPORT_SYMBOL_GPL(pin_user_pages_fast_only);
  *
  * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
  * see Documentation/core-api/pin_user_pages.rst for details.
+ *
+ * Note that if a zero_page is amongst the returned pages, it will not have
+ * pins in it and unpin_user_page*() will not remove pins from it.
  */
 long pin_user_pages_remote(struct mm_struct *mm,
                           unsigned long start, unsigned long nr_pages,
                           unsigned int gup_flags, struct page **pages,
-                          struct vm_area_struct **vmas, int *locked)
+                          int *locked)
 {
-       /* FOLL_GET and FOLL_PIN are mutually exclusive. */
-       if (WARN_ON_ONCE(gup_flags & FOLL_GET))
-               return -EINVAL;
-
-       if (WARN_ON_ONCE(!pages))
-               return -EINVAL;
+       int local_locked = 1;
 
-       gup_flags |= FOLL_PIN;
-       return __get_user_pages_remote(mm, start, nr_pages, gup_flags,
-                                      pages, vmas, locked);
+       if (!is_valid_gup_args(pages, locked, &gup_flags,
+                              FOLL_PIN | FOLL_TOUCH | FOLL_REMOTE))
+               return 0;
+       return __gup_longterm_locked(mm, start, nr_pages, pages,
+                                    locked ? locked : &local_locked,
+                                    gup_flags);
 }
 EXPORT_SYMBOL(pin_user_pages_remote);
 
@@ -3163,29 +3369,25 @@ EXPORT_SYMBOL(pin_user_pages_remote);
  * @gup_flags: flags modifying lookup behaviour
  * @pages:     array that receives pointers to the pages pinned.
  *             Should be at least nr_pages long.
- * @vmas:      array of pointers to vmas corresponding to each page.
- *             Or NULL if the caller does not require them.
  *
  * Nearly the same as get_user_pages(), except that FOLL_TOUCH is not set, and
  * FOLL_PIN is set.
  *
  * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
  * see Documentation/core-api/pin_user_pages.rst for details.
+ *
+ * Note that if a zero_page is amongst the returned pages, it will not have
+ * pins in it and unpin_user_page*() will not remove pins from it.
  */
 long pin_user_pages(unsigned long start, unsigned long nr_pages,
-                   unsigned int gup_flags, struct page **pages,
-                   struct vm_area_struct **vmas)
+                   unsigned int gup_flags, struct page **pages)
 {
-       /* FOLL_GET and FOLL_PIN are mutually exclusive. */
-       if (WARN_ON_ONCE(gup_flags & FOLL_GET))
-               return -EINVAL;
-
-       if (WARN_ON_ONCE(!pages))
-               return -EINVAL;
+       int locked = 1;
 
-       gup_flags |= FOLL_PIN;
+       if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN))
+               return 0;
        return __gup_longterm_locked(current->mm, start, nr_pages,
-                                    pages, vmas, gup_flags);
+                                    pages, &locked, gup_flags);
 }
 EXPORT_SYMBOL(pin_user_pages);
 
@@ -3193,18 +3395,20 @@ EXPORT_SYMBOL(pin_user_pages);
  * pin_user_pages_unlocked() is the FOLL_PIN variant of
  * get_user_pages_unlocked(). Behavior is the same, except that this one sets
  * FOLL_PIN and rejects FOLL_GET.
+ *
+ * Note that if a zero_page is amongst the returned pages, it will not have
+ * pins in it and unpin_user_page*() will not remove pins from it.
  */
 long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
                             struct page **pages, unsigned int gup_flags)
 {
-       /* FOLL_GET and FOLL_PIN are mutually exclusive. */
-       if (WARN_ON_ONCE(gup_flags & FOLL_GET))
-               return -EINVAL;
+       int locked = 0;
 
-       if (WARN_ON_ONCE(!pages))
-               return -EINVAL;
+       if (!is_valid_gup_args(pages, NULL, &gup_flags,
+                              FOLL_PIN | FOLL_TOUCH | FOLL_UNLOCKABLE))
+               return 0;
 
-       gup_flags |= FOLL_PIN;
-       return get_user_pages_unlocked(start, nr_pages, pages, gup_flags);
+       return __gup_longterm_locked(current->mm, start, nr_pages, pages,
+                                    &locked, gup_flags);
 }
 EXPORT_SYMBOL(pin_user_pages_unlocked);
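Finally, a hedged sketch (not part of this patch) of a long-term pin through the unlocked variant; the names are hypothetical. With FOLL_LONGTERM, pages that may not be pinned long term (e.g. CMA or ZONE_MOVABLE pages) are migrated and the pin is retried internally before this call returns:

static long example_longterm_pin(unsigned long uaddr, unsigned long nr_pages,
                                 struct page **pages)
{
        long pinned;

        pinned = pin_user_pages_unlocked(uaddr, nr_pages, pages,
                                         FOLL_WRITE | FOLL_LONGTERM);
        if (pinned < 0)
                return pinned;

        /* ... pages may now back long-lived DMA ... */

        unpin_user_pages(pages, pinned);
        return pinned;
}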