mm: gup: remove FOLL_SPLIT
authorYang Shi <shy828301@gmail.com>
Fri, 30 Apr 2021 05:55:56 +0000 (22:55 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Fri, 30 Apr 2021 18:20:37 +0000 (11:20 -0700)
Since commit 5a52c9df62b4 ("uprobe: use FOLL_SPLIT_PMD instead of
FOLL_SPLIT") and commit ba925fa35057 ("s390/gmap: improve THP splitting"),
FOLL_SPLIT is no longer used.  Remove the dead code.

Link: https://lkml.kernel.org/r/20210330203900.9222-1-shy828301@gmail.com
Signed-off-by: Yang Shi <shy828301@gmail.com>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Documentation/vm/transhuge.rst
include/linux/mm.h
mm/gup.c

index 0ed23e5..216db1d 100644 (file)
@@ -53,11 +53,6 @@ prevent the page from being split by anyone.
    of handling GUP on hugetlbfs will also work fine on transparent
    hugepage backed mappings.
 
-In case you can't handle compound pages if they're returned by
-follow_page, the FOLL_SPLIT bit can be specified as a parameter to
-follow_page, so that it will split the hugepages before returning
-them.
-
 Graceful fallback
 =================
 
index 702c2a7..64be3ba 100644 (file)
@@ -2791,7 +2791,6 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
 #define FOLL_NOWAIT    0x20    /* if a disk transfer is needed, start the IO
                                 * and return without waiting upon it */
 #define FOLL_POPULATE  0x40    /* fault in page */
-#define FOLL_SPLIT     0x80    /* don't return transhuge pages, split them */
 #define FOLL_HWPOISON  0x100   /* check page is hwpoisoned */
 #define FOLL_NUMA      0x200   /* force NUMA hinting page fault */
 #define FOLL_MIGRATION 0x400   /* wait for page to replace migration entry */
index 66522ae..71e546e 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -516,18 +516,6 @@ retry:
                }
        }
 
-       if (flags & FOLL_SPLIT && PageTransCompound(page)) {
-               get_page(page);
-               pte_unmap_unlock(ptep, ptl);
-               lock_page(page);
-               ret = split_huge_page(page);
-               unlock_page(page);
-               put_page(page);
-               if (ret)
-                       return ERR_PTR(ret);
-               goto retry;
-       }
-
        /* try_grab_page() does nothing unless FOLL_GET or FOLL_PIN is set. */
        if (unlikely(!try_grab_page(page, flags))) {
                page = ERR_PTR(-ENOMEM);
@@ -672,7 +660,7 @@ retry_locked:
                spin_unlock(ptl);
                return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
        }
-       if (flags & (FOLL_SPLIT | FOLL_SPLIT_PMD)) {
+       if (flags & FOLL_SPLIT_PMD) {
                int ret;
                page = pmd_page(*pmd);
                if (is_huge_zero_page(page)) {
@@ -681,19 +669,7 @@ retry_locked:
                        split_huge_pmd(vma, pmd, address);
                        if (pmd_trans_unstable(pmd))
                                ret = -EBUSY;
-               } else if (flags & FOLL_SPLIT) {
-                       if (unlikely(!try_get_page(page))) {
-                               spin_unlock(ptl);
-                               return ERR_PTR(-ENOMEM);
-                       }
-                       spin_unlock(ptl);
-                       lock_page(page);
-                       ret = split_huge_page(page);
-                       unlock_page(page);
-                       put_page(page);
-                       if (pmd_none(*pmd))
-                               return no_page_table(vma, flags);
-               } else {  /* flags & FOLL_SPLIT_PMD */
+               } else {
                        spin_unlock(ptl);
                        split_huge_pmd(vma, pmd, address);
                        ret = pte_alloc(mm, pmd) ? -ENOMEM : 0;