Linux 6.9-rc1 (linux-2.6-microblaze.git)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 1596508..9859aa4 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -36,6 +36,8 @@
 #include <linux/numa.h>
 #include <linux/page_owner.h>
 #include <linux/sched/sysctl.h>
+#include <linux/memory-tiers.h>
+#include <linux/compat.h>
 
 #include <asm/tlb.h>
 #include <asm/pgalloc.h>
@@ -64,27 +66,127 @@ unsigned long transparent_hugepage_flags __read_mostly =
        (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
        (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
 
-static struct shrinker deferred_split_shrinker;
+static struct shrinker *deferred_split_shrinker;
+static unsigned long deferred_split_count(struct shrinker *shrink,
+                                         struct shrink_control *sc);
+static unsigned long deferred_split_scan(struct shrinker *shrink,
+                                        struct shrink_control *sc);
 
 static atomic_t huge_zero_refcount;
 struct page *huge_zero_page __read_mostly;
 unsigned long huge_zero_pfn __read_mostly = ~0UL;
+unsigned long huge_anon_orders_always __read_mostly;
+unsigned long huge_anon_orders_madvise __read_mostly;
+unsigned long huge_anon_orders_inherit __read_mostly;
+
+unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
+                                        unsigned long vm_flags, bool smaps,
+                                        bool in_pf, bool enforce_sysfs,
+                                        unsigned long orders)
+{
+       /* Check the intersection of requested and supported orders. */
+       orders &= vma_is_anonymous(vma) ?
+                       THP_ORDERS_ALL_ANON : THP_ORDERS_ALL_FILE;
+       if (!orders)
+               return 0;
 
-bool transparent_hugepage_active(struct vm_area_struct *vma)
-{
-       /* The addr is used to check if the vma size fits */
-       unsigned long addr = (vma->vm_end & HPAGE_PMD_MASK) - HPAGE_PMD_SIZE;
+       if (!vma->vm_mm)                /* vdso */
+               return 0;
 
-       if (!transhuge_vma_suitable(vma, addr))
-               return false;
-       if (vma_is_anonymous(vma))
-               return __transparent_hugepage_enabled(vma);
-       if (vma_is_shmem(vma))
-               return shmem_huge_enabled(vma);
-       if (transhuge_vma_enabled(vma, vma->vm_flags) && file_thp_enabled(vma))
-               return true;
+       /*
+        * Explicitly disabled through madvise or prctl, or some
+        * architectures may disable THP for some mappings, for
+        * example, s390 kvm.
+        */
+       if ((vm_flags & VM_NOHUGEPAGE) ||
+           test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
+               return 0;
+       /*
+        * Bail out if the hardware/firmware has marked hugepage support as disabled.
+        */
+       if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED))
+               return 0;
 
-       return false;
+       /* khugepaged doesn't collapse DAX vma, but page fault is fine. */
+       if (vma_is_dax(vma))
+               return in_pf ? orders : 0;
+
+       /*
+        * khugepaged special VMA and hugetlb VMA.
+        * Must be checked after dax since some dax mappings may have
+        * VM_MIXEDMAP set.
+        */
+       if (!in_pf && !smaps && (vm_flags & VM_NO_KHUGEPAGED))
+               return 0;
+
+       /*
+        * Check alignment for file vma and size for both file and anon vma by
+        * filtering out the unsuitable orders.
+        *
+        * Skip the check for page fault. Huge fault does the check in fault
+        * handlers.
+        */
+       if (!in_pf) {
+               int order = highest_order(orders);
+               unsigned long addr;
+
+               while (orders) {
+                       addr = vma->vm_end - (PAGE_SIZE << order);
+                       if (thp_vma_suitable_order(vma, addr, order))
+                               break;
+                       order = next_order(&orders, order);
+               }
+
+               if (!orders)
+                       return 0;
+       }
+
+       /*
+        * Enabled via shmem mount options or sysfs settings.
+        * Must be done before hugepage flags check since shmem has its
+        * own flags.
+        */
+       if (!in_pf && shmem_file(vma->vm_file))
+               return shmem_is_huge(file_inode(vma->vm_file), vma->vm_pgoff,
+                                    !enforce_sysfs, vma->vm_mm, vm_flags)
+                       ? orders : 0;
+
+       if (!vma_is_anonymous(vma)) {
+               /*
+                * Enforce sysfs THP requirements as necessary. Anonymous vmas
+                * were already handled in thp_vma_allowable_orders().
+                */
+               if (enforce_sysfs &&
+                   (!hugepage_global_enabled() || (!(vm_flags & VM_HUGEPAGE) &&
+                                                   !hugepage_global_always())))
+                       return 0;
+
+               /*
+                * Trust that ->huge_fault() handlers know what they are doing
+                * in fault path.
+                */
+               if (((in_pf || smaps)) && vma->vm_ops->huge_fault)
+                       return orders;
+               /* Only regular file is valid in collapse path */
+               if (((!in_pf || smaps)) && file_thp_enabled(vma))
+                       return orders;
+               return 0;
+       }
+
+       if (vma_is_temporary_stack(vma))
+               return 0;
+
+       /*
+        * THPeligible bit of smaps should show 1 for proper VMAs even
+        * though anon_vma is not initialized yet.
+        *
+        * Allow page fault since anon_vma may not be initialized until
+        * the first page fault.
+        */
+       if (!vma->anon_vma)
+               return (smaps || in_pf) ? orders : 0;
+
+       return orders;
 }
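
For context, the per-size sysfs enforcement for anonymous VMAs that the comment above refers to happens in the thp_vma_allowable_orders() wrapper before this function runs. A rough sketch of that filtering, assuming the huge_anon_orders_* bitmaps added in this patch and the hugepage_global_always()/hugepage_global_enabled() helpers (illustrative, not copied verbatim from huge_mm.h):

	/*
	 * Sketch: an order survives for an anonymous VMA if its per-size
	 * setting is "always", if it is "madvise" and the VMA was madvised,
	 * or if it is "inherit" and the global setting allows it.
	 */
	static unsigned long anon_sysfs_orders_sketch(unsigned long vm_flags,
						      unsigned long orders)
	{
		unsigned long mask = READ_ONCE(huge_anon_orders_always);

		if (vm_flags & VM_HUGEPAGE)
			mask |= READ_ONCE(huge_anon_orders_madvise);
		if (hugepage_global_always() ||
		    ((vm_flags & VM_HUGEPAGE) && hugepage_global_enabled()))
			mask |= READ_ONCE(huge_anon_orders_inherit);

		return orders & mask;
	}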
 
 static bool get_huge_zero_page(void)
@@ -100,7 +202,6 @@ retry:
                count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
                return false;
        }
-       count_vm_event(THP_ZERO_PAGE_ALLOC);
        preempt_disable();
        if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
                preempt_enable();
@@ -112,6 +213,7 @@ retry:
        /* We take additional reference here. It will be put back by shrinker */
        atomic_set(&huge_zero_refcount, 2);
        preempt_enable();
+       count_vm_event(THP_ZERO_PAGE_ALLOC);
        return true;
 }
 
@@ -165,11 +267,7 @@ static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
        return 0;
 }
 
-static struct shrinker huge_zero_page_shrinker = {
-       .count_objects = shrink_huge_zero_page_count,
-       .scan_objects = shrink_huge_zero_page_scan,
-       .seeks = DEFAULT_SEEKS,
-};
+static struct shrinker *huge_zero_page_shrinker;
 
 #ifdef CONFIG_SYSFS
 static ssize_t enabled_show(struct kobject *kobj,
@@ -213,8 +311,8 @@ static ssize_t enabled_store(struct kobject *kobj,
        }
        return ret;
 }
-static struct kobj_attribute enabled_attr =
-       __ATTR(enabled, 0644, enabled_show, enabled_store);
+
+static struct kobj_attribute enabled_attr = __ATTR_RW(enabled);
 
 ssize_t single_hugepage_flag_show(struct kobject *kobj,
                                  struct kobj_attribute *attr, char *buf,
@@ -303,8 +401,7 @@ static ssize_t defrag_store(struct kobject *kobj,
 
        return count;
 }
-static struct kobj_attribute defrag_attr =
-       __ATTR(defrag, 0644, defrag_show, defrag_store);
+static struct kobj_attribute defrag_attr = __ATTR_RW(defrag);
 
 static ssize_t use_zero_page_show(struct kobject *kobj,
                                  struct kobj_attribute *attr, char *buf)
@@ -318,8 +415,7 @@ static ssize_t use_zero_page_store(struct kobject *kobj,
        return single_hugepage_flag_store(kobj, attr, buf, count,
                                 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
 }
-static struct kobj_attribute use_zero_page_attr =
-       __ATTR(use_zero_page, 0644, use_zero_page_show, use_zero_page_store);
+static struct kobj_attribute use_zero_page_attr = __ATTR_RW(use_zero_page);
 
 static ssize_t hpage_pmd_size_show(struct kobject *kobj,
                                   struct kobj_attribute *attr, char *buf)
@@ -344,9 +440,136 @@ static const struct attribute_group hugepage_attr_group = {
        .attrs = hugepage_attr,
 };
 
+static void hugepage_exit_sysfs(struct kobject *hugepage_kobj);
+static void thpsize_release(struct kobject *kobj);
+static DEFINE_SPINLOCK(huge_anon_orders_lock);
+static LIST_HEAD(thpsize_list);
+
+struct thpsize {
+       struct kobject kobj;
+       struct list_head node;
+       int order;
+};
+
+#define to_thpsize(kobj) container_of(kobj, struct thpsize, kobj)
+
+static ssize_t thpsize_enabled_show(struct kobject *kobj,
+                                   struct kobj_attribute *attr, char *buf)
+{
+       int order = to_thpsize(kobj)->order;
+       const char *output;
+
+       if (test_bit(order, &huge_anon_orders_always))
+               output = "[always] inherit madvise never";
+       else if (test_bit(order, &huge_anon_orders_inherit))
+               output = "always [inherit] madvise never";
+       else if (test_bit(order, &huge_anon_orders_madvise))
+               output = "always inherit [madvise] never";
+       else
+               output = "always inherit madvise [never]";
+
+       return sysfs_emit(buf, "%s\n", output);
+}
+
+static ssize_t thpsize_enabled_store(struct kobject *kobj,
+                                    struct kobj_attribute *attr,
+                                    const char *buf, size_t count)
+{
+       int order = to_thpsize(kobj)->order;
+       ssize_t ret = count;
+
+       if (sysfs_streq(buf, "always")) {
+               spin_lock(&huge_anon_orders_lock);
+               clear_bit(order, &huge_anon_orders_inherit);
+               clear_bit(order, &huge_anon_orders_madvise);
+               set_bit(order, &huge_anon_orders_always);
+               spin_unlock(&huge_anon_orders_lock);
+       } else if (sysfs_streq(buf, "inherit")) {
+               spin_lock(&huge_anon_orders_lock);
+               clear_bit(order, &huge_anon_orders_always);
+               clear_bit(order, &huge_anon_orders_madvise);
+               set_bit(order, &huge_anon_orders_inherit);
+               spin_unlock(&huge_anon_orders_lock);
+       } else if (sysfs_streq(buf, "madvise")) {
+               spin_lock(&huge_anon_orders_lock);
+               clear_bit(order, &huge_anon_orders_always);
+               clear_bit(order, &huge_anon_orders_inherit);
+               set_bit(order, &huge_anon_orders_madvise);
+               spin_unlock(&huge_anon_orders_lock);
+       } else if (sysfs_streq(buf, "never")) {
+               spin_lock(&huge_anon_orders_lock);
+               clear_bit(order, &huge_anon_orders_always);
+               clear_bit(order, &huge_anon_orders_inherit);
+               clear_bit(order, &huge_anon_orders_madvise);
+               spin_unlock(&huge_anon_orders_lock);
+       } else
+               ret = -EINVAL;
+
+       return ret;
+}
+
+static struct kobj_attribute thpsize_enabled_attr =
+       __ATTR(enabled, 0644, thpsize_enabled_show, thpsize_enabled_store);
+
+static struct attribute *thpsize_attrs[] = {
+       &thpsize_enabled_attr.attr,
+       NULL,
+};
+
+static const struct attribute_group thpsize_attr_group = {
+       .attrs = thpsize_attrs,
+};
+
+static const struct kobj_type thpsize_ktype = {
+       .release = &thpsize_release,
+       .sysfs_ops = &kobj_sysfs_ops,
+};
+
+static struct thpsize *thpsize_create(int order, struct kobject *parent)
+{
+       unsigned long size = (PAGE_SIZE << order) / SZ_1K;
+       struct thpsize *thpsize;
+       int ret;
+
+       thpsize = kzalloc(sizeof(*thpsize), GFP_KERNEL);
+       if (!thpsize)
+               return ERR_PTR(-ENOMEM);
+
+       ret = kobject_init_and_add(&thpsize->kobj, &thpsize_ktype, parent,
+                                  "hugepages-%lukB", size);
+       if (ret) {
+               kfree(thpsize);
+               return ERR_PTR(ret);
+       }
+
+       ret = sysfs_create_group(&thpsize->kobj, &thpsize_attr_group);
+       if (ret) {
+               kobject_put(&thpsize->kobj);
+               return ERR_PTR(ret);
+       }
+
+       thpsize->order = order;
+       return thpsize;
+}
+
+static void thpsize_release(struct kobject *kobj)
+{
+       kfree(to_thpsize(kobj));
+}
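
For orientation: thpsize_create() derives each directory name from (PAGE_SIZE << order) / SZ_1K, so on x86-64 with 4 KiB base pages the creation loop in hugepage_init_sysfs() below produces one entry per order from 2 through PMD_ORDER (9):

	/sys/kernel/mm/transparent_hugepage/hugepages-16kB/enabled
	/sys/kernel/mm/transparent_hugepage/hugepages-32kB/enabled
	...
	/sys/kernel/mm/transparent_hugepage/hugepages-2048kB/enabled

Each file accepts always, inherit, madvise or never; per the initialisation below, only the PMD-sized entry defaults to inherit.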
+
 static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
 {
        int err;
+       struct thpsize *thpsize;
+       unsigned long orders;
+       int order;
+
+       /*
+        * Default to setting PMD-sized THP to inherit the global setting and
+        * disable all other sizes. powerpc's PMD_ORDER isn't a compile-time
+        * constant so we have to do this here.
+        */
+       huge_anon_orders_inherit = BIT(PMD_ORDER);
 
        *hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
        if (unlikely(!*hugepage_kobj)) {
@@ -366,8 +589,24 @@ static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
                goto remove_hp_group;
        }
 
+       orders = THP_ORDERS_ALL_ANON;
+       order = highest_order(orders);
+       while (orders) {
+               thpsize = thpsize_create(order, *hugepage_kobj);
+               if (IS_ERR(thpsize)) {
+                       pr_err("failed to create thpsize for order %d\n", order);
+                       err = PTR_ERR(thpsize);
+                       goto remove_all;
+               }
+               list_add(&thpsize->node, &thpsize_list);
+               order = next_order(&orders, order);
+       }
+
        return 0;
 
+remove_all:
+       hugepage_exit_sysfs(*hugepage_kobj);
+       return err;
 remove_hp_group:
        sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
 delete_obj:
@@ -377,6 +616,13 @@ delete_obj:
 
 static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
 {
+       struct thpsize *thpsize, *tmp;
+
+       list_for_each_entry_safe(thpsize, tmp, &thpsize_list, node) {
+               list_del(&thpsize->node);
+               kobject_put(&thpsize->kobj);
+       }
+
        sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
        sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
        kobject_put(hugepage_kobj);
@@ -392,24 +638,52 @@ static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
 }
 #endif /* CONFIG_SYSFS */
 
+static int __init thp_shrinker_init(void)
+{
+       huge_zero_page_shrinker = shrinker_alloc(0, "thp-zero");
+       if (!huge_zero_page_shrinker)
+               return -ENOMEM;
+
+       deferred_split_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE |
+                                                SHRINKER_MEMCG_AWARE |
+                                                SHRINKER_NONSLAB,
+                                                "thp-deferred_split");
+       if (!deferred_split_shrinker) {
+               shrinker_free(huge_zero_page_shrinker);
+               return -ENOMEM;
+       }
+
+       huge_zero_page_shrinker->count_objects = shrink_huge_zero_page_count;
+       huge_zero_page_shrinker->scan_objects = shrink_huge_zero_page_scan;
+       shrinker_register(huge_zero_page_shrinker);
+
+       deferred_split_shrinker->count_objects = deferred_split_count;
+       deferred_split_shrinker->scan_objects = deferred_split_scan;
+       shrinker_register(deferred_split_shrinker);
+
+       return 0;
+}
+
+static void __init thp_shrinker_exit(void)
+{
+       shrinker_free(huge_zero_page_shrinker);
+       shrinker_free(deferred_split_shrinker);
+}
+
 static int __init hugepage_init(void)
 {
        int err;
        struct kobject *hugepage_kobj;
 
        if (!has_transparent_hugepage()) {
-               /*
-                * Hardware doesn't support hugepages, hence disable
-                * DAX PMD support.
-                */
-               transparent_hugepage_flags = 1 << TRANSPARENT_HUGEPAGE_NEVER_DAX;
+               transparent_hugepage_flags = 1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED;
                return -EINVAL;
        }
 
        /*
         * hugepages can't be allocated by the buddy allocator
         */
-       MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER >= MAX_ORDER);
+       MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER > MAX_PAGE_ORDER);
        /*
         * we use page->mapping and page->index in second tail page
         * as list_head: assuming THP order >= 2
@@ -424,12 +698,9 @@ static int __init hugepage_init(void)
        if (err)
                goto err_slab;
 
-       err = register_shrinker(&huge_zero_page_shrinker);
-       if (err)
-               goto err_hzp_shrinker;
-       err = register_shrinker(&deferred_split_shrinker);
+       err = thp_shrinker_init();
        if (err)
-               goto err_split_shrinker;
+               goto err_shrinker;
 
        /*
         * By default disable transparent hugepages on smaller systems,
@@ -447,10 +718,8 @@ static int __init hugepage_init(void)
 
        return 0;
 err_khugepaged:
-       unregister_shrinker(&deferred_split_shrinker);
-err_split_shrinker:
-       unregister_shrinker(&huge_zero_page_shrinker);
-err_hzp_shrinker:
+       thp_shrinker_exit();
+err_shrinker:
        khugepaged_destroy();
 err_slab:
        hugepage_exit_sysfs(hugepage_kobj);
@@ -493,15 +762,16 @@ __setup("transparent_hugepage=", setup_transparent_hugepage);
 pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
 {
        if (likely(vma->vm_flags & VM_WRITE))
-               pmd = pmd_mkwrite(pmd);
+               pmd = pmd_mkwrite(pmd, vma);
        return pmd;
 }
 
 #ifdef CONFIG_MEMCG
-static inline struct deferred_split *get_deferred_split_queue(struct page *page)
+static inline
+struct deferred_split *get_deferred_split_queue(struct folio *folio)
 {
-       struct mem_cgroup *memcg = page_memcg(compound_head(page));
-       struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));
+       struct mem_cgroup *memcg = folio_memcg(folio);
+       struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));
 
        if (memcg)
                return &memcg->deferred_split_queue;
@@ -509,33 +779,31 @@ static inline struct deferred_split *get_deferred_split_queue(struct page *page)
                return &pgdat->deferred_split_queue;
 }
 #else
-static inline struct deferred_split *get_deferred_split_queue(struct page *page)
+static inline
+struct deferred_split *get_deferred_split_queue(struct folio *folio)
 {
-       struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));
+       struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));
 
        return &pgdat->deferred_split_queue;
 }
 #endif
 
-void prep_transhuge_page(struct page *page)
+void folio_prep_large_rmappable(struct folio *folio)
 {
-       /*
-        * we use page->mapping and page->indexlru in second tail page
-        * as list_head: assuming THP order >= 2
-        */
-
-       INIT_LIST_HEAD(page_deferred_list(page));
-       set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
+       if (!folio || !folio_test_large(folio))
+               return;
+       if (folio_order(folio) > 1)
+               INIT_LIST_HEAD(&folio->_deferred_list);
+       folio_set_large_rmappable(folio);
 }
 
-static inline bool is_transparent_hugepage(struct page *page)
+static inline bool is_transparent_hugepage(struct folio *folio)
 {
-       if (!PageCompound(page))
+       if (!folio_test_large(folio))
                return false;
 
-       page = compound_head(page);
-       return is_huge_zero_page(page) ||
-              page[1].compound_dtor == TRANSHUGE_PAGE_DTOR;
+       return is_huge_zero_page(&folio->page) ||
+               folio_test_large_rmappable(folio);
 }
 
 static unsigned long __thp_get_unmapped_area(struct file *filp,
@@ -544,7 +812,10 @@ static unsigned long __thp_get_unmapped_area(struct file *filp,
 {
        loff_t off_end = off + len;
        loff_t off_align = round_up(off, size);
-       unsigned long len_pad, ret;
+       unsigned long len_pad, ret, off_sub;
+
+       if (IS_ENABLED(CONFIG_32BIT) || in_compat_syscall())
+               return 0;
 
        if (off_end <= off_align || (off_end - off_align) < size)
                return 0;
@@ -570,7 +841,13 @@ static unsigned long __thp_get_unmapped_area(struct file *filp,
        if (ret == addr)
                return addr;
 
-       ret += (off - ret) & (size - 1);
+       off_sub = (off - ret) & (size - 1);
+
+       if (current->mm->get_unmapped_area == arch_get_unmapped_area_topdown &&
+           !off_sub)
+               return ret + size;
+
+       ret += off_sub;
        return ret;
 }
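
A worked example of the remapping arithmetic above, with numbers chosen purely for illustration: with size = 2 MiB, off = 0x3ff000 and a 2 MiB-aligned ret, off_sub = (off - ret) & (size - 1) = 0x1ff000, so the function returns ret + 0x1ff000 and the mapping address becomes congruent to the file offset modulo 2 MiB. That congruence is what later allows the page cache to be mapped with PMD entries.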
 
@@ -592,19 +869,20 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
                        struct page *page, gfp_t gfp)
 {
        struct vm_area_struct *vma = vmf->vma;
+       struct folio *folio = page_folio(page);
        pgtable_t pgtable;
        unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
        vm_fault_t ret = 0;
 
-       VM_BUG_ON_PAGE(!PageCompound(page), page);
+       VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
 
-       if (mem_cgroup_charge(page_folio(page), vma->vm_mm, gfp)) {
-               put_page(page);
+       if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
+               folio_put(folio);
                count_vm_event(THP_FAULT_FALLBACK);
                count_vm_event(THP_FAULT_FALLBACK_CHARGE);
                return VM_FAULT_FALLBACK;
        }
-       cgroup_throttle_swaprate(page, gfp);
+       folio_throttle_swaprate(folio, gfp);
 
        pgtable = pte_alloc_one(vma->vm_mm);
        if (unlikely(!pgtable)) {
@@ -614,11 +892,11 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
 
        clear_huge_page(page, vmf->address, HPAGE_PMD_NR);
        /*
-        * The memory barrier inside __SetPageUptodate makes sure that
+        * The memory barrier inside __folio_mark_uptodate makes sure that
         * clear_huge_page writes become visible before the set_pmd_at()
         * write.
         */
-       __SetPageUptodate(page);
+       __folio_mark_uptodate(folio);
 
        vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
        if (unlikely(!pmd_none(*vmf->pmd))) {
@@ -633,7 +911,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
                /* Deliver the page fault to userland */
                if (userfaultfd_missing(vma)) {
                        spin_unlock(vmf->ptl);
-                       put_page(page);
+                       folio_put(folio);
                        pte_free(vma->vm_mm, pgtable);
                        ret = handle_userfault(vmf, VM_UFFD_MISSING);
                        VM_BUG_ON(ret & VM_FAULT_FALLBACK);
@@ -642,8 +920,8 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
 
                entry = mk_huge_pmd(page, vma->vm_page_prot);
                entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
-               page_add_new_anon_rmap(page, vma, haddr);
-               lru_cache_add_inactive_or_unevictable(page, vma);
+               folio_add_new_anon_rmap(folio, vma, haddr);
+               folio_add_lru_vma(folio, vma);
                pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
                set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
                update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
@@ -660,7 +938,7 @@ unlock_release:
 release:
        if (pgtable)
                pte_free(vma->vm_mm, pgtable);
-       put_page(page);
+       folio_put(folio);
        return ret;
 
 }
@@ -710,8 +988,7 @@ static void set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
                return;
        entry = mk_pmd(zero_page, vma->vm_page_prot);
        entry = pmd_mkhuge(entry);
-       if (pgtable)
-               pgtable_trans_huge_deposit(mm, pmd, pgtable);
+       pgtable_trans_huge_deposit(mm, pmd, pgtable);
        set_pmd_at(mm, haddr, pmd, entry);
        mm_inc_nr_ptes(mm);
 }
@@ -723,11 +1000,11 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
        struct folio *folio;
        unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
 
-       if (!transhuge_vma_suitable(vma, haddr))
+       if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER))
                return VM_FAULT_FALLBACK;
        if (unlikely(anon_vma_prepare(vma)))
                return VM_FAULT_OOM;
-       khugepaged_enter(vma, vma->vm_flags);
+       khugepaged_enter_vma(vma, vma->vm_flags);
 
        if (!(vmf->flags & FAULT_FLAG_WRITE) &&
                        !mm_forbids_zeropage(vma->vm_mm) &&
@@ -825,23 +1102,20 @@ out_unlock:
 }
 
 /**
- * vmf_insert_pfn_pmd_prot - insert a pmd size pfn
+ * vmf_insert_pfn_pmd - insert a pmd size pfn
  * @vmf: Structure describing the fault
  * @pfn: pfn to insert
- * @pgprot: page protection to use
  * @write: whether it's a write fault
  *
- * Insert a pmd size pfn. See vmf_insert_pfn() for additional info and
- * also consult the vmf_insert_mixed_prot() documentation when
- * @pgprot != @vmf->vma->vm_page_prot.
+ * Insert a pmd size pfn. See vmf_insert_pfn() for additional info.
  *
  * Return: vm_fault_t value.
  */
-vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn,
-                                  pgprot_t pgprot, bool write)
+vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
 {
        unsigned long addr = vmf->address & PMD_MASK;
        struct vm_area_struct *vma = vmf->vma;
+       pgprot_t pgprot = vma->vm_page_prot;
        pgtable_t pgtable = NULL;
 
        /*
@@ -869,7 +1143,7 @@ vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn,
        insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable);
        return VM_FAULT_NOPAGE;
 }
-EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd_prot);
+EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);
 
 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
 static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
@@ -880,9 +1154,10 @@ static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
 }
 
 static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
-               pud_t *pud, pfn_t pfn, pgprot_t prot, bool write)
+               pud_t *pud, pfn_t pfn, bool write)
 {
        struct mm_struct *mm = vma->vm_mm;
+       pgprot_t prot = vma->vm_page_prot;
        pud_t entry;
        spinlock_t *ptl;
 
@@ -916,23 +1191,20 @@ out_unlock:
 }
 
 /**
- * vmf_insert_pfn_pud_prot - insert a pud size pfn
+ * vmf_insert_pfn_pud - insert a pud size pfn
  * @vmf: Structure describing the fault
  * @pfn: pfn to insert
- * @pgprot: page protection to use
  * @write: whether it's a write fault
  *
- * Insert a pud size pfn. See vmf_insert_pfn() for additional info and
- * also consult the vmf_insert_mixed_prot() documentation when
- * @pgprot != @vmf->vma->vm_page_prot.
+ * Insert a pud size pfn. See vmf_insert_pfn() for additional info.
  *
  * Return: vm_fault_t value.
  */
-vm_fault_t vmf_insert_pfn_pud_prot(struct vm_fault *vmf, pfn_t pfn,
-                                  pgprot_t pgprot, bool write)
+vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write)
 {
        unsigned long addr = vmf->address & PUD_MASK;
        struct vm_area_struct *vma = vmf->vma;
+       pgprot_t pgprot = vma->vm_page_prot;
 
        /*
         * If we had pud_special, we could avoid all these restrictions,
@@ -950,22 +1222,22 @@ vm_fault_t vmf_insert_pfn_pud_prot(struct vm_fault *vmf, pfn_t pfn,
 
        track_pfn_insert(vma, &pgprot, pfn);
 
-       insert_pfn_pud(vma, addr, vmf->pud, pfn, pgprot, write);
+       insert_pfn_pud(vma, addr, vmf->pud, pfn, write);
        return VM_FAULT_NOPAGE;
 }
-EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud_prot);
+EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);
 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
 
 static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
-               pmd_t *pmd, int flags)
+                     pmd_t *pmd, bool write)
 {
        pmd_t _pmd;
 
        _pmd = pmd_mkyoung(*pmd);
-       if (flags & FOLL_WRITE)
+       if (write)
                _pmd = pmd_mkdirty(_pmd);
        if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
-                               pmd, _pmd, flags & FOLL_WRITE))
+                                 pmd, _pmd, write))
                update_mmu_cache_pmd(vma, addr, pmd);
 }
 
@@ -975,20 +1247,10 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
        unsigned long pfn = pmd_pfn(*pmd);
        struct mm_struct *mm = vma->vm_mm;
        struct page *page;
+       int ret;
 
        assert_spin_locked(pmd_lockptr(mm, pmd));
 
-       /*
-        * When we COW a devmap PMD entry, we split it into PTEs, so we should
-        * not be in this function with `flags & FOLL_COW` set.
-        */
-       WARN_ONCE(flags & FOLL_COW, "mm: In follow_devmap_pmd with FOLL_COW set");
-
-       /* FOLL_GET and FOLL_PIN are mutually exclusive. */
-       if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
-                        (FOLL_PIN | FOLL_GET)))
-               return NULL;
-
        if (flags & FOLL_WRITE && !pmd_write(*pmd))
                return NULL;
 
@@ -998,7 +1260,7 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
                return NULL;
 
        if (flags & FOLL_TOUCH)
-               touch_pmd(vma, addr, pmd, flags);
+               touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);
 
        /*
         * device mapped pages can only be returned if the
@@ -1012,8 +1274,9 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
        if (!*pgmap)
                return ERR_PTR(-EFAULT);
        page = pfn_to_page(pfn);
-       if (!try_grab_page(page, flags))
-               page = ERR_PTR(-ENOMEM);
+       ret = try_grab_page(page, flags);
+       if (ret)
+               page = ERR_PTR(ret);
 
        return page;
 }
@@ -1024,6 +1287,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 {
        spinlock_t *dst_ptl, *src_ptl;
        struct page *src_page;
+       struct folio *src_folio;
        pmd_t pmd;
        pgtable_t pgtable = NULL;
        int ret = -ENOMEM;
@@ -1090,11 +1354,12 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 
        src_page = pmd_page(pmd);
        VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
+       src_folio = page_folio(src_page);
 
-       get_page(src_page);
-       if (unlikely(page_try_dup_anon_rmap(src_page, true, src_vma))) {
+       folio_get(src_folio);
+       if (unlikely(folio_try_dup_anon_rmap_pmd(src_folio, src_page, src_vma))) {
                /* Page maybe pinned: split and retry the fault on PTEs. */
-               put_page(src_page);
+               folio_put(src_folio);
                pte_free(dst_mm, pgtable);
                spin_unlock(src_ptl);
                spin_unlock(dst_ptl);
@@ -1121,15 +1386,15 @@ out:
 
 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
 static void touch_pud(struct vm_area_struct *vma, unsigned long addr,
-               pud_t *pud, int flags)
+                     pud_t *pud, bool write)
 {
        pud_t _pud;
 
        _pud = pud_mkyoung(*pud);
-       if (flags & FOLL_WRITE)
+       if (write)
                _pud = pud_mkdirty(_pud);
        if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK,
-                               pud, _pud, flags & FOLL_WRITE))
+                                 pud, _pud, write))
                update_mmu_cache_pud(vma, addr, pud);
 }
 
@@ -1139,24 +1404,20 @@ struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
        unsigned long pfn = pud_pfn(*pud);
        struct mm_struct *mm = vma->vm_mm;
        struct page *page;
+       int ret;
 
        assert_spin_locked(pud_lockptr(mm, pud));
 
        if (flags & FOLL_WRITE && !pud_write(*pud))
                return NULL;
 
-       /* FOLL_GET and FOLL_PIN are mutually exclusive. */
-       if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
-                        (FOLL_PIN | FOLL_GET)))
-               return NULL;
-
        if (pud_present(*pud) && pud_devmap(*pud))
                /* pass */;
        else
                return NULL;
 
        if (flags & FOLL_TOUCH)
-               touch_pud(vma, addr, pud, flags);
+               touch_pud(vma, addr, pud, flags & FOLL_WRITE);
 
        /*
         * device mapped pages can only be returned if the
@@ -1172,8 +1433,10 @@ struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
        if (!*pgmap)
                return ERR_PTR(-EFAULT);
        page = pfn_to_page(pfn);
-       if (!try_grab_page(page, flags))
-               page = ERR_PTR(-ENOMEM);
+
+       ret = try_grab_page(page, flags);
+       if (ret)
+               page = ERR_PTR(ret);
 
        return page;
 }
@@ -1205,8 +1468,8 @@ int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        }
 
        /*
-        * TODO: once we support anonymous pages, use page_try_dup_anon_rmap()
-        * and split if duplicating fails.
+        * TODO: once we support anonymous pages, use
+        * folio_try_dup_anon_rmap_*() and split if duplicating fails.
         */
        pudp_set_wrprotect(src_mm, addr, src_pud);
        pud = pud_mkold(pud_wrprotect(pud));
@@ -1221,21 +1484,13 @@ out_unlock:
 
 void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
 {
-       pud_t entry;
-       unsigned long haddr;
        bool write = vmf->flags & FAULT_FLAG_WRITE;
 
        vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud);
        if (unlikely(!pud_same(*vmf->pud, orig_pud)))
                goto unlock;
 
-       entry = pud_mkyoung(orig_pud);
-       if (write)
-               entry = pud_mkdirty(entry);
-       haddr = vmf->address & HPAGE_PUD_MASK;
-       if (pudp_set_access_flags(vmf->vma, haddr, vmf->pud, entry, write))
-               update_mmu_cache_pud(vmf->vma, vmf->address, vmf->pud);
-
+       touch_pud(vmf->vma, vmf->address, vmf->pud, write);
 unlock:
        spin_unlock(vmf->ptl);
 }
@@ -1243,21 +1498,13 @@ unlock:
 
 void huge_pmd_set_accessed(struct vm_fault *vmf)
 {
-       pmd_t entry;
-       unsigned long haddr;
        bool write = vmf->flags & FAULT_FLAG_WRITE;
-       pmd_t orig_pmd = vmf->orig_pmd;
 
        vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
-       if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
+       if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd)))
                goto unlock;
 
-       entry = pmd_mkyoung(orig_pmd);
-       if (write)
-               entry = pmd_mkdirty(entry);
-       haddr = vmf->address & HPAGE_PMD_MASK;
-       if (pmdp_set_access_flags(vmf->vma, haddr, vmf->pmd, entry, write))
-               update_mmu_cache_pmd(vmf->vma, vmf->address, vmf->pmd);
+       touch_pmd(vmf->vma, vmf->address, vmf->pmd, write);
 
 unlock:
        spin_unlock(vmf->ptl);
@@ -1267,6 +1514,7 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf)
 {
        const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
        struct vm_area_struct *vma = vmf->vma;
+       struct folio *folio;
        struct page *page;
        unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
        pmd_t orig_pmd = vmf->orig_pmd;
@@ -1274,9 +1522,6 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf)
        vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd);
        VM_BUG_ON_VMA(!vma->anon_vma, vma);
 
-       VM_BUG_ON(unshare && (vmf->flags & FAULT_FLAG_WRITE));
-       VM_BUG_ON(!unshare && !(vmf->flags & FAULT_FLAG_WRITE));
-
        if (is_huge_zero_pmd(orig_pmd))
                goto fallback;
 
@@ -1288,46 +1533,49 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf)
        }
 
        page = pmd_page(orig_pmd);
+       folio = page_folio(page);
        VM_BUG_ON_PAGE(!PageHead(page), page);
 
        /* Early check when only holding the PT lock. */
        if (PageAnonExclusive(page))
                goto reuse;
 
-       if (!trylock_page(page)) {
-               get_page(page);
+       if (!folio_trylock(folio)) {
+               folio_get(folio);
                spin_unlock(vmf->ptl);
-               lock_page(page);
+               folio_lock(folio);
                spin_lock(vmf->ptl);
                if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
                        spin_unlock(vmf->ptl);
-                       unlock_page(page);
-                       put_page(page);
+                       folio_unlock(folio);
+                       folio_put(folio);
                        return 0;
                }
-               put_page(page);
+               folio_put(folio);
        }
 
        /* Recheck after temporarily dropping the PT lock. */
        if (PageAnonExclusive(page)) {
-               unlock_page(page);
+               folio_unlock(folio);
                goto reuse;
        }
 
        /*
-        * See do_wp_page(): we can only reuse the page exclusively if there are
-        * no additional references. Note that we always drain the LRU
-        * pagevecs immediately after adding a THP.
+        * See do_wp_page(): we can only reuse the folio exclusively if
+        * there are no additional references. Note that we always drain
+        * the LRU cache immediately after adding a THP.
         */
-       if (page_count(page) > 1 + PageSwapCache(page) * thp_nr_pages(page))
+       if (folio_ref_count(folio) >
+                       1 + folio_test_swapcache(folio) * folio_nr_pages(folio))
                goto unlock_fallback;
-       if (PageSwapCache(page))
-               try_to_free_swap(page);
-       if (page_count(page) == 1) {
+       if (folio_test_swapcache(folio))
+               folio_free_swap(folio);
+       if (folio_ref_count(folio) == 1) {
                pmd_t entry;
 
-               page_move_anon_rmap(page, vma);
-               unlock_page(page);
+               folio_move_anon_rmap(folio, vma);
+               SetPageAnonExclusive(page);
+               folio_unlock(folio);
 reuse:
                if (unlikely(unshare)) {
                        spin_unlock(vmf->ptl);
@@ -1338,25 +1586,83 @@ reuse:
                if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1))
                        update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
                spin_unlock(vmf->ptl);
-               return VM_FAULT_WRITE;
+               return 0;
        }
 
 unlock_fallback:
-       unlock_page(page);
+       folio_unlock(folio);
        spin_unlock(vmf->ptl);
 fallback:
        __split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL);
        return VM_FAULT_FALLBACK;
 }
 
-/*
- * FOLL_FORCE can write to even unwritable pmd's, but only
- * after we've gone through a COW cycle and they are dirty.
- */
-static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
+static inline bool can_change_pmd_writable(struct vm_area_struct *vma,
+                                          unsigned long addr, pmd_t pmd)
 {
-       return pmd_write(pmd) ||
-              ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
+       struct page *page;
+
+       if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE)))
+               return false;
+
+       /* Don't touch entries that are not even readable (NUMA hinting). */
+       if (pmd_protnone(pmd))
+               return false;
+
+       /* Do we need write faults for softdirty tracking? */
+       if (vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd))
+               return false;
+
+       /* Do we need write faults for uffd-wp tracking? */
+       if (userfaultfd_huge_pmd_wp(vma, pmd))
+               return false;
+
+       if (!(vma->vm_flags & VM_SHARED)) {
+               /* See can_change_pte_writable(). */
+               page = vm_normal_page_pmd(vma, addr, pmd);
+               return page && PageAnon(page) && PageAnonExclusive(page);
+       }
+
+       /* See can_change_pte_writable(). */
+       return pmd_dirty(pmd);
+}
+
+/* FOLL_FORCE can write to even unwritable PMDs in COW mappings. */
+static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page,
+                                       struct vm_area_struct *vma,
+                                       unsigned int flags)
+{
+       /* If the pmd is writable, we can write to the page. */
+       if (pmd_write(pmd))
+               return true;
+
+       /* Maybe FOLL_FORCE is set to override it? */
+       if (!(flags & FOLL_FORCE))
+               return false;
+
+       /* But FOLL_FORCE has no effect on shared mappings */
+       if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
+               return false;
+
+       /* ... or read-only private ones */
+       if (!(vma->vm_flags & VM_MAYWRITE))
+               return false;
+
+       /* ... or already writable ones that just need to take a write fault */
+       if (vma->vm_flags & VM_WRITE)
+               return false;
+
+       /*
+        * See can_change_pte_writable(): we broke COW and could map the page
+        * writable if we have an exclusive anonymous page ...
+        */
+       if (!page || !PageAnon(page) || !PageAnonExclusive(page))
+               return false;
+
+       /* ... and a write-fault isn't required for other reasons. */
+       if (vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd))
+               return false;
+       return !userfaultfd_huge_pmd_wp(vma, pmd);
 }
 
 struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
@@ -1365,40 +1671,41 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
                                   unsigned int flags)
 {
        struct mm_struct *mm = vma->vm_mm;
-       struct page *page = NULL;
+       struct page *page;
+       int ret;
 
        assert_spin_locked(pmd_lockptr(mm, pmd));
 
-       if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, flags))
-               goto out;
+       page = pmd_page(*pmd);
+       VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
+
+       if ((flags & FOLL_WRITE) &&
+           !can_follow_write_pmd(*pmd, page, vma, flags))
+               return NULL;
 
        /* Avoid dumping huge zero page */
        if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
                return ERR_PTR(-EFAULT);
 
-       /* Full NUMA hinting faults to serialise migration in fault paths */
-       if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
-               goto out;
-
-       page = pmd_page(*pmd);
-       VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
+       if (pmd_protnone(*pmd) && !gup_can_follow_protnone(vma, flags))
+               return NULL;
 
-       if (!pmd_write(*pmd) && gup_must_unshare(flags, page))
+       if (!pmd_write(*pmd) && gup_must_unshare(vma, flags, page))
                return ERR_PTR(-EMLINK);
 
        VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
                        !PageAnonExclusive(page), page);
 
-       if (!try_grab_page(page, flags))
-               return ERR_PTR(-ENOMEM);
+       ret = try_grab_page(page, flags);
+       if (ret)
+               return ERR_PTR(ret);
 
        if (flags & FOLL_TOUCH)
-               touch_pmd(vma, addr, pmd, flags);
+               touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);
 
        page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
        VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page);
 
-out:
        return page;
 }
 
@@ -1408,12 +1715,11 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
        struct vm_area_struct *vma = vmf->vma;
        pmd_t oldpmd = vmf->orig_pmd;
        pmd_t pmd;
-       struct page *page;
+       struct folio *folio;
        unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
-       int page_nid = NUMA_NO_NODE;
-       int target_nid, last_cpupid = -1;
-       bool migrated = false;
-       bool was_writable = pmd_savedwrite(oldpmd);
+       int nid = NUMA_NO_NODE;
+       int target_nid, last_cpupid = (-1 & LAST_CPUPID_MASK);
+       bool migrated = false, writable = false;
        int flags = 0;
 
        vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
@@ -1423,30 +1729,44 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
        }
 
        pmd = pmd_modify(oldpmd, vma->vm_page_prot);
-       page = vm_normal_page_pmd(vma, haddr, pmd);
-       if (!page)
+
+       /*
+        * Detect now whether the PMD could be writable; this information
+        * is only valid while holding the PT lock.
+        */
+       writable = pmd_write(pmd);
+       if (!writable && vma_wants_manual_pte_write_upgrade(vma) &&
+           can_change_pmd_writable(vma, vmf->address, pmd))
+               writable = true;
+
+       folio = vm_normal_folio_pmd(vma, haddr, pmd);
+       if (!folio)
                goto out_map;
 
        /* See similar comment in do_numa_page for explanation */
-       if (!was_writable)
+       if (!writable)
                flags |= TNF_NO_GROUP;
 
-       page_nid = page_to_nid(page);
-       last_cpupid = page_cpupid_last(page);
-       target_nid = numa_migrate_prep(page, vma, haddr, page_nid,
-                                      &flags);
-
+       nid = folio_nid(folio);
+       /*
+        * For memory tiering mode, cpupid of slow memory page is used
+        * to record page access time.  So use default value.
+        */
+       if (node_is_toptier(nid))
+               last_cpupid = folio_last_cpupid(folio);
+       target_nid = numa_migrate_prep(folio, vma, haddr, nid, &flags);
        if (target_nid == NUMA_NO_NODE) {
-               put_page(page);
+               folio_put(folio);
                goto out_map;
        }
 
        spin_unlock(vmf->ptl);
+       writable = false;
 
-       migrated = migrate_misplaced_page(page, vma, target_nid);
+       migrated = migrate_misplaced_folio(folio, vma, target_nid);
        if (migrated) {
                flags |= TNF_MIGRATED;
-               page_nid = target_nid;
+               nid = target_nid;
        } else {
                flags |= TNF_MIGRATE_FAIL;
                vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
@@ -1458,9 +1778,8 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
        }
 
 out:
-       if (page_nid != NUMA_NO_NODE)
-               task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR,
-                               flags);
+       if (nid != NUMA_NO_NODE)
+               task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags);
 
        return 0;
 
@@ -1468,8 +1787,8 @@ out_map:
        /* Restore the PMD */
        pmd = pmd_modify(oldpmd, vma->vm_page_prot);
        pmd = pmd_mkyoung(pmd);
-       if (was_writable)
-               pmd = pmd_mkwrite(pmd);
+       if (writable)
+               pmd = pmd_mkwrite(pmd, vma);
        set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
        update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
        spin_unlock(vmf->ptl);
@@ -1485,7 +1804,7 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 {
        spinlock_t *ptl;
        pmd_t orig_pmd;
-       struct page *page;
+       struct folio *folio;
        struct mm_struct *mm = tlb->mm;
        bool ret = false;
 
@@ -1505,15 +1824,15 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                goto out;
        }
 
-       page = pmd_page(orig_pmd);
+       folio = pfn_folio(pmd_pfn(orig_pmd));
        /*
-        * If other processes are mapping this page, we couldn't discard
-        * the page unless they all do MADV_FREE so let's skip the page.
+        * If other processes are mapping this folio, we couldn't discard
+        * the folio unless they all do MADV_FREE so let's skip the folio.
         */
-       if (total_mapcount(page) != 1)
+       if (folio_estimated_sharers(folio) != 1)
                goto out;
 
-       if (!trylock_page(page))
+       if (!folio_trylock(folio))
                goto out;
 
        /*
@@ -1521,17 +1840,17 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
         * will deactivate only them.
         */
        if (next - addr != HPAGE_PMD_SIZE) {
-               get_page(page);
+               folio_get(folio);
                spin_unlock(ptl);
-               split_huge_page(page);
-               unlock_page(page);
-               put_page(page);
+               split_folio(folio);
+               folio_unlock(folio);
+               folio_put(folio);
                goto out_unlocked;
        }
 
-       if (PageDirty(page))
-               ClearPageDirty(page);
-       unlock_page(page);
+       if (folio_test_dirty(folio))
+               folio_clear_dirty(folio);
+       folio_unlock(folio);
 
        if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
                pmdp_invalidate(vma, addr, pmd);
@@ -1542,7 +1861,7 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
        }
 
-       mark_page_lazyfree(page);
+       folio_mark_lazyfree(folio);
        ret = true;
 out:
        spin_unlock(ptl);
@@ -1578,6 +1897,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
         */
        orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmd,
                                                tlb->fullmm);
+       arch_check_zapped_pmd(vma, orig_pmd);
        tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
        if (vma_is_special_huge(vma)) {
                if (arch_needs_pgtable_deposit())
@@ -1587,12 +1907,14 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                zap_deposited_table(tlb->mm, pmd);
                spin_unlock(ptl);
        } else {
-               struct page *page = NULL;
+               struct folio *folio = NULL;
                int flush_needed = 1;
 
                if (pmd_present(orig_pmd)) {
-                       page = pmd_page(orig_pmd);
-                       page_remove_rmap(page, vma, true);
+                       struct page *page = pmd_page(orig_pmd);
+
+                       folio = page_folio(page);
+                       folio_remove_rmap_pmd(folio, page, vma);
                        VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
                        VM_BUG_ON_PAGE(!PageHead(page), page);
                } else if (thp_migration_supported()) {
@@ -1600,23 +1922,24 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 
                        VM_BUG_ON(!is_pmd_migration_entry(orig_pmd));
                        entry = pmd_to_swp_entry(orig_pmd);
-                       page = pfn_swap_entry_to_page(entry);
+                       folio = pfn_swap_entry_folio(entry);
                        flush_needed = 0;
                } else
                        WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
 
-               if (PageAnon(page)) {
+               if (folio_test_anon(folio)) {
                        zap_deposited_table(tlb->mm, pmd);
                        add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
                } else {
                        if (arch_needs_pgtable_deposit())
                                zap_deposited_table(tlb->mm, pmd);
-                       add_mm_counter(tlb->mm, mm_counter_file(page), -HPAGE_PMD_NR);
+                       add_mm_counter(tlb->mm, mm_counter_file(folio),
+                                      -HPAGE_PMD_NR);
                }
 
                spin_unlock(ptl);
                if (flush_needed)
-                       tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE);
+                       tlb_remove_page_size(tlb, &folio->page, HPAGE_PMD_SIZE);
        }
        return 1;
 }
@@ -1657,9 +1980,10 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
 
        /*
         * The destination pmd shouldn't be established, free_pgtables()
-        * should have release it.
+        * should have released it; but move_page_tables() might have already
+        * inserted a page table, if racing against shmem/file collapse.
         */
-       if (WARN_ON(!pmd_none(*new_pmd))) {
+       if (!pmd_none(*new_pmd)) {
                VM_BUG_ON(pmd_trans_huge(*new_pmd));
                return false;
        }
@@ -1686,7 +2010,7 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
                pmd = move_soft_dirty_pmd(pmd);
                set_pmd_at(mm, new_addr, new_pmd, pmd);
                if (force_flush)
-                       flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
+                       flush_pmd_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
                if (new_ptl != old_ptl)
                        spin_unlock(new_ptl);
                spin_unlock(old_ptl);
@@ -1709,11 +2033,10 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
        struct mm_struct *mm = vma->vm_mm;
        spinlock_t *ptl;
        pmd_t oldpmd, entry;
-       bool preserve_write;
-       int ret;
        bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
        bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
        bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
+       int ret = 1;
 
        tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
 
@@ -1724,38 +2047,42 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
        if (!ptl)
                return 0;
 
-       preserve_write = prot_numa && pmd_write(*pmd);
-       ret = 1;
-
 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
        if (is_swap_pmd(*pmd)) {
                swp_entry_t entry = pmd_to_swp_entry(*pmd);
-               struct page *page = pfn_swap_entry_to_page(entry);
+               struct folio *folio = pfn_swap_entry_folio(entry);
+               pmd_t newpmd;
 
                VM_BUG_ON(!is_pmd_migration_entry(*pmd));
                if (is_writable_migration_entry(entry)) {
-                       pmd_t newpmd;
                        /*
                         * A protection check is difficult so
                         * just be safe and disable write
                         */
-                       if (PageAnon(page))
+                       if (folio_test_anon(folio))
                                entry = make_readable_exclusive_migration_entry(swp_offset(entry));
                        else
                                entry = make_readable_migration_entry(swp_offset(entry));
                        newpmd = swp_entry_to_pmd(entry);
                        if (pmd_swp_soft_dirty(*pmd))
                                newpmd = pmd_swp_mksoft_dirty(newpmd);
-                       if (pmd_swp_uffd_wp(*pmd))
-                               newpmd = pmd_swp_mkuffd_wp(newpmd);
-                       set_pmd_at(mm, addr, pmd, newpmd);
+               } else {
+                       newpmd = *pmd;
                }
+
+               if (uffd_wp)
+                       newpmd = pmd_swp_mkuffd_wp(newpmd);
+               else if (uffd_wp_resolve)
+                       newpmd = pmd_swp_clear_uffd_wp(newpmd);
+               if (!pmd_same(*pmd, newpmd))
+                       set_pmd_at(mm, addr, pmd, newpmd);
                goto unlock;
        }
 #endif
 
        if (prot_numa) {
-               struct page *page;
+               struct folio *folio;
+               bool toptier;
                /*
                 * Avoid trapping faults against the zero page. The read-only
                 * data is likely to be read-cached on the local CPU and
@@ -1767,14 +2094,20 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                if (pmd_protnone(*pmd))
                        goto unlock;
 
-               page = pmd_page(*pmd);
+               folio = page_folio(pmd_page(*pmd));
+               toptier = node_is_toptier(folio_nid(folio));
                /*
                 * Skip scanning top tier node if normal numa
                 * balancing is disabled
                 */
                if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) &&
-                   node_is_toptier(page_to_nid(page)))
+                   toptier)
                        goto unlock;
+
+               if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
+                   !toptier)
+                       folio_xchg_access_time(folio,
+                                              jiffies_to_msecs(jiffies));
        }
        /*
         * In case prot_numa, we are under mmap_read_lock(mm). It's critical
@@ -1800,31 +2133,171 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
        oldpmd = pmdp_invalidate_ad(vma, addr, pmd);
 
        entry = pmd_modify(oldpmd, newprot);
-       if (preserve_write)
-               entry = pmd_mk_savedwrite(entry);
-       if (uffd_wp) {
-               entry = pmd_wrprotect(entry);
+       if (uffd_wp)
                entry = pmd_mkuffd_wp(entry);
-       } else if (uffd_wp_resolve) {
+       else if (uffd_wp_resolve)
                /*
                 * Leave the write bit to be handled by PF interrupt
                 * handler, then things like COW could be properly
                 * handled.
                 */
                entry = pmd_clear_uffd_wp(entry);
-       }
+
+       /* See change_pte_range(). */
+       if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) && !pmd_write(entry) &&
+           can_change_pmd_writable(vma, addr, entry))
+               entry = pmd_mkwrite(entry, vma);
+
        ret = HPAGE_PMD_NR;
        set_pmd_at(mm, addr, pmd, entry);
 
        if (huge_pmd_needs_flush(oldpmd, entry))
                tlb_flush_pmd_range(tlb, addr, HPAGE_PMD_SIZE);
-
-       BUG_ON(vma_is_anonymous(vma) && !preserve_write && pmd_write(entry));
 unlock:
        spin_unlock(ptl);
        return ret;
 }
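The uffd_wp and uffd_wp_resolve handling above is what backs userfaultfd write-protect on PMD-mapped THPs. A minimal user-space sketch of driving that path, assuming a kernel with UFFD_FEATURE_PAGEFAULT_FLAG_WP and an anonymous mapping large enough to be backed by a PMD (the size and the thin error handling are illustrative only):

#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	size_t len = 2UL << 20;			/* one PMD on x86-64, illustrative */
	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
	struct uffdio_api api = { .api = UFFD_API,
				  .features = UFFD_FEATURE_PAGEFAULT_FLAG_WP };
	struct uffdio_register reg = { 0 };
	struct uffdio_writeprotect wp = { 0 };
	char *area;

	if (uffd < 0 || ioctl(uffd, UFFDIO_API, &api))
		exit(1);

	area = mmap(NULL, len, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (area == MAP_FAILED)
		exit(1);
	madvise(area, len, MADV_HUGEPAGE);
	memset(area, 1, len);			/* fault in, ideally as one THP */

	reg.range.start = (unsigned long)area;
	reg.range.len = len;
	reg.mode = UFFDIO_REGISTER_MODE_WP;
	if (ioctl(uffd, UFFDIO_REGISTER, &reg))
		exit(1);

	/* Write-protect the range: change_huge_pmd() runs with uffd_wp set. */
	wp.range.start = (unsigned long)area;
	wp.range.len = len;
	wp.mode = UFFDIO_WRITEPROTECT_MODE_WP;
	ioctl(uffd, UFFDIO_WRITEPROTECT, &wp);

	/* Un-protecting is the same ioctl with mode 0 (the uffd_wp_resolve path). */
	wp.mode = 0;
	ioctl(uffd, UFFDIO_WRITEPROTECT, &wp);
	return 0;
}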
 
+#ifdef CONFIG_USERFAULTFD
+/*
+ * The PT lock for src_pmd and the vma locks for dst_vma/src_vma (for reading)
+ * are held by the caller; this function must return only after releasing the
+ * page table lock.
+ * Just move the page from src_pmd to dst_pmd if possible.
+ * Return zero if the page was moved, -EAGAIN if the operation needs to be
+ * repeated by the caller, or another error code in case of failure.
+ */
+int move_pages_huge_pmd(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, pmd_t dst_pmdval,
+                       struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
+                       unsigned long dst_addr, unsigned long src_addr)
+{
+       pmd_t _dst_pmd, src_pmdval;
+       struct page *src_page;
+       struct folio *src_folio;
+       struct anon_vma *src_anon_vma;
+       spinlock_t *src_ptl, *dst_ptl;
+       pgtable_t src_pgtable;
+       struct mmu_notifier_range range;
+       int err = 0;
+
+       src_pmdval = *src_pmd;
+       src_ptl = pmd_lockptr(mm, src_pmd);
+
+       lockdep_assert_held(src_ptl);
+       vma_assert_locked(src_vma);
+       vma_assert_locked(dst_vma);
+
+       /* Sanity checks before the operation */
+       if (WARN_ON_ONCE(!pmd_none(dst_pmdval)) || WARN_ON_ONCE(src_addr & ~HPAGE_PMD_MASK) ||
+           WARN_ON_ONCE(dst_addr & ~HPAGE_PMD_MASK)) {
+               spin_unlock(src_ptl);
+               return -EINVAL;
+       }
+
+       if (!pmd_trans_huge(src_pmdval)) {
+               spin_unlock(src_ptl);
+               if (is_pmd_migration_entry(src_pmdval)) {
+                       pmd_migration_entry_wait(mm, &src_pmdval);
+                       return -EAGAIN;
+               }
+               return -ENOENT;
+       }
+
+       src_page = pmd_page(src_pmdval);
+
+       if (!is_huge_zero_pmd(src_pmdval)) {
+               if (unlikely(!PageAnonExclusive(src_page))) {
+                       spin_unlock(src_ptl);
+                       return -EBUSY;
+               }
+
+               src_folio = page_folio(src_page);
+               folio_get(src_folio);
+       } else
+               src_folio = NULL;
+
+       spin_unlock(src_ptl);
+
+       flush_cache_range(src_vma, src_addr, src_addr + HPAGE_PMD_SIZE);
+       mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, src_addr,
+                               src_addr + HPAGE_PMD_SIZE);
+       mmu_notifier_invalidate_range_start(&range);
+
+       if (src_folio) {
+               folio_lock(src_folio);
+
+               /*
+                * split_huge_page walks the anon_vma chain without the page
+                * lock. Serialize against it with the anon_vma lock, the page
+                * lock is not enough.
+                */
+               src_anon_vma = folio_get_anon_vma(src_folio);
+               if (!src_anon_vma) {
+                       err = -EAGAIN;
+                       goto unlock_folio;
+               }
+               anon_vma_lock_write(src_anon_vma);
+       } else
+               src_anon_vma = NULL;
+
+       dst_ptl = pmd_lockptr(mm, dst_pmd);
+       double_pt_lock(src_ptl, dst_ptl);
+       if (unlikely(!pmd_same(*src_pmd, src_pmdval) ||
+                    !pmd_same(*dst_pmd, dst_pmdval))) {
+               err = -EAGAIN;
+               goto unlock_ptls;
+       }
+       if (src_folio) {
+               if (folio_maybe_dma_pinned(src_folio) ||
+                   !PageAnonExclusive(&src_folio->page)) {
+                       err = -EBUSY;
+                       goto unlock_ptls;
+               }
+
+               if (WARN_ON_ONCE(!folio_test_head(src_folio)) ||
+                   WARN_ON_ONCE(!folio_test_anon(src_folio))) {
+                       err = -EBUSY;
+                       goto unlock_ptls;
+               }
+
+               folio_move_anon_rmap(src_folio, dst_vma);
+               WRITE_ONCE(src_folio->index, linear_page_index(dst_vma, dst_addr));
+
+               src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd);
+               /* Folio got pinned from under us. Put it back and fail the move. */
+               if (folio_maybe_dma_pinned(src_folio)) {
+                       set_pmd_at(mm, src_addr, src_pmd, src_pmdval);
+                       err = -EBUSY;
+                       goto unlock_ptls;
+               }
+
+               _dst_pmd = mk_huge_pmd(&src_folio->page, dst_vma->vm_page_prot);
+               /* Follow mremap() behavior and treat the entry dirty after the move */
+               _dst_pmd = pmd_mkwrite(pmd_mkdirty(_dst_pmd), dst_vma);
+       } else {
+               src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd);
+               _dst_pmd = mk_huge_pmd(src_page, dst_vma->vm_page_prot);
+       }
+       set_pmd_at(mm, dst_addr, dst_pmd, _dst_pmd);
+
+       src_pgtable = pgtable_trans_huge_withdraw(mm, src_pmd);
+       pgtable_trans_huge_deposit(mm, dst_pmd, src_pgtable);
+unlock_ptls:
+       double_pt_unlock(src_ptl, dst_ptl);
+       if (src_anon_vma) {
+               anon_vma_unlock_write(src_anon_vma);
+               put_anon_vma(src_anon_vma);
+       }
+unlock_folio:
+       /* unblock rmap walks */
+       if (src_folio)
+               folio_unlock(src_folio);
+       mmu_notifier_invalidate_range_end(&range);
+       if (src_folio)
+               folio_put(src_folio);
+       return err;
+}
+#endif /* CONFIG_USERFAULTFD */
+
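move_pages_huge_pmd() is the PMD-sized backend of the UFFDIO_MOVE ioctl: instead of copying, it remaps the source THP at the destination. A hedged sketch of the user-space side, assuming a userfaultfd created with UFFD_FEATURE_MOVE and PMD-aligned, PMD-sized src/dst ranges (the helper name is made up for illustration):

#include <linux/userfaultfd.h>
#include <sys/ioctl.h>

/* Illustrative helper, not part of this patch. */
static int uffd_move_pmd(int uffd, unsigned long dst, unsigned long src,
			 unsigned long pmd_size)
{
	struct uffdio_move move = {
		.dst	= dst,
		.src	= src,
		.len	= pmd_size,
		.mode	= 0,
	};

	if (ioctl(uffd, UFFDIO_MOVE, &move) < 0)
		return -1;
	/* .move reports how many bytes were actually moved. */
	return move.move == (__s64)pmd_size ? 0 : -1;
}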
 /*
  * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise.
  *
@@ -1843,10 +2316,10 @@ spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
 }
 
 /*
- * Returns true if a given pud maps a thp, false otherwise.
+ * Returns page table lock pointer if a given pud maps a thp, NULL otherwise.
  *
- * Note that if it returns true, this routine returns without unlocking page
- * table lock. So callers must unlock it.
+ * Note that if it returns page table lock pointer, this routine returns without
+ * unlocking page table lock. So callers must unlock it.
  */
 spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma)
 {
@@ -1868,13 +2341,8 @@ int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
        ptl = __pud_trans_huge_lock(pud, vma);
        if (!ptl)
                return 0;
-       /*
-        * For architectures like ppc64 we look at deposited pgtable
-        * when calling pudp_huge_get_and_clear. So do the
-        * pgtable_trans_huge_withdraw after finishing pudp related
-        * operations.
-        */
-       pudp_huge_get_and_clear_full(tlb->mm, addr, pud, tlb->fullmm);
+
+       pudp_huge_get_and_clear_full(vma, addr, pud, tlb->fullmm);
        tlb_remove_pud_tlb_entry(tlb, pud, addr);
        if (vma_is_special_huge(vma)) {
                spin_unlock(ptl);
@@ -1896,7 +2364,7 @@ static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud,
 
        count_vm_event(THP_SPLIT_PUD);
 
-       pudp_huge_clear_flush_notify(vma, haddr, pud);
+       pudp_huge_clear_flush(vma, haddr, pud);
 }
 
 void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
@@ -1905,7 +2373,7 @@ void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
        spinlock_t *ptl;
        struct mmu_notifier_range range;
 
-       mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
+       mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
                                address & HPAGE_PUD_MASK,
                                (address & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE);
        mmu_notifier_invalidate_range_start(&range);
@@ -1916,11 +2384,7 @@ void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
 
 out:
        spin_unlock(ptl);
-       /*
-        * No need to double call mmu_notifier->invalidate_range() callback as
-        * the above pudp_huge_clear_flush_notify() did already call it.
-        */
-       mmu_notifier_invalidate_range_only_end(&range);
+       mmu_notifier_invalidate_range_end(&range);
 }
 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
 
@@ -1929,7 +2393,9 @@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
 {
        struct mm_struct *mm = vma->vm_mm;
        pgtable_t pgtable;
-       pmd_t _pmd;
+       pmd_t _pmd, old_pmd;
+       unsigned long addr;
+       pte_t *pte;
        int i;
 
        /*
@@ -1938,22 +2404,27 @@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
         * replacing a zero pmd write protected page with a zero pte write
         * protected page.
         *
-        * See Documentation/vm/mmu_notifier.rst
+        * See Documentation/mm/mmu_notifier.rst
         */
-       pmdp_huge_clear_flush(vma, haddr, pmd);
+       old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
 
        pgtable = pgtable_trans_huge_withdraw(mm, pmd);
        pmd_populate(mm, &_pmd, pgtable);
 
-       for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
-               pte_t *pte, entry;
-               entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
+       pte = pte_offset_map(&_pmd, haddr);
+       VM_BUG_ON(!pte);
+       for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
+               pte_t entry;
+
+               entry = pfn_pte(my_zero_pfn(addr), vma->vm_page_prot);
                entry = pte_mkspecial(entry);
-               pte = pte_offset_map(&_pmd, haddr);
-               VM_BUG_ON(!pte_none(*pte));
-               set_pte_at(mm, haddr, pte, entry);
-               pte_unmap(pte);
+               if (pmd_uffd_wp(old_pmd))
+                       entry = pte_mkuffd_wp(entry);
+               VM_BUG_ON(!pte_none(ptep_get(pte)));
+               set_pte_at(mm, addr, pte, entry);
+               pte++;
        }
+       pte_unmap(pte - 1);
        smp_wmb(); /* make pte visible before pmd */
        pmd_populate(mm, pmd, pgtable);
 }
@@ -1962,12 +2433,14 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
                unsigned long haddr, bool freeze)
 {
        struct mm_struct *mm = vma->vm_mm;
+       struct folio *folio;
        struct page *page;
        pgtable_t pgtable;
        pmd_t old_pmd, _pmd;
        bool young, write, soft_dirty, pmd_migration = false, uffd_wp = false;
-       bool anon_exclusive = false;
+       bool anon_exclusive = false, dirty = false;
        unsigned long addr;
+       pte_t *pte;
        int i;
 
        VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
@@ -1979,7 +2452,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
        count_vm_event(THP_SPLIT_PMD);
 
        if (!vma_is_anonymous(vma)) {
-               old_pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
+               old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
                /*
                 * We are going to unmap this huge page. So
                 * just go ahead and zap it
@@ -1992,25 +2465,26 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
                        swp_entry_t entry;
 
                        entry = pmd_to_swp_entry(old_pmd);
-                       page = pfn_swap_entry_to_page(entry);
+                       folio = pfn_swap_entry_folio(entry);
                } else {
                        page = pmd_page(old_pmd);
-                       if (!PageDirty(page) && pmd_dirty(old_pmd))
-                               set_page_dirty(page);
-                       if (!PageReferenced(page) && pmd_young(old_pmd))
-                               SetPageReferenced(page);
-                       page_remove_rmap(page, vma, true);
-                       put_page(page);
+                       folio = page_folio(page);
+                       if (!folio_test_dirty(folio) && pmd_dirty(old_pmd))
+                               folio_mark_dirty(folio);
+                       if (!folio_test_referenced(folio) && pmd_young(old_pmd))
+                               folio_set_referenced(folio);
+                       folio_remove_rmap_pmd(folio, page, vma);
+                       folio_put(folio);
                }
-               add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR);
+               add_mm_counter(mm, mm_counter_file(folio), -HPAGE_PMD_NR);
                return;
        }
 
        if (is_huge_zero_pmd(*pmd)) {
                /*
                 * FIXME: Do we want to invalidate secondary mmu by calling
-                * mmu_notifier_invalidate_range() see comments below inside
-                * __split_huge_pmd() ?
+                * mmu_notifier_arch_invalidate_secondary_tlbs() see comments below
+                * inside __split_huge_pmd() ?
                 *
                 * We are going from a zero huge page write protected to zero
                 * small page also write protected so it does not seem useful
@@ -2050,20 +2524,24 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
                write = is_writable_migration_entry(entry);
                if (PageAnon(page))
                        anon_exclusive = is_readable_exclusive_migration_entry(entry);
-               young = false;
+               young = is_migration_entry_young(entry);
+               dirty = is_migration_entry_dirty(entry);
                soft_dirty = pmd_swp_soft_dirty(old_pmd);
                uffd_wp = pmd_swp_uffd_wp(old_pmd);
        } else {
                page = pmd_page(old_pmd);
-               if (pmd_dirty(old_pmd))
-                       SetPageDirty(page);
+               folio = page_folio(page);
+               if (pmd_dirty(old_pmd)) {
+                       dirty = true;
+                       folio_set_dirty(folio);
+               }
                write = pmd_write(old_pmd);
                young = pmd_young(old_pmd);
                soft_dirty = pmd_soft_dirty(old_pmd);
                uffd_wp = pmd_uffd_wp(old_pmd);
 
-               VM_BUG_ON_PAGE(!page_count(page), page);
-               page_ref_add(page, HPAGE_PMD_NR - 1);
+               VM_WARN_ON_FOLIO(!folio_ref_count(folio), folio);
+               VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
 
                /*
                 * Without "freeze", we'll simply split the PMD, propagating the
@@ -2077,10 +2555,22 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
                 *
                 * In case we cannot clear PageAnonExclusive(), split the PMD
                 * only and let try_to_migrate_one() fail later.
+                *
+                * See folio_try_share_anon_rmap_pmd(): invalidate PMD first.
                 */
-               anon_exclusive = PageAnon(page) && PageAnonExclusive(page);
-               if (freeze && anon_exclusive && page_try_share_anon_rmap(page))
+               anon_exclusive = PageAnonExclusive(page);
+               if (freeze && anon_exclusive &&
+                   folio_try_share_anon_rmap_pmd(folio, page))
                        freeze = false;
+               if (!freeze) {
+                       rmap_t rmap_flags = RMAP_NONE;
+
+                       folio_ref_add(folio, HPAGE_PMD_NR - 1);
+                       if (anon_exclusive)
+                               rmap_flags |= RMAP_EXCLUSIVE;
+                       folio_add_anon_rmap_ptes(folio, page, HPAGE_PMD_NR,
+                                                vma, haddr, rmap_flags);
+               }
        }
 
        /*
@@ -2090,15 +2580,18 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
        pgtable = pgtable_trans_huge_withdraw(mm, pmd);
        pmd_populate(mm, &_pmd, pgtable);
 
-       for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
-               pte_t entry, *pte;
-               /*
-                * Note that NUMA hinting access restrictions are not
-                * transferred to avoid any possibility of altering
-                * permissions across VMAs.
-                */
-               if (freeze || pmd_migration) {
+       pte = pte_offset_map(&_pmd, haddr);
+       VM_BUG_ON(!pte);
+
+       /*
+        * Note that NUMA hinting access restrictions are not transferred to
+        * avoid any possibility of altering permissions across VMAs.
+        */
+       if (freeze || pmd_migration) {
+               for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
+                       pte_t entry;
                        swp_entry_t swp_entry;
+
                        if (write)
                                swp_entry = make_writable_migration_entry(
                                                        page_to_pfn(page + i));
@@ -2108,70 +2601,49 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
                        else
                                swp_entry = make_readable_migration_entry(
                                                        page_to_pfn(page + i));
+                       if (young)
+                               swp_entry = make_migration_entry_young(swp_entry);
+                       if (dirty)
+                               swp_entry = make_migration_entry_dirty(swp_entry);
                        entry = swp_entry_to_pte(swp_entry);
                        if (soft_dirty)
                                entry = pte_swp_mksoft_dirty(entry);
                        if (uffd_wp)
                                entry = pte_swp_mkuffd_wp(entry);
-               } else {
-                       entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot));
-                       entry = maybe_mkwrite(entry, vma);
-                       if (anon_exclusive)
-                               SetPageAnonExclusive(page + i);
-                       if (!write)
-                               entry = pte_wrprotect(entry);
-                       if (!young)
-                               entry = pte_mkold(entry);
-                       if (soft_dirty)
-                               entry = pte_mksoft_dirty(entry);
-                       if (uffd_wp)
-                               entry = pte_mkuffd_wp(entry);
-               }
-               pte = pte_offset_map(&_pmd, addr);
-               BUG_ON(!pte_none(*pte));
-               set_pte_at(mm, addr, pte, entry);
-               if (!pmd_migration)
-                       atomic_inc(&page[i]._mapcount);
-               pte_unmap(pte);
-       }
 
-       if (!pmd_migration) {
-               /*
-                * Set PG_double_map before dropping compound_mapcount to avoid
-                * false-negative page_mapped().
-                */
-               if (compound_mapcount(page) > 1 &&
-                   !TestSetPageDoubleMap(page)) {
-                       for (i = 0; i < HPAGE_PMD_NR; i++)
-                               atomic_inc(&page[i]._mapcount);
-               }
-
-               lock_page_memcg(page);
-               if (atomic_add_negative(-1, compound_mapcount_ptr(page))) {
-                       /* Last compound_mapcount is gone. */
-                       __mod_lruvec_page_state(page, NR_ANON_THPS,
-                                               -HPAGE_PMD_NR);
-                       if (TestClearPageDoubleMap(page)) {
-                               /* No need in mapcount reference anymore */
-                               for (i = 0; i < HPAGE_PMD_NR; i++)
-                                       atomic_dec(&page[i]._mapcount);
-                       }
+                       VM_WARN_ON(!pte_none(ptep_get(pte + i)));
+                       set_pte_at(mm, addr, pte + i, entry);
                }
-               unlock_page_memcg(page);
-
-               /* Above is effectively page_remove_rmap(page, vma, true) */
-               munlock_vma_page(page, vma, true);
-       }
+       } else {
+               pte_t entry;
+
+               entry = mk_pte(page, READ_ONCE(vma->vm_page_prot));
+               if (write)
+                       entry = pte_mkwrite(entry, vma);
+               if (!young)
+                       entry = pte_mkold(entry);
+               /* NOTE: this may set soft-dirty too on some archs */
+               if (dirty)
+                       entry = pte_mkdirty(entry);
+               if (soft_dirty)
+                       entry = pte_mksoft_dirty(entry);
+               if (uffd_wp)
+                       entry = pte_mkuffd_wp(entry);
+
+               for (i = 0; i < HPAGE_PMD_NR; i++)
+                       VM_WARN_ON(!pte_none(ptep_get(pte + i)));
+
+               set_ptes(mm, haddr, pte, entry, HPAGE_PMD_NR);
+       }
+       pte_unmap(pte);
+
+       if (!pmd_migration)
+               folio_remove_rmap_pmd(folio, page, vma);
+       if (freeze)
+               put_page(page);
 
        smp_wmb(); /* make pte visible before pmd */
        pmd_populate(mm, pmd, pgtable);
-
-       if (freeze) {
-               for (i = 0; i < HPAGE_PMD_NR; i++) {
-                       page_remove_rmap(page + i, vma, false);
-                       put_page(page + i);
-               }
-       }
 }
 
 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
@@ -2180,7 +2652,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
        spinlock_t *ptl;
        struct mmu_notifier_range range;
 
-       mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
+       mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
                                address & HPAGE_PMD_MASK,
                                (address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE);
        mmu_notifier_invalidate_range_start(&range);
@@ -2195,6 +2667,10 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 
        if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) ||
            is_pmd_migration_entry(*pmd)) {
+               /*
+                * It's safe to call pmd_page when folio is set because it's
+                * guaranteed that pmd is present.
+                */
                if (folio && folio != page_folio(pmd_page(*pmd)))
                        goto out;
                __split_huge_pmd_locked(vma, pmd, range.start, freeze);
@@ -2202,44 +2678,17 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 
 out:
        spin_unlock(ptl);
-       /*
-        * No need to double call mmu_notifier->invalidate_range() callback.
-        * They are 3 cases to consider inside __split_huge_pmd_locked():
-        *  1) pmdp_huge_clear_flush_notify() call invalidate_range() obvious
-        *  2) __split_huge_zero_page_pmd() read only zero page and any write
-        *    fault will trigger a flush_notify before pointing to a new page
-        *    (it is fine if the secondary mmu keeps pointing to the old zero
-        *    page in the meantime)
-        *  3) Split a huge pmd into pte pointing to the same page. No need
-        *     to invalidate secondary tlb entry they are all still valid.
-        *     any further changes to individual pte will notify. So no need
-        *     to call mmu_notifier->invalidate_range()
-        */
-       mmu_notifier_invalidate_range_only_end(&range);
+       mmu_notifier_invalidate_range_end(&range);
 }
 
 void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
                bool freeze, struct folio *folio)
 {
-       pgd_t *pgd;
-       p4d_t *p4d;
-       pud_t *pud;
-       pmd_t *pmd;
-
-       pgd = pgd_offset(vma->vm_mm, address);
-       if (!pgd_present(*pgd))
-               return;
+       pmd_t *pmd = mm_find_pmd(vma->vm_mm, address);
 
-       p4d = p4d_offset(pgd, address);
-       if (!p4d_present(*p4d))
+       if (!pmd)
                return;
 
-       pud = pud_offset(p4d, address);
-       if (!pud_present(*pud))
-               return;
-
-       pmd = pmd_offset(pud, address);
-
        __split_huge_pmd(vma, pmd, address, freeze, folio);
 }
 
@@ -2267,24 +2716,26 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
        split_huge_pmd_if_needed(vma, end);
 
        /*
-        * If we're also updating the vma->vm_next->vm_start,
+        * If we're also updating the next vma vm_start,
         * check if we need to split it.
         */
        if (adjust_next > 0) {
-               struct vm_area_struct *next = vma->vm_next;
+               struct vm_area_struct *next = find_vma(vma->vm_mm, vma->vm_end);
                unsigned long nstart = next->vm_start;
                nstart += adjust_next;
                split_huge_pmd_if_needed(next, nstart);
        }
 }
 
-static void unmap_page(struct page *page)
+static void unmap_folio(struct folio *folio)
 {
-       struct folio *folio = page_folio(page);
-       enum ttu_flags ttu_flags = TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD |
-               TTU_SYNC;
+       enum ttu_flags ttu_flags = TTU_RMAP_LOCKED | TTU_SYNC |
+               TTU_BATCH_FLUSH;
 
-       VM_BUG_ON_PAGE(!PageHead(page), page);
+       VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
+
+       if (folio_test_pmd_mappable(folio))
+               ttu_flags |= TTU_SPLIT_HUGE_PMD;
 
        /*
         * Anon pages need migration entries to preserve them, but file
@@ -2295,13 +2746,15 @@ static void unmap_page(struct page *page)
                try_to_migrate(folio, ttu_flags);
        else
                try_to_unmap(folio, ttu_flags | TTU_IGNORE_MLOCK);
+
+       try_to_unmap_flush();
 }
 
 static void remap_page(struct folio *folio, unsigned long nr)
 {
        int i = 0;
 
-       /* If unmap_page() uses try_to_migrate() on file, remove this check */
+       /* If unmap_folio() uses try_to_migrate() on file, remove this check */
        if (!folio_test_anon(folio))
                return;
        for (;;) {
@@ -2317,7 +2770,6 @@ static void lru_add_page_tail(struct page *head, struct page *tail,
                struct lruvec *lruvec, struct list_head *list)
 {
        VM_BUG_ON_PAGE(!PageHead(head), head);
-       VM_BUG_ON_PAGE(PageCompound(tail), head);
        VM_BUG_ON_PAGE(PageLRU(tail), head);
        lockdep_assert_held(&lruvec->lru_lock);
 
@@ -2337,10 +2789,17 @@ static void lru_add_page_tail(struct page *head, struct page *tail,
        }
 }
 
-static void __split_huge_page_tail(struct page *head, int tail,
-               struct lruvec *lruvec, struct list_head *list)
+static void __split_huge_page_tail(struct folio *folio, int tail,
+               struct lruvec *lruvec, struct list_head *list,
+               unsigned int new_order)
 {
+       struct page *head = &folio->page;
        struct page *page_tail = head + tail;
+       /*
+        * Careful: new_folio is not a "real" folio before we cleared PageTail.
+        * Don't pass it around before clear_compound_head().
+        */
+       struct folio *new_folio = (struct folio *)page_tail;
 
        VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail);
 
@@ -2351,7 +2810,7 @@ static void __split_huge_page_tail(struct page *head, int tail,
         * for example lock_page() which set PG_waiters.
         *
         * Note that for mapped sub-pages of an anonymous THP,
-        * PG_anon_exclusive has been cleared in unmap_page() and is stored in
+        * PG_anon_exclusive has been cleared in unmap_folio() and is stored in
         * the migration entry instead from where remap_page() will restore it.
         * We can still have PG_anon_exclusive set on effectively unmapped and
         * unreferenced sub-pages of an anonymous THP: we can simply drop
@@ -2368,17 +2827,29 @@ static void __split_huge_page_tail(struct page *head, int tail,
                         (1L << PG_workingset) |
                         (1L << PG_locked) |
                         (1L << PG_unevictable) |
-#ifdef CONFIG_64BIT
+#ifdef CONFIG_ARCH_USES_PG_ARCH_X
                         (1L << PG_arch_2) |
+                        (1L << PG_arch_3) |
 #endif
-                        (1L << PG_dirty)));
+                        (1L << PG_dirty) |
+                        LRU_GEN_MASK | LRU_REFS_MASK));
 
-       /* ->mapping in first tail page is compound_mapcount */
+       /* ->mapping in first and second tail page is replaced by other uses */
        VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
                        page_tail);
        page_tail->mapping = head->mapping;
        page_tail->index = head->index + tail;
-       page_tail->private = 0;
+
+       /*
+        * page->private should not be set in tail pages. Fix up and warn once
+        * if private is unexpectedly set.
+        */
+       if (unlikely(page_tail->private)) {
+               VM_WARN_ON_ONCE_PAGE(true, page_tail);
+               page_tail->private = 0;
+       }
+       if (folio_test_swapcache(folio))
+               new_folio->swap.val = folio->swap.val + tail;
 
        /* Page flags must be visible before we make the page non-compound. */
        smp_wmb();
@@ -2390,17 +2861,22 @@ static void __split_huge_page_tail(struct page *head, int tail,
         * which needs correct compound_head().
         */
        clear_compound_head(page_tail);
+       if (new_order) {
+               prep_compound_page(page_tail, new_order);
+               folio_prep_large_rmappable(new_folio);
+       }
 
        /* Finally unfreeze refcount. Additional reference from page cache. */
-       page_ref_unfreeze(page_tail, 1 + (!PageAnon(head) ||
-                                         PageSwapCache(head)));
+       page_ref_unfreeze(page_tail,
+               1 + ((!folio_test_anon(folio) || folio_test_swapcache(folio)) ?
+                            folio_nr_pages(new_folio) : 0));
 
-       if (page_is_young(head))
-               set_page_young(page_tail);
-       if (page_is_idle(head))
-               set_page_idle(page_tail);
+       if (folio_test_young(folio))
+               folio_set_young(new_folio);
+       if (folio_test_idle(folio))
+               folio_set_idle(new_folio);
 
-       page_cpupid_xchg_last(page_tail, page_cpupid_last(head));
+       folio_xchg_last_cpupid(new_folio, folio_last_cpupid(folio));
 
        /*
         * always add to the tail because some iterators expect new
@@ -2411,24 +2887,24 @@ static void __split_huge_page_tail(struct page *head, int tail,
 }
 
 static void __split_huge_page(struct page *page, struct list_head *list,
-               pgoff_t end)
+               pgoff_t end, unsigned int new_order)
 {
        struct folio *folio = page_folio(page);
        struct page *head = &folio->page;
        struct lruvec *lruvec;
        struct address_space *swap_cache = NULL;
        unsigned long offset = 0;
-       unsigned int nr = thp_nr_pages(head);
-       int i;
+       int i, nr_dropped = 0;
+       unsigned int new_nr = 1 << new_order;
+       int order = folio_order(folio);
+       unsigned int nr = 1 << order;
 
        /* complete memcg works before add pages to LRU */
-       split_page_memcg(head, nr);
-
-       if (PageAnon(head) && PageSwapCache(head)) {
-               swp_entry_t entry = { .val = page_private(head) };
+       split_page_memcg(head, order, new_order);
 
-               offset = swp_offset(entry);
-               swap_cache = swap_address_space(entry);
+       if (folio_test_anon(folio) && folio_test_swapcache(folio)) {
+               offset = swp_offset(folio->swap);
+               swap_cache = swap_address_space(folio->swap);
                xa_lock(&swap_cache->i_pages);
        }
 
@@ -2437,21 +2913,21 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 
        ClearPageHasHWPoisoned(head);
 
-       for (i = nr - 1; i >= 1; i--) {
-               __split_huge_page_tail(head, i, lruvec, list);
+       for (i = nr - new_nr; i >= new_nr; i -= new_nr) {
+               __split_huge_page_tail(folio, i, lruvec, list, new_order);
                /* Some pages can be beyond EOF: drop them from page cache */
                if (head[i].index >= end) {
                        struct folio *tail = page_folio(head + i);
 
-                       if (shmem_mapping(head->mapping))
-                               shmem_uncharge(head->mapping->host, 1);
+                       if (shmem_mapping(folio->mapping))
+                               nr_dropped++;
                        else if (folio_test_clear_dirty(tail))
                                folio_account_cleaned(tail,
                                        inode_to_wb(folio->mapping->host));
                        __filemap_remove_folio(tail, NULL);
                        folio_put(tail);
                } else if (!PageAnon(page)) {
-                       __xa_store(&head->mapping->i_pages, head[i].index,
+                       __xa_store(&folio->mapping->i_pages, head[i].index,
                                        head + i, 0);
                } else if (swap_cache) {
                        __xa_store(&swap_cache->i_pages, offset + i,
@@ -2459,41 +2935,55 @@ static void __split_huge_page(struct page *page, struct list_head *list,
                }
        }
 
-       ClearPageCompound(head);
+       if (!new_order)
+               ClearPageCompound(head);
+       else {
+               struct folio *new_folio = (struct folio *)head;
+
+               folio_set_order(new_folio, new_order);
+       }
        unlock_page_lruvec(lruvec);
        /* Caller disabled irqs, so they are still disabled here */
 
-       split_page_owner(head, nr);
+       split_page_owner(head, order, new_order);
 
        /* See comment in __split_huge_page_tail() */
-       if (PageAnon(head)) {
+       if (folio_test_anon(folio)) {
                /* Additional pin to swap cache */
-               if (PageSwapCache(head)) {
-                       page_ref_add(head, 2);
+               if (folio_test_swapcache(folio)) {
+                       folio_ref_add(folio, 1 + new_nr);
                        xa_unlock(&swap_cache->i_pages);
                } else {
-                       page_ref_inc(head);
+                       folio_ref_inc(folio);
                }
        } else {
                /* Additional pin to page cache */
-               page_ref_add(head, 2);
-               xa_unlock(&head->mapping->i_pages);
+               folio_ref_add(folio, 1 + new_nr);
+               xa_unlock(&folio->mapping->i_pages);
        }
        local_irq_enable();
 
+       if (nr_dropped)
+               shmem_uncharge(folio->mapping->host, nr_dropped);
        remap_page(folio, nr);
 
-       if (PageSwapCache(head)) {
-               swp_entry_t entry = { .val = page_private(head) };
+       if (folio_test_swapcache(folio))
+               split_swap_cluster(folio->swap);
 
-               split_swap_cluster(entry);
-       }
+       /*
+        * set page to its compound_head when split to non order-0 pages, so
+        * we can skip unlocking it below, since PG_locked is transferred to
+        * the compound_head of the page and the caller will unlock it.
+        */
+       if (new_order)
+               page = compound_head(page);
 
-       for (i = 0; i < nr; i++) {
+       for (i = 0; i < nr; i += new_nr) {
                struct page *subpage = head + i;
+               struct folio *new_folio = page_folio(subpage);
                if (subpage == page)
                        continue;
-               unlock_page(subpage);
+               folio_unlock(new_folio);
 
                /*
                 * Subpages may be freed if there wasn't any mapping
@@ -2502,7 +2992,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
                 * requires taking the lru_lock so we do the put_page
                 * of the tail pages after the split is complete.
                 */
-               put_page(subpage);
+               free_page_and_swap_cache(subpage);
        }
 }
 
@@ -2523,48 +3013,83 @@ bool can_split_folio(struct folio *folio, int *pextra_pins)
 }
 
 /*
- * This function splits huge page into normal pages. @page can point to any
- * subpage of huge page to split. Split doesn't change the position of @page.
+ * This function splits huge page into pages in @new_order. @page can point to
+ * any subpage of huge page to split. Split doesn't change the position of
+ * @page.
+ *
+ * NOTE: order-1 anonymous folio is not supported because _deferred_list,
+ * which is used by partially mapped folios, is stored in subpage 2 and an
+ * order-1 folio only has subpage 0 and 1. File-backed order-1 folios are OK,
+ * since they do not use _deferred_list.
  *
  * The caller must hold the only pin on the @page, otherwise split fails with -EBUSY.
  * The huge page must be locked.
  *
  * If @list is null, tail pages will be added to LRU list, otherwise, to @list.
  *
- * Both head page and tail pages will inherit mapping, flags, and so on from
- * the hugepage.
+ * Pages in new_order will inherit mapping, flags, and so on from the hugepage.
  *
- * GUP pin and PG_locked transferred to @page. Rest subpages can be freed if
- * they are not mapped.
+ * The GUP pin and PG_locked are transferred to @page or to the compound page
+ * @page belongs to. The remaining subpages can be freed if they are not mapped.
  *
  * Returns 0 if the hugepage is split successfully.
  * Returns -EBUSY if the page is pinned or if anon_vma disappeared from under
  * us.
  */
-int split_huge_page_to_list(struct page *page, struct list_head *list)
+int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
+                                    unsigned int new_order)
 {
        struct folio *folio = page_folio(page);
-       struct page *head = &folio->page;
-       struct deferred_split *ds_queue = get_deferred_split_queue(head);
-       XA_STATE(xas, &head->mapping->i_pages, head->index);
+       struct deferred_split *ds_queue = get_deferred_split_queue(folio);
+       /* reset xarray order to new order after split */
+       XA_STATE_ORDER(xas, &folio->mapping->i_pages, folio->index, new_order);
        struct anon_vma *anon_vma = NULL;
        struct address_space *mapping = NULL;
        int extra_pins, ret;
        pgoff_t end;
        bool is_hzp;
 
-       VM_BUG_ON_PAGE(!PageLocked(head), head);
-       VM_BUG_ON_PAGE(!PageCompound(head), head);
+       VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
+       VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
+
+       if (new_order >= folio_order(folio))
+               return -EINVAL;
+
+       /* Cannot split anonymous THP to order-1 */
+       if (new_order == 1 && folio_test_anon(folio)) {
+               VM_WARN_ONCE(1, "Cannot split to order-1 folio");
+               return -EINVAL;
+       }
 
-       is_hzp = is_huge_zero_page(head);
-       VM_WARN_ON_ONCE_PAGE(is_hzp, head);
-       if (is_hzp)
+       if (new_order) {
+               /* Only swapping a whole PMD-mapped folio is supported */
+               if (folio_test_swapcache(folio))
+                       return -EINVAL;
+               /* Split shmem folio to non-zero order not supported */
+               if (shmem_mapping(folio->mapping)) {
+                       VM_WARN_ONCE(1,
+                               "Cannot split shmem folio to non-0 order");
+                       return -EINVAL;
+               }
+               /* No split if the file system does not support large folio */
+               if (!mapping_large_folio_support(folio->mapping)) {
+                       VM_WARN_ONCE(1,
+                               "Cannot split file folio to non-0 order");
+                       return -EINVAL;
+               }
+       }
+
+       is_hzp = is_huge_zero_page(&folio->page);
+       if (is_hzp) {
+               pr_warn_ratelimited("Called split_huge_page for huge zero page\n");
                return -EBUSY;
+       }
 
-       if (PageWriteback(head))
+       if (folio_test_writeback(folio))
                return -EBUSY;
 
-       if (PageAnon(head)) {
+       if (folio_test_anon(folio)) {
                /*
                 * The caller does not necessarily hold an mmap_lock that would
                 * prevent the anon_vma disappearing so we first we take a
@@ -2573,7 +3098,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
                 * is taken to serialise against parallel split or collapse
                 * operations.
                 */
-               anon_vma = page_get_anon_vma(head);
+               anon_vma = folio_get_anon_vma(folio);
                if (!anon_vma) {
                        ret = -EBUSY;
                        goto out;
@@ -2582,7 +3107,9 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
                mapping = NULL;
                anon_vma_lock_write(anon_vma);
        } else {
-               mapping = head->mapping;
+               gfp_t gfp;
+
+               mapping = folio->mapping;
 
                /* Truncated ? */
                if (!mapping) {
@@ -2590,8 +3117,15 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
                        goto out;
                }
 
-               xas_split_alloc(&xas, head, compound_order(head),
-                               mapping_gfp_mask(mapping) & GFP_RECLAIM_MASK);
+               gfp = current_gfp_context(mapping_gfp_mask(mapping) &
+                                                       GFP_RECLAIM_MASK);
+
+               if (!filemap_release_folio(folio, gfp)) {
+                       ret = -EBUSY;
+                       goto out;
+               }
+
+               xas_split_alloc(&xas, folio, folio_order(folio), gfp);
                if (xas_error(&xas)) {
                        ret = xas_error(&xas);
                        goto out;
@@ -2605,7 +3139,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
                 * but on 32-bit, i_size_read() takes an irq-unsafe seqlock,
                 * which cannot be nested inside the page tree lock. So note
                 * end now: i_size itself may be changed at any moment, but
-                * head page lock is good enough to serialize the trimming.
+                * folio lock is good enough to serialize the trimming.
                 */
                end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
                if (shmem_mapping(mapping))
@@ -2613,52 +3147,62 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
        }
 
        /*
-        * Racy check if we can split the page, before unmap_page() will
+        * Racy check if we can split the page, before unmap_folio() will
         * split PMDs
         */
        if (!can_split_folio(folio, &extra_pins)) {
-               ret = -EBUSY;
+               ret = -EAGAIN;
                goto out_unlock;
        }
 
-       unmap_page(head);
+       unmap_folio(folio);
 
        /* block interrupt reentry in xa_lock and spinlock */
        local_irq_disable();
        if (mapping) {
                /*
-                * Check if the head page is present in page cache.
-                * We assume all tail are present too, if head is there.
+                * Check if the folio is present in page cache.
+                * We assume all tail are present too, if folio is there.
                 */
                xas_lock(&xas);
                xas_reset(&xas);
-               if (xas_load(&xas) != head)
+               if (xas_load(&xas) != folio)
                        goto fail;
        }
 
        /* Prevent deferred_split_scan() touching ->_refcount */
        spin_lock(&ds_queue->split_queue_lock);
-       if (page_ref_freeze(head, 1 + extra_pins)) {
-               if (!list_empty(page_deferred_list(head))) {
+       if (folio_ref_freeze(folio, 1 + extra_pins)) {
+               if (folio_order(folio) > 1 &&
+                   !list_empty(&folio->_deferred_list)) {
                        ds_queue->split_queue_len--;
-                       list_del(page_deferred_list(head));
+                       /*
+                        * Reinitialize page_deferred_list after removing the
+                        * page from the split_queue, otherwise a subsequent
+                        * split will see list corruption when checking the
+                        * page_deferred_list.
+                        */
+                       list_del_init(&folio->_deferred_list);
                }
                spin_unlock(&ds_queue->split_queue_lock);
                if (mapping) {
-                       int nr = thp_nr_pages(head);
-
-                       xas_split(&xas, head, thp_order(head));
-                       if (PageSwapBacked(head)) {
-                               __mod_lruvec_page_state(head, NR_SHMEM_THPS,
-                                                       -nr);
-                       } else {
-                               __mod_lruvec_page_state(head, NR_FILE_THPS,
-                                                       -nr);
-                               filemap_nr_thps_dec(mapping);
+                       int nr = folio_nr_pages(folio);
+
+                       xas_split(&xas, folio, folio_order(folio));
+                       if (folio_test_pmd_mappable(folio) &&
+                           new_order < HPAGE_PMD_ORDER) {
+                               if (folio_test_swapbacked(folio)) {
+                                       __lruvec_stat_mod_folio(folio,
+                                                       NR_SHMEM_THPS, -nr);
+                               } else {
+                                       __lruvec_stat_mod_folio(folio,
+                                                       NR_FILE_THPS, -nr);
+                                       filemap_nr_thps_dec(mapping);
+                               }
                        }
                }
 
-               __split_huge_page(page, list, end);
+               __split_huge_page(page, list, end, new_order);
                ret = 0;
        } else {
                spin_unlock(&ds_queue->split_queue_lock);
@@ -2667,7 +3211,7 @@ fail:
                        xas_unlock(&xas);
                local_irq_enable();
                remap_page(folio, folio_nr_pages(folio));
-               ret = -EBUSY;
+               ret = -EAGAIN;
        }
 
 out_unlock:
@@ -2683,52 +3227,71 @@ out:
        return ret;
 }
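As the function comment above requires, callers pin and lock the folio before asking for a split. A minimal sketch of that calling convention, modeled on the debugfs paths further below (the wrapper name is hypothetical, not part of this patch):

/* Illustrative calling convention only. */
static int try_split_folio_to_order(struct folio *folio, unsigned int new_order)
{
	int ret = -EAGAIN;

	if (!folio_try_get(folio))	/* hold our own reference across the split */
		return -EAGAIN;
	if (folio_trylock(folio)) {	/* the split requires the folio lock */
		ret = split_huge_page_to_list_to_order(&folio->page, NULL,
						       new_order);
		folio_unlock(folio);
	}
	folio_put(folio);
	return ret;
}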
 
-void free_transhuge_page(struct page *page)
+void folio_undo_large_rmappable(struct folio *folio)
 {
-       struct deferred_split *ds_queue = get_deferred_split_queue(page);
+       struct deferred_split *ds_queue;
        unsigned long flags;
 
+       if (folio_order(folio) <= 1)
+               return;
+
+       /*
+        * At this point, there is no one trying to add the folio to
+        * deferred_list. If folio is not in deferred_list, it's safe
+        * to check without acquiring the split_queue_lock.
+        */
+       if (data_race(list_empty(&folio->_deferred_list)))
+               return;
+
+       ds_queue = get_deferred_split_queue(folio);
        spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
-       if (!list_empty(page_deferred_list(page))) {
+       if (!list_empty(&folio->_deferred_list)) {
                ds_queue->split_queue_len--;
-               list_del(page_deferred_list(page));
+               list_del_init(&folio->_deferred_list);
        }
        spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
-       free_compound_page(page);
 }
 
-void deferred_split_huge_page(struct page *page)
+void deferred_split_folio(struct folio *folio)
 {
-       struct deferred_split *ds_queue = get_deferred_split_queue(page);
+       struct deferred_split *ds_queue = get_deferred_split_queue(folio);
 #ifdef CONFIG_MEMCG
-       struct mem_cgroup *memcg = page_memcg(compound_head(page));
+       struct mem_cgroup *memcg = folio_memcg(folio);
 #endif
        unsigned long flags;
 
-       VM_BUG_ON_PAGE(!PageTransHuge(page), page);
+       /*
+        * Order 1 folios have no space for a deferred list, but we also
+        * won't waste much memory by not adding them to the deferred list.
+        */
+       if (folio_order(folio) <= 1)
+               return;
 
        /*
         * The try_to_unmap() in page reclaim path might reach here too,
         * this may cause a race condition to corrupt deferred split queue.
-        * And, if page reclaim is already handling the same page, it is
+        * And, if page reclaim is already handling the same folio, it is
         * unnecessary to handle it again in shrinker.
         *
-        * Check PageSwapCache to determine if the page is being
-        * handled by page reclaim since THP swap would add the page into
+        * Check the swapcache flag to determine if the folio is being
+        * handled by page reclaim since THP swap would add the folio into
         * swap cache before calling try_to_unmap().
         */
-       if (PageSwapCache(page))
+       if (folio_test_swapcache(folio))
+               return;
+
+       if (!list_empty(&folio->_deferred_list))
                return;
 
        spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
-       if (list_empty(page_deferred_list(page))) {
+       if (list_empty(&folio->_deferred_list)) {
                count_vm_event(THP_DEFERRED_SPLIT_PAGE);
-               list_add_tail(page_deferred_list(page), &ds_queue->split_queue);
+               list_add_tail(&folio->_deferred_list, &ds_queue->split_queue);
                ds_queue->split_queue_len++;
 #ifdef CONFIG_MEMCG
                if (memcg)
-                       set_shrinker_bit(memcg, page_to_nid(page),
-                                        deferred_split_shrinker.id);
+                       set_shrinker_bit(memcg, folio_nid(folio),
+                                        deferred_split_shrinker->id);
 #endif
        }
        spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
@@ -2753,8 +3316,8 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
        struct pglist_data *pgdata = NODE_DATA(sc->nid);
        struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
        unsigned long flags;
-       LIST_HEAD(list), *pos, *next;
-       struct page *page;
+       LIST_HEAD(list);
+       struct folio *folio, *next;
        int split = 0;
 
 #ifdef CONFIG_MEMCG
@@ -2764,14 +3327,13 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
 
        spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
        /* Take pin on all head pages to avoid freeing them under us */
-       list_for_each_safe(pos, next, &ds_queue->split_queue) {
-               page = list_entry((void *)pos, struct page, deferred_list);
-               page = compound_head(page);
-               if (get_page_unless_zero(page)) {
-                       list_move(page_deferred_list(page), &list);
+       list_for_each_entry_safe(folio, next, &ds_queue->split_queue,
+                                                       _deferred_list) {
+               if (folio_try_get(folio)) {
+                       list_move(&folio->_deferred_list, &list);
                } else {
-                       /* We lost race with put_compound_page() */
-                       list_del_init(page_deferred_list(page));
+                       /* We lost race with folio_put() */
+                       list_del_init(&folio->_deferred_list);
                        ds_queue->split_queue_len--;
                }
                if (!--sc->nr_to_scan)
@@ -2779,16 +3341,15 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
        }
        spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
 
-       list_for_each_safe(pos, next, &list) {
-               page = list_entry((void *)pos, struct page, deferred_list);
-               if (!trylock_page(page))
+       list_for_each_entry_safe(folio, next, &list, _deferred_list) {
+               if (!folio_trylock(folio))
                        goto next;
                /* split_huge_page() removes page from list on success */
-               if (!split_huge_page(page))
+               if (!split_folio(folio))
                        split++;
-               unlock_page(page);
+               folio_unlock(folio);
 next:
-               put_page(page);
+               folio_put(folio);
        }
 
        spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
@@ -2804,46 +3365,50 @@ next:
        return split;
 }
 
-static struct shrinker deferred_split_shrinker = {
-       .count_objects = deferred_split_count,
-       .scan_objects = deferred_split_scan,
-       .seeks = DEFAULT_SEEKS,
-       .flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE |
-                SHRINKER_NONSLAB,
-};
-
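With the static shrinker definition removed here, deferred_split_shrinker becomes a dynamically allocated shrinker. Its setup happens in this file's init path (outside this section) and, in current kernels, roughly follows the shrinker_alloc() pattern sketched below; the name string is an assumption:

	deferred_split_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE |
						 SHRINKER_MEMCG_AWARE |
						 SHRINKER_NONSLAB,
						 "thp-deferred_split");
	if (!deferred_split_shrinker)
		return -ENOMEM;
	deferred_split_shrinker->count_objects = deferred_split_count;
	deferred_split_shrinker->scan_objects = deferred_split_scan;
	shrinker_register(deferred_split_shrinker);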
 #ifdef CONFIG_DEBUG_FS
 static void split_huge_pages_all(void)
 {
        struct zone *zone;
        struct page *page;
+       struct folio *folio;
        unsigned long pfn, max_zone_pfn;
        unsigned long total = 0, split = 0;
 
        pr_debug("Split all THPs\n");
-       for_each_populated_zone(zone) {
+       for_each_zone(zone) {
+               if (!managed_zone(zone))
+                       continue;
                max_zone_pfn = zone_end_pfn(zone);
                for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
-                       if (!pfn_valid(pfn))
-                               continue;
+                       int nr_pages;
 
-                       page = pfn_to_page(pfn);
-                       if (!get_page_unless_zero(page))
+                       page = pfn_to_online_page(pfn);
+                       if (!page || PageTail(page))
                                continue;
+                       folio = page_folio(page);
+                       if (!folio_try_get(folio))
+                               continue;
+
+                       if (unlikely(page_folio(page) != folio))
+                               goto next;
 
-                       if (zone != page_zone(page))
+                       if (zone != folio_zone(folio))
                                goto next;
 
-                       if (!PageHead(page) || PageHuge(page) || !PageLRU(page))
+                       if (!folio_test_large(folio)
+                               || folio_test_hugetlb(folio)
+                               || !folio_test_lru(folio))
                                goto next;
 
                        total++;
-                       lock_page(page);
-                       if (!split_huge_page(page))
+                       folio_lock(folio);
+                       nr_pages = folio_nr_pages(folio);
+                       if (!split_folio(folio))
                                split++;
-                       unlock_page(page);
+                       pfn += nr_pages - 1;
+                       folio_unlock(folio);
 next:
-                       put_page(page);
+                       folio_put(folio);
                        cond_resched();
                }
        }
@@ -2858,7 +3423,7 @@ static inline bool vma_not_suitable_for_thp_split(struct vm_area_struct *vma)
 }
 
 static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
-                               unsigned long vaddr_end)
+                               unsigned long vaddr_end, unsigned int new_order)
 {
        int ret = 0;
        struct task_struct *task;
@@ -2898,10 +3463,11 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
         * table filled with PTE-mapped THPs, each of which is distinct.
         */
        for (addr = vaddr_start; addr < vaddr_end; addr += PAGE_SIZE) {
-               struct vm_area_struct *vma = find_vma(mm, addr);
+               struct vm_area_struct *vma = vma_lookup(mm, addr);
                struct page *page;
+               struct folio *folio;
 
-               if (!vma || addr < vma->vm_start)
+               if (!vma)
                        break;
 
                /* skip special VMA and hugetlb VMA */
@@ -2913,27 +3479,35 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
                /* FOLL_DUMP to ignore special (like zero) pages */
                page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
 
-               if (IS_ERR(page))
-                       continue;
-               if (!page)
+               if (IS_ERR_OR_NULL(page))
                        continue;
 
-               if (!is_transparent_hugepage(page))
+               folio = page_folio(page);
+               if (!is_transparent_hugepage(folio))
+                       goto next;
+
+               if (new_order >= folio_order(folio))
                        goto next;
 
                total++;
-               if (!can_split_folio(page_folio(page), NULL))
+               /*
+                * For folios with private data, split_huge_page_to_list_to_order()
+                * will try to drop it before the split and then check whether the
+                * folio can still be split. So skip the check here.
+                */
+               if (!folio_test_private(folio) &&
+                   !can_split_folio(folio, NULL))
                        goto next;
 
-               if (!trylock_page(page))
+               if (!folio_trylock(folio))
                        goto next;
 
-               if (!split_huge_page(page))
+               if (!split_folio_to_order(folio, new_order))
                        split++;
 
-               unlock_page(page);
+               folio_unlock(folio);
 next:
-               put_page(page);
+               folio_put(folio);
                cond_resched();
        }
        mmap_read_unlock(mm);
@@ -2946,7 +3520,7 @@ out:
 }
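The new new_order argument selects the target order of the split instead of always going down to base pages: 0 preserves the old behaviour (split_folio() is the order-0 case), and folios whose current order is not larger than new_order are skipped. A minimal sketch, assuming an anonymous PMD-mapped folio on x86-64 (order 9 with 4KiB pages) on which the caller already holds a reference (illustrative only, not part of this patch):

/* Split one order-9 folio into 32 order-4 (64KiB) folios instead of 512 base pages. */
static int split_to_order_4(struct folio *folio)
{
        int err;

        folio_lock(folio);
        err = split_folio_to_order(folio, 4);   /* new_order == 0 would split to base pages */
        folio_unlock(folio);
        return err;
}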
 
 static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
-                               pgoff_t off_end)
+                               pgoff_t off_end, unsigned int new_order)
 {
        struct filename *file;
        struct file *candidate;
@@ -2970,28 +3544,30 @@ static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
        mapping = candidate->f_mapping;
 
        for (index = off_start; index < off_end; index += nr_pages) {
-               struct page *fpage = pagecache_get_page(mapping, index,
-                                               FGP_ENTRY | FGP_HEAD, 0);
+               struct folio *folio = filemap_get_folio(mapping, index);
 
                nr_pages = 1;
-               if (xa_is_value(fpage) || !fpage)
+               if (IS_ERR(folio))
                        continue;
 
-               if (!is_transparent_hugepage(fpage))
+               if (!folio_test_large(folio))
                        goto next;
 
                total++;
-               nr_pages = thp_nr_pages(fpage);
+               nr_pages = folio_nr_pages(folio);
 
-               if (!trylock_page(fpage))
+               if (new_order >= folio_order(folio))
                        goto next;
 
-               if (!split_huge_page(fpage))
+               if (!folio_trylock(folio))
+                       goto next;
+
+               if (!split_folio_to_order(folio, new_order))
                        split++;
 
-               unlock_page(fpage);
+               folio_unlock(folio);
 next:
-               put_page(fpage);
+               folio_put(folio);
                cond_resched();
        }
 
@@ -3011,10 +3587,14 @@ static ssize_t split_huge_pages_write(struct file *file, const char __user *buf,
 {
        static DEFINE_MUTEX(split_debug_mutex);
        ssize_t ret;
-       /* hold pid, start_vaddr, end_vaddr or file_path, off_start, off_end */
+       /*
+        * hold pid, start_vaddr, end_vaddr, new_order or
+        * file_path, off_start, off_end, new_order
+        */
        char input_buf[MAX_INPUT_BUF_SZ];
        int pid;
        unsigned long vaddr_start, vaddr_end;
+       unsigned int new_order = 0;
 
        ret = mutex_lock_interruptible(&split_debug_mutex);
        if (ret)
@@ -3043,29 +3623,29 @@ static ssize_t split_huge_pages_write(struct file *file, const char __user *buf,
                        goto out;
                }
 
-               ret = sscanf(buf, "0x%lx,0x%lx", &off_start, &off_end);
-               if (ret != 2) {
+               ret = sscanf(buf, "0x%lx,0x%lx,%d", &off_start, &off_end, &new_order);
+               if (ret != 2 && ret != 3) {
                        ret = -EINVAL;
                        goto out;
                }
-               ret = split_huge_pages_in_file(file_path, off_start, off_end);
+               ret = split_huge_pages_in_file(file_path, off_start, off_end, new_order);
                if (!ret)
                        ret = input_len;
 
                goto out;
        }
 
-       ret = sscanf(input_buf, "%d,0x%lx,0x%lx", &pid, &vaddr_start, &vaddr_end);
+       ret = sscanf(input_buf, "%d,0x%lx,0x%lx,%d", &pid, &vaddr_start, &vaddr_end, &new_order);
        if (ret == 1 && pid == 1) {
                split_huge_pages_all();
                ret = strlen(input_buf);
                goto out;
-       } else if (ret != 3) {
+       } else if (ret != 3 && ret != 4) {
                ret = -EINVAL;
                goto out;
        }
 
-       ret = split_huge_pages_pid(pid, vaddr_start, vaddr_end);
+       ret = split_huge_pages_pid(pid, vaddr_start, vaddr_end, new_order);
        if (!ret)
                ret = strlen(input_buf);
 out:
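Both input formats gain an optional trailing new_order field; when it is omitted, sscanf() matches only the first three conversions and new_order keeps its initialised value of 0, so existing users see no change in behaviour. A userspace sketch of the extended interface, assuming debugfs is mounted at /sys/kernel/debug and using a made-up pid and address range (illustrative only, not part of this patch):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        /* process form: "<pid>,0x<vaddr_start>,0x<vaddr_end>[,<new_order>]" */
        /* file-backed form: "<path>,0x<off_start>,0x<off_end>[,<new_order>]" */
        const char *cmd = "1234,0x7f0000000000,0x7f0000400000,4";
        int fd = open("/sys/kernel/debug/split_huge_pages", O_WRONLY);

        if (fd < 0) {
                perror("open split_huge_pages");
                return 1;
        }
        if (write(fd, cmd, strlen(cmd)) < 0)
                perror("write");
        close(fd);
        return 0;
}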
@@ -3093,6 +3673,7 @@ late_initcall(split_huge_pages_debugfs);
 int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
                struct page *page)
 {
+       struct folio *folio = page_folio(page);
        struct vm_area_struct *vma = pvmw->vma;
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address = pvmw->address;
@@ -3107,26 +3688,33 @@ int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
        flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
        pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
 
-       anon_exclusive = PageAnon(page) && PageAnonExclusive(page);
-       if (anon_exclusive && page_try_share_anon_rmap(page)) {
+       /* See folio_try_share_anon_rmap_pmd(): invalidate PMD first. */
+       anon_exclusive = folio_test_anon(folio) && PageAnonExclusive(page);
+       if (anon_exclusive && folio_try_share_anon_rmap_pmd(folio, page)) {
                set_pmd_at(mm, address, pvmw->pmd, pmdval);
                return -EBUSY;
        }
 
        if (pmd_dirty(pmdval))
-               set_page_dirty(page);
+               folio_mark_dirty(folio);
        if (pmd_write(pmdval))
                entry = make_writable_migration_entry(page_to_pfn(page));
        else if (anon_exclusive)
                entry = make_readable_exclusive_migration_entry(page_to_pfn(page));
        else
                entry = make_readable_migration_entry(page_to_pfn(page));
+       if (pmd_young(pmdval))
+               entry = make_migration_entry_young(entry);
+       if (pmd_dirty(pmdval))
+               entry = make_migration_entry_dirty(entry);
        pmdswp = swp_entry_to_pmd(entry);
        if (pmd_soft_dirty(pmdval))
                pmdswp = pmd_swp_mksoft_dirty(pmdswp);
+       if (pmd_uffd_wp(pmdval))
+               pmdswp = pmd_swp_mkuffd_wp(pmdswp);
        set_pmd_at(mm, address, pvmw->pmd, pmdswp);
-       page_remove_rmap(page, vma, true);
-       put_page(page);
+       folio_remove_rmap_pmd(folio, page, vma);
+       folio_put(folio);
        trace_set_migration_pmd(address, pmd_val(pmdswp));
 
        return 0;
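Note how state survives the migration: the accessed and dirty bits are folded into the swap entry itself (make_migration_entry_young()/make_migration_entry_dirty()), while soft-dirty and uffd-wp remain as software bits on the non-present swap PMD (pmd_swp_mksoft_dirty()/pmd_swp_mkuffd_wp()); remove_migration_pmd() below restores all four. A condensed sketch of the encode side, using the hypothetical helper name encode_pmd_migration_state() (illustrative only, not part of this patch):

static pmd_t encode_pmd_migration_state(swp_entry_t entry, pmd_t pmdval)
{
        pmd_t pmdswp;

        /* Accessed/dirty travel inside the swap entry. */
        if (pmd_young(pmdval))
                entry = make_migration_entry_young(entry);
        if (pmd_dirty(pmdval))
                entry = make_migration_entry_dirty(entry);

        pmdswp = swp_entry_to_pmd(entry);

        /* Soft-dirty and uffd-wp stay as software bits on the swap PMD. */
        if (pmd_soft_dirty(pmdval))
                pmdswp = pmd_swp_mksoft_dirty(pmdswp);
        if (pmd_uffd_wp(pmdval))
                pmdswp = pmd_swp_mkuffd_wp(pmdswp);
        return pmdswp;
}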
@@ -3134,10 +3722,11 @@ int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
 
 void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
 {
+       struct folio *folio = page_folio(new);
        struct vm_area_struct *vma = pvmw->vma;
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address = pvmw->address;
-       unsigned long mmun_start = address & HPAGE_PMD_MASK;
+       unsigned long haddr = address & HPAGE_PMD_MASK;
        pmd_t pmde;
        swp_entry_t entry;
 
@@ -3145,27 +3734,32 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
                return;
 
        entry = pmd_to_swp_entry(*pvmw->pmd);
-       get_page(new);
-       pmde = pmd_mkold(mk_huge_pmd(new, vma->vm_page_prot));
+       folio_get(folio);
+       pmde = mk_huge_pmd(new, READ_ONCE(vma->vm_page_prot));
        if (pmd_swp_soft_dirty(*pvmw->pmd))
                pmde = pmd_mksoft_dirty(pmde);
        if (is_writable_migration_entry(entry))
-               pmde = maybe_pmd_mkwrite(pmde, vma);
+               pmde = pmd_mkwrite(pmde, vma);
        if (pmd_swp_uffd_wp(*pvmw->pmd))
-               pmde = pmd_wrprotect(pmd_mkuffd_wp(pmde));
+               pmde = pmd_mkuffd_wp(pmde);
+       if (!is_migration_entry_young(entry))
+               pmde = pmd_mkold(pmde);
+       /* NOTE: pmd_mkdirty() may also set soft-dirty on some archs */
+       if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
+               pmde = pmd_mkdirty(pmde);
 
-       if (PageAnon(new)) {
-               rmap_t rmap_flags = RMAP_COMPOUND;
+       if (folio_test_anon(folio)) {
+               rmap_t rmap_flags = RMAP_NONE;
 
                if (!is_readable_migration_entry(entry))
                        rmap_flags |= RMAP_EXCLUSIVE;
 
-               page_add_anon_rmap(new, vma, mmun_start, rmap_flags);
+               folio_add_anon_rmap_pmd(folio, new, vma, haddr, rmap_flags);
        } else {
-               page_add_file_rmap(new, vma, true);
+               folio_add_file_rmap_pmd(folio, new, vma);
        }
-       VM_BUG_ON(pmd_write(pmde) && PageAnon(new) && !PageAnonExclusive(new));
-       set_pmd_at(mm, mmun_start, pvmw->pmd, pmde);
+       VM_BUG_ON(pmd_write(pmde) && folio_test_anon(folio) && !PageAnonExclusive(new));
+       set_pmd_at(mm, haddr, pvmw->pmd, pmde);
 
        /* No need to invalidate - it was non-present before */
        update_mmu_cache_pmd(vma, address, pvmw->pmd);