// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 2009  Red Hat, Inc.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/numa_balancing.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/shrinker.h>
#include <linux/mm_inline.h>
#include <linux/swapops.h>
#include <linux/backing-dev.h>
#include <linux/dax.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/pfn_t.h>
#include <linux/mman.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/migrate.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>
#include <linux/oom.h>
#include <linux/numa.h>
#include <linux/page_owner.h>
#include <linux/sched/sysctl.h>
#include <linux/memory-tiers.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"
#include "swap.h"

#define CREATE_TRACE_POINTS
#include <trace/events/thp.h>
/*
 * By default, transparent hugepage support is disabled in order to avoid
 * risking an increased memory footprint for applications that are not
 * guaranteed to benefit from it. When transparent hugepage support is
 * enabled, it is for all mappings, and khugepaged scans all mappings.
 * Defrag is invoked by khugepaged hugepage allocations and by page faults
 * for all hugepage allocations.
 */
unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
#endif
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
static struct shrinker *deferred_split_shrinker;
static unsigned long deferred_split_count(struct shrinker *shrink,
					  struct shrink_control *sc);
static unsigned long deferred_split_scan(struct shrinker *shrink,
					 struct shrink_control *sc);
static atomic_t huge_zero_refcount;
struct page *huge_zero_page __read_mostly;
unsigned long huge_zero_pfn __read_mostly = ~0UL;
unsigned long huge_anon_orders_always __read_mostly;
unsigned long huge_anon_orders_madvise __read_mostly;
unsigned long huge_anon_orders_inherit __read_mostly;
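
/*
 * Per-order policy bitmaps for anonymous THP: bit N covers order N.
 * "always" and "madvise" grant that order outright or only for
 * MADV_HUGEPAGE VMAs respectively; "inherit" defers to the global
 * "enabled" setting.
 */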
unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
					 unsigned long vm_flags, bool smaps,
					 bool in_pf, bool enforce_sysfs,
					 unsigned long orders)
{
	/* Check the intersection of requested and supported orders. */
	orders &= vma_is_anonymous(vma) ?
			THP_ORDERS_ALL_ANON : THP_ORDERS_ALL_FILE;
	if (!orders)
		return 0;

	if (!vma->vm_mm)		/* vdso */
		return 0;

	/*
	 * Explicitly disabled through madvise or prctl, or some
	 * architectures may disable THP for some mappings, for
	 * example, s390 kvm.
	 */
	if ((vm_flags & VM_NOHUGEPAGE) ||
	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
		return 0;
	/*
	 * If the hardware/firmware marked hugepage support disabled.
	 */
	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED))
		return 0;

	/* khugepaged doesn't collapse DAX vma, but page fault is fine. */
	if (vma_is_dax(vma))
		return in_pf ? orders : 0;

	/*
	 * khugepaged special VMA and hugetlb VMA.
	 * Must be checked after dax since some dax mappings may have
	 * VM_MIXEDMAP set.
	 */
	if (!in_pf && !smaps && (vm_flags & VM_NO_KHUGEPAGED))
		return 0;

	/*
	 * Check alignment for file vma and size for both file and anon vma by
	 * filtering out the unsuitable orders.
	 *
	 * Skip the check for page fault. Huge fault does the check in fault
	 * handlers.
	 */
	if (!in_pf) {
		int order = highest_order(orders);
		unsigned long addr;

		while (orders) {
			addr = vma->vm_end - (PAGE_SIZE << order);
			if (thp_vma_suitable_order(vma, addr, order))
				break;
			order = next_order(&orders, order);
		}

		if (!orders)
			return 0;
	}

	/*
	 * Enabled via shmem mount options or sysfs settings.
	 * Must be done before hugepage flags check since shmem has its
	 * own flags.
	 */
	if (!in_pf && shmem_file(vma->vm_file))
		return shmem_is_huge(file_inode(vma->vm_file), vma->vm_pgoff,
				     !enforce_sysfs, vma->vm_mm, vm_flags)
			? orders : 0;

	if (!vma_is_anonymous(vma)) {
		/*
		 * Enforce sysfs THP requirements as necessary. Anonymous vmas
		 * were already handled in thp_vma_allowable_orders().
		 */
		if (enforce_sysfs &&
		    (!hugepage_global_enabled() || (!(vm_flags & VM_HUGEPAGE) &&
						    !hugepage_global_always())))
			return 0;

		/*
		 * Trust that ->huge_fault() handlers know what they are doing
		 * in fault path.
		 */
		if (((in_pf || smaps)) && vma->vm_ops->huge_fault)
			return orders;
		/* Only regular file is valid in collapse path */
		if (((!in_pf || smaps)) && file_thp_enabled(vma))
			return orders;
		return 0;
	}

	if (vma_is_temporary_stack(vma))
		return 0;

	/*
	 * THPeligible bit of smaps should show 1 for proper VMAs even
	 * though anon_vma is not initialized yet.
	 *
	 * Allow page fault since anon_vma may be not initialized until
	 * the first page fault.
	 */
	if (!vma->anon_vma)
		return (smaps || in_pf) ? orders : 0;

	return orders;
}
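
/*
 * Huge zero page lifecycle: huge_zero_refcount is 0 while no page is
 * allocated. The allocator below sets it to 2 (one reference for the
 * caller, one held until the shrinker frees the page once only that
 * last reference remains).
 */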
static bool get_huge_zero_page(void)
{
	struct page *zero_page;
retry:
	if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
		return true;

	zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
			HPAGE_PMD_ORDER);
	if (!zero_page) {
		count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
		return false;
	}
	preempt_disable();
	if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
		preempt_enable();
		__free_pages(zero_page, compound_order(zero_page));
		goto retry;
	}
	WRITE_ONCE(huge_zero_pfn, page_to_pfn(zero_page));

	/* We take additional reference here. It will be put back by shrinker */
	atomic_set(&huge_zero_refcount, 2);
	preempt_enable();
	count_vm_event(THP_ZERO_PAGE_ALLOC);
	return true;
}
static void put_huge_zero_page(void)
{
	/*
	 * Counter should never go to zero here. Only shrinker can put
	 * last reference.
	 */
	BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
}
struct page *mm_get_huge_zero_page(struct mm_struct *mm)
{
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		return READ_ONCE(huge_zero_page);

	if (!get_huge_zero_page())
		return NULL;

	if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();

	return READ_ONCE(huge_zero_page);
}

void mm_put_huge_zero_page(struct mm_struct *mm)
{
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();
}
static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
					struct shrink_control *sc)
{
	/* we can free zero page only if last reference remains */
	return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
}

static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
		struct page *zero_page = xchg(&huge_zero_page, NULL);
		BUG_ON(zero_page == NULL);
		WRITE_ONCE(huge_zero_pfn, ~0UL);
		__free_pages(zero_page, compound_order(zero_page));
		return HPAGE_PMD_NR;
	}

	return 0;
}
static struct shrinker *huge_zero_page_shrinker;

#ifdef CONFIG_SYSFS
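/*
 * The knobs below live under /sys/kernel/mm/transparent_hugepage/,
 * e.g. "echo madvise > /sys/kernel/mm/transparent_hugepage/enabled".
 */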
static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	const char *output;

	if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags))
		output = "[always] madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags))
		output = "always [madvise] never";
	else
		output = "always madvise [never]";

	return sysfs_emit(buf, "%s\n", output);
}
static ssize_t enabled_store(struct kobject *kobj,
			     struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	ssize_t ret = count;

	if (sysfs_streq(buf, "always")) {
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else
		ret = -EINVAL;

	if (ret > 0) {
		int err = start_stop_khugepaged();

		if (err)
			ret = err;
	}
	return ret;
}

static struct kobj_attribute enabled_attr = __ATTR_RW(enabled);
ssize_t single_hugepage_flag_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf,
				  enum transparent_hugepage_flag flag)
{
	return sysfs_emit(buf, "%d\n",
			  !!test_bit(flag, &transparent_hugepage_flags));
}

ssize_t single_hugepage_flag_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count,
				   enum transparent_hugepage_flag flag)
{
	unsigned long value;
	int ret;

	ret = kstrtoul(buf, 10, &value);
	if (ret < 0)
		return ret;
	if (value > 1)
		return -EINVAL;

	if (value)
		set_bit(flag, &transparent_hugepage_flags);
	else
		clear_bit(flag, &transparent_hugepage_flags);

	return count;
}
static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	const char *output;

	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
		     &transparent_hugepage_flags))
		output = "[always] defer defer+madvise madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
			  &transparent_hugepage_flags))
		output = "always [defer] defer+madvise madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
			  &transparent_hugepage_flags))
		output = "always defer [defer+madvise] madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
			  &transparent_hugepage_flags))
		output = "always defer defer+madvise [madvise] never";
	else
		output = "always defer defer+madvise madvise [never]";

	return sysfs_emit(buf, "%s\n", output);
}
static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	if (sysfs_streq(buf, "always")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "defer+madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "defer")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else
		return -EINVAL;

	return count;
}
static struct kobj_attribute defrag_attr = __ATTR_RW(defrag);
static ssize_t use_zero_page_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
					 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static ssize_t use_zero_page_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
					  TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static struct kobj_attribute use_zero_page_attr = __ATTR_RW(use_zero_page);

static ssize_t hpage_pmd_size_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lu\n", HPAGE_PMD_SIZE);
}
static struct kobj_attribute hpage_pmd_size_attr =
	__ATTR_RO(hpage_pmd_size);
static struct attribute *hugepage_attr[] = {
	&enabled_attr.attr,
	&defrag_attr.attr,
	&use_zero_page_attr.attr,
	&hpage_pmd_size_attr.attr,
#ifdef CONFIG_SHMEM
	&shmem_enabled_attr.attr,
#endif
	NULL,
};

static const struct attribute_group hugepage_attr_group = {
	.attrs = hugepage_attr,
};
static void hugepage_exit_sysfs(struct kobject *hugepage_kobj);
static void thpsize_release(struct kobject *kobj);
static DEFINE_SPINLOCK(huge_anon_orders_lock);
static LIST_HEAD(thpsize_list);

struct thpsize {
	struct kobject kobj;
	struct list_head node;
	int order;
};

#define to_thpsize(kobj) container_of(kobj, struct thpsize, kobj)
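
/*
 * Each supported anonymous THP order gets its own directory,
 * /sys/kernel/mm/transparent_hugepage/hugepages-<size>kB/, whose
 * "enabled" file drives the huge_anon_orders_* bitmaps above.
 */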
static ssize_t thpsize_enabled_show(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	int order = to_thpsize(kobj)->order;
	const char *output;

	if (test_bit(order, &huge_anon_orders_always))
		output = "[always] inherit madvise never";
	else if (test_bit(order, &huge_anon_orders_inherit))
		output = "always [inherit] madvise never";
	else if (test_bit(order, &huge_anon_orders_madvise))
		output = "always inherit [madvise] never";
	else
		output = "always inherit madvise [never]";

	return sysfs_emit(buf, "%s\n", output);
}
static ssize_t thpsize_enabled_store(struct kobject *kobj,
				     struct kobj_attribute *attr,
				     const char *buf, size_t count)
{
	int order = to_thpsize(kobj)->order;
	ssize_t ret = count;

	if (sysfs_streq(buf, "always")) {
		spin_lock(&huge_anon_orders_lock);
		clear_bit(order, &huge_anon_orders_inherit);
		clear_bit(order, &huge_anon_orders_madvise);
		set_bit(order, &huge_anon_orders_always);
		spin_unlock(&huge_anon_orders_lock);
	} else if (sysfs_streq(buf, "inherit")) {
		spin_lock(&huge_anon_orders_lock);
		clear_bit(order, &huge_anon_orders_always);
		clear_bit(order, &huge_anon_orders_madvise);
		set_bit(order, &huge_anon_orders_inherit);
		spin_unlock(&huge_anon_orders_lock);
	} else if (sysfs_streq(buf, "madvise")) {
		spin_lock(&huge_anon_orders_lock);
		clear_bit(order, &huge_anon_orders_always);
		clear_bit(order, &huge_anon_orders_inherit);
		set_bit(order, &huge_anon_orders_madvise);
		spin_unlock(&huge_anon_orders_lock);
	} else if (sysfs_streq(buf, "never")) {
		spin_lock(&huge_anon_orders_lock);
		clear_bit(order, &huge_anon_orders_always);
		clear_bit(order, &huge_anon_orders_inherit);
		clear_bit(order, &huge_anon_orders_madvise);
		spin_unlock(&huge_anon_orders_lock);
	} else
		ret = -EINVAL;

	return ret;
}

static struct kobj_attribute thpsize_enabled_attr =
	__ATTR(enabled, 0644, thpsize_enabled_show, thpsize_enabled_store);
static struct attribute *thpsize_attrs[] = {
	&thpsize_enabled_attr.attr,
	NULL,
};

static const struct attribute_group thpsize_attr_group = {
	.attrs = thpsize_attrs,
};

static const struct kobj_type thpsize_ktype = {
	.release = &thpsize_release,
	.sysfs_ops = &kobj_sysfs_ops,
};
static struct thpsize *thpsize_create(int order, struct kobject *parent)
{
	unsigned long size = (PAGE_SIZE << order) / SZ_1K;
	struct thpsize *thpsize;
	int ret;

	thpsize = kzalloc(sizeof(*thpsize), GFP_KERNEL);
	if (!thpsize)
		return ERR_PTR(-ENOMEM);

	ret = kobject_init_and_add(&thpsize->kobj, &thpsize_ktype, parent,
				   "hugepages-%lukB", size);
	if (ret) {
		kfree(thpsize);
		return ERR_PTR(ret);
	}

	ret = sysfs_create_group(&thpsize->kobj, &thpsize_attr_group);
	if (ret) {
		kobject_put(&thpsize->kobj);
		return ERR_PTR(ret);
	}

	thpsize->order = order;
	return thpsize;
}
static void thpsize_release(struct kobject *kobj)
{
	kfree(to_thpsize(kobj));
}
static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	int err;
	struct thpsize *thpsize;
	unsigned long orders;
	int order;

	/*
	 * Default to setting PMD-sized THP to inherit the global setting and
	 * disable all other sizes. powerpc's PMD_ORDER isn't a compile-time
	 * constant so we have to do this here.
	 */
	huge_anon_orders_inherit = BIT(PMD_ORDER);

	*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
	if (unlikely(!*hugepage_kobj)) {
		pr_err("failed to create transparent hugepage kobject\n");
		return -ENOMEM;
	}

	err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto delete_obj;
	}

	err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto remove_hp_group;
	}

	orders = THP_ORDERS_ALL_ANON;
	order = highest_order(orders);
	while (orders) {
		thpsize = thpsize_create(order, *hugepage_kobj);
		if (IS_ERR(thpsize)) {
			pr_err("failed to create thpsize for order %d\n", order);
			err = PTR_ERR(thpsize);
			goto remove_all;
		}
		list_add(&thpsize->node, &thpsize_list);
		order = next_order(&orders, order);
	}

	return 0;

remove_all:
	hugepage_exit_sysfs(*hugepage_kobj);
	return err;
remove_hp_group:
	sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
delete_obj:
	kobject_put(*hugepage_kobj);
	return err;
}
static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
	struct thpsize *thpsize, *tmp;

	list_for_each_entry_safe(thpsize, tmp, &thpsize_list, node) {
		list_del(&thpsize->node);
		kobject_put(&thpsize->kobj);
	}

	sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
	sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
	kobject_put(hugepage_kobj);
}
#else
static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	return 0;
}

static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
}
#endif /* CONFIG_SYSFS */
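
/*
 * Two shrinkers are registered below: "thp-zero" frees the huge zero
 * page once only the implicit reference remains, and
 * "thp-deferred_split" splits queued partially-unmapped THPs under
 * memory pressure.
 */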
static int __init thp_shrinker_init(void)
{
	huge_zero_page_shrinker = shrinker_alloc(0, "thp-zero");
	if (!huge_zero_page_shrinker)
		return -ENOMEM;

	deferred_split_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE |
						 SHRINKER_MEMCG_AWARE |
						 SHRINKER_NONSLAB,
						 "thp-deferred_split");
	if (!deferred_split_shrinker) {
		shrinker_free(huge_zero_page_shrinker);
		return -ENOMEM;
	}

	huge_zero_page_shrinker->count_objects = shrink_huge_zero_page_count;
	huge_zero_page_shrinker->scan_objects = shrink_huge_zero_page_scan;
	shrinker_register(huge_zero_page_shrinker);

	deferred_split_shrinker->count_objects = deferred_split_count;
	deferred_split_shrinker->scan_objects = deferred_split_scan;
	shrinker_register(deferred_split_shrinker);

	return 0;
}

static void __init thp_shrinker_exit(void)
{
	shrinker_free(huge_zero_page_shrinker);
	shrinker_free(deferred_split_shrinker);
}
static int __init hugepage_init(void)
{
	int err;
	struct kobject *hugepage_kobj;

	if (!has_transparent_hugepage()) {
		transparent_hugepage_flags = 1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED;
		return -EINVAL;
	}

	/*
	 * hugepages can't be allocated by the buddy allocator
	 */
	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER > MAX_PAGE_ORDER);
	/*
	 * we use page->mapping and page->index in second tail page
	 * as list_head: assuming THP order >= 2
	 */
	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER < 2);

	err = hugepage_init_sysfs(&hugepage_kobj);
	if (err)
		goto err_sysfs;

	err = khugepaged_init();
	if (err)
		goto err_slab;

	err = thp_shrinker_init();
	if (err)
		goto err_shrinker;

	/*
	 * By default disable transparent hugepages on smaller systems,
	 * where the extra memory used could hurt more than TLB overhead
	 * is likely to save.  The admin can still enable it through /sys.
	 */
	if (totalram_pages() < (512 << (20 - PAGE_SHIFT))) {
		transparent_hugepage_flags = 0;
		return 0;
	}

	err = start_stop_khugepaged();
	if (err)
		goto err_khugepaged;

	return 0;
err_khugepaged:
	thp_shrinker_exit();
err_shrinker:
	khugepaged_destroy();
err_slab:
	hugepage_exit_sysfs(hugepage_kobj);
err_sysfs:
	return err;
}
subsys_initcall(hugepage_init);
static int __init setup_transparent_hugepage(char *str)
{
	int ret = 0;
	if (!str)
		goto out;
	if (!strcmp(str, "always")) {
		set_bit(TRANSPARENT_HUGEPAGE_FLAG,
			&transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			&transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	}
out:
	if (!ret)
		pr_warn("transparent_hugepage= cannot parse, ignored\n");
	return ret;
}
__setup("transparent_hugepage=", setup_transparent_hugepage);
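
/*
 * The __setup() handler above is the boot-time equivalent of the sysfs
 * "enabled" knob, e.g. booting with "transparent_hugepage=madvise" on
 * the kernel command line.
 */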
pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pmd = pmd_mkwrite(pmd, vma);
	return pmd;
}
#ifdef CONFIG_MEMCG
static inline
struct deferred_split *get_deferred_split_queue(struct folio *folio)
{
	struct mem_cgroup *memcg = folio_memcg(folio);
	struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));

	if (memcg)
		return &memcg->deferred_split_queue;
	else
		return &pgdat->deferred_split_queue;
}
#else
static inline
struct deferred_split *get_deferred_split_queue(struct folio *folio)
{
	struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));

	return &pgdat->deferred_split_queue;
}
#endif
void folio_prep_large_rmappable(struct folio *folio)
{
	VM_BUG_ON_FOLIO(folio_order(folio) < 2, folio);
	INIT_LIST_HEAD(&folio->_deferred_list);
	folio_set_large_rmappable(folio);
}

static inline bool is_transparent_hugepage(struct folio *folio)
{
	if (!folio_test_large(folio))
		return false;

	return is_huge_zero_page(&folio->page) ||
	       folio_test_large_rmappable(folio);
}
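
/*
 * Worked example for the helper below with size == PMD_SIZE (2M on
 * x86-64): padding the search length by 2M guarantees some 2M-aligned
 * boundary lies inside the returned range; "ret += (off - ret) &
 * (size - 1)" then shifts the mapping so the virtual address and the
 * file offset share the same alignment within a 2M window, which is
 * what makes PMD mappings of the file possible.
 */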
static unsigned long __thp_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len,
		loff_t off, unsigned long flags, unsigned long size)
{
	loff_t off_end = off + len;
	loff_t off_align = round_up(off, size);
	unsigned long len_pad, ret;

	if (off_end <= off_align || (off_end - off_align) < size)
		return 0;

	len_pad = len + size;
	if (len_pad < len || (off + len_pad) < off)
		return 0;

	ret = current->mm->get_unmapped_area(filp, addr, len_pad,
					     off >> PAGE_SHIFT, flags);

	/*
	 * The failure might be due to length padding. The caller will retry
	 * without the padding.
	 */
	if (IS_ERR_VALUE(ret))
		return 0;

	/*
	 * Do not try to align to THP boundary if allocation at the address
	 * hint succeeds.
	 */
	if (ret == addr)
		return addr;

	ret += (off - ret) & (size - 1);
	return ret;
}
unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	unsigned long ret;
	loff_t off = (loff_t)pgoff << PAGE_SHIFT;

	ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE);
	if (ret)
		return ret;

	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
}
EXPORT_SYMBOL_GPL(thp_get_unmapped_area);
static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
			struct page *page, gfp_t gfp)
{
	struct vm_area_struct *vma = vmf->vma;
	struct folio *folio = page_folio(page);
	pgtable_t pgtable;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
	vm_fault_t ret = 0;

	VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);

	if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
		folio_put(folio);
		count_vm_event(THP_FAULT_FALLBACK);
		count_vm_event(THP_FAULT_FALLBACK_CHARGE);
		return VM_FAULT_FALLBACK;
	}
	folio_throttle_swaprate(folio, gfp);

	pgtable = pte_alloc_one(vma->vm_mm);
	if (unlikely(!pgtable)) {
		ret = VM_FAULT_OOM;
		goto release;
	}

	clear_huge_page(page, vmf->address, HPAGE_PMD_NR);
	/*
	 * The memory barrier inside __folio_mark_uptodate makes sure that
	 * clear_huge_page writes become visible before the set_pmd_at()
	 * write.
	 */
	__folio_mark_uptodate(folio);

	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
	if (unlikely(!pmd_none(*vmf->pmd))) {
		goto unlock_release;
	} else {
		pmd_t entry;

		ret = check_stable_address_space(vma->vm_mm);
		if (ret)
			goto unlock_release;

		/* Deliver the page fault to userland */
		if (userfaultfd_missing(vma)) {
			spin_unlock(vmf->ptl);
			folio_put(folio);
			pte_free(vma->vm_mm, pgtable);
			ret = handle_userfault(vmf, VM_UFFD_MISSING);
			VM_BUG_ON(ret & VM_FAULT_FALLBACK);
			return ret;
		}

		entry = mk_huge_pmd(page, vma->vm_page_prot);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		folio_add_new_anon_rmap(folio, vma, haddr);
		folio_add_lru_vma(folio, vma);
		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
		set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
		update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
		add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
		mm_inc_nr_ptes(vma->vm_mm);
		spin_unlock(vmf->ptl);
		count_vm_event(THP_FAULT_ALLOC);
		count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
	}

	return 0;
unlock_release:
	spin_unlock(vmf->ptl);
release:
	if (pgtable)
		pte_free(vma->vm_mm, pgtable);
	folio_put(folio);
	return ret;
}
/*
 * always: directly stall for all thp allocations
 * defer: wake kswapd and fail if not immediately available
 * defer+madvise: wake kswapd and directly stall for MADV_HUGEPAGE, otherwise
 *		  fail if not immediately available
 * madvise: directly stall for MADV_HUGEPAGE, otherwise fail if not immediately
 *	    available
 * never: never stall for any thp allocation
 */
gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma)
{
	const bool vma_madvised = vma && (vma->vm_flags & VM_HUGEPAGE);

	/* Always do synchronous compaction */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);

	/* Kick kcompactd and fail quickly */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;

	/* Synchronous compaction if madvised, otherwise kick kcompactd */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT |
			(vma_madvised ? __GFP_DIRECT_RECLAIM :
					__GFP_KSWAPD_RECLAIM);

	/* Only do synchronous compaction if madvised */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT |
		       (vma_madvised ? __GFP_DIRECT_RECLAIM : 0);

	return GFP_TRANSHUGE_LIGHT;
}
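
/*
 * Note: GFP_TRANSHUGE implies __GFP_DIRECT_RECLAIM (synchronous
 * compaction), while GFP_TRANSHUGE_LIGHT performs no reclaim at all
 * unless the caller explicitly adds __GFP_KSWAPD_RECLAIM or
 * __GFP_DIRECT_RECLAIM above.
 */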
/* Caller must hold page table lock. */
static void set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
		struct page *zero_page)
{
	pmd_t entry;
	if (!pmd_none(*pmd))
		return;
	entry = mk_pmd(zero_page, vma->vm_page_prot);
	entry = pmd_mkhuge(entry);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, haddr, pmd, entry);
	mm_inc_nr_ptes(mm);
}
vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	gfp_t gfp;
	struct folio *folio;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;

	if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER))
		return VM_FAULT_FALLBACK;
	if (unlikely(anon_vma_prepare(vma)))
		return VM_FAULT_OOM;
	khugepaged_enter_vma(vma, vma->vm_flags);

	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
			!mm_forbids_zeropage(vma->vm_mm) &&
			transparent_hugepage_use_zero_page()) {
		vm_fault_t ret;
		pgtable_t pgtable;
		struct page *zero_page;

		pgtable = pte_alloc_one(vma->vm_mm);
		if (unlikely(!pgtable))
			return VM_FAULT_OOM;
		zero_page = mm_get_huge_zero_page(vma->vm_mm);
		if (unlikely(!zero_page)) {
			pte_free(vma->vm_mm, pgtable);
			count_vm_event(THP_FAULT_FALLBACK);
			return VM_FAULT_FALLBACK;
		}
		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
		ret = 0;
		if (pmd_none(*vmf->pmd)) {
			ret = check_stable_address_space(vma->vm_mm);
			if (ret) {
				spin_unlock(vmf->ptl);
				pte_free(vma->vm_mm, pgtable);
			} else if (userfaultfd_missing(vma)) {
				spin_unlock(vmf->ptl);
				pte_free(vma->vm_mm, pgtable);
				ret = handle_userfault(vmf, VM_UFFD_MISSING);
				VM_BUG_ON(ret & VM_FAULT_FALLBACK);
			} else {
				set_huge_zero_page(pgtable, vma->vm_mm, vma,
						   haddr, vmf->pmd, zero_page);
				update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
				spin_unlock(vmf->ptl);
			}
		} else {
			spin_unlock(vmf->ptl);
			pte_free(vma->vm_mm, pgtable);
		}
		return ret;
	}
	gfp = vma_thp_gfp_mask(vma);
	folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, vma, haddr, true);
	if (unlikely(!folio)) {
		count_vm_event(THP_FAULT_FALLBACK);
		return VM_FAULT_FALLBACK;
	}
	return __do_huge_pmd_anonymous_page(vmf, &folio->page, gfp);
}
static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
		pgtable_t pgtable)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t entry;
	spinlock_t *ptl;

	ptl = pmd_lock(mm, pmd);
	if (!pmd_none(*pmd)) {
		if (write) {
			if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
				WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
				goto out_unlock;
			}
			entry = pmd_mkyoung(*pmd);
			entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
			if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
				update_mmu_cache_pmd(vma, addr, pmd);
		}

		goto out_unlock;
	}

	entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
	if (pfn_t_devmap(pfn))
		entry = pmd_mkdevmap(entry);
	if (write) {
		entry = pmd_mkyoung(pmd_mkdirty(entry));
		entry = maybe_pmd_mkwrite(entry, vma);
	}

	if (pgtable) {
		pgtable_trans_huge_deposit(mm, pmd, pgtable);
		mm_inc_nr_ptes(mm);
		pgtable = NULL;
	}

	set_pmd_at(mm, addr, pmd, entry);
	update_mmu_cache_pmd(vma, addr, pmd);

out_unlock:
	spin_unlock(ptl);
	if (pgtable)
		pte_free(mm, pgtable);
}
/**
 * vmf_insert_pfn_pmd - insert a pmd size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @write: whether it's a write fault
 *
 * Insert a pmd size pfn. See vmf_insert_pfn() for additional info.
 *
 * Return: vm_fault_t value.
 */
vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
{
	unsigned long addr = vmf->address & PMD_MASK;
	struct vm_area_struct *vma = vmf->vma;
	pgprot_t pgprot = vma->vm_page_prot;
	pgtable_t pgtable = NULL;

	/*
	 * If we had pmd_special, we could avoid all these restrictions,
	 * but we need to be consistent with PTEs and architectures that
	 * can't support a 'special' bit.
	 */
	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
			!pfn_t_devmap(pfn));
	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
						(VM_PFNMAP|VM_MIXEDMAP));
	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return VM_FAULT_SIGBUS;

	if (arch_needs_pgtable_deposit()) {
		pgtable = pte_alloc_one(vma->vm_mm);
		if (!pgtable)
			return VM_FAULT_OOM;
	}

	track_pfn_insert(vma, &pgprot, pfn);

	insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pud = pud_mkwrite(pud);
	return pud;
}
static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, pfn_t pfn, bool write)
{
	struct mm_struct *mm = vma->vm_mm;
	pgprot_t prot = vma->vm_page_prot;
	pud_t entry;
	spinlock_t *ptl;

	ptl = pud_lock(mm, pud);
	if (!pud_none(*pud)) {
		if (write) {
			if (pud_pfn(*pud) != pfn_t_to_pfn(pfn)) {
				WARN_ON_ONCE(!is_huge_zero_pud(*pud));
				goto out_unlock;
			}
			entry = pud_mkyoung(*pud);
			entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
			if (pudp_set_access_flags(vma, addr, pud, entry, 1))
				update_mmu_cache_pud(vma, addr, pud);
		}
		goto out_unlock;
	}

	entry = pud_mkhuge(pfn_t_pud(pfn, prot));
	if (pfn_t_devmap(pfn))
		entry = pud_mkdevmap(entry);
	if (write) {
		entry = pud_mkyoung(pud_mkdirty(entry));
		entry = maybe_pud_mkwrite(entry, vma);
	}
	set_pud_at(mm, addr, pud, entry);
	update_mmu_cache_pud(vma, addr, pud);

out_unlock:
	spin_unlock(ptl);
}
/**
 * vmf_insert_pfn_pud - insert a pud size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @write: whether it's a write fault
 *
 * Insert a pud size pfn. See vmf_insert_pfn() for additional info.
 *
 * Return: vm_fault_t value.
 */
vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write)
{
	unsigned long addr = vmf->address & PUD_MASK;
	struct vm_area_struct *vma = vmf->vma;
	pgprot_t pgprot = vma->vm_page_prot;

	/*
	 * If we had pud_special, we could avoid all these restrictions,
	 * but we need to be consistent with PTEs and architectures that
	 * can't support a 'special' bit.
	 */
	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
			!pfn_t_devmap(pfn));
	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
						(VM_PFNMAP|VM_MIXEDMAP));
	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return VM_FAULT_SIGBUS;

	track_pfn_insert(vma, &pgprot, pfn);

	insert_pfn_pud(vma, addr, vmf->pud, pfn, write);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
		      pmd_t *pmd, bool write)
{
	pmd_t _pmd;

	_pmd = pmd_mkyoung(*pmd);
	if (write)
		_pmd = pmd_mkdirty(_pmd);
	if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
				  pmd, _pmd, write))
		update_mmu_cache_pmd(vma, addr, pmd);
}
struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
	unsigned long pfn = pmd_pfn(*pmd);
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	int ret;

	assert_spin_locked(pmd_lockptr(mm, pmd));

	if (flags & FOLL_WRITE && !pmd_write(*pmd))
		return NULL;

	if (pmd_present(*pmd) && pmd_devmap(*pmd))
		/* pass */;
	else
		return NULL;

	if (flags & FOLL_TOUCH)
		touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);

	/*
	 * device mapped pages can only be returned if the
	 * caller will manage the page reference count.
	 */
	if (!(flags & (FOLL_GET | FOLL_PIN)))
		return ERR_PTR(-EEXIST);

	pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT;
	*pgmap = get_dev_pagemap(pfn, *pgmap);
	if (!*pgmap)
		return ERR_PTR(-EFAULT);
	page = pfn_to_page(pfn);
	ret = try_grab_page(page, flags);
	if (ret)
		page = ERR_PTR(ret);

	return page;
}
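
/*
 * fork() support for THPs: the huge page itself is shared read-only by
 * write-protecting the PMD in both parent and child; a folio that may
 * be DMA-pinned cannot be shared, so the PMD is split instead and the
 * copy is retried at PTE granularity.
 */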
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
		  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
{
	spinlock_t *dst_ptl, *src_ptl;
	struct page *src_page;
	struct folio *src_folio;
	pmd_t pmd;
	pgtable_t pgtable = NULL;
	int ret = -ENOMEM;

	/* Skip if can be re-fill on fault */
	if (!vma_is_anonymous(dst_vma))
		return 0;

	pgtable = pte_alloc_one(dst_mm);
	if (unlikely(!pgtable))
		goto out;

	dst_ptl = pmd_lock(dst_mm, dst_pmd);
	src_ptl = pmd_lockptr(src_mm, src_pmd);
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);

	ret = -EAGAIN;
	pmd = *src_pmd;

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
	if (unlikely(is_swap_pmd(pmd))) {
		swp_entry_t entry = pmd_to_swp_entry(pmd);

		VM_BUG_ON(!is_pmd_migration_entry(pmd));
		if (!is_readable_migration_entry(entry)) {
			entry = make_readable_migration_entry(
							swp_offset(entry));
			pmd = swp_entry_to_pmd(entry);
			if (pmd_swp_soft_dirty(*src_pmd))
				pmd = pmd_swp_mksoft_dirty(pmd);
			if (pmd_swp_uffd_wp(*src_pmd))
				pmd = pmd_swp_mkuffd_wp(pmd);
			set_pmd_at(src_mm, addr, src_pmd, pmd);
		}
		add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
		mm_inc_nr_ptes(dst_mm);
		pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
		if (!userfaultfd_wp(dst_vma))
			pmd = pmd_swp_clear_uffd_wp(pmd);
		set_pmd_at(dst_mm, addr, dst_pmd, pmd);
		ret = 0;
		goto out_unlock;
	}
#endif

	if (unlikely(!pmd_trans_huge(pmd))) {
		pte_free(dst_mm, pgtable);
		goto out_unlock;
	}
	/*
	 * When page table lock is held, the huge zero pmd should not be
	 * under splitting since we don't split the page itself, only pmd to
	 * a page table.
	 */
	if (is_huge_zero_pmd(pmd)) {
		/*
		 * get_huge_zero_page() will never allocate a new page here,
		 * since we already have a zero page to copy. It just takes a
		 * reference.
		 */
		mm_get_huge_zero_page(dst_mm);
		goto out_zero_page;
	}

	src_page = pmd_page(pmd);
	VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
	src_folio = page_folio(src_page);

	folio_get(src_folio);
	if (unlikely(folio_try_dup_anon_rmap_pmd(src_folio, src_page, src_vma))) {
		/* Page maybe pinned: split and retry the fault on PTEs. */
		folio_put(src_folio);
		pte_free(dst_mm, pgtable);
		spin_unlock(src_ptl);
		spin_unlock(dst_ptl);
		__split_huge_pmd(src_vma, src_pmd, addr, false, NULL);
		return -EAGAIN;
	}
	add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
out_zero_page:
	mm_inc_nr_ptes(dst_mm);
	pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
	pmdp_set_wrprotect(src_mm, addr, src_pmd);
	if (!userfaultfd_wp(dst_vma))
		pmd = pmd_clear_uffd_wp(pmd);
	pmd = pmd_mkold(pmd_wrprotect(pmd));
	set_pmd_at(dst_mm, addr, dst_pmd, pmd);

	ret = 0;
out_unlock:
	spin_unlock(src_ptl);
	spin_unlock(dst_ptl);
out:
	return ret;
}
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void touch_pud(struct vm_area_struct *vma, unsigned long addr,
		      pud_t *pud, bool write)
{
	pud_t _pud;

	_pud = pud_mkyoung(*pud);
	if (write)
		_pud = pud_mkdirty(_pud);
	if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK,
				  pud, _pud, write))
		update_mmu_cache_pud(vma, addr, pud);
}
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, int flags, struct dev_pagemap **pgmap)
{
	unsigned long pfn = pud_pfn(*pud);
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	int ret;

	assert_spin_locked(pud_lockptr(mm, pud));

	if (flags & FOLL_WRITE && !pud_write(*pud))
		return NULL;

	if (pud_present(*pud) && pud_devmap(*pud))
		/* pass */;
	else
		return NULL;

	if (flags & FOLL_TOUCH)
		touch_pud(vma, addr, pud, flags & FOLL_WRITE);

	/*
	 * device mapped pages can only be returned if the
	 * caller will manage the page reference count.
	 *
	 * At least one of FOLL_GET | FOLL_PIN must be set, so assert that here:
	 */
	if (!(flags & (FOLL_GET | FOLL_PIN)))
		return ERR_PTR(-EEXIST);

	pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT;
	*pgmap = get_dev_pagemap(pfn, *pgmap);
	if (!*pgmap)
		return ERR_PTR(-EFAULT);
	page = pfn_to_page(pfn);

	ret = try_grab_page(page, flags);
	if (ret)
		page = ERR_PTR(ret);

	return page;
}
int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
		  struct vm_area_struct *vma)
{
	spinlock_t *dst_ptl, *src_ptl;
	pud_t pud;
	int ret;

	dst_ptl = pud_lock(dst_mm, dst_pud);
	src_ptl = pud_lockptr(src_mm, src_pud);
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);

	ret = -EAGAIN;
	pud = *src_pud;
	if (unlikely(!pud_trans_huge(pud) && !pud_devmap(pud)))
		goto out_unlock;

	/*
	 * When page table lock is held, the huge zero pud should not be
	 * under splitting since we don't split the page itself, only pud to
	 * a page table.
	 */
	if (is_huge_zero_pud(pud)) {
		/* No huge zero pud yet */
	}

	/*
	 * TODO: once we support anonymous pages, use
	 * folio_try_dup_anon_rmap_*() and split if duplicating fails.
	 */
	pudp_set_wrprotect(src_mm, addr, src_pud);
	pud = pud_mkold(pud_wrprotect(pud));
	set_pud_at(dst_mm, addr, dst_pud, pud);

	ret = 0;
out_unlock:
	spin_unlock(src_ptl);
	spin_unlock(dst_ptl);
	return ret;
}
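
/*
 * Note: PUD-sized transparent pages are currently only created for DAX
 * (devmap) mappings, hence the absence of anonymous-page handling in
 * copy_huge_pud() above.
 */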
void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud);
	if (unlikely(!pud_same(*vmf->pud, orig_pud)))
		goto unlock;

	touch_pud(vmf->vma, vmf->address, vmf->pud, write);
unlock:
	spin_unlock(vmf->ptl);
}
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
void huge_pmd_set_accessed(struct vm_fault *vmf)
{
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
	if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd)))
		goto unlock;

	touch_pmd(vmf->vma, vmf->address, vmf->pmd, write);
unlock:
	spin_unlock(vmf->ptl);
}
vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf)
{
	const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
	struct vm_area_struct *vma = vmf->vma;
	struct folio *folio;
	struct page *page;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
	pmd_t orig_pmd = vmf->orig_pmd;

	vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd);
	VM_BUG_ON_VMA(!vma->anon_vma, vma);

	if (is_huge_zero_pmd(orig_pmd))
		goto fallback;

	spin_lock(vmf->ptl);

	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
		spin_unlock(vmf->ptl);
		return 0;
	}

	page = pmd_page(orig_pmd);
	folio = page_folio(page);
	VM_BUG_ON_PAGE(!PageHead(page), page);

	/* Early check when only holding the PT lock. */
	if (PageAnonExclusive(page))
		goto reuse;

	if (!folio_trylock(folio)) {
		folio_get(folio);
		spin_unlock(vmf->ptl);
		folio_lock(folio);
		spin_lock(vmf->ptl);
		if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
			spin_unlock(vmf->ptl);
			folio_unlock(folio);
			folio_put(folio);
			return 0;
		}
		folio_put(folio);
	}

	/* Recheck after temporarily dropping the PT lock. */
	if (PageAnonExclusive(page)) {
		folio_unlock(folio);
		goto reuse;
	}

	/*
	 * See do_wp_page(): we can only reuse the folio exclusively if
	 * there are no additional references. Note that we always drain
	 * the LRU cache immediately after adding a THP.
	 */
	if (folio_ref_count(folio) >
			1 + folio_test_swapcache(folio) * folio_nr_pages(folio))
		goto unlock_fallback;
	if (folio_test_swapcache(folio))
		folio_free_swap(folio);
	if (folio_ref_count(folio) == 1) {
		pmd_t entry;

		folio_move_anon_rmap(folio, vma);
		SetPageAnonExclusive(page);
		folio_unlock(folio);
reuse:
		if (unlikely(unshare)) {
			spin_unlock(vmf->ptl);
			return 0;
		}
		entry = pmd_mkyoung(orig_pmd);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1))
			update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
		spin_unlock(vmf->ptl);
		return 0;
	}

unlock_fallback:
	folio_unlock(folio);
	spin_unlock(vmf->ptl);
fallback:
	__split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL);
	return VM_FAULT_FALLBACK;
}
static inline bool can_change_pmd_writable(struct vm_area_struct *vma,
					   unsigned long addr, pmd_t pmd)
{
	struct page *page;

	if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE)))
		return false;

	/* Don't touch entries that are not even readable (NUMA hinting). */
	if (pmd_protnone(pmd))
		return false;

	/* Do we need write faults for softdirty tracking? */
	if (vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd))
		return false;

	/* Do we need write faults for uffd-wp tracking? */
	if (userfaultfd_huge_pmd_wp(vma, pmd))
		return false;

	if (!(vma->vm_flags & VM_SHARED)) {
		/* See can_change_pte_writable(). */
		page = vm_normal_page_pmd(vma, addr, pmd);
		return page && PageAnon(page) && PageAnonExclusive(page);
	}

	/* See can_change_pte_writable(). */
	return pmd_dirty(pmd);
}
/* FOLL_FORCE can write to even unwritable PMDs in COW mappings. */
static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page,
					struct vm_area_struct *vma,
					unsigned int flags)
{
	/* If the pmd is writable, we can write to the page. */
	if (pmd_write(pmd))
		return true;

	/* Maybe FOLL_FORCE is set to override it? */
	if (!(flags & FOLL_FORCE))
		return false;

	/* But FOLL_FORCE has no effect on shared mappings */
	if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
		return false;

	/* ... or read-only private ones */
	if (!(vma->vm_flags & VM_MAYWRITE))
		return false;

	/* ... or already writable ones that just need to take a write fault */
	if (vma->vm_flags & VM_WRITE)
		return false;

	/*
	 * See can_change_pte_writable(): we broke COW and could map the page
	 * writable if we have an exclusive anonymous page ...
	 */
	if (!page || !PageAnon(page) || !PageAnonExclusive(page))
		return false;

	/* ... and a write-fault isn't required for other reasons. */
	if (vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd))
		return false;
	return !userfaultfd_huge_pmd_wp(vma, pmd);
}
struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
				   unsigned long addr,
				   pmd_t *pmd,
				   unsigned int flags)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	int ret;

	assert_spin_locked(pmd_lockptr(mm, pmd));

	page = pmd_page(*pmd);
	VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);

	if ((flags & FOLL_WRITE) &&
	    !can_follow_write_pmd(*pmd, page, vma, flags))
		return NULL;

	/* Avoid dumping huge zero page */
	if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
		return ERR_PTR(-EFAULT);

	if (pmd_protnone(*pmd) && !gup_can_follow_protnone(vma, flags))
		return NULL;

	if (!pmd_write(*pmd) && gup_must_unshare(vma, flags, page))
		return ERR_PTR(-EMLINK);

	VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
			!PageAnonExclusive(page), page);

	ret = try_grab_page(page, flags);
	if (ret)
		return ERR_PTR(ret);

	if (flags & FOLL_TOUCH)
		touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);

	page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
	VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page);

	return page;
}
/* NUMA hinting page fault entry point for trans huge pmds */
vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	pmd_t oldpmd = vmf->orig_pmd;
	pmd_t pmd;
	struct folio *folio;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
	int nid = NUMA_NO_NODE;
	int target_nid, last_cpupid = (-1 & LAST_CPUPID_MASK);
	bool migrated = false, writable = false;
	int flags = 0;

	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
	if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
		spin_unlock(vmf->ptl);
		goto out;
	}

	pmd = pmd_modify(oldpmd, vma->vm_page_prot);

	/*
	 * Detect now whether the PMD could be writable; this information
	 * is only valid while holding the PT lock.
	 */
	writable = pmd_write(pmd);
	if (!writable && vma_wants_manual_pte_write_upgrade(vma) &&
	    can_change_pmd_writable(vma, vmf->address, pmd))
		writable = true;

	folio = vm_normal_folio_pmd(vma, haddr, pmd);
	if (!folio)
		goto out_map;

	/* See similar comment in do_numa_page for explanation */
	if (!writable)
		flags |= TNF_NO_GROUP;

	nid = folio_nid(folio);
	/*
	 * For memory tiering mode, cpupid of slow memory page is used
	 * to record page access time. So use default value.
	 */
	if (node_is_toptier(nid))
		last_cpupid = folio_last_cpupid(folio);
	target_nid = numa_migrate_prep(folio, vma, haddr, nid, &flags);
	if (target_nid == NUMA_NO_NODE) {
		folio_put(folio);
		goto out_map;
	}

	spin_unlock(vmf->ptl);
	writable = false;

	migrated = migrate_misplaced_folio(folio, vma, target_nid);
	if (migrated) {
		flags |= TNF_MIGRATED;
		nid = target_nid;
	} else {
		flags |= TNF_MIGRATE_FAIL;
		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
		if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
			spin_unlock(vmf->ptl);
			goto out;
		}
		goto out_map;
	}

out:
	if (nid != NUMA_NO_NODE)
		task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags);

	return 0;

out_map:
	/* Restore the PMD */
	pmd = pmd_modify(oldpmd, vma->vm_page_prot);
	pmd = pmd_mkyoung(pmd);
	if (writable)
		pmd = pmd_mkwrite(pmd, vma);
	set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
	update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
	spin_unlock(vmf->ptl);
	goto out;
}
/*
 * Return true if we do MADV_FREE successfully on entire pmd page.
 * Otherwise, return false.
 */
bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
		pmd_t *pmd, unsigned long addr, unsigned long next)
{
	spinlock_t *ptl;
	pmd_t orig_pmd;
	struct folio *folio;
	struct mm_struct *mm = tlb->mm;
	bool ret = false;

	tlb_change_page_size(tlb, HPAGE_PMD_SIZE);

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (!ptl)
		goto out_unlocked;

	orig_pmd = *pmd;
	if (is_huge_zero_pmd(orig_pmd))
		goto out;

	if (unlikely(!pmd_present(orig_pmd))) {
		VM_BUG_ON(thp_migration_supported() &&
				  !is_pmd_migration_entry(orig_pmd));
		goto out;
	}

	folio = pfn_folio(pmd_pfn(orig_pmd));
	/*
	 * If other processes are mapping this folio, we couldn't discard
	 * the folio unless they all do MADV_FREE so let's skip the folio.
	 */
	if (folio_estimated_sharers(folio) != 1)
		goto out;

	if (!folio_trylock(folio))
		goto out;

	/*
	 * If user want to discard part-pages of THP, split it so MADV_FREE
	 * will deactivate only them.
	 */
	if (next - addr != HPAGE_PMD_SIZE) {
		folio_get(folio);
		spin_unlock(ptl);
		split_folio(folio);
		folio_unlock(folio);
		folio_put(folio);
		goto out_unlocked;
	}

	if (folio_test_dirty(folio))
		folio_clear_dirty(folio);
	folio_unlock(folio);

	if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
		pmdp_invalidate(vma, addr, pmd);
		orig_pmd = pmd_mkold(orig_pmd);
		orig_pmd = pmd_mkclean(orig_pmd);

		set_pmd_at(mm, addr, pmd, orig_pmd);
		tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
	}

	folio_mark_lazyfree(folio);
	ret = true;
out:
	spin_unlock(ptl);
out_unlocked:
	return ret;
}
static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd)
{
	pgtable_t pgtable;

	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
	pte_free(mm, pgtable);
	mm_dec_nr_ptes(mm);
}
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
		 pmd_t *pmd, unsigned long addr)
{
	pmd_t orig_pmd;
	spinlock_t *ptl;

	tlb_change_page_size(tlb, HPAGE_PMD_SIZE);

	ptl = __pmd_trans_huge_lock(pmd, vma);
	if (!ptl)
		return 0;
	/*
	 * For architectures like ppc64 we look at deposited pgtable
	 * when calling pmdp_huge_get_and_clear. So do the
	 * pgtable_trans_huge_withdraw after finishing pmdp related
	 * operations.
	 */
	orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmd,
						tlb->fullmm);
	arch_check_zapped_pmd(vma, orig_pmd);
	tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
	if (vma_is_special_huge(vma)) {
		if (arch_needs_pgtable_deposit())
			zap_deposited_table(tlb->mm, pmd);
		spin_unlock(ptl);
	} else if (is_huge_zero_pmd(orig_pmd)) {
		zap_deposited_table(tlb->mm, pmd);
		spin_unlock(ptl);
	} else {
		struct page *page = NULL;
		int flush_needed = 1;

		if (pmd_present(orig_pmd)) {
			page = pmd_page(orig_pmd);
			folio_remove_rmap_pmd(page_folio(page), page, vma);
			VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
			VM_BUG_ON_PAGE(!PageHead(page), page);
		} else if (thp_migration_supported()) {
			swp_entry_t entry;

			VM_BUG_ON(!is_pmd_migration_entry(orig_pmd));
			entry = pmd_to_swp_entry(orig_pmd);
			page = pfn_swap_entry_to_page(entry);
			flush_needed = 0;
		} else
			WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");

		if (PageAnon(page)) {
			zap_deposited_table(tlb->mm, pmd);
			add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
		} else {
			if (arch_needs_pgtable_deposit())
				zap_deposited_table(tlb->mm, pmd);
			add_mm_counter(tlb->mm, mm_counter_file(page), -HPAGE_PMD_NR);
		}

		spin_unlock(ptl);
		if (flush_needed)
			tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE);
	}
	return 1;
}
#ifndef pmd_move_must_withdraw
static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl,
					 spinlock_t *old_pmd_ptl,
					 struct vm_area_struct *vma)
{
	/*
	 * With split pmd lock we also need to move preallocated
	 * PTE page table if new_pmd is on different PMD page table.
	 *
	 * We also don't deposit and withdraw tables for file pages.
	 */
	return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);
}
#endif
static pmd_t move_soft_dirty_pmd(pmd_t pmd)
{
#ifdef CONFIG_MEM_SOFT_DIRTY
	if (unlikely(is_pmd_migration_entry(pmd)))
		pmd = pmd_swp_mksoft_dirty(pmd);
	else if (pmd_present(pmd))
		pmd = pmd_mksoft_dirty(pmd);
#endif
	return pmd;
}
bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
		  unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
{
	spinlock_t *old_ptl, *new_ptl;
	pmd_t pmd;
	struct mm_struct *mm = vma->vm_mm;
	bool force_flush = false;

	/*
	 * The destination pmd shouldn't be established, free_pgtables()
	 * should have released it; but move_page_tables() might have already
	 * inserted a page table, if racing against shmem/file collapse.
	 */
	if (!pmd_none(*new_pmd)) {
		VM_BUG_ON(pmd_trans_huge(*new_pmd));
		return false;
	}

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = __pmd_trans_huge_lock(old_pmd, vma);
	if (old_ptl) {
		new_ptl = pmd_lockptr(mm, new_pmd);
		if (new_ptl != old_ptl)
			spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
		pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
		if (pmd_present(pmd))
			force_flush = true;
		VM_BUG_ON(!pmd_none(*new_pmd));

		if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) {
			pgtable_t pgtable;
			pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
			pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
		}
		pmd = move_soft_dirty_pmd(pmd);
		set_pmd_at(mm, new_addr, new_pmd, pmd);
		if (force_flush)
			flush_pmd_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
		if (new_ptl != old_ptl)
			spin_unlock(new_ptl);
		spin_unlock(old_ptl);
		return true;
	}
	return false;
}
/*
 * Returns
 *  - 0 if PMD could not be locked
 *  - 1 if PMD was locked but protections unchanged and TLB flush unnecessary
 *      or if prot_numa but THP migration is not supported
 *  - HPAGE_PMD_NR if protections changed and TLB flush necessary
 */
int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
		    pmd_t *pmd, unsigned long addr, pgprot_t newprot,
		    unsigned long cp_flags)
{
	struct mm_struct *mm = vma->vm_mm;
	spinlock_t *ptl;
	pmd_t oldpmd, entry;
	bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
	bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
	bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
	int ret = 1;

	tlb_change_page_size(tlb, HPAGE_PMD_SIZE);

	if (prot_numa && !thp_migration_supported())
		return 1;

	ptl = __pmd_trans_huge_lock(pmd, vma);
	if (!ptl)
		return 0;

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
	if (is_swap_pmd(*pmd)) {
		swp_entry_t entry = pmd_to_swp_entry(*pmd);
		struct folio *folio = page_folio(pfn_swap_entry_to_page(entry));
		pmd_t newpmd;

		VM_BUG_ON(!is_pmd_migration_entry(*pmd));
		if (is_writable_migration_entry(entry)) {
			/*
			 * A protection check is difficult so
			 * just be safe and disable write
			 */
			if (folio_test_anon(folio))
				entry = make_readable_exclusive_migration_entry(swp_offset(entry));
			else
				entry = make_readable_migration_entry(swp_offset(entry));
			newpmd = swp_entry_to_pmd(entry);
			if (pmd_swp_soft_dirty(*pmd))
				newpmd = pmd_swp_mksoft_dirty(newpmd);
		} else {
			newpmd = *pmd;
		}

		if (uffd_wp)
			newpmd = pmd_swp_mkuffd_wp(newpmd);
		else if (uffd_wp_resolve)
			newpmd = pmd_swp_clear_uffd_wp(newpmd);
		if (!pmd_same(*pmd, newpmd))
			set_pmd_at(mm, addr, pmd, newpmd);
		goto unlock;
	}
#endif

	if (prot_numa) {
		struct folio *folio;
		bool toptier;
		/*
		 * Avoid trapping faults against the zero page. The read-only
		 * data is likely to be read-cached on the local CPU and
		 * local/remote hits to the zero page are not interesting.
		 */
		if (is_huge_zero_pmd(*pmd))
			goto unlock;

		if (pmd_protnone(*pmd))
			goto unlock;

		folio = page_folio(pmd_page(*pmd));
		toptier = node_is_toptier(folio_nid(folio));
		/*
		 * Skip scanning top tier node if normal numa
		 * balancing is disabled
		 */
		if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) &&
		    toptier)
			goto unlock;

		if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
		    !toptier)
			folio_xchg_access_time(folio,
					       jiffies_to_msecs(jiffies));
	}
	/*
	 * In case prot_numa, we are under mmap_read_lock(mm). It's critical
	 * to not clear pmd intermittently to avoid race with MADV_DONTNEED
	 * which is also under mmap_read_lock(mm):
	 *
	 *	CPU0:				CPU1:
	 *				change_huge_pmd(prot_numa=1)
	 *				 pmdp_huge_get_and_clear_notify()
	 * madvise_dontneed()
	 *  zap_pmd_range()
	 *   pmd_trans_huge(*pmd) == 0 (without ptl)
	 *   // skip the pmd
	 *				 set_pmd_at();
	 *				 // pmd is re-established
	 *
	 * The race makes MADV_DONTNEED miss the huge pmd and don't clear it
	 * which may break userspace.
	 *
	 * pmdp_invalidate_ad() is required to make sure we don't miss
	 * dirty/young flags set by hardware.
	 */
	oldpmd = pmdp_invalidate_ad(vma, addr, pmd);

	entry = pmd_modify(oldpmd, newprot);
	if (uffd_wp)
		entry = pmd_mkuffd_wp(entry);
	else if (uffd_wp_resolve)
		/*
		 * Leave the write bit to be handled by PF interrupt
		 * handler, then things like COW could be properly
		 * handled.
		 */
		entry = pmd_clear_uffd_wp(entry);

	/* See change_pte_range(). */
	if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) && !pmd_write(entry) &&
	    can_change_pmd_writable(vma, addr, entry))
		entry = pmd_mkwrite(entry, vma);

	ret = HPAGE_PMD_NR;
	set_pmd_at(mm, addr, pmd, entry);

	if (huge_pmd_needs_flush(oldpmd, entry))
		tlb_flush_pmd_range(tlb, addr, HPAGE_PMD_SIZE);
unlock:
	spin_unlock(ptl);
	return ret;
}
2146 #ifdef CONFIG_USERFAULTFD
2148 * The PT lock for src_pmd and the mmap_lock for reading are held by
2149 * the caller, but it must return after releasing the page_table_lock.
2150 * Just move the page from src_pmd to dst_pmd if possible.
2151 * Return zero if succeeded in moving the page, -EAGAIN if it needs to be
2152 * repeated by the caller, or other errors in case of failure.
2154 int move_pages_huge_pmd(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, pmd_t dst_pmdval,
2155 struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
2156 unsigned long dst_addr, unsigned long src_addr)
2158 pmd_t _dst_pmd, src_pmdval;
2159 struct page *src_page;
2160 struct folio *src_folio;
2161 struct anon_vma *src_anon_vma;
2162 spinlock_t *src_ptl, *dst_ptl;
2163 pgtable_t src_pgtable;
2164 struct mmu_notifier_range range;
2167 src_pmdval = *src_pmd;
2168 src_ptl = pmd_lockptr(mm, src_pmd);
2170 lockdep_assert_held(src_ptl);
2171 mmap_assert_locked(mm);
2173 /* Sanity checks before the operation */
2174 if (WARN_ON_ONCE(!pmd_none(dst_pmdval)) || WARN_ON_ONCE(src_addr & ~HPAGE_PMD_MASK) ||
2175 WARN_ON_ONCE(dst_addr & ~HPAGE_PMD_MASK)) {
2176 spin_unlock(src_ptl);
2180 if (!pmd_trans_huge(src_pmdval)) {
2181 spin_unlock(src_ptl);
2182 if (is_pmd_migration_entry(src_pmdval)) {
2183 pmd_migration_entry_wait(mm, &src_pmdval);
2189 src_page = pmd_page(src_pmdval);
2190 if (unlikely(!PageAnonExclusive(src_page))) {
2191 spin_unlock(src_ptl);
2195 src_folio = page_folio(src_page);
2196 folio_get(src_folio);
2197 spin_unlock(src_ptl);
2199 flush_cache_range(src_vma, src_addr, src_addr + HPAGE_PMD_SIZE);
2200 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, src_addr,
2201 src_addr + HPAGE_PMD_SIZE);
2202 mmu_notifier_invalidate_range_start(&range);
2204 folio_lock(src_folio);
2207 * split_huge_page walks the anon_vma chain without the page
2208 * lock. Serialize against it with the anon_vma lock, the page
2209 * lock is not enough.
2211 src_anon_vma = folio_get_anon_vma(src_folio);
2212 if (!src_anon_vma) {
2216 anon_vma_lock_write(src_anon_vma);
2218 dst_ptl = pmd_lockptr(mm, dst_pmd);
2219 double_pt_lock(src_ptl, dst_ptl);
2220 if (unlikely(!pmd_same(*src_pmd, src_pmdval) ||
2221 !pmd_same(*dst_pmd, dst_pmdval))) {
2225 if (folio_maybe_dma_pinned(src_folio) ||
2226 !PageAnonExclusive(&src_folio->page)) {
2231 if (WARN_ON_ONCE(!folio_test_head(src_folio)) ||
2232 WARN_ON_ONCE(!folio_test_anon(src_folio))) {
2237 folio_move_anon_rmap(src_folio, dst_vma);
2238 WRITE_ONCE(src_folio->index, linear_page_index(dst_vma, dst_addr));
2240 src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd);
2241 /* Folio got pinned from under us. Put it back and fail the move. */
2242 if (folio_maybe_dma_pinned(src_folio)) {
2243 set_pmd_at(mm, src_addr, src_pmd, src_pmdval);
2248 _dst_pmd = mk_huge_pmd(&src_folio->page, dst_vma->vm_page_prot);
2249 /* Follow mremap() behavior and treat the entry dirty after the move */
2250 _dst_pmd = pmd_mkwrite(pmd_mkdirty(_dst_pmd), dst_vma);
2251 set_pmd_at(mm, dst_addr, dst_pmd, _dst_pmd);
2253 src_pgtable = pgtable_trans_huge_withdraw(mm, src_pmd);
2254 pgtable_trans_huge_deposit(mm, dst_pmd, src_pgtable);
2256 double_pt_unlock(src_ptl, dst_ptl);
2257 anon_vma_unlock_write(src_anon_vma);
2258 put_anon_vma(src_anon_vma);
2260 /* unblock rmap walks */
2261 folio_unlock(src_folio);
2262 mmu_notifier_invalidate_range_end(&range);
2263 folio_put(src_folio);
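/*
 * Caller-side sketch (illustrative only; the real caller lives in
 * mm/userfaultfd.c and its exact control flow may differ): how a caller
 * might drive the -EAGAIN contract documented above, taking the source
 * PT lock that move_pages_huge_pmd() expects to be held on entry and
 * releases on every return path.
 *
 *	src_ptl = pmd_lock(mm, src_pmd);
 *	err = move_pages_huge_pmd(mm, dst_pmd, src_pmd, dst_pmdval,
 *				  dst_vma, src_vma, dst_addr, src_addr);
 *	if (err == -EAGAIN)
 *		goto retry;
 */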
2266 #endif /* CONFIG_USERFAULTFD */
2269 * Returns the page table lock pointer if a given pmd maps a thp, NULL otherwise.
2271 * Note that if it returns the page table lock pointer, this routine returns
2272 * without unlocking the page table lock, so callers must unlock it.
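 *
 * A typical caller pattern (an illustrative sketch, not a verbatim
 * caller from this file):
 *
 *	ptl = __pmd_trans_huge_lock(pmd, vma);
 *	if (!ptl)
 *		return;
 *	... operate on the huge pmd with the lock held ...
 *	spin_unlock(ptl);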
2274 spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
2277 ptl = pmd_lock(vma->vm_mm, pmd);
2278 if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) ||
2286 * Returns the page table lock pointer if a given pud maps a thp, NULL otherwise.
2288 * Note that if it returns the page table lock pointer, this routine returns
2289 * without unlocking the page table lock, so callers must unlock it.
2291 spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma)
2295 ptl = pud_lock(vma->vm_mm, pud);
2296 if (likely(pud_trans_huge(*pud) || pud_devmap(*pud)))
2302 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
2303 int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
2304 pud_t *pud, unsigned long addr)
2308 ptl = __pud_trans_huge_lock(pud, vma);
2312 pudp_huge_get_and_clear_full(vma, addr, pud, tlb->fullmm);
2313 tlb_remove_pud_tlb_entry(tlb, pud, addr);
2314 if (vma_is_special_huge(vma)) {
2316 /* No zero page support yet */
2318 /* No support for anonymous PUD pages yet */
2324 static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud,
2325 unsigned long haddr)
2327 VM_BUG_ON(haddr & ~HPAGE_PUD_MASK);
2328 VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
2329 VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma);
2330 VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud));
2332 count_vm_event(THP_SPLIT_PUD);
2334 pudp_huge_clear_flush(vma, haddr, pud);
2337 void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
2338 unsigned long address)
2341 struct mmu_notifier_range range;
2343 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
2344 address & HPAGE_PUD_MASK,
2345 (address & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE);
2346 mmu_notifier_invalidate_range_start(&range);
2347 ptl = pud_lock(vma->vm_mm, pud);
2348 if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud)))
2350 __split_huge_pud_locked(vma, pud, range.start);
2354 mmu_notifier_invalidate_range_end(&range);
2356 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
2358 static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
2359 unsigned long haddr, pmd_t *pmd)
2361 struct mm_struct *mm = vma->vm_mm;
2363 pmd_t _pmd, old_pmd;
2369 * Leave the pmd empty until the pte is filled. Note that it is fine to
2370 * delay notification until mmu_notifier_invalidate_range_end() as we are
2371 * replacing a zero pmd write protected page with a zero pte write protected page.
2374 * See Documentation/mm/mmu_notifier.rst
2376 old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
2378 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2379 pmd_populate(mm, &_pmd, pgtable);
2381 pte = pte_offset_map(&_pmd, haddr);
2383 for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
2386 entry = pfn_pte(my_zero_pfn(addr), vma->vm_page_prot);
2387 entry = pte_mkspecial(entry);
2388 if (pmd_uffd_wp(old_pmd))
2389 entry = pte_mkuffd_wp(entry);
2390 VM_BUG_ON(!pte_none(ptep_get(pte)));
2391 set_pte_at(mm, addr, pte, entry);
2395 smp_wmb(); /* make pte visible before pmd */
2396 pmd_populate(mm, pmd, pgtable);
2399 static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
2400 unsigned long haddr, bool freeze)
2402 struct mm_struct *mm = vma->vm_mm;
2403 struct folio *folio;
2406 pmd_t old_pmd, _pmd;
2407 bool young, write, soft_dirty, pmd_migration = false, uffd_wp = false;
2408 bool anon_exclusive = false, dirty = false;
2413 VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
2414 VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
2415 VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
2416 VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd)
2417 && !pmd_devmap(*pmd));
2419 count_vm_event(THP_SPLIT_PMD);
2421 if (!vma_is_anonymous(vma)) {
2422 old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
2424 * We are going to unmap this huge page. So
2425 * just go ahead and zap it
2427 if (arch_needs_pgtable_deposit())
2428 zap_deposited_table(mm, pmd);
2429 if (vma_is_special_huge(vma))
2431 if (unlikely(is_pmd_migration_entry(old_pmd))) {
2434 entry = pmd_to_swp_entry(old_pmd);
2435 page = pfn_swap_entry_to_page(entry);
2437 page = pmd_page(old_pmd);
2438 folio = page_folio(page);
2439 if (!folio_test_dirty(folio) && pmd_dirty(old_pmd))
2440 folio_set_dirty(folio);
2441 if (!folio_test_referenced(folio) && pmd_young(old_pmd))
2442 folio_set_referenced(folio);
2443 folio_remove_rmap_pmd(folio, page, vma);
2446 add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR);
2450 if (is_huge_zero_pmd(*pmd)) {
2452 * FIXME: Do we want to invalidate secondary mmu by calling
2453 * mmu_notifier_arch_invalidate_secondary_tlbs() see comments below
2454 * inside __split_huge_pmd() ?
2456 * We are going from a zero huge page write protected to zero
2457 * small page also write protected, so it does not seem useful
2458 * to invalidate secondary mmu at this time.
2460 return __split_huge_zero_page_pmd(vma, haddr, pmd);
2464 * Up to this point the pmd is present and huge and userland has
2465 * full access to the hugepage during the split (which happens in
2466 * place). If we overwrite the pmd with the not-huge version pointing
2467 * to the pte here (which of course we could if all CPUs were bug
2468 * free), userland could trigger a small page size TLB miss on the
2469 * small sized TLB while the hugepage TLB entry is still established in
2470 * the huge TLB. Some CPUs don't like that.
2471 * See http://support.amd.com/TechDocs/41322_10h_Rev_Gd.pdf, Erratum
2472 * 383 on page 105. Intel should be safe but also warns that it's
2473 * only safe if the permission and cache attributes of the two entries
2474 * loaded in the two TLBs are identical (which should be the case here).
2475 * But it is generally safer to never allow small and huge TLB entries
2476 * for the same virtual address to be loaded simultaneously. So instead
2477 * of doing "pmd_populate(); flush_pmd_tlb_range();" we first mark the
2478 * current pmd notpresent (atomically because here the pmd_trans_huge
2479 * must remain set at all times on the pmd until the split is complete
2480 * for this pmd), then we flush the SMP TLB and finally we write the
2481 * non-huge version of the pmd entry with pmd_populate.
2483 old_pmd = pmdp_invalidate(vma, haddr, pmd);
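	/*
	 * Sketch of the ordering mandated above (illustrative; this is what
	 * the surrounding code does):
	 *
	 *	old_pmd = pmdp_invalidate(vma, haddr, pmd);
	 *		- pmd marked notpresent, TLB flushed
	 *	... fill the page table with ptes ...
	 *	smp_wmb();
	 *		- ptes made visible before the pmd
	 *	pmd_populate(mm, pmd, pgtable);
	 *		- install the non-huge pmd
	 */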
2485 pmd_migration = is_pmd_migration_entry(old_pmd);
2486 if (unlikely(pmd_migration)) {
2489 entry = pmd_to_swp_entry(old_pmd);
2490 page = pfn_swap_entry_to_page(entry);
2491 write = is_writable_migration_entry(entry);
2493 anon_exclusive = is_readable_exclusive_migration_entry(entry);
2494 young = is_migration_entry_young(entry);
2495 dirty = is_migration_entry_dirty(entry);
2496 soft_dirty = pmd_swp_soft_dirty(old_pmd);
2497 uffd_wp = pmd_swp_uffd_wp(old_pmd);
2499 page = pmd_page(old_pmd);
2500 folio = page_folio(page);
2501 if (pmd_dirty(old_pmd)) {
2503 folio_set_dirty(folio);
2505 write = pmd_write(old_pmd);
2506 young = pmd_young(old_pmd);
2507 soft_dirty = pmd_soft_dirty(old_pmd);
2508 uffd_wp = pmd_uffd_wp(old_pmd);
2510 VM_WARN_ON_FOLIO(!folio_ref_count(folio), folio);
2511 VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
2514 * Without "freeze", we'll simply split the PMD, propagating the
2515 * PageAnonExclusive() flag for each PTE by setting it for
2516 * each subpage -- no need to (temporarily) clear.
2518 * With "freeze" we want to replace mapped pages by
2519 * migration entries right away. This is only possible if we
2520 * managed to clear PageAnonExclusive() -- see
2521 * set_pmd_migration_entry().
2523 * In case we cannot clear PageAnonExclusive(), split the PMD
2524 * only and let try_to_migrate_one() fail later.
2526 * See folio_try_share_anon_rmap_pmd(): invalidate PMD first.
2528 anon_exclusive = PageAnonExclusive(page);
2529 if (freeze && anon_exclusive &&
2530 folio_try_share_anon_rmap_pmd(folio, page))
2533 rmap_t rmap_flags = RMAP_NONE;
2535 folio_ref_add(folio, HPAGE_PMD_NR - 1);
2537 rmap_flags |= RMAP_EXCLUSIVE;
2538 folio_add_anon_rmap_ptes(folio, page, HPAGE_PMD_NR,
2539 vma, haddr, rmap_flags);
2544 * Withdraw the table only after we mark the pmd entry invalid.
2545 * This is critical for some architectures (Power).
2547 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2548 pmd_populate(mm, &_pmd, pgtable);
2550 pte = pte_offset_map(&_pmd, haddr);
2552 for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
2555 * Note that NUMA hinting access restrictions are not
2556 * transferred to avoid any possibility of altering
2557 * permissions across VMAs.
2559 if (freeze || pmd_migration) {
2560 swp_entry_t swp_entry;
2562 swp_entry = make_writable_migration_entry(
2563 page_to_pfn(page + i));
2564 else if (anon_exclusive)
2565 swp_entry = make_readable_exclusive_migration_entry(
2566 page_to_pfn(page + i));
2568 swp_entry = make_readable_migration_entry(
2569 page_to_pfn(page + i));
2571 swp_entry = make_migration_entry_young(swp_entry);
2573 swp_entry = make_migration_entry_dirty(swp_entry);
2574 entry = swp_entry_to_pte(swp_entry);
2576 entry = pte_swp_mksoft_dirty(entry);
2578 entry = pte_swp_mkuffd_wp(entry);
2580 entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot));
2582 entry = pte_mkwrite(entry, vma);
2584 entry = pte_mkold(entry);
2585 /* NOTE: this may set soft-dirty too on some archs */
2587 entry = pte_mkdirty(entry);
2589 entry = pte_mksoft_dirty(entry);
2591 entry = pte_mkuffd_wp(entry);
2593 VM_BUG_ON(!pte_none(ptep_get(pte)));
2594 set_pte_at(mm, addr, pte, entry);
2600 folio_remove_rmap_pmd(folio, page, vma);
2604 smp_wmb(); /* make pte visible before pmd */
2605 pmd_populate(mm, pmd, pgtable);
2608 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
2609 unsigned long address, bool freeze, struct folio *folio)
2612 struct mmu_notifier_range range;
2614 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
2615 address & HPAGE_PMD_MASK,
2616 (address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE);
2617 mmu_notifier_invalidate_range_start(&range);
2618 ptl = pmd_lock(vma->vm_mm, pmd);
2621 * If the caller asks to set up a migration entry, we need a folio to check
2622 * the pmd against. Otherwise we can end up replacing the wrong folio.
2624 VM_BUG_ON(freeze && !folio);
2625 VM_WARN_ON_ONCE(folio && !folio_test_locked(folio));
2627 if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) ||
2628 is_pmd_migration_entry(*pmd)) {
2630 * It's safe to call pmd_page when folio is set because it's
2631 * guaranteed that pmd is present.
2633 if (folio && folio != page_folio(pmd_page(*pmd)))
2635 __split_huge_pmd_locked(vma, pmd, range.start, freeze);
2640 mmu_notifier_invalidate_range_end(&range);
2643 void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
2644 bool freeze, struct folio *folio)
2646 pmd_t *pmd = mm_find_pmd(vma->vm_mm, address);
2651 __split_huge_pmd(vma, pmd, address, freeze, folio);
2654 static inline void split_huge_pmd_if_needed(struct vm_area_struct *vma, unsigned long address)
2657 * If the new address isn't hpage aligned and it could previously
2658 * contain a hugepage: check if we need to split a huge pmd.
2660 if (!IS_ALIGNED(address, HPAGE_PMD_SIZE) &&
2661 range_in_vma(vma, ALIGN_DOWN(address, HPAGE_PMD_SIZE),
2662 ALIGN(address, HPAGE_PMD_SIZE)))
2663 split_huge_pmd_address(vma, address, false, NULL);
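/*
 * Worked example (illustrative, assuming HPAGE_PMD_SIZE == 2MB): address
 * 0x2100000 is not 2MB-aligned; ALIGN_DOWN() gives 0x2000000 and ALIGN()
 * gives 0x2200000, so if that whole 2MB range lies within the vma, a huge
 * pmd may span the address and gets split.
 */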
2666 void vma_adjust_trans_huge(struct vm_area_struct *vma,
2667 unsigned long start,
2671 /* Check if we need to split start first. */
2672 split_huge_pmd_if_needed(vma, start);
2674 /* Check if we need to split end next. */
2675 split_huge_pmd_if_needed(vma, end);
2678 * If we're also updating the next vma vm_start,
2679 * check if we need to split it.
2681 if (adjust_next > 0) {
2682 struct vm_area_struct *next = find_vma(vma->vm_mm, vma->vm_end);
2683 unsigned long nstart = next->vm_start;
2684 nstart += adjust_next;
2685 split_huge_pmd_if_needed(next, nstart);
2689 static void unmap_folio(struct folio *folio)
2691 enum ttu_flags ttu_flags = TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD |
2692 TTU_SYNC | TTU_BATCH_FLUSH;
2694 VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
2697 * Anon pages need migration entries to preserve them, but file
2698 * pages can simply be left unmapped, then faulted back on demand.
2699 * If that is ever changed (perhaps for mlock), update remap_page().
2701 if (folio_test_anon(folio))
2702 try_to_migrate(folio, ttu_flags);
2704 try_to_unmap(folio, ttu_flags | TTU_IGNORE_MLOCK);
2706 try_to_unmap_flush();
2709 static void remap_page(struct folio *folio, unsigned long nr)
2713 /* If unmap_folio() uses try_to_migrate() on file, remove this check */
2714 if (!folio_test_anon(folio))
2717 remove_migration_ptes(folio, folio, true);
2718 i += folio_nr_pages(folio);
2721 folio = folio_next(folio);
2725 static void lru_add_page_tail(struct page *head, struct page *tail,
2726 struct lruvec *lruvec, struct list_head *list)
2728 VM_BUG_ON_PAGE(!PageHead(head), head);
2729 VM_BUG_ON_PAGE(PageCompound(tail), head);
2730 VM_BUG_ON_PAGE(PageLRU(tail), head);
2731 lockdep_assert_held(&lruvec->lru_lock);
2734 /* page reclaim is reclaiming a huge page */
2735 VM_WARN_ON(PageLRU(head));
2737 list_add_tail(&tail->lru, list);
2739 /* head is still on lru (and we have it frozen) */
2740 VM_WARN_ON(!PageLRU(head));
2741 if (PageUnevictable(tail))
2742 tail->mlock_count = 0;
2744 list_add_tail(&tail->lru, &head->lru);
2749 static void __split_huge_page_tail(struct folio *folio, int tail,
2750 struct lruvec *lruvec, struct list_head *list)
2752 struct page *head = &folio->page;
2753 struct page *page_tail = head + tail;
2755 * Careful: new_folio is not a "real" folio until we have cleared PageTail.
2756 * Don't pass it around before clear_compound_head().
2758 struct folio *new_folio = (struct folio *)page_tail;
2760 VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail);
2763 * Clone page flags before unfreezing refcount.
2765 * A flags change might follow a successful get_page_unless_zero(),
2766 * for example lock_page(), which sets PG_waiters.
2768 * Note that for mapped sub-pages of an anonymous THP,
2769 * PG_anon_exclusive has been cleared in unmap_folio() and is instead stored
2770 * in the migration entry, from where remap_page() will restore it.
2771 * We can still have PG_anon_exclusive set on effectively unmapped and
2772 * unreferenced sub-pages of an anonymous THP: we can simply drop
2773 * PG_anon_exclusive (-> PG_mappedtodisk) for these here.
2775 page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
2776 page_tail->flags |= (head->flags &
2777 ((1L << PG_referenced) |
2778 (1L << PG_swapbacked) |
2779 (1L << PG_swapcache) |
2780 (1L << PG_mlocked) |
2781 (1L << PG_uptodate) |
2783 (1L << PG_workingset) |
2785 (1L << PG_unevictable) |
2786 #ifdef CONFIG_ARCH_USES_PG_ARCH_X
2791 LRU_GEN_MASK | LRU_REFS_MASK));
2793 /* ->mapping in first and second tail page is replaced by other uses */
2794 VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
2796 page_tail->mapping = head->mapping;
2797 page_tail->index = head->index + tail;
2800 * page->private should not be set in tail pages. Fix up and warn once
2801 * if private is unexpectedly set.
2803 if (unlikely(page_tail->private)) {
2804 VM_WARN_ON_ONCE_PAGE(true, page_tail);
2805 page_tail->private = 0;
2807 if (folio_test_swapcache(folio))
2808 new_folio->swap.val = folio->swap.val + tail;
2810 /* Page flags must be visible before we make the page non-compound. */
2814 * Clear PageTail before unfreezing page refcount.
2816 * A put_page() might follow a successful get_page_unless_zero(),
2817 * and it needs a correct compound_head().
2819 clear_compound_head(page_tail);
2821 /* Finally unfreeze refcount. Additional reference from page cache. */
2822 page_ref_unfreeze(page_tail, 1 + (!folio_test_anon(folio) ||
2823 folio_test_swapcache(folio)));
2825 if (folio_test_young(folio))
2826 folio_set_young(new_folio);
2827 if (folio_test_idle(folio))
2828 folio_set_idle(new_folio);
2830 folio_xchg_last_cpupid(new_folio, folio_last_cpupid(folio));
2833 * always add to the tail because some iterators expect new
2834 * pages to show after the currently processed elements - e.g. migrate_pages
2837 lru_add_page_tail(head, page_tail, lruvec, list);
2840 static void __split_huge_page(struct page *page, struct list_head *list,
2843 struct folio *folio = page_folio(page);
2844 struct page *head = &folio->page;
2845 struct lruvec *lruvec;
2846 struct address_space *swap_cache = NULL;
2847 unsigned long offset = 0;
2848 unsigned int nr = thp_nr_pages(head);
2849 int i, nr_dropped = 0;
2851 /* complete memcg work before adding pages to LRU */
2852 split_page_memcg(head, nr);
2854 if (folio_test_anon(folio) && folio_test_swapcache(folio)) {
2855 offset = swp_offset(folio->swap);
2856 swap_cache = swap_address_space(folio->swap);
2857 xa_lock(&swap_cache->i_pages);
2860 /* lock lru list/PageCompound, ref frozen by page_ref_freeze */
2861 lruvec = folio_lruvec_lock(folio);
2863 ClearPageHasHWPoisoned(head);
2865 for (i = nr - 1; i >= 1; i--) {
2866 __split_huge_page_tail(folio, i, lruvec, list);
2867 /* Some pages can be beyond EOF: drop them from page cache */
2868 if (head[i].index >= end) {
2869 struct folio *tail = page_folio(head + i);
2871 if (shmem_mapping(head->mapping))
2873 else if (folio_test_clear_dirty(tail))
2874 folio_account_cleaned(tail,
2875 inode_to_wb(folio->mapping->host));
2876 __filemap_remove_folio(tail, NULL);
2878 } else if (!PageAnon(page)) {
2879 __xa_store(&head->mapping->i_pages, head[i].index,
2881 } else if (swap_cache) {
2882 __xa_store(&swap_cache->i_pages, offset + i,
2887 ClearPageCompound(head);
2888 unlock_page_lruvec(lruvec);
2889 /* Caller disabled irqs, so they are still disabled here */
2891 split_page_owner(head, nr);
2893 /* See comment in __split_huge_page_tail() */
2894 if (PageAnon(head)) {
2895 /* Additional pin to swap cache */
2896 if (PageSwapCache(head)) {
2897 page_ref_add(head, 2);
2898 xa_unlock(&swap_cache->i_pages);
2903 /* Additional pin to page cache */
2904 page_ref_add(head, 2);
2905 xa_unlock(&head->mapping->i_pages);
2910 shmem_uncharge(head->mapping->host, nr_dropped);
2911 remap_page(folio, nr);
2913 if (folio_test_swapcache(folio))
2914 split_swap_cluster(folio->swap);
2916 for (i = 0; i < nr; i++) {
2917 struct page *subpage = head + i;
2918 if (subpage == page)
2920 unlock_page(subpage);
2923 * Subpages may be freed if there wasn't any mapping
2924 * left, as when add_to_swap() is running on an lru page
2925 * that had its mapping zapped. Freeing these pages
2926 * requires taking the lru_lock, so we do the put_page
2927 * of the tail pages after the split is complete.
2929 free_page_and_swap_cache(subpage);
2933 /* Racy check whether the huge page can be split */
2934 bool can_split_folio(struct folio *folio, int *pextra_pins)
2938 /* Additional pins from page cache */
2939 if (folio_test_anon(folio))
2940 extra_pins = folio_test_swapcache(folio) ?
2941 folio_nr_pages(folio) : 0;
2943 extra_pins = folio_nr_pages(folio);
2945 *pextra_pins = extra_pins;
2946 return folio_mapcount(folio) == folio_ref_count(folio) - extra_pins - 1;
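/*
 * Worked example (illustrative): for an anon folio that is not in the
 * swapcache, extra_pins is 0 and the check above reduces to
 *
 *	folio_mapcount(folio) == folio_ref_count(folio) - 1
 *
 * i.e. the only reference beyond the mappings must be the caller's pin.
 */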
2950 * This function splits a huge page into normal pages. @page can point to any
2951 * subpage of the huge page to split. The split doesn't change the position of @page.
2953 * The caller must hold a pin on @page, otherwise the split fails with -EBUSY.
2954 * The huge page must be locked.
2956 * If @list is null, tail pages will be added to the LRU list, otherwise to @list.
2958 * Both head page and tail pages will inherit mapping, flags, and so on from the huge page.
2961 * The GUP pin and PG_locked are transferred to @page. The rest of the subpages
2962 * can be freed if they are not mapped.
2964 * Returns 0 if the hugepage is split successfully.
2965 * Returns -EBUSY if the page is pinned or if the anon_vma disappeared from under us.
2968 int split_huge_page_to_list(struct page *page, struct list_head *list)
2970 struct folio *folio = page_folio(page);
2971 struct deferred_split *ds_queue = get_deferred_split_queue(folio);
2972 XA_STATE(xas, &folio->mapping->i_pages, folio->index);
2973 struct anon_vma *anon_vma = NULL;
2974 struct address_space *mapping = NULL;
2975 int extra_pins, ret;
2979 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
2980 VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
2982 is_hzp = is_huge_zero_page(&folio->page);
2984 pr_warn_ratelimited("Called split_huge_page for huge zero page\n");
2988 if (folio_test_writeback(folio))
2991 if (folio_test_anon(folio)) {
2993 * The caller does not necessarily hold an mmap_lock that would
2994 * prevent the anon_vma disappearing, so we first take a
2995 * reference to it and then lock the anon_vma for write. This
2996 * is similar to folio_lock_anon_vma_read except the write lock
2997 * is taken to serialise against parallel split or collapse operations.
3000 anon_vma = folio_get_anon_vma(folio);
3007 anon_vma_lock_write(anon_vma);
3011 mapping = folio->mapping;
3019 gfp = current_gfp_context(mapping_gfp_mask(mapping) &
3022 if (!filemap_release_folio(folio, gfp)) {
3027 xas_split_alloc(&xas, folio, folio_order(folio), gfp);
3028 if (xas_error(&xas)) {
3029 ret = xas_error(&xas);
3034 i_mmap_lock_read(mapping);
3037 * __split_huge_page() may need to trim off pages beyond EOF:
3038 * but on 32-bit, i_size_read() takes an irq-unsafe seqlock,
3039 * which cannot be nested inside the page tree lock. So note
3040 * end now: i_size itself may be changed at any moment, but
3041 * folio lock is good enough to serialize the trimming.
3043 end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
3044 if (shmem_mapping(mapping))
3045 end = shmem_fallocend(mapping->host, end);
3049 * Racy check if we can split the page, before unmap_folio() will split PMDs
3052 if (!can_split_folio(folio, &extra_pins)) {
3059 /* block interrupt reentry in xa_lock and spinlock */
3060 local_irq_disable();
3063 * Check if the folio is present in page cache.
3064 * We assume all tail pages are present too, if the folio is there.
3068 if (xas_load(&xas) != folio)
3072 /* Prevent deferred_split_scan() touching ->_refcount */
3073 spin_lock(&ds_queue->split_queue_lock);
3074 if (folio_ref_freeze(folio, 1 + extra_pins)) {
3075 if (!list_empty(&folio->_deferred_list)) {
3076 ds_queue->split_queue_len--;
3077 list_del(&folio->_deferred_list);
3079 spin_unlock(&ds_queue->split_queue_lock);
3081 int nr = folio_nr_pages(folio);
3083 xas_split(&xas, folio, folio_order(folio));
3084 if (folio_test_pmd_mappable(folio)) {
3085 if (folio_test_swapbacked(folio)) {
3086 __lruvec_stat_mod_folio(folio,
3087 NR_SHMEM_THPS, -nr);
3089 __lruvec_stat_mod_folio(folio,
3091 filemap_nr_thps_dec(mapping);
3096 __split_huge_page(page, list, end);
3099 spin_unlock(&ds_queue->split_queue_lock);
3104 remap_page(folio, folio_nr_pages(folio));
3110 anon_vma_unlock_write(anon_vma);
3111 put_anon_vma(anon_vma);
3114 i_mmap_unlock_read(mapping);
3117 count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
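/*
 * Minimal usage sketch (illustrative; deferred_split_scan() below drives
 * the same pattern through split_folio()):
 *
 *	if (folio_trylock(folio)) {
 *		if (!split_folio(folio))
 *			split++;
 *		folio_unlock(folio);
 *	}
 */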
3121 void folio_undo_large_rmappable(struct folio *folio)
3123 struct deferred_split *ds_queue;
3124 unsigned long flags;
3127 * At this point, there is no one trying to add the folio to
3128 * deferred_list. If folio is not in deferred_list, it's safe
3129 * to check without acquiring the split_queue_lock.
3131 if (data_race(list_empty(&folio->_deferred_list)))
3134 ds_queue = get_deferred_split_queue(folio);
3135 spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
3136 if (!list_empty(&folio->_deferred_list)) {
3137 ds_queue->split_queue_len--;
3138 list_del_init(&folio->_deferred_list);
3140 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
3143 void deferred_split_folio(struct folio *folio)
3145 struct deferred_split *ds_queue = get_deferred_split_queue(folio);
3147 struct mem_cgroup *memcg = folio_memcg(folio);
3149 unsigned long flags;
3151 VM_BUG_ON_FOLIO(folio_order(folio) < 2, folio);
3154 * The try_to_unmap() in the page reclaim path might reach here too;
3155 * this may cause a race condition that corrupts the deferred split queue.
3156 * And, if page reclaim is already handling the same folio, it is
3157 * unnecessary to handle it again in shrinker.
3159 * Check the swapcache flag to determine if the folio is being
3160 * handled by page reclaim since THP swap would add the folio into
3161 * swap cache before calling try_to_unmap().
3163 if (folio_test_swapcache(folio))
3166 if (!list_empty(&folio->_deferred_list))
3169 spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
3170 if (list_empty(&folio->_deferred_list)) {
3171 count_vm_event(THP_DEFERRED_SPLIT_PAGE);
3172 list_add_tail(&folio->_deferred_list, &ds_queue->split_queue);
3173 ds_queue->split_queue_len++;
3176 set_shrinker_bit(memcg, folio_nid(folio),
3177 deferred_split_shrinker->id);
3180 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
3183 static unsigned long deferred_split_count(struct shrinker *shrink,
3184 struct shrink_control *sc)
3186 struct pglist_data *pgdata = NODE_DATA(sc->nid);
3187 struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
3191 ds_queue = &sc->memcg->deferred_split_queue;
3193 return READ_ONCE(ds_queue->split_queue_len);
3196 static unsigned long deferred_split_scan(struct shrinker *shrink,
3197 struct shrink_control *sc)
3199 struct pglist_data *pgdata = NODE_DATA(sc->nid);
3200 struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
3201 unsigned long flags;
3203 struct folio *folio, *next;
3208 ds_queue = &sc->memcg->deferred_split_queue;
3211 spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
3212 /* Take pin on all head pages to avoid freeing them under us */
3213 list_for_each_entry_safe(folio, next, &ds_queue->split_queue,
3215 if (folio_try_get(folio)) {
3216 list_move(&folio->_deferred_list, &list);
3218 /* We lost race with folio_put() */
3219 list_del_init(&folio->_deferred_list);
3220 ds_queue->split_queue_len--;
3222 if (!--sc->nr_to_scan)
3225 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
3227 list_for_each_entry_safe(folio, next, &list, _deferred_list) {
3228 if (!folio_trylock(folio))
3230 /* split_huge_page() removes page from list on success */
3231 if (!split_folio(folio))
3233 folio_unlock(folio);
3238 spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
3239 list_splice_tail(&list, &ds_queue->split_queue);
3240 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
3243 * Stop the shrinker if we didn't split any page but the queue is empty.
3244 * This can happen if pages were freed under us.
3246 if (!split && list_empty(&ds_queue->split_queue))
3251 #ifdef CONFIG_DEBUG_FS
3252 static void split_huge_pages_all(void)
3256 struct folio *folio;
3257 unsigned long pfn, max_zone_pfn;
3258 unsigned long total = 0, split = 0;
3260 pr_debug("Split all THPs\n");
3261 for_each_zone(zone) {
3262 if (!managed_zone(zone))
3264 max_zone_pfn = zone_end_pfn(zone);
3265 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
3268 page = pfn_to_online_page(pfn);
3269 if (!page || PageTail(page))
3271 folio = page_folio(page);
3272 if (!folio_try_get(folio))
3275 if (unlikely(page_folio(page) != folio))
3278 if (zone != folio_zone(folio))
3281 if (!folio_test_large(folio) ||
3282 folio_test_hugetlb(folio) ||
3283 !folio_test_lru(folio))
3288 nr_pages = folio_nr_pages(folio);
3289 if (!split_folio(folio))
3291 pfn += nr_pages - 1;
3292 folio_unlock(folio);
3299 pr_debug("%lu of %lu THP split\n", split, total);
3302 static inline bool vma_not_suitable_for_thp_split(struct vm_area_struct *vma)
3304 return vma_is_special_huge(vma) || (vma->vm_flags & VM_IO) ||
3305 is_vm_hugetlb_page(vma);
3308 static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
3309 unsigned long vaddr_end)
3312 struct task_struct *task;
3313 struct mm_struct *mm;
3314 unsigned long total = 0, split = 0;
3317 vaddr_start &= PAGE_MASK;
3318 vaddr_end &= PAGE_MASK;
3320 /* Find the task_struct from pid */
3322 task = find_task_by_vpid(pid);
3328 get_task_struct(task);
3331 /* Find the mm_struct */
3332 mm = get_task_mm(task);
3333 put_task_struct(task);
3340 pr_debug("Split huge pages in pid: %d, vaddr: [0x%lx - 0x%lx]\n",
3341 pid, vaddr_start, vaddr_end);
3345 * always increase addr by PAGE_SIZE, since we could have a PTE page
3346 * table filled with PTE-mapped THPs, each of which is distinct.
3348 for (addr = vaddr_start; addr < vaddr_end; addr += PAGE_SIZE) {
3349 struct vm_area_struct *vma = vma_lookup(mm, addr);
3351 struct folio *folio;
3356 /* skip special VMA and hugetlb VMA */
3357 if (vma_not_suitable_for_thp_split(vma)) {
3362 /* FOLL_DUMP to ignore special (like zero) pages */
3363 page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
3365 if (IS_ERR_OR_NULL(page))
3368 folio = page_folio(page);
3369 if (!is_transparent_hugepage(folio))
3373 if (!can_split_folio(folio, NULL))
3376 if (!folio_trylock(folio))
3379 if (!split_folio(folio))
3382 folio_unlock(folio);
3387 mmap_read_unlock(mm);
3390 pr_debug("%lu of %lu THP split\n", split, total);
3396 static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
3399 struct filename *file;
3400 struct file *candidate;
3401 struct address_space *mapping;
3405 unsigned long total = 0, split = 0;
3407 file = getname_kernel(file_path);
3411 candidate = file_open_name(file, O_RDONLY, 0);
3412 if (IS_ERR(candidate))
3415 pr_debug("split file-backed THPs in file: %s, page offset: [0x%lx - 0x%lx]\n",
3416 file_path, off_start, off_end);
3418 mapping = candidate->f_mapping;
3420 for (index = off_start; index < off_end; index += nr_pages) {
3421 struct folio *folio = filemap_get_folio(mapping, index);
3427 if (!folio_test_large(folio))
3431 nr_pages = folio_nr_pages(folio);
3433 if (!folio_trylock(folio))
3436 if (!split_folio(folio))
3439 folio_unlock(folio);
3445 filp_close(candidate, NULL);
3448 pr_debug("%lu of %lu file-backed THP split\n", split, total);
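/*
 * Example interface usage (a sketch; the accepted input formats are the
 * ones parsed by split_huge_pages_write() below):
 *
 *	echo 1 > /sys/kernel/debug/split_huge_pages
 *		split all THPs system-wide
 *	echo "<pid>,0x<vaddr_start>,0x<vaddr_end>" > /sys/kernel/debug/split_huge_pages
 *		split THPs in the given virtual address range of a process
 *	echo "/path/to/file,0x<off_start>,0x<off_end>" > /sys/kernel/debug/split_huge_pages
 *		split file-backed THPs in the given page offset range
 */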
3454 #define MAX_INPUT_BUF_SZ 255
3456 static ssize_t split_huge_pages_write(struct file *file, const char __user *buf,
3457 size_t count, loff_t *ppos)
3459 static DEFINE_MUTEX(split_debug_mutex);
3461 /* hold pid, start_vaddr, end_vaddr or file_path, off_start, off_end */
3462 char input_buf[MAX_INPUT_BUF_SZ];
3464 unsigned long vaddr_start, vaddr_end;
3466 ret = mutex_lock_interruptible(&split_debug_mutex);
3472 memset(input_buf, 0, MAX_INPUT_BUF_SZ);
3473 if (copy_from_user(input_buf, buf, min_t(size_t, count, MAX_INPUT_BUF_SZ)))
3476 input_buf[MAX_INPUT_BUF_SZ - 1] = '\0';
3478 if (input_buf[0] == '/') {
3480 char *buf = input_buf;
3481 char file_path[MAX_INPUT_BUF_SZ];
3482 pgoff_t off_start = 0, off_end = 0;
3483 size_t input_len = strlen(input_buf);
3485 tok = strsep(&buf, ",");
3487 strcpy(file_path, tok);
3493 ret = sscanf(buf, "0x%lx,0x%lx", &off_start, &off_end);
3498 ret = split_huge_pages_in_file(file_path, off_start, off_end);
3505 ret = sscanf(input_buf, "%d,0x%lx,0x%lx", &pid, &vaddr_start, &vaddr_end);
3506 if (ret == 1 && pid == 1) {
3507 split_huge_pages_all();
3508 ret = strlen(input_buf);
3510 } else if (ret != 3) {
3515 ret = split_huge_pages_pid(pid, vaddr_start, vaddr_end);
3517 ret = strlen(input_buf);
3519 mutex_unlock(&split_debug_mutex);
3524 static const struct file_operations split_huge_pages_fops = {
3525 .owner = THIS_MODULE,
3526 .write = split_huge_pages_write,
3527 .llseek = no_llseek,
3530 static int __init split_huge_pages_debugfs(void)
3532 debugfs_create_file("split_huge_pages", 0200, NULL, NULL,
3533 &split_huge_pages_fops);
3536 late_initcall(split_huge_pages_debugfs);
3539 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
3540 int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
3543 struct folio *folio = page_folio(page);
3544 struct vm_area_struct *vma = pvmw->vma;
3545 struct mm_struct *mm = vma->vm_mm;
3546 unsigned long address = pvmw->address;
3547 bool anon_exclusive;
3552 if (!(pvmw->pmd && !pvmw->pte))
3555 flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
3556 pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
3558 /* See folio_try_share_anon_rmap_pmd(): invalidate PMD first. */
3559 anon_exclusive = folio_test_anon(folio) && PageAnonExclusive(page);
3560 if (anon_exclusive && folio_try_share_anon_rmap_pmd(folio, page)) {
3561 set_pmd_at(mm, address, pvmw->pmd, pmdval);
3565 if (pmd_dirty(pmdval))
3566 folio_set_dirty(folio);
3567 if (pmd_write(pmdval))
3568 entry = make_writable_migration_entry(page_to_pfn(page));
3569 else if (anon_exclusive)
3570 entry = make_readable_exclusive_migration_entry(page_to_pfn(page));
3572 entry = make_readable_migration_entry(page_to_pfn(page));
3573 if (pmd_young(pmdval))
3574 entry = make_migration_entry_young(entry);
3575 if (pmd_dirty(pmdval))
3576 entry = make_migration_entry_dirty(entry);
3577 pmdswp = swp_entry_to_pmd(entry);
3578 if (pmd_soft_dirty(pmdval))
3579 pmdswp = pmd_swp_mksoft_dirty(pmdswp);
3580 if (pmd_uffd_wp(pmdval))
3581 pmdswp = pmd_swp_mkuffd_wp(pmdswp);
3582 set_pmd_at(mm, address, pvmw->pmd, pmdswp);
3583 folio_remove_rmap_pmd(folio, page, vma);
3585 trace_set_migration_pmd(address, pmd_val(pmdswp));
3590 void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
3592 struct folio *folio = page_folio(new);
3593 struct vm_area_struct *vma = pvmw->vma;
3594 struct mm_struct *mm = vma->vm_mm;
3595 unsigned long address = pvmw->address;
3596 unsigned long haddr = address & HPAGE_PMD_MASK;
3600 if (!(pvmw->pmd && !pvmw->pte))
3603 entry = pmd_to_swp_entry(*pvmw->pmd);
3605 pmde = mk_huge_pmd(new, READ_ONCE(vma->vm_page_prot));
3606 if (pmd_swp_soft_dirty(*pvmw->pmd))
3607 pmde = pmd_mksoft_dirty(pmde);
3608 if (is_writable_migration_entry(entry))
3609 pmde = pmd_mkwrite(pmde, vma);
3610 if (pmd_swp_uffd_wp(*pvmw->pmd))
3611 pmde = pmd_mkuffd_wp(pmde);
3612 if (!is_migration_entry_young(entry))
3613 pmde = pmd_mkold(pmde);
3614 /* NOTE: this may set soft-dirty too on some archs */
3615 if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
3616 pmde = pmd_mkdirty(pmde);
3618 if (folio_test_anon(folio)) {
3619 rmap_t rmap_flags = RMAP_NONE;
3621 if (!is_readable_migration_entry(entry))
3622 rmap_flags |= RMAP_EXCLUSIVE;
3624 folio_add_anon_rmap_pmd(folio, new, vma, haddr, rmap_flags);
3626 folio_add_file_rmap_pmd(folio, new, vma);
3628 VM_BUG_ON(pmd_write(pmde) && folio_test_anon(folio) && !PageAnonExclusive(new));
3629 set_pmd_at(mm, haddr, pvmw->pmd, pmde);
3631 /* No need to invalidate - it was non-present before */
3632 update_mmu_cache_pmd(vma, address, pvmw->pmd);
3633 trace_remove_migration_pmd(address, pmd_val(pmde));