mm/filemap: Support VM_HUGEPAGE for file mappings
author    Matthew Wilcox (Oracle) <willy@infradead.org>
Sun, 25 Jul 2021 03:37:13 +0000 (23:37 -0400)
committer Matthew Wilcox (Oracle) <willy@infradead.org>
Mon, 21 Mar 2022 17:01:36 +0000 (13:01 -0400)
If the VM_HUGEPAGE flag is set, attempt to allocate PMD-sized folios
during readahead, even if we have no history of readahead being
successful.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
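
For context, a minimal userspace sketch (not part of this commit) of how a mapping ends up with VM_HUGEPAGE set: calling madvise(MADV_HUGEPAGE) on a file-backed mmap() marks the VMA, so the first fault takes the new path in do_sync_mmap_readahead() in the patch below. The file name and mapping length here are hypothetical example values.

    #include <fcntl.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            size_t len = 4UL << 20;                      /* 4 MiB: two PMD-sized units on x86-64 */
            int fd = open("data.bin", O_RDONLY);         /* hypothetical file */
            char *p;

            if (fd < 0)
                    return 1;

            p = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
            if (p == MAP_FAILED)
                    return 1;

            /* Hint the kernel to use transparent huge pages; sets VM_HUGEPAGE on the VMA. */
            madvise(p, len, MADV_HUGEPAGE);

            /* The first read fault on the mapping goes through do_sync_mmap_readahead(). */
            (void)*(volatile char *)p;

            munmap(p, len);
            close(fd);
            return 0;
    }
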
mm/filemap.c

index fe76422..7608ee0 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2995,6 +2995,24 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
        struct file *fpin = NULL;
        unsigned int mmap_miss;
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+       /* Use the readahead code, even if readahead is disabled */
+       if (vmf->vma->vm_flags & VM_HUGEPAGE) {
+               fpin = maybe_unlock_mmap_for_io(vmf, fpin);
+               ractl._index &= ~((unsigned long)HPAGE_PMD_NR - 1);
+               ra->size = HPAGE_PMD_NR;
+               /*
+                * Fetch two PMD folios, so we get the chance to actually
+                * readahead, unless we've been told not to.
+                */
+               if (!(vmf->vma->vm_flags & VM_RAND_READ))
+                       ra->size *= 2;
+               ra->async_size = HPAGE_PMD_NR;
+               page_cache_ra_order(&ractl, ra, HPAGE_PMD_ORDER);
+               return fpin;
+       }
+#endif
+
        /* If we don't want any read-ahead, don't bother */
        if (vmf->vma->vm_flags & VM_RAND_READ)
                return fpin;
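
As a worked example of the sizing above (assuming x86-64 with 4 KiB base pages, where HPAGE_PMD_NR is 512 and HPAGE_PMD_ORDER is 9; the numbers are illustrative, not taken from the patch): a fault at page index 1234 is rounded down by the mask to index 1024, a 2 MiB-aligned boundary; ra->size starts at 512 pages (one PMD-sized folio) and doubles to 1024 unless VM_RAND_READ is set; and ra->async_size of 512 means the second folio serves as the asynchronous readahead window, so the next folio is fetched before the application reaches it.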