Merge tag 'fpga-fixes-for-5.7' of git://git.kernel.org/pub/scm/linux/kernel/git/mdf...
[linux-2.6-microblaze.git] / mm / khugepaged.c
index c659c68..99d77ff 100644 (file)
@@ -29,6 +29,7 @@ enum scan_result {
        SCAN_PMD_NULL,
        SCAN_EXCEED_NONE_PTE,
        SCAN_PTE_NON_PRESENT,
+       SCAN_PTE_UFFD_WP,
        SCAN_PAGE_RO,
        SCAN_LACK_REFERENCED_PAGE,
        SCAN_PAGE_NULL,
@@ -414,8 +415,6 @@ static bool hugepage_vma_check(struct vm_area_struct *vma,
            (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
             vma->vm_file &&
             (vm_flags & VM_DENYWRITE))) {
-               if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
-                       return false;
                return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
                                HPAGE_PMD_NR);
        }
@@ -513,7 +512,7 @@ void __khugepaged_exit(struct mm_struct *mm)
 
 static void release_pte_page(struct page *page)
 {
-       dec_node_page_state(page, NR_ISOLATED_ANON + page_is_file_cache(page));
+       dec_node_page_state(page, NR_ISOLATED_ANON + page_is_file_lru(page));
        unlock_page(page);
        putback_lru_page(page);
 }
@@ -613,7 +612,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
                        goto out;
                }
                inc_node_page_state(page,
-                               NR_ISOLATED_ANON + page_is_file_cache(page));
+                               NR_ISOLATED_ANON + page_is_file_lru(page));
                VM_BUG_ON_PAGE(!PageLocked(page), page);
                VM_BUG_ON_PAGE(PageLRU(page), page);
 
@@ -1139,6 +1138,15 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
                pte_t pteval = *_pte;
                if (is_swap_pte(pteval)) {
                        if (++unmapped <= khugepaged_max_ptes_swap) {
+                               /*
+                                * Always be strict with uffd-wp
+                                * enabled swap entries.  Please see
+                                * comment below for pte_uffd_wp().
+                                */
+                               if (pte_swp_uffd_wp(pteval)) {
+                                       result = SCAN_PTE_UFFD_WP;
+                                       goto out_unmap;
+                               }
                                continue;
                        } else {
                                result = SCAN_EXCEED_SWAP_PTE;
@@ -1158,6 +1166,19 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
                        result = SCAN_PTE_NON_PRESENT;
                        goto out_unmap;
                }
+               if (pte_uffd_wp(pteval)) {
+                       /*
+                        * Don't collapse the page if any of the small
+                        * PTEs are armed with uffd write protection.
+                        * Here we can also mark the new huge pmd as
+                        * write protected if any of the small ones is
+                        * marked but that could bring unknown
+                        * userfault messages that fall outside of
+                        * the registered range.  So, just be simple.
+                        */
+                       result = SCAN_PTE_UFFD_WP;
+                       goto out_unmap;
+               }
                if (pte_write(pteval))
                        writable = true;
 
@@ -1258,7 +1279,7 @@ static void collect_mm_slot(struct mm_slot *mm_slot)
        }
 }
 
-#if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE)
+#ifdef CONFIG_SHMEM
 /*
  * Notify khugepaged that given addr of the mm is pte-mapped THP. Then
  * khugepaged should try to collapse the page table.
@@ -1973,6 +1994,8 @@ skip:
                if (khugepaged_scan.address < hstart)
                        khugepaged_scan.address = hstart;
                VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
+               if (shmem_file(vma->vm_file) && !shmem_huge_enabled(vma))
+                       goto skip;
 
                while (khugepaged_scan.address < hend) {
                        int ret;
@@ -1984,14 +2007,10 @@ skip:
                                  khugepaged_scan.address + HPAGE_PMD_SIZE >
                                  hend);
                        if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
-                               struct file *file;
+                               struct file *file = get_file(vma->vm_file);
                                pgoff_t pgoff = linear_page_index(vma,
                                                khugepaged_scan.address);
 
-                               if (shmem_file(vma->vm_file)
-                                   && !shmem_huge_enabled(vma))
-                                       goto skip;
-                               file = get_file(vma->vm_file);
                                up_read(&mm->mmap_sem);
                                ret = 1;
                                khugepaged_scan_file(mm, file, pgoff, hpage);