mm/khugepaged: bypass unnecessary scans with MMF_DISABLE_THP check
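
khugepaged keeps every mm it has entered on its mm_slots list and periodically walks all of that mm's VMAs looking for collapse candidates. Once a process has opted its whole address space out of THP, that walk can never collapse anything, so the scan loop may as well drop the mm early instead of iterating over its VMAs; that is what the new hpage_collapse_test_exit_or_disable() helper below checks for. The MMF_DISABLE_THP flag is normally set from userspace via prctl(PR_SET_THP_DISABLE). As a rough illustration only (this program is not part of the patch, and the 256 MiB region size and the suggestion to watch AnonHugePages in smaps are illustrative assumptions), a process that khugepaged would now skip might look like this:

    /*
     * Illustrative userspace sketch: disable THP for the whole mm via
     * prctl(PR_SET_THP_DISABLE), which sets MMF_DISABLE_THP in mm->flags.
     * After that, any khugepaged scan of this mm is wasted work, which the
     * check added by this patch short-circuits.
     */
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <sys/prctl.h>
    #include <unistd.h>

    #define REGION_SIZE (256UL << 20)   /* 256 MiB of anonymous memory */

    int main(void)
    {
            /* Opt this mm out of THP entirely. */
            if (prctl(PR_SET_THP_DISABLE, 1, 0, 0, 0))
                    perror("prctl(PR_SET_THP_DISABLE)");

            char *buf = mmap(NULL, REGION_SIZE, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (buf == MAP_FAILED) {
                    perror("mmap");
                    return 1;
            }

            /* Touch every page so the region is populated but, with THP
             * disabled, never collapsed into huge pages. */
            memset(buf, 0, REGION_SIZE);

            /* Leave the process around; AnonHugePages in /proc/self/smaps
             * should stay at 0 for this region. */
            sleep(60);
            return 0;
    }

With the change applied, such an mm is not only skipped in khugepaged_scan_mm_slot() but also released from its mm_slot by collect_mm_slot(), so khugepaged stops revisiting it altogether.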
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index fe43fbc..2771fc0 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -410,6 +410,12 @@ static inline int hpage_collapse_test_exit(struct mm_struct *mm)
        return atomic_read(&mm->mm_users) == 0;
 }
 
+static inline int hpage_collapse_test_exit_or_disable(struct mm_struct *mm)
+{
+       return hpage_collapse_test_exit(mm) ||
+              test_bit(MMF_DISABLE_THP, &mm->flags);
+}
+
 void __khugepaged_enter(struct mm_struct *mm)
 {
        struct khugepaged_mm_slot *mm_slot;
@@ -1422,7 +1428,7 @@ static void collect_mm_slot(struct khugepaged_mm_slot *mm_slot)
 
        lockdep_assert_held(&khugepaged_mm_lock);
 
-       if (hpage_collapse_test_exit(mm)) {
+       if (hpage_collapse_test_exit_or_disable(mm)) {
                /* free mm_slot */
                hash_del(&slot->hash);
                list_del(&slot->mm_node);
@@ -2360,7 +2366,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
                goto breakouterloop_mmap_lock;
 
        progress++;
-       if (unlikely(hpage_collapse_test_exit(mm)))
+       if (unlikely(hpage_collapse_test_exit_or_disable(mm)))
                goto breakouterloop;
 
        vma_iter_init(&vmi, mm, khugepaged_scan.address);
@@ -2368,7 +2374,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
                unsigned long hstart, hend;
 
                cond_resched();
-               if (unlikely(hpage_collapse_test_exit(mm))) {
+               if (unlikely(hpage_collapse_test_exit_or_disable(mm))) {
                        progress++;
                        break;
                }
@@ -2390,7 +2396,7 @@ skip:
                        bool mmap_locked = true;
 
                        cond_resched();
-                       if (unlikely(hpage_collapse_test_exit(mm)))
+                       if (unlikely(hpage_collapse_test_exit_or_disable(mm)))
                                goto breakouterloop;
 
                        VM_BUG_ON(khugepaged_scan.address < hstart ||
@@ -2408,7 +2414,7 @@ skip:
                                fput(file);
                                if (*result == SCAN_PTE_MAPPED_HUGEPAGE) {
                                        mmap_read_lock(mm);
-                                       if (hpage_collapse_test_exit(mm))
+                                       if (hpage_collapse_test_exit_or_disable(mm))
                                                goto breakouterloop;
                                        *result = collapse_pte_mapped_thp(mm,
                                                khugepaged_scan.address, false);
@@ -2450,7 +2456,7 @@ breakouterloop_mmap_lock:
         * Release the current mm_slot if this mm is about to die, or
         * if we scanned all vmas of this mm.
         */
-       if (hpage_collapse_test_exit(mm) || !vma) {
+       if (hpage_collapse_test_exit_or_disable(mm) || !vma) {
                /*
                 * Make sure that if mm_users is reaching zero while
                 * khugepaged runs here, khugepaged_exit will find