diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 700f516..15a9af7 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -431,7 +431,7 @@ static void insert_to_mm_slots_hash(struct mm_struct *mm,
 
 static inline int khugepaged_test_exit(struct mm_struct *mm)
 {
-       return atomic_read(&mm->mm_users) == 0;
+       return atomic_read(&mm->mm_users) == 0 || !mmget_still_valid(mm);
 }
 
 static bool hugepage_vma_check(struct vm_area_struct *vma,
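Folding mmget_still_valid() into khugepaged_test_exit() means every khugepaged path that already backs off from a dying mm now also backs off while a core dump is in flight; that is why the open-coded check in collapse_huge_page() is dropped in the next hunk. For context, mmget_still_valid() at this point in history was nothing more than a core_state test (reproduced here for reference, not part of this patch):

    /* include/linux/sched/mm.h, circa v5.8 */
    static inline bool mmget_still_valid(struct mm_struct *mm)
    {
            return likely(!mm->core_state);
    }

So the combined test now reads: treat the mm as exited if its last user is gone, or if a core dump currently holds it in limbo.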
@@ -1100,9 +1100,6 @@ static void collapse_huge_page(struct mm_struct *mm,
         * handled by the anon_vma lock + PG_lock.
         */
        mmap_write_lock(mm);
-       result = SCAN_ANY_PROCESS;
-       if (!mmget_still_valid(mm))
-               goto out;
        result = hugepage_vma_revalidate(mm, address, &vma);
        if (result)
                goto out;
@@ -1176,7 +1173,7 @@ static void collapse_huge_page(struct mm_struct *mm,
        spin_lock(pmd_ptl);
        BUG_ON(!pmd_none(*pmd));
        page_add_new_anon_rmap(new_page, vma, address, true);
-       lru_cache_add_active_or_unevictable(new_page, vma);
+       lru_cache_add_inactive_or_unevictable(new_page, vma);
        pgtable_trans_huge_deposit(mm, pmd, pgtable);
        set_pmd_at(mm, address, pmd, _pmd);
        update_mmu_cache_pmd(vma, address, pmd);
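The lru_cache_add_inactive_or_unevictable() rename tracks the anonymous-LRU rework that landed in the same cycle: a freshly collapsed anonymous THP now starts life on the inactive list, so it must be referenced again before the workingset code promotes it, instead of being born active. A condensed sketch of what the renamed helper does (simplified; the real mm/swap.c body also updates NR_MLOCK and vm event counters):

    void lru_cache_add_inactive_or_unevictable(struct page *page,
                                               struct vm_area_struct *vma)
    {
            /* pages of an mlocked, non-special VMA are marked Mlocked so
             * they end up on the unevictable list rather than the LRU;
             * everything else now starts on the inactive anon list */
            if ((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED)
                    SetPageMlocked(page);
            lru_cache_add(page);
    }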
@@ -1412,7 +1409,7 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
 {
        unsigned long haddr = addr & HPAGE_PMD_MASK;
        struct vm_area_struct *vma = find_vma(mm, haddr);
-       struct page *hpage = NULL;
+       struct page *hpage;
        pte_t *start_pte, *pte;
        pmd_t *pmd, _pmd;
        spinlock_t *ptl;
@@ -1432,9 +1429,17 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
        if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE))
                return;
 
+       hpage = find_lock_page(vma->vm_file->f_mapping,
+                              linear_page_index(vma, haddr));
+       if (!hpage)
+               return;
+
+       if (!PageHead(hpage))
+               goto drop_hpage;
+
        pmd = mm_find_pmd(mm, haddr);
        if (!pmd)
-               return;
+               goto drop_hpage;
 
        start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
 
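Taking find_lock_page() up front pins the pagecache THP and holds its page lock across the whole collapse, so neither the compound page nor the memcg charge it carries can be pulled out from under the page-table rewrite; it also lets the scan loop below stop discovering hpage pte by pte. The idiom in isolation (illustrative; mapping and index stand in for the vma->vm_file->f_mapping and linear_page_index() pair used above):

    struct page *page = find_lock_page(mapping, index);
    if (!page)
            return;         /* nothing cached at this index */
    if (!PageHead(page))
            goto out;       /* present, but not (or no longer) a THP head */
    /* ... operate on the pinned, locked compound page ... */
out:
    unlock_page(page);
    put_page(page);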
@@ -1453,30 +1458,11 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
 
                page = vm_normal_page(vma, addr, *pte);
 
-               if (!page || !PageCompound(page))
-                       goto abort;
-
-               if (!hpage) {
-                       hpage = compound_head(page);
-                       /*
-                        * The mapping of the THP should not change.
-                        *
-                        * Note that uprobe, debugger, or MAP_PRIVATE may
-                        * change the page table, but the new page will
-                        * not pass PageCompound() check.
-                        */
-                       if (WARN_ON(hpage->mapping != vma->vm_file->f_mapping))
-                               goto abort;
-               }
-
                /*
-                * Confirm the page maps to the correct subpage.
-                *
-                * Note that uprobe, debugger, or MAP_PRIVATE may change
-                * the page table, but the new page will not pass
-                * PageCompound() check.
+                * Note that uprobe, debugger, or MAP_PRIVATE may change the
+                * page table, but the new page will not be a subpage of hpage.
                 */
-               if (WARN_ON(hpage + i != page))
+               if (hpage + i != page)
                        goto abort;
                count++;
        }
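With hpage pinned beforehand, the loop only has to confirm that each pte still points into that compound page, and a mismatch is no longer kernel-worthy news: uprobe, a debugger, or MAP_PRIVATE CoW can legitimately swap in a fresh subpage, so the WARN_ON is downgraded to a silent abort. The check leans on a compound page being backed by contiguous struct pages (illustrative restatement of the test above):

    /* subpage i of a THP is the i-th struct page after the head, so the
     * pte for haddr + i * PAGE_SIZE must map exactly hpage + i */
    if (hpage + i != vm_normal_page(vma, addr, *pte))
            goto abort;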
@@ -1495,21 +1481,26 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
        pte_unmap_unlock(start_pte, ptl);
 
        /* step 3: set proper refcount and mm_counters. */
-       if (hpage) {
+       if (count) {
                page_ref_sub(hpage, count);
                add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
        }
 
        /* step 4: collapse pmd */
        ptl = pmd_lock(vma->vm_mm, pmd);
-       _pmd = pmdp_collapse_flush(vma, addr, pmd);
+       _pmd = pmdp_collapse_flush(vma, haddr, pmd);
        spin_unlock(ptl);
        mm_dec_nr_ptes(mm);
        pte_free(mm, pmd_pgtable(_pmd));
+
+drop_hpage:
+       unlock_page(hpage);
+       put_page(hpage);
        return;
 
 abort:
        pte_unmap_unlock(start_pte, ptl);
+       goto drop_hpage;
 }
 
 static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
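Two details in the hunk above are easy to miss. First, pmdp_collapse_flush() now takes haddr: by the time step 4 runs, the scan loops have advanced addr pte by pte to the end of the extent, so flushing at addr would cover the wrong range. Second, the abort path no longer leaks: it unlocks the pte lock and then falls through to drop_hpage, so every exit releases both the page lock and the reference taken by find_lock_page(). The addr/haddr drift, spelled out (illustrative values, assuming the usual 2MB HPAGE_PMD_SIZE):

    unsigned long haddr = addr & HPAGE_PMD_MASK;    /* e.g. 0x200000 */
    /* after "for (i = 0, addr = haddr; i < HPAGE_PMD_NR;
     *            i++, addr += PAGE_SIZE)" finishes:
     * addr == haddr + HPAGE_PMD_SIZE == 0x400000, i.e. the start of the
     * *next* extent -- exactly the address the old code flushed at */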
@@ -1538,6 +1529,7 @@ out:
 static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
 {
        struct vm_area_struct *vma;
+       struct mm_struct *mm;
        unsigned long addr;
        pmd_t *pmd, _pmd;
 
@@ -1566,7 +1558,8 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
                        continue;
                if (vma->vm_end < addr + HPAGE_PMD_SIZE)
                        continue;
-               pmd = mm_find_pmd(vma->vm_mm, addr);
+               mm = vma->vm_mm;
+               pmd = mm_find_pmd(mm, addr);
                if (!pmd)
                        continue;
                /*
@@ -1576,17 +1569,19 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
                 * mmap_lock while holding page lock. Fault path does it in
                 * reverse order. Trylock is a way to avoid deadlock.
                 */
-               if (mmap_write_trylock(vma->vm_mm)) {
-                       spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd);
-                       /* assume page table is clear */
-                       _pmd = pmdp_collapse_flush(vma, addr, pmd);
-                       spin_unlock(ptl);
-                       mmap_write_unlock(vma->vm_mm);
-                       mm_dec_nr_ptes(vma->vm_mm);
-                       pte_free(vma->vm_mm, pmd_pgtable(_pmd));
+               if (mmap_write_trylock(mm)) {
+                       if (!khugepaged_test_exit(mm)) {
+                               spinlock_t *ptl = pmd_lock(mm, pmd);
+                               /* assume page table is clear */
+                               _pmd = pmdp_collapse_flush(vma, addr, pmd);
+                               spin_unlock(ptl);
+                               mm_dec_nr_ptes(mm);
+                               pte_free(mm, pmd_pgtable(_pmd));
+                       }
+                       mmap_write_unlock(mm);
                } else {
                        /* Try again later */
-                       khugepaged_add_pte_mapped_thp(vma->vm_mm, addr);
+                       khugepaged_add_pte_mapped_thp(mm, addr);
                }
        }
        i_mmap_unlock_write(mapping);
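The retract_page_tables() change closes a narrow race: mmap_write_trylock() can succeed against an mm whose last user is already gone, because exit_mmap() tears the address space down without taking mmap_lock, and the page table being retracted could then be freed concurrently. Rechecking khugepaged_test_exit() under the lock (which, after the first hunk, also covers core dumps) makes the retraction safe; hoisting vma->vm_mm into the local mm is just tidying around the new branch. The resulting pattern, in isolation (illustrative):

    /* the page lock is held here, and the fault path takes the page lock
     * *after* mmap_lock, so only a trylock avoids ABBA deadlock */
    if (mmap_write_trylock(mm)) {
            if (!khugepaged_test_exit(mm)) {
                    /* mm is still live: safe to unhook and free the
                     * page table under the pmd lock */
            }
            mmap_write_unlock(mm);
    } else {
            /* contended: queue this extent and let khugepaged retry */
            khugepaged_add_pte_mapped_thp(mm, addr);
    }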