diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 3f03248..b043c40 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -534,10 +534,10 @@ void __khugepaged_exit(struct mm_struct *mm)
                 * under mmap sem read mode). Stop here (after we
                 * return all pagetables will be destroyed) until
                 * khugepaged has finished working on the pagetables
-                * under the mmap_sem.
+                * under the mmap_lock.
                 */
-               down_write(&mm->mmap_sem);
-               up_write(&mm->mmap_sem);
+               mmap_write_lock(mm);
+               mmap_write_unlock(mm);
        }
 }
 
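The hunk above preserves an idiom worth spelling out: the back-to-back lock/unlock protects no data. Acquiring the write side cannot succeed until every current holder of mmap_lock, including khugepaged working under the read side, has dropped it, so an empty critical section acts as a "wait for all current holders" barrier. A minimal sketch of that idiom against the new API in <linux/mmap_lock.h>; the helper name is hypothetical:

#include <linux/mm_types.h>
#include <linux/mmap_lock.h>

/* Hypothetical helper: wait until nobody else holds mm's mmap_lock. */
static void mmap_lock_barrier_sketch(struct mm_struct *mm)
{
	/*
	 * The write lock is exclusive, so it cannot be taken while any
	 * reader or writer is still inside; lock+unlock with an empty
	 * critical section therefore serializes against all of them.
	 */
	mmap_write_lock(mm);
	mmap_write_unlock(mm);
}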
@@ -933,8 +933,8 @@ khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
 #endif
 
 /*
- * If mmap_sem temporarily dropped, revalidate vma
- * before taking mmap_sem.
+ * If mmap_lock temporarily dropped, revalidate vma
+ * before taking mmap_lock.
  * Return 0 if it succeeds, otherwise return a non-zero
  * value (scan code).
  */
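This comment states the rule that drives most of the hunks below: once mmap_lock has been temporarily dropped, nothing learned under it can be trusted, and the VMA must be looked up and checked again after the lock is re-taken. A simplified sketch of that revalidation step; the function name and error codes are illustrative (the real hugepage_vma_revalidate() returns SCAN_* codes and performs additional checks), while find_vma() and the HPAGE_PMD_* macros are real kernel symbols:

#include <linux/errno.h>
#include <linux/huge_mm.h>
#include <linux/mm.h>

static int revalidate_vma_sketch(struct mm_struct *mm, unsigned long address,
				 struct vm_area_struct **vmap)
{
	struct vm_area_struct *vma;
	unsigned long hstart, hend;

	/* Caller must hold mmap_lock (read or write) again at this point. */
	*vmap = vma = find_vma(mm, address);
	if (!vma)
		return -EFAULT;		/* VMA vanished while the lock was dropped */

	/* The huge-page-aligned window inside the VMA may have changed. */
	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (address < hstart || address + HPAGE_PMD_SIZE > hend)
		return -ERANGE;

	return 0;
}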
@@ -966,7 +966,7 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
  * Only done if khugepaged_scan_pmd believes it is worthwhile.
  *
  * Called and returns without pte mapped or spinlocks held,
- * but with mmap_sem held to protect against vma changes.
+ * but with mmap_lock held to protect against vma changes.
  */
 
 static bool __collapse_huge_page_swapin(struct mm_struct *mm,
@@ -993,9 +993,9 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
                swapped_in++;
                ret = do_swap_page(&vmf);
 
-               /* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */
+               /* do_swap_page returns VM_FAULT_RETRY with released mmap_lock */
                if (ret & VM_FAULT_RETRY) {
-                       down_read(&mm->mmap_sem);
+                       mmap_read_lock(mm);
                        if (hugepage_vma_revalidate(mm, address, &vmf.vma)) {
                                /* vma is no longer available, don't continue to swapin */
                                trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
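do_swap_page() returning VM_FAULT_RETRY means the fault path has already released mmap_lock on the caller's behalf, which is why the hunk re-takes it with mmap_read_lock() and revalidates before touching the VMA again. A sketch of that retry shape, reusing revalidate_vma_sketch() from above; handle_fault_once() is a hypothetical stand-in for the actual fault call:

/* Hypothetical: fault one PTE in; may drop mmap_lock and set VM_FAULT_RETRY. */
static vm_fault_t handle_fault_once(struct vm_area_struct *vma, unsigned long address);

static bool swapin_pte_sketch(struct mm_struct *mm, struct vm_area_struct **vma,
			      unsigned long address)
{
	vm_fault_t ret = handle_fault_once(*vma, address);

	if (ret & VM_FAULT_RETRY) {
		/* The fault path released mmap_lock: take it back ... */
		mmap_read_lock(mm);
		/* ... and re-derive the VMA; the old pointer may be stale. */
		if (revalidate_vma_sketch(mm, address, vma))
			return false;	/* VMA is gone, give up on this collapse */
	}

	return !(ret & VM_FAULT_ERROR);
}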
@@ -1047,12 +1047,12 @@ static void collapse_huge_page(struct mm_struct *mm,
        gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
 
        /*
-        * Before allocating the hugepage, release the mmap_sem read lock.
+        * Before allocating the hugepage, release the mmap_lock read lock.
         * The allocation can take potentially a long time if it involves
-        * sync compaction, and we do not need to hold the mmap_sem during
+        * sync compaction, and we do not need to hold the mmap_lock during
         * that. We will recheck the vma after taking it again in write mode.
         */
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        new_page = khugepaged_alloc_page(hpage, gfp, node);
        if (!new_page) {
                result = SCAN_ALLOC_HUGE_PAGE_FAIL;
@@ -1065,38 +1065,38 @@ static void collapse_huge_page(struct mm_struct *mm,
        }
        count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
 
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
        result = hugepage_vma_revalidate(mm, address, &vma);
        if (result) {
-               up_read(&mm->mmap_sem);
+               mmap_read_unlock(mm);
                goto out_nolock;
        }
 
        pmd = mm_find_pmd(mm, address);
        if (!pmd) {
                result = SCAN_PMD_NULL;
-               up_read(&mm->mmap_sem);
+               mmap_read_unlock(mm);
                goto out_nolock;
        }
 
        /*
-        * __collapse_huge_page_swapin always returns with mmap_sem locked.
-        * If it fails, we release mmap_sem and jump out_nolock.
+        * __collapse_huge_page_swapin always returns with mmap_lock locked.
+        * If it fails, we release mmap_lock and jump out_nolock.
         * Continuing to collapse causes inconsistency.
         */
        if (unmapped && !__collapse_huge_page_swapin(mm, vma, address,
                                                     pmd, referenced)) {
-               up_read(&mm->mmap_sem);
+               mmap_read_unlock(mm);
                goto out_nolock;
        }
 
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        /*
         * Prevent all access to pagetables with the exception of
         * gup_fast later handled by the ptep_clear_flush and the VM
         * handled by the anon_vma lock + PG_lock.
         */
-       down_write(&mm->mmap_sem);
+       mmap_write_lock(mm);
        result = SCAN_ANY_PROCESS;
        if (!mmget_still_valid(mm))
                goto out;
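Taken together, the two hunks above are a three-step lock dance: collapse_huge_page() drops the read lock around a potentially slow allocation (sync compaction), re-takes it to revalidate, and then, because an rwsem-style lock offers no read-to-write upgrade, releases the read side entirely and acquires the write lock from scratch, revalidating once more underneath it. A condensed sketch with hypothetical allocation and collapse placeholders:

/* Hypothetical allocator standing in for khugepaged_alloc_page(). */
static struct page *allocate_hugepage_sketch(void);

static int collapse_range_sketch(struct mm_struct *mm, unsigned long address)
{
	struct vm_area_struct *vma;
	struct page *hpage;

	/* 1. Entered with mmap_lock held for read: drop it across the
	 *    allocation, which may block for a long time. */
	mmap_read_unlock(mm);
	hpage = allocate_hugepage_sketch();
	if (!hpage)
		return -ENOMEM;

	/* 2. Re-take the read lock and revalidate everything derived
	 *    from the VMA before the drop. */
	mmap_read_lock(mm);
	if (revalidate_vma_sketch(mm, address, &vma)) {
		mmap_read_unlock(mm);
		return -EAGAIN;
	}

	/* 3. No read->write upgrade exists: release the read side, take
	 *    the write lock, and revalidate yet again before touching
	 *    the page tables. */
	mmap_read_unlock(mm);
	mmap_write_lock(mm);
	if (revalidate_vma_sketch(mm, address, &vma) == 0) {
		/* ... copy pages and install the huge PMD here ... */
	}
	mmap_write_unlock(mm);

	return 0;
}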
@@ -1184,7 +1184,7 @@ static void collapse_huge_page(struct mm_struct *mm,
        khugepaged_pages_collapsed++;
        result = SCAN_SUCCEED;
 out_up_write:
-       up_write(&mm->mmap_sem);
+       mmap_write_unlock(mm);
 out_nolock:
        if (!IS_ERR_OR_NULL(*hpage))
                mem_cgroup_uncharge(*hpage);
@@ -1345,7 +1345,7 @@ out_unmap:
        pte_unmap_unlock(pte, ptl);
        if (ret) {
                node = khugepaged_find_target_node();
-               /* collapse_huge_page will return with the mmap_sem released */
+               /* collapse_huge_page will return with the mmap_lock released */
                collapse_huge_page(mm, address, hpage, node,
                                referenced, unmapped);
        }
@@ -1517,7 +1517,7 @@ static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
        if (likely(mm_slot->nr_pte_mapped_thp == 0))
                return 0;
 
-       if (!down_write_trylock(&mm->mmap_sem))
+       if (!mmap_write_trylock(mm))
                return -EBUSY;
 
        if (unlikely(khugepaged_test_exit(mm)))
@@ -1528,7 +1528,7 @@ static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
 
 out:
        mm_slot->nr_pte_mapped_thp = 0;
-       up_write(&mm->mmap_sem);
+       mmap_write_unlock(mm);
        return 0;
 }
 
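khugepaged_collapse_pte_mapped_thps() is housekeeping that can always be retried on a later pass, so it uses mmap_write_trylock() and reports -EBUSY rather than sleeping on a contended mm. The pattern in isolation, with the actual work elided:

static int deferred_mm_work_sketch(struct mm_struct *mm)
{
	if (!mmap_write_trylock(mm))
		return -EBUSY;	/* mm is busy; caller simply retries later */

	/* ... deferred page-table housekeeping would go here ... */

	mmap_write_unlock(mm);
	return 0;
}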
@@ -1543,11 +1543,11 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
                /*
                 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
                 * got written to. These VMAs are likely not worth investing
-                * down_write(mmap_sem) as PMD-mapping is likely to be split
+                * mmap_write_lock(mm) as PMD-mapping is likely to be split
                 * later.
                 *
                 * Note that vma->anon_vma check is racy: it can be set up after
-                * the check but before we took mmap_sem by the fault path.
+                * the check but before we took mmap_lock by the fault path.
                 * But page lock would prevent establishing any new ptes of the
                 * page, so we are safe.
                 *
@@ -1567,18 +1567,18 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
                if (!pmd)
                        continue;
                /*
-                * We need exclusive mmap_sem to retract page table.
+                * We need exclusive mmap_lock to retract page table.
                 *
                 * We use trylock due to lock inversion: we need to acquire
-                * mmap_sem while holding page lock. Fault path does it in
+                * mmap_lock while holding page lock. Fault path does it in
                 * reverse order. Trylock is a way to avoid deadlock.
                 */
-               if (down_write_trylock(&vma->vm_mm->mmap_sem)) {
+               if (mmap_write_trylock(vma->vm_mm)) {
                        spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd);
                        /* assume page table is clear */
                        _pmd = pmdp_collapse_flush(vma, addr, pmd);
                        spin_unlock(ptl);
-                       up_write(&vma->vm_mm->mmap_sem);
+                       mmap_write_unlock(vma->vm_mm);
                        mm_dec_nr_ptes(vma->vm_mm);
                        pte_free(vma->vm_mm, pmd_pgtable(_pmd));
                } else {
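The trylock above is a lock-ordering escape hatch. The fault path takes mmap_lock and then the page lock; retract_page_tables() already holds the page lock and wants mmap_lock, so blocking here could deadlock. It therefore only ever trylocks and, on contention, leaves the VMA alone (the real code records the work so khugepaged can retry it later; that bookkeeping is omitted in this sketch):

static void retract_under_page_lock_sketch(struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;

	/*
	 * We hold the page lock. Blocking on mmap_lock here would invert
	 * the fault path's mmap_lock -> page lock order, so only trylock.
	 */
	if (!mmap_write_trylock(mm))
		return;		/* contended: skip, khugepaged can retry later */

	/* ... pmdp_collapse_flush() and page-table freeing go here ... */

	mmap_write_unlock(mm);
}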
@@ -2057,8 +2057,8 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
         * the next mm on the list.
         */
        vma = NULL;
-       if (unlikely(!down_read_trylock(&mm->mmap_sem)))
-               goto breakouterloop_mmap_sem;
+       if (unlikely(!mmap_read_trylock(mm)))
+               goto breakouterloop_mmap_lock;
        if (likely(!khugepaged_test_exit(mm)))
                vma = find_vma(mm, khugepaged_scan.address);
 
@@ -2102,7 +2102,7 @@ skip:
                                pgoff_t pgoff = linear_page_index(vma,
                                                khugepaged_scan.address);
 
-                               up_read(&mm->mmap_sem);
+                               mmap_read_unlock(mm);
                                ret = 1;
                                khugepaged_scan_file(mm, file, pgoff, hpage);
                                fput(file);
@@ -2115,15 +2115,15 @@ skip:
                        khugepaged_scan.address += HPAGE_PMD_SIZE;
                        progress += HPAGE_PMD_NR;
                        if (ret)
-                               /* we released mmap_sem so break loop */
-                               goto breakouterloop_mmap_sem;
+                               /* we released mmap_lock so break loop */
+                               goto breakouterloop_mmap_lock;
                        if (progress >= pages)
                                goto breakouterloop;
                }
        }
 breakouterloop:
-       up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
-breakouterloop_mmap_sem:
+       mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
+breakouterloop_mmap_lock:
 
        spin_lock(&khugepaged_mm_lock);
        VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
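khugepaged_scan_mm_slot() applies the same courtesy from the scanner's side: it never sleeps on a busy mm (mmap_read_trylock()), and the moment a callee such as collapse_huge_page() or khugepaged_scan_file() drops mmap_lock, the VMA walk has to stop because every cached vma pointer is stale, which is what the breakouterloop_mmap_lock label expresses. The loop discipline in miniature, using the VMA linked list of this kernel era and a hypothetical scan_one_vma_sketch() callee that returns true when it released the lock:

/* Hypothetical callee: scans one VMA, returns true if it dropped mmap_lock. */
static bool scan_one_vma_sketch(struct mm_struct *mm, struct vm_area_struct *vma);

static void scan_mm_sketch(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	if (!mmap_read_trylock(mm))
		return;			/* mm is busy: skip it and move on */

	for (vma = find_vma(mm, 0); vma; vma = vma->vm_next) {
		if (scan_one_vma_sketch(mm, vma))
			return;		/* callee released mmap_lock: stop walking */
	}

	mmap_read_unlock(mm);
}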