diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 19f3401..b043c40 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -534,7 +534,7 @@ void __khugepaged_exit(struct mm_struct *mm)
                 * under mmap sem read mode). Stop here (after we
                 * return all pagetables will be destroyed) until
                 * khugepaged has finished working on the pagetables
-                * under the mmap_sem.
+                * under the mmap_lock.
                 */
                mmap_write_lock(mm);
                mmap_write_unlock(mm);
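
The mmap_write_lock()/mmap_write_unlock() pair above is used purely as a barrier: taking the lock for write returns only once every khugepaged reader already inside the read-side critical section has finished, after which it is safe to tear the page tables down. Below is a minimal userspace sketch of that pattern using POSIX rwlocks as a stand-in for mmap_lock (an analogy only, not kernel code; build with -pthread):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;

static void *reader(void *arg)
{
	(void)arg;
	pthread_rwlock_rdlock(&lock);
	printf("reader: scanning under the read lock\n");
	sleep(1);				/* simulate work done by a reader */
	pthread_rwlock_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, reader, NULL);
	usleep(100 * 1000);			/* let the reader grab the lock */

	/*
	 * Take the write lock and drop it immediately: this returns only
	 * after every reader that was already inside has finished.
	 */
	pthread_rwlock_wrlock(&lock);
	pthread_rwlock_unlock(&lock);
	printf("writer: all earlier readers are done, safe to tear down\n");

	pthread_join(t, NULL);
	return 0;
}
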
@@ -933,8 +933,8 @@ khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
 #endif
 
 /*
- * If mmap_sem temporarily dropped, revalidate vma
- * before taking mmap_sem.
+ * If mmap_lock temporarily dropped, revalidate vma
+ * before taking mmap_lock.
  * Return 0 on success, otherwise return a non-zero
  * value (scan code).
  */
@@ -966,7 +966,7 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
  * Only done if khugepaged_scan_pmd believes it is worthwhile.
  *
  * Called and returns without pte mapped or spinlocks held,
- * but with mmap_sem held to protect against vma changes.
+ * but with mmap_lock held to protect against vma changes.
  */
 
 static bool __collapse_huge_page_swapin(struct mm_struct *mm,
@@ -993,7 +993,7 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
                swapped_in++;
                ret = do_swap_page(&vmf);
 
-               /* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */
+               /* do_swap_page returns VM_FAULT_RETRY with released mmap_lock */
                if (ret & VM_FAULT_RETRY) {
                        mmap_read_lock(mm);
                        if (hugepage_vma_revalidate(mm, address, &vmf.vma)) {
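
The contract described here is that a callee may return having released the caller's lock and must say so through its return value; the caller then re-takes the lock and revalidates its cached state before touching it again. A rough userspace sketch of that contract, with hypothetical names (fault_one, swap_in, revalidate) and a POSIX rwlock standing in for mmap_lock:

#include <pthread.h>
#include <stdbool.h>

#define RETRY_LOCK_DROPPED 0x1		/* stands in for VM_FAULT_RETRY */

static pthread_rwlock_t mmap_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Stand-in for the vma recheck; always "still valid" in this sketch. */
static bool revalidate(void)
{
	return true;
}

/*
 * Callee that may have to sleep: if so, it drops the caller's read lock
 * and reports that fact through its return value.
 */
static int fault_one(bool must_sleep)
{
	if (must_sleep) {
		pthread_rwlock_unlock(&mmap_lock);
		/* ... wait for I/O without holding the lock ... */
		return RETRY_LOCK_DROPPED;
	}
	return 0;
}

/* Caller enters with the read lock held and returns with it held again. */
static bool swap_in(bool must_sleep)
{
	int ret = fault_one(must_sleep);

	if (ret & RETRY_LOCK_DROPPED) {
		/* Re-take the lock, then revalidate before continuing. */
		pthread_rwlock_rdlock(&mmap_lock);
		if (!revalidate())
			return false;	/* state changed while unlocked */
	}
	return true;
}

int main(void)
{
	pthread_rwlock_rdlock(&mmap_lock);
	bool ok = swap_in(true);
	pthread_rwlock_unlock(&mmap_lock);
	return ok ? 0 : 1;
}
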
@@ -1047,9 +1047,9 @@ static void collapse_huge_page(struct mm_struct *mm,
        gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
 
        /*
-        * Before allocating the hugepage, release the mmap_sem read lock.
+        * Before allocating the hugepage, release the mmap_lock read lock.
         * The allocation can take potentially a long time if it involves
-        * sync compaction, and we do not need to hold the mmap_sem during
+        * sync compaction, and we do not need to hold the mmap_lock during
         * that. We will recheck the vma after taking it again in write mode.
         */
        mmap_read_unlock(mm);
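
The pattern in this hunk is: sample the state under the read lock, drop the lock across the potentially slow allocation, then take the lock again (in write mode) and revalidate before committing. A userspace sketch under those assumptions; the generation counter and the names mm_sketch and still_valid are hypothetical stand-ins for the vma revalidation the kernel does:

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct mm_sketch {
	pthread_rwlock_t lock;
	unsigned long generation;	/* bumped whenever the mapping set changes */
};

/* Must be called with the lock held; checks the mapping is unchanged. */
static bool still_valid(struct mm_sketch *mm, unsigned long seen)
{
	return mm->generation == seen;
}

static void *collapse_step(struct mm_sketch *mm)
{
	pthread_rwlock_rdlock(&mm->lock);
	unsigned long seen = mm->generation;
	pthread_rwlock_unlock(&mm->lock);	/* don't hold the lock across slow work */

	void *page = malloc(2UL << 20);		/* potentially slow "huge page" allocation */
	if (!page)
		return NULL;

	pthread_rwlock_wrlock(&mm->lock);	/* now take it in write mode */
	if (!still_valid(mm, seen)) {
		pthread_rwlock_unlock(&mm->lock);
		free(page);			/* mapping changed meanwhile: back out */
		return NULL;
	}
	/* ... perform the collapse under the write lock ... */
	pthread_rwlock_unlock(&mm->lock);
	return page;
}

int main(void)
{
	struct mm_sketch mm = { PTHREAD_RWLOCK_INITIALIZER, 0 };
	void *p = collapse_step(&mm);

	free(p);
	return 0;
}
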
@@ -1080,8 +1080,8 @@ static void collapse_huge_page(struct mm_struct *mm,
        }
 
        /*
-        * __collapse_huge_page_swapin always returns with mmap_sem locked.
-        * If it fails, we release mmap_sem and jump out_nolock.
+        * __collapse_huge_page_swapin always returns with mmap_lock locked.
+        * If it fails, we release mmap_lock and jump out_nolock.
         * Continuing to collapse causes inconsistency.
         */
        if (unmapped && !__collapse_huge_page_swapin(mm, vma, address,
@@ -1345,7 +1345,7 @@ out_unmap:
        pte_unmap_unlock(pte, ptl);
        if (ret) {
                node = khugepaged_find_target_node();
-               /* collapse_huge_page will return with the mmap_sem released */
+               /* collapse_huge_page will return with the mmap_lock released */
                collapse_huge_page(mm, address, hpage, node,
                                referenced, unmapped);
        }
@@ -1543,11 +1543,11 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
                /*
                 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
                 * got written to. These VMAs are likely not worth investing
-                * down_write(mmap_sem) as PMD-mapping is likely to be split
+                * mmap_write_lock(mm) as PMD-mapping is likely to be split
                 * later.
                 *
                 * Note that vma->anon_vma check is racy: it can be set up after
-                * the check but before we took mmap_sem by the fault path.
+                * the check but before we took mmap_lock by the fault path.
                 * But page lock would prevent establishing any new ptes of the
                 * page, so we are safe.
                 *
@@ -1567,10 +1567,10 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
                if (!pmd)
                        continue;
                /*
-                * We need exclusive mmap_sem to retract page table.
+                * We need exclusive mmap_lock to retract page table.
                 *
                 * We use trylock due to lock inversion: we need to acquire
-                * mmap_sem while holding page lock. Fault path does it in
+                * mmap_lock while holding page lock. Fault path does it in
                 * reverse order. Trylock is a way to avoid deadlock.
                 */
                if (mmap_write_trylock(vma->vm_mm)) {
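
This is the classic ABBA avoidance trick: the fault path takes mmap_lock and then the page lock, while the retract path already holds the page lock, so it may only try to take mmap_lock and must back off if that fails. A self-contained userspace illustration with plain mutexes (names are illustrative, not the kernel API):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;	/* stands in for the page lock */
static pthread_mutex_t map_lock  = PTHREAD_MUTEX_INITIALIZER;	/* stands in for mmap_lock */

/* Fault-path order: map_lock first, then page_lock. */
static void fault_path(void)
{
	pthread_mutex_lock(&map_lock);
	pthread_mutex_lock(&page_lock);
	/* ... establish ptes for the page ... */
	pthread_mutex_unlock(&page_lock);
	pthread_mutex_unlock(&map_lock);
}

/*
 * Retract path already holds page_lock, so taking map_lock outright could
 * deadlock against fault_path(); trylock lets it back off instead.
 */
static bool retract_path(void)
{
	bool done = false;

	pthread_mutex_lock(&page_lock);
	if (pthread_mutex_trylock(&map_lock) == 0) {
		/* ... retract the page table ... */
		pthread_mutex_unlock(&map_lock);
		done = true;
	}
	pthread_mutex_unlock(&page_lock);
	return done;
}

int main(void)
{
	fault_path();
	return retract_path() ? 0 : 1;
}
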
@@ -2058,7 +2058,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
         */
        vma = NULL;
        if (unlikely(!mmap_read_trylock(mm)))
-               goto breakouterloop_mmap_sem;
+               goto breakouterloop_mmap_lock;
        if (likely(!khugepaged_test_exit(mm)))
                vma = find_vma(mm, khugepaged_scan.address);
 
@@ -2115,15 +2115,15 @@ skip:
                        khugepaged_scan.address += HPAGE_PMD_SIZE;
                        progress += HPAGE_PMD_NR;
                        if (ret)
-                               /* we released mmap_sem so break loop */
-                               goto breakouterloop_mmap_sem;
+                               /* we released mmap_lock so break loop */
+                               goto breakouterloop_mmap_lock;
                        if (progress >= pages)
                                goto breakouterloop;
                }
        }
 breakouterloop:
        mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
-breakouterloop_mmap_sem:
+breakouterloop_mmap_lock:
 
        spin_lock(&khugepaged_mm_lock);
        VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
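
khugepaged's scan is resumable: it takes the lock opportunistically, records how far it got in khugepaged_scan.address, and bails out of the outer loop whenever the lock was contended or got released underneath it, picking up from the saved address on the next pass. A small userspace sketch of that resumable-scan idea, with hypothetical names and a POSIX rwlock in place of mmap_lock:

#include <pthread.h>
#include <stdbool.h>

struct scan_state {
	pthread_rwlock_t lock;
	unsigned long resume_addr;	/* progress is remembered across passes */
	unsigned long end_addr;
};

/*
 * One scanning pass: take the lock opportunistically, stop when either the
 * page budget is used up or the lock could not be taken, and resume from
 * resume_addr on the next call.  Returns true once the range is finished.
 */
static bool scan_one_pass(struct scan_state *s, unsigned long step,
			  unsigned long budget)
{
	if (pthread_rwlock_tryrdlock(&s->lock) != 0)
		return false;			/* lock busy: retry on the next pass */

	while (s->resume_addr < s->end_addr && budget--) {
		/* ... examine the range [resume_addr, resume_addr + step) ... */
		s->resume_addr += step;
	}

	pthread_rwlock_unlock(&s->lock);
	return s->resume_addr >= s->end_addr;
}

int main(void)
{
	struct scan_state s = { PTHREAD_RWLOCK_INITIALIZER, 0, 1UL << 30 };

	while (!scan_one_pass(&s, 2UL << 20, 8))
		;				/* each pass covers at most 8 "pages" */
	return 0;
}
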