mm/sparse: cleanup the code surrounding memory_present()
[linux-2.6-microblaze.git] / mm / huge_memory.c
index d9b2e0e..462a7db 100644 (file)
@@ -1722,19 +1722,13 @@ static pmd_t move_soft_dirty_pmd(pmd_t pmd)
 }
 
 bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
-                 unsigned long new_addr, unsigned long old_end,
-                 pmd_t *old_pmd, pmd_t *new_pmd)
+                 unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
 {
        spinlock_t *old_ptl, *new_ptl;
        pmd_t pmd;
        struct mm_struct *mm = vma->vm_mm;
        bool force_flush = false;
 
-       if ((old_addr & ~HPAGE_PMD_MASK) ||
-           (new_addr & ~HPAGE_PMD_MASK) ||
-           old_end - old_addr < HPAGE_PMD_SIZE)
-               return false;
-
        /*
         * The destination pmd shouldn't be established, free_pgtables()
         * should have released it.
@@ -1746,7 +1740,7 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
 
        /*
         * We don't have to worry about the ordering of src and dst
-        * ptlocks because exclusive mmap_sem prevents deadlock.
+        * ptlocks because exclusive mmap_lock prevents deadlock.
         */
        old_ptl = __pmd_trans_huge_lock(old_pmd, vma);
        if (old_ptl) {
@@ -1833,9 +1827,9 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                goto unlock;
 
        /*
-        * In case prot_numa, we are under down_read(mmap_sem). It's critical
+        * In case prot_numa, we are under mmap_read_lock(mm). It's critical
         * to not clear pmd intermittently to avoid race with MADV_DONTNEED
-        * which is also under down_read(mmap_sem):
+        * which is also under mmap_read_lock(mm):
         *
         *      CPU0:                           CPU1:
         *                              change_huge_pmd(prot_numa=1)
@@ -2618,7 +2612,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 
        if (PageAnon(head)) {
                /*
-                * The caller does not necessarily hold an mmap_sem that would
+                * The caller does not necessarily hold an mmap_lock that would
                 * prevent the anon_vma disappearing so we first take a
                 * reference to it and then lock the anon_vma for write. This
                 * is similar to page_lock_anon_vma_read except the write lock