mm,do_huge_pmd_numa_page: remove unnecessary TLB flushing code

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index afff3ac..5e9ef0f 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1440,32 +1440,6 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
                goto out;
        }
 
-       /*
-        * Since we took the NUMA fault, we must have observed the !accessible
-        * bit. Make sure all other CPUs agree with that, to avoid them
-        * modifying the page we're about to migrate.
-        *
-        * Must be done under PTL such that we'll observe the relevant
-        * inc_tlb_flush_pending().
-        *
-        * We are not sure a pending tlb flush here is for a huge page
-        * mapping or not. Hence use the tlb range variant
-        */
-       if (mm_tlb_flush_pending(vma->vm_mm)) {
-               flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE);
-               /*
-                * change_huge_pmd() released the pmd lock before
-                * invalidating the secondary MMUs sharing the primary
-                * MMU pagetables (with ->invalidate_range()). The
-                * mmu_notifier_invalidate_range_end() (which
-                * internally calls ->invalidate_range()) in
-                * change_pmd_range() will run after us, so we can't
-                * rely on it here and we need an explicit invalidate.
-                */
-               mmu_notifier_invalidate_range(vma->vm_mm, haddr,
-                                             haddr + HPAGE_PMD_SIZE);
-       }
-
        pmd = pmd_modify(oldpmd, vma->vm_page_prot);
        page = vm_normal_page_pmd(vma, haddr, pmd);
        if (!page)
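
The block removed above was the reader half of mm's pending-TLB-flush handshake, and it can go because the migration that follows (migrate_misplaced_page() -> migrate_pages()) unmaps the PMD with a flushing primitive anyway, so a stale !accessible entry cannot outlive the migration. For context, here is a simplified sketch of that handshake; inc_tlb_flush_pending(), dec_tlb_flush_pending() and mm_tlb_flush_pending() are the real helpers from include/linux/mm_types.h, but both functions below are reduced illustrations, not the kernel's actual code:

    #include <linux/mm.h>
    #include <linux/huge_mm.h>

    /* Writer side (e.g. change_protection()): announce, update, flush. */
    static void prot_writer(struct mm_struct *mm, struct vm_area_struct *vma,
                            unsigned long start, unsigned long end)
    {
            inc_tlb_flush_pending(mm);      /* becomes visible under PTL */
            /* ... clear/modify page table entries under the PTL ... */
            flush_tlb_range(vma, start, end);
            dec_tlb_flush_pending(mm);      /* flush done, clear pending */
    }

    /* Reader side: what do_huge_pmd_numa_page() did before this patch. */
    static void numa_fault_reader(struct vm_area_struct *vma,
                                  unsigned long haddr)
    {
            /* Checked under the PTL, so a concurrent inc is visible. */
            if (mm_tlb_flush_pending(vma->vm_mm))
                    flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE);
    }
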
@@ -2454,11 +2428,11 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 
        for (i = nr - 1; i >= 1; i--) {
                __split_huge_page_tail(head, i, lruvec, list);
-               /* Some pages can be beyond i_size: drop them from page cache */
+               /* Some pages can be beyond EOF: drop them from page cache */
                if (head[i].index >= end) {
                        ClearPageDirty(head + i);
                        __delete_from_page_cache(head + i, NULL);
-                       if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head))
+                       if (shmem_mapping(head->mapping))
                                shmem_uncharge(head->mapping->host, 1);
                        put_page(head + i);
                } else if (!PageAnon(page)) {
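
In the hunk above, the open-coded test (CONFIG_SHMEM enabled and the head page swap-backed) is replaced by asking the mapping directly. shmem_mapping() is an existing helper in mm/shmem.c; in kernels of this vintage it is essentially the one-liner below (sketched from memory, so verify the exact form in your tree):

    /* A mapping belongs to shmem iff it uses shmem's address_space ops. */
    bool shmem_mapping(struct address_space *mapping)
    {
            return mapping->a_ops == &shmem_aops;
    }
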
@@ -2686,6 +2660,8 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
                 * head page lock is good enough to serialize the trimming.
                 */
                end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
+               if (shmem_mapping(mapping))
+                       end = shmem_fallocend(mapping->host, end);
        }
 
        /*
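
This last hunk widens `end` so that shmem pages instantiated beyond i_size by fallocate(FALLOC_FL_KEEP_SIZE) are not trimmed when a huge page is split. shmem_fallocend() was introduced in the same series; a plausible sketch, assuming shmem tracks the end of a pending fallocate range in its inode info (the `fallocend` field name comes from that series, so check include/linux/shmem_fs.h for the authoritative definition):

    /*
     * Sketch: the effective end-of-data for split purposes - the EOF
     * page index, or further out while an fallocate(KEEP_SIZE) that
     * instantiated pages past EOF is still in progress.
     */
    static inline pgoff_t shmem_fallocend(struct inode *inode, pgoff_t eof)
    {
            return max(eof, SHMEM_I(inode)->fallocend);
    }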