X-Git-Url: http://git.monstr.eu/?a=blobdiff_plain;f=mm%2Fhugetlb.c;h=905a7d549b00f6099abb69682266eb49745b6700;hb=f158bbee9403b7bd2ad22f0c03b7e9762c20ad18;hp=4bdb58ab14cbbd659a1f9ca2e531a995fed03077;hpb=0570b69305276a349ef7a17c8c54dfeed76f3954;p=linux-2.6-microblaze.git

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 4bdb58ab14cb..905a7d549b00 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4008,25 +4008,11 @@ void __unmap_hugepage_range_final(struct mmu_gather *tlb,
 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 			  unsigned long end, struct page *ref_page)
 {
-	struct mm_struct *mm;
 	struct mmu_gather tlb;
-	unsigned long tlb_start = start;
-	unsigned long tlb_end = end;
 
-	/*
-	 * If shared PMDs were possibly used within this vma range, adjust
-	 * start/end for worst case tlb flushing.
-	 * Note that we can not be sure if PMDs are shared until we try to
-	 * unmap pages.  However, we want to make sure TLB flushing covers
-	 * the largest possible range.
-	 */
-	adjust_range_if_pmd_sharing_possible(vma, &tlb_start, &tlb_end);
-
-	mm = vma->vm_mm;
-
-	tlb_gather_mmu(&tlb, mm, tlb_start, tlb_end);
+	tlb_gather_mmu(&tlb, vma->vm_mm);
 	__unmap_hugepage_range(&tlb, vma, start, end, ref_page);
-	tlb_finish_mmu(&tlb, tlb_start, tlb_end);
+	tlb_finish_mmu(&tlb);
 }
 
 /*
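
Note on the interface change shown above: tlb_gather_mmu() and
tlb_finish_mmu() no longer take start/end range arguments, so the
caller-side worst-case range widening for shared PMDs
(adjust_range_if_pmd_sharing_possible()) goes away with them. A minimal
before/after sketch of the calling convention, using only identifiers
visible in the hunk (the range to flush is presumably tracked by the
mmu_gather itself as pages are unmapped, rather than supplied up front):

	struct mmu_gather tlb;

	/* Before: a conservative flush range had to be passed up front. */
	tlb_gather_mmu(&tlb, vma->vm_mm, tlb_start, tlb_end);
	__unmap_hugepage_range(&tlb, vma, start, end, ref_page);
	tlb_finish_mmu(&tlb, tlb_start, tlb_end);

	/* After: only the mm_struct is needed; no range arguments. */
	tlb_gather_mmu(&tlb, vma->vm_mm);
	__unmap_hugepage_range(&tlb, vma, start, end, ref_page);
	tlb_finish_mmu(&tlb);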