X-Git-Url: http://git.monstr.eu/?a=blobdiff_plain;f=mm%2Fmmap.c;h=aa9de981b6596c4704936fe61880115d40873af0;hb=53d31a3ffd60176af24f2f77fb3a7e567134eb90;hp=0584e540246e1d2472d00f156317ca71adb86fcd;hpb=f96271cefe6dfd1cb04195b76f4a33e185cd7f92;p=linux-2.6-microblaze.git

diff --git a/mm/mmap.c b/mm/mmap.c
index 0584e540246e..aa9de981b659 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1457,9 +1457,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
 		return addr;
 
 	if (flags & MAP_FIXED_NOREPLACE) {
-		struct vm_area_struct *vma = find_vma(mm, addr);
-
-		if (vma && vma->vm_start < addr + len)
+		if (find_vma_intersection(mm, addr, addr + len))
 			return -EEXIST;
 	}
 
@@ -1611,7 +1609,7 @@ unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
 			goto out_fput;
 		}
 	} else if (flags & MAP_HUGETLB) {
-		struct user_struct *user = NULL;
+		struct ucounts *ucounts = NULL;
 		struct hstate *hs;
 
 		hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
@@ -1627,13 +1625,13 @@ unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
 		 */
 		file = hugetlb_file_setup(HUGETLB_ANON_FILE, len,
 				VM_NORESERVE,
-				&user, HUGETLB_ANONHUGE_INODE,
+				&ucounts, HUGETLB_ANONHUGE_INODE,
 				(flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
 		if (IS_ERR(file))
 			return PTR_ERR(file);
 	}
 
-	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+	flags &= ~MAP_DENYWRITE;
 
 	retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
 out_fput:
@@ -2802,6 +2800,22 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
 	return __split_vma(mm, vma, addr, new_below);
 }
 
+static inline void
+unlock_range(struct vm_area_struct *start, unsigned long limit)
+{
+	struct mm_struct *mm = start->vm_mm;
+	struct vm_area_struct *tmp = start;
+
+	while (tmp && tmp->vm_start < limit) {
+		if (tmp->vm_flags & VM_LOCKED) {
+			mm->locked_vm -= vma_pages(tmp);
+			munlock_vma_pages_all(tmp);
+		}
+
+		tmp = tmp->vm_next;
+	}
+}
+
 /* Munmap is split into 2 main parts -- this part which finds
  * what needs doing, and the areas themselves, which do the
  * work. This now handles partial unmappings.
@@ -2828,16 +2842,11 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
 	 */
 	arch_unmap(mm, start, end);
 
-	/* Find the first overlapping VMA */
-	vma = find_vma(mm, start);
+	/* Find the first overlapping VMA where start < vma->vm_end */
+	vma = find_vma_intersection(mm, start, end);
 	if (!vma)
 		return 0;
 	prev = vma->vm_prev;
-	/* we have start < vma->vm_end  */
-
-	/* if it doesn't overlap, we have nothing.. */
-	if (vma->vm_start >= end)
-		return 0;
 
 	/*
 	 * If we need to split any vma, do it now to save pain later.
@@ -2890,17 +2899,8 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
 	/*
 	 * unlock any mlock()ed ranges before detaching vmas
 	 */
-	if (mm->locked_vm) {
-		struct vm_area_struct *tmp = vma;
-		while (tmp && tmp->vm_start < end) {
-			if (tmp->vm_flags & VM_LOCKED) {
-				mm->locked_vm -= vma_pages(tmp);
-				munlock_vma_pages_all(tmp);
-			}
-
-			tmp = tmp->vm_next;
-		}
-	}
+	if (mm->locked_vm)
+		unlock_range(vma, end);
 
 	/* Detach vmas from rbtree */
 	if (!detach_vmas_to_be_unmapped(mm, vma, prev, end))
@@ -3185,14 +3185,8 @@ void exit_mmap(struct mm_struct *mm)
 		mmap_write_unlock(mm);
 	}
 
-	if (mm->locked_vm) {
-		vma = mm->mmap;
-		while (vma) {
-			if (vma->vm_flags & VM_LOCKED)
-				munlock_vma_pages_all(vma);
-			vma = vma->vm_next;
-		}
-	}
+	if (mm->locked_vm)
+		unlock_range(mm->mmap, ULONG_MAX);
 
 	arch_exit_mmap(mm);