// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/mm/madvise.c
 *
 * Copyright (C) 1999  Linus Torvalds
 * Copyright (C) 2002  Christoph Hellwig
 */
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/page-isolation.h>
#include <linux/page_idle.h>
#include <linux/userfaultfd_k.h>
#include <linux/hugetlb.h>
#include <linux/falloc.h>
#include <linux/fadvise.h>
#include <linux/sched.h>
#include <linux/ksm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/pagewalk.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
#include <linux/mmu_notifier.h>

#include <asm/tlb.h>

#include "internal.h"
/*
 * Any behaviour which results in changes to the vma->vm_flags needs to
 * take mmap_sem for writing. Others, which simply traverse vmas, need
 * to only take it for reading.
 */
static int madvise_need_mmap_write(int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_COLD:
	case MADV_FREE:
		return 0;
	default:
		/* be safe, default to 1. list exceptions explicitly */
		return 1;
	}
}
/*
 * We can potentially split a vm area into separate
 * areas, each area with its own behavior.
 */
static long madvise_behavior(struct vm_area_struct *vma,
		     struct vm_area_struct **prev,
		     unsigned long start, unsigned long end, int behavior)
{
	struct mm_struct *mm = vma->vm_mm;
	int error = 0;
	pgoff_t pgoff;
	unsigned long new_flags = vma->vm_flags;

	switch (behavior) {
	case MADV_NORMAL:
		new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
		break;
	case MADV_SEQUENTIAL:
		new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
		break;
	case MADV_RANDOM:
		new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
		break;
	case MADV_DONTFORK:
		new_flags |= VM_DONTCOPY;
		break;
	case MADV_DOFORK:
		if (vma->vm_flags & VM_IO) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTCOPY;
		break;
	case MADV_WIPEONFORK:
		/* MADV_WIPEONFORK is only supported on anonymous memory. */
		if (vma->vm_file || vma->vm_flags & VM_SHARED) {
			error = -EINVAL;
			goto out;
		}
		new_flags |= VM_WIPEONFORK;
		break;
	case MADV_KEEPONFORK:
		new_flags &= ~VM_WIPEONFORK;
		break;
	case MADV_DONTDUMP:
		new_flags |= VM_DONTDUMP;
		break;
	case MADV_DODUMP:
		if (!is_vm_hugetlb_page(vma) && new_flags & VM_SPECIAL) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTDUMP;
		break;
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
		error = ksm_madvise(vma, start, end, behavior, &new_flags);
		if (error)
			goto out_convert_errno;
		break;
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
		error = hugepage_madvise(vma, &new_flags, behavior);
		if (error)
			goto out_convert_errno;
		break;
	}

	if (new_flags == vma->vm_flags) {
		*prev = vma;
		goto out;
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma),
			  vma->vm_userfaultfd_ctx);
	if (*prev) {
		vma = *prev;
		goto success;
	}

	*prev = vma;

	if (start != vma->vm_start) {
		if (unlikely(mm->map_count >= sysctl_max_map_count)) {
			error = -ENOMEM;
			goto out;
		}
		error = __split_vma(mm, vma, start, 1);
		if (error)
			goto out_convert_errno;
	}

	if (end != vma->vm_end) {
		if (unlikely(mm->map_count >= sysctl_max_map_count)) {
			error = -ENOMEM;
			goto out;
		}
		error = __split_vma(mm, vma, end, 0);
		if (error)
			goto out_convert_errno;
	}

success:
	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 */
	vma->vm_flags = new_flags;

out_convert_errno:
	/*
	 * madvise() returns EAGAIN if kernel resources, such as
	 * slab, are temporarily unavailable.
	 */
	if (error == -ENOMEM)
		error = -EAGAIN;
out:
	return error;
}
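/*
 * Illustration (userspace sketch; not part of this file, the setup is
 * arbitrary): applying flag-changing advice such as MADV_DONTDUMP to only
 * the middle of a mapping takes the __split_vma() paths above and leaves
 * three VMAs where there was one.
 *
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		long psz = sysconf(_SC_PAGESIZE);
 *		char *p = mmap(NULL, 3 * psz, PROT_READ | PROT_WRITE,
 *			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *		if (p == MAP_FAILED)
 *			return 1;
 *		// Advise only the middle page: the VMA is split in three,
 *		// which /proc/self/maps then shows as separate entries.
 *		return madvise(p + psz, psz, MADV_DONTDUMP) ? 1 : 0;
 *	}
 */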
#ifdef CONFIG_SWAP
static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
	unsigned long end, struct mm_walk *walk)
{
	pte_t *orig_pte;
	struct vm_area_struct *vma = walk->private;
	unsigned long index;

	if (pmd_none_or_trans_huge_or_clear_bad(pmd))
		return 0;

	for (index = start; index != end; index += PAGE_SIZE) {
		pte_t pte;
		swp_entry_t entry;
		struct page *page;
		spinlock_t *ptl;

		orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
		pte = *(orig_pte + ((index - start) / PAGE_SIZE));
		pte_unmap_unlock(orig_pte, ptl);

		if (pte_present(pte) || pte_none(pte))
			continue;
		entry = pte_to_swp_entry(pte);
		if (unlikely(non_swap_entry(entry)))
			continue;

		page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
							vma, index, false);
		if (page)
			put_page(page);
	}

	return 0;
}

static const struct mm_walk_ops swapin_walk_ops = {
	.pmd_entry		= swapin_walk_pmd_entry,
};
static void force_shm_swapin_readahead(struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		struct address_space *mapping)
{
	pgoff_t index;
	struct page *page;
	swp_entry_t swap;

	for (; start < end; start += PAGE_SIZE) {
		index = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

		page = find_get_entry(mapping, index);
		if (!xa_is_value(page)) {
			if (page)
				put_page(page);
			continue;
		}
		swap = radix_to_swp_entry(page);
		page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
							NULL, 0, false);
		if (page)
			put_page(page);
	}

	lru_add_drain();	/* Push any new pages onto the LRU now */
}
#endif		/* CONFIG_SWAP */
/*
 * Schedule all required I/O operations.  Do not wait for completion.
 */
static long madvise_willneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	struct file *file = vma->vm_file;
	loff_t offset;

	*prev = vma;
#ifdef CONFIG_SWAP
	if (!file) {
		walk_page_range(vma->vm_mm, start, end, &swapin_walk_ops, vma);
		lru_add_drain(); /* Push any new pages onto the LRU now */
		return 0;
	}

	if (shmem_mapping(file->f_mapping)) {
		force_shm_swapin_readahead(vma, start, end,
					file->f_mapping);
		return 0;
	}
#else
	if (!file)
		return -EBADF;
#endif

	if (IS_DAX(file_inode(file))) {
		/* no bad return value, but ignore advice */
		return 0;
	}

	/*
	 * Filesystem's fadvise may need to take various locks.  We need to
	 * explicitly grab a reference because the vma (and hence the
	 * vma's reference to the file) can go away as soon as we drop
	 * mmap_sem.
	 */
	*prev = NULL;	/* tell sys_madvise we drop mmap_sem */
	get_file(file);
	up_read(&current->mm->mmap_sem);
	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	vfs_fadvise(file, offset, end - start, POSIX_FADV_WILLNEED);
	fput(file);
	down_read(&current->mm->mmap_sem);
	return 0;
}
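/*
 * Example (userspace sketch; not part of this file, the helper name is
 * hypothetical): MADV_WILLNEED starts readahead for a file-backed range
 * via the vfs_fadvise() call above and returns without waiting for the
 * I/O to finish.
 *
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *	#include <sys/stat.h>
 *
 *	int prefetch_file(const char *path)
 *	{
 *		struct stat st;
 *		int fd = open(path, O_RDONLY);
 *
 *		if (fd < 0 || fstat(fd, &st) < 0)
 *			return -1;
 *		void *p = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
 *		if (p == MAP_FAILED)
 *			return -1;
 *		return madvise(p, st.st_size, MADV_WILLNEED);
 *	}
 */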
static int madvise_cold_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct mmu_gather *tlb = walk->private;
	struct mm_struct *mm = tlb->mm;
	struct vm_area_struct *vma = walk->vma;
	pte_t *orig_pte, *pte, ptent;
	spinlock_t *ptl;
	struct page *page;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pmd_trans_huge(*pmd)) {
		pmd_t orig_pmd;
		unsigned long next = pmd_addr_end(addr, end);

		tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
		ptl = pmd_trans_huge_lock(pmd, vma);
		if (!ptl)
			return 0;

		orig_pmd = *pmd;
		if (is_huge_zero_pmd(orig_pmd))
			goto huge_unlock;

		if (unlikely(!pmd_present(orig_pmd))) {
			VM_BUG_ON(thp_migration_supported() &&
					!is_pmd_migration_entry(orig_pmd));
			goto huge_unlock;
		}

		page = pmd_page(orig_pmd);
		if (next - addr != HPAGE_PMD_SIZE) {
			int err;

			if (page_mapcount(page) != 1)
				goto huge_unlock;

			get_page(page);
			spin_unlock(ptl);
			lock_page(page);
			err = split_huge_page(page);
			unlock_page(page);
			put_page(page);
			if (!err)
				goto regular_page;
			return 0;
		}

		if (pmd_young(orig_pmd)) {
			pmdp_invalidate(vma, addr, pmd);
			orig_pmd = pmd_mkold(orig_pmd);

			set_pmd_at(mm, addr, pmd, orig_pmd);
			tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
		}

		test_and_clear_page_young(page);
		deactivate_page(page);
huge_unlock:
		spin_unlock(ptl);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
regular_page:
#endif
	tlb_change_page_size(tlb, PAGE_SIZE);
	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	flush_tlb_batched_pending(mm);
	arch_enter_lazy_mmu_mode();
	for (; addr < end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (pte_none(ptent))
			continue;

		if (!pte_present(ptent))
			continue;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/*
		 * Creating a THP page is expensive, so split it only if we
		 * are sure it's worth it: when we are the sole owner.
		 */
		if (PageTransCompound(page)) {
			if (page_mapcount(page) != 1)
				break;
			get_page(page);
			if (!trylock_page(page)) {
				put_page(page);
				break;
			}
			pte_unmap_unlock(orig_pte, ptl);
			if (split_huge_page(page)) {
				unlock_page(page);
				put_page(page);
				pte_offset_map_lock(mm, pmd, addr, &ptl);
				break;
			}
			unlock_page(page);
			put_page(page);
			pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
			pte--;
			addr -= PAGE_SIZE;
			continue;
		}

		VM_BUG_ON_PAGE(PageTransCompound(page), page);

		if (pte_young(ptent)) {
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);
			ptent = pte_mkold(ptent);
			set_pte_at(mm, addr, pte, ptent);
			tlb_remove_tlb_entry(tlb, pte, addr);
		}

		/*
		 * We are deactivating the page to accelerate its reclaim;
		 * the VM will not reclaim it unless we clear PG_young.
		 * As a side effect, this confuses idle-page tracking,
		 * since the recent reference history is lost.
		 */
		test_and_clear_page_young(page);
		deactivate_page(page);
	}

	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(orig_pte, ptl);
	cond_resched();

	return 0;
}
static const struct mm_walk_ops cold_walk_ops = {
	.pmd_entry = madvise_cold_pte_range,
};

static void madvise_cold_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end)
{
	tlb_start_vma(tlb, vma);
	walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, NULL);
	tlb_end_vma(tlb, vma);
}
static long madvise_cold(struct vm_area_struct *vma,
			struct vm_area_struct **prev,
			unsigned long start_addr, unsigned long end_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather tlb;

	*prev = vma;
	if (!can_madv_lru_vma(vma))
		return -EINVAL;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm, start_addr, end_addr);
	madvise_cold_page_range(&tlb, vma, start_addr, end_addr);
	tlb_finish_mmu(&tlb, start_addr, end_addr);

	return 0;
}
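/*
 * Example (userspace sketch; not part of this file): MADV_COLD deactivates
 * the pages so they are reclaimed ahead of other memory under pressure,
 * but their contents remain valid; a later access simply re-references or
 * swaps them back in. Older libc headers may not define MADV_COLD, hence
 * the fallback definition (20 is the UAPI value).
 *
 *	#include <sys/mman.h>
 *	#ifndef MADV_COLD
 *	#define MADV_COLD 20
 *	#endif
 *
 *	// Hint that a buffer will stay allocated but idle for a while.
 *	static int mark_cold(void *buf, size_t len)
 *	{
 *		return madvise(buf, len, MADV_COLD);
 *	}
 */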
static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct mmu_gather *tlb = walk->private;
	struct mm_struct *mm = tlb->mm;
	struct vm_area_struct *vma = walk->vma;
	spinlock_t *ptl;
	pte_t *orig_pte, *pte, ptent;
	struct page *page;
	int nr_swap = 0;
	unsigned long next;

	next = pmd_addr_end(addr, end);
	if (pmd_trans_huge(*pmd))
		if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next))
			goto next;

	if (pmd_trans_unstable(pmd))
		return 0;

	tlb_change_page_size(tlb, PAGE_SIZE);
	orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	flush_tlb_batched_pending(mm);
	arch_enter_lazy_mmu_mode();
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (pte_none(ptent))
			continue;
		/*
		 * If the pte holds a swap entry, just clear the page table
		 * entry: that avoids a swap-in, which is more expensive
		 * than (page allocation + zeroing).
		 */
		if (!pte_present(ptent)) {
			swp_entry_t entry;

			entry = pte_to_swp_entry(ptent);
			if (non_swap_entry(entry))
				continue;
			nr_swap--;
			free_swap_and_cache(entry);
			pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
			continue;
		}

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/*
		 * If the pmd isn't transhuge but the page is THP and
		 * is owned by only this process, split it and
		 * deactivate all pages.
		 */
		if (PageTransCompound(page)) {
			if (page_mapcount(page) != 1)
				goto out;
			get_page(page);
			if (!trylock_page(page)) {
				put_page(page);
				goto out;
			}
			pte_unmap_unlock(orig_pte, ptl);
			if (split_huge_page(page)) {
				unlock_page(page);
				put_page(page);
				pte_offset_map_lock(mm, pmd, addr, &ptl);
				goto out;
			}
			unlock_page(page);
			put_page(page);
			pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
			pte--;
			addr -= PAGE_SIZE;
			continue;
		}

		VM_BUG_ON_PAGE(PageTransCompound(page), page);

		if (PageSwapCache(page) || PageDirty(page)) {
			if (!trylock_page(page))
				continue;
			/*
			 * If the page is shared with others, we can't
			 * clear its PG_dirty.
			 */
			if (page_mapcount(page) != 1) {
				unlock_page(page);
				continue;
			}

			if (PageSwapCache(page) && !try_to_free_swap(page)) {
				unlock_page(page);
				continue;
			}

			ClearPageDirty(page);
			unlock_page(page);
		}

		if (pte_young(ptent) || pte_dirty(ptent)) {
			/*
			 * Some architectures (e.g. PPC) don't update the
			 * TLB with set_pte_at() and tlb_remove_tlb_entry(),
			 * so for portability remap the pte as old and clean
			 * after clearing it.
			 */
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);

			ptent = pte_mkold(ptent);
			ptent = pte_mkclean(ptent);
			set_pte_at(mm, addr, pte, ptent);
			tlb_remove_tlb_entry(tlb, pte, addr);
		}
		mark_page_lazyfree(page);
	}
out:
	if (nr_swap) {
		if (current->mm == mm)
			sync_mm_rss(mm);

		add_mm_counter(mm, MM_SWAPENTS, nr_swap);
	}
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(orig_pte, ptl);
	cond_resched();
next:
	return 0;
}

static const struct mm_walk_ops madvise_free_walk_ops = {
	.pmd_entry		= madvise_free_pte_range,
};
static int madvise_free_single_vma(struct vm_area_struct *vma,
			unsigned long start_addr, unsigned long end_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_notifier_range range;
	struct mmu_gather tlb;

	/* MADV_FREE works for only anon vma at the moment */
	if (!vma_is_anonymous(vma))
		return -EINVAL;

	range.start = max(vma->vm_start, start_addr);
	if (range.start >= vma->vm_end)
		return -EINVAL;
	range.end = min(vma->vm_end, end_addr);
	if (range.end <= vma->vm_start)
		return -EINVAL;
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
				range.start, range.end);

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm, range.start, range.end);
	update_hiwater_rss(mm);

	mmu_notifier_invalidate_range_start(&range);
	tlb_start_vma(&tlb, vma);
	walk_page_range(vma->vm_mm, range.start, range.end,
			&madvise_free_walk_ops, &tlb);
	tlb_end_vma(&tlb, vma);
	mmu_notifier_invalidate_range_end(&range);
	tlb_finish_mmu(&tlb, range.start, range.end);

	return 0;
}
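/*
 * Example (userspace sketch; not part of this file, the helper name is
 * made up): MADV_FREE is a lazy variant of MADV_DONTNEED for anonymous
 * memory. The walk above only marks the pages lazyfree; they keep their
 * contents until reclaim runs, and writing to a page after the call
 * cancels the free for that page.
 *
 *	#include <string.h>
 *	#include <sys/mman.h>
 *
 *	static void recycle_buffer(char *buf, size_t len)
 *	{
 *		memset(buf, 0x5a, len);		// buffer was in use
 *		madvise(buf, len, MADV_FREE);	// contents now disposable
 *		buf[0] = 1;			// a new write revives this page
 *	}
 */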
/*
 * Application no longer needs these pages.  If the pages are dirty,
 * it's OK to just throw them away.  The app will be more careful about
 * data it wants to keep.  Be sure to free swap resources too.  The
 * zap_page_range call sets things up for shrink_active_list to actually free
 * these pages later if no one else has touched them in the meantime,
 * although we could add these pages to a global reuse list for
 * shrink_active_list to pick up before reclaiming other pages.
 *
 * NB: This interface discards data rather than pushes it out to swap,
 * as some implementations do.  This has performance implications for
 * applications like large transactional databases which want to discard
 * pages in anonymous maps after committing to backing store the data
 * that was kept in them.  There is no reason to write this data out to
 * the swap area if the application is discarding it.
 *
 * An interface that causes the system to free clean pages and flush
 * dirty pages is already available as msync(MS_INVALIDATE).
 */
static long madvise_dontneed_single_vma(struct vm_area_struct *vma,
					unsigned long start, unsigned long end)
{
	zap_page_range(vma, start, end - start);
	return 0;
}
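/*
 * Example (userspace sketch; not part of this file): after MADV_DONTNEED
 * on a private anonymous mapping, the zap above has dropped the pages, so
 * the next read faults in fresh zero-filled memory instead of the old data.
 *
 *	#include <assert.h>
 *	#include <sys/mman.h>
 *
 *	static void drop_range(char *p, size_t len)	// p: private, anon
 *	{
 *		p[0] = 42;
 *		madvise(p, len, MADV_DONTNEED);
 *		assert(p[0] == 0);	// old contents are gone
 *	}
 */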
static long madvise_dontneed_free(struct vm_area_struct *vma,
				  struct vm_area_struct **prev,
				  unsigned long start, unsigned long end,
				  int behavior)
{
	*prev = vma;
	if (!can_madv_lru_vma(vma))
		return -EINVAL;

	if (!userfaultfd_remove(vma, start, end)) {
		*prev = NULL; /* mmap_sem has been dropped, prev is stale */

		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, start);
		if (!vma)
			return -ENOMEM;
		if (start < vma->vm_start) {
			/*
			 * This "vma" under revalidation is the one
			 * with the lowest vma->vm_start where start
			 * is also < vma->vm_end. If start <
			 * vma->vm_start it means a hole materialized
			 * in the user address space within the
			 * virtual range passed to MADV_DONTNEED
			 * or MADV_FREE.
			 */
			return -ENOMEM;
		}
		if (!can_madv_lru_vma(vma))
			return -EINVAL;
		if (end > vma->vm_end) {
			/*
			 * Don't fail if end > vma->vm_end. If the old
			 * vma was split while the mmap_sem was
			 * released, the concurrent operation must not
			 * leave madvise() with an undefined result.
			 * There may be an adjacent next vma that we'll
			 * walk next. userfaultfd_remove() will generate
			 * an UFFD_EVENT_REMOVE repetition on the
			 * end-vma->vm_end range, but the manager can
			 * handle a repetition fine.
			 */
			end = vma->vm_end;
		}
		VM_WARN_ON(start >= end);
	}

	if (behavior == MADV_DONTNEED)
		return madvise_dontneed_single_vma(vma, start, end);
	else if (behavior == MADV_FREE)
		return madvise_free_single_vma(vma, start, end);
	else
		return -EINVAL;
}
/*
 * Application wants to free up the pages and associated backing store.
 * This is effectively punching a hole into the middle of a file.
 */
static long madvise_remove(struct vm_area_struct *vma,
				struct vm_area_struct **prev,
				unsigned long start, unsigned long end)
{
	loff_t offset;
	int error;
	struct file *f;

	*prev = NULL;	/* tell sys_madvise we drop mmap_sem */

	if (vma->vm_flags & VM_LOCKED)
		return -EINVAL;

	f = vma->vm_file;

	if (!f || !f->f_mapping || !f->f_mapping->host) {
		return -EINVAL;
	}

	if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
		return -EACCES;

	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

	/*
	 * Filesystem's fallocate may need to take i_mutex.  We need to
	 * explicitly grab a reference because the vma (and hence the
	 * vma's reference to the file) can go away as soon as we drop
	 * mmap_sem.
	 */
	get_file(f);
	if (userfaultfd_remove(vma, start, end)) {
		/* mmap_sem was not released by userfaultfd_remove() */
		up_read(&current->mm->mmap_sem);
	}
	error = vfs_fallocate(f,
				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				offset, end - start);
	fput(f);
	down_read(&current->mm->mmap_sem);
	return error;
}
#ifdef CONFIG_MEMORY_FAILURE
/*
 * Error injection support for memory error handling.
 */
static int madvise_inject_error(int behavior,
		unsigned long start, unsigned long end)
{
	struct page *page;
	struct zone *zone;
	unsigned int order;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	for (; start < end; start += PAGE_SIZE << order) {
		unsigned long pfn;
		int ret;

		ret = get_user_pages_fast(start, 1, 0, &page);
		if (ret != 1)
			return ret;
		pfn = page_to_pfn(page);

		/*
		 * When soft offlining hugepages, after migrating the page
		 * we dissolve it, therefore in the second loop "page" will
		 * no longer be a compound page, and order will be 0.
		 */
		order = compound_order(compound_head(page));

		if (PageHWPoison(page)) {
			put_page(page);
			continue;
		}

		if (behavior == MADV_SOFT_OFFLINE) {
			pr_info("Soft offlining pfn %#lx at process virtual address %#lx\n",
					pfn, start);

			ret = soft_offline_page(page, MF_COUNT_INCREASED);
			if (ret)
				return ret;
			continue;
		}

		pr_info("Injecting memory failure for pfn %#lx at process virtual address %#lx\n",
				pfn, start);

		/*
		 * Drop the page reference taken by get_user_pages_fast(). In
		 * the absence of MF_COUNT_INCREASED the memory_failure()
		 * routine is responsible for pinning the page to prevent it
		 * from being released back to the page allocator.
		 */
		put_page(page);
		ret = memory_failure(pfn, 0);
		if (ret)
			return ret;
	}

	/* Ensure that all poisoned pages are removed from per-cpu lists */
	for_each_populated_zone(zone)
		drain_all_pages(zone);

	return 0;
}
#endif
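/*
 * Example (privileged userspace sketch; not part of this file): with
 * CONFIG_MEMORY_FAILURE enabled and CAP_SYS_ADMIN, a test program can
 * feed one of its own pages to the memory-failure handler; accessing
 * the page afterwards is expected to raise SIGBUS.
 *
 *	#include <sys/mman.h>
 *
 *	static int poison_own_page(void *page, size_t pagesize)
 *	{
 *		return madvise(page, pagesize, MADV_HWPOISON);
 *	}
 */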
static long
madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
		unsigned long start, unsigned long end, int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
		return madvise_remove(vma, prev, start, end);
	case MADV_WILLNEED:
		return madvise_willneed(vma, prev, start, end);
	case MADV_COLD:
		return madvise_cold(vma, prev, start, end);
	case MADV_FREE:
	case MADV_DONTNEED:
		return madvise_dontneed_free(vma, prev, start, end, behavior);
	default:
		return madvise_behavior(vma, prev, start, end, behavior);
	}
}
static bool
madvise_behavior_valid(int behavior)
{
	switch (behavior) {
	case MADV_DOFORK:
	case MADV_DONTFORK:
	case MADV_NORMAL:
	case MADV_SEQUENTIAL:
	case MADV_RANDOM:
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_FREE:
	case MADV_COLD:
#ifdef CONFIG_KSM
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
#endif
	case MADV_DONTDUMP:
	case MADV_DODUMP:
	case MADV_WIPEONFORK:
	case MADV_KEEPONFORK:
#ifdef CONFIG_MEMORY_FAILURE
	case MADV_SOFT_OFFLINE:
	case MADV_HWPOISON:
#endif
		return true;

	default:
		return false;
	}
}
/*
 * The madvise(2) system call.
 *
 * Applications can use madvise() to advise the kernel how it should
 * handle paging I/O in this VM area.  The idea is to help the kernel
 * use appropriate read-ahead and caching techniques.  The information
 * provided is advisory only, and can be safely disregarded by the
 * kernel without affecting the correct operation of the application.
 *
 * behavior values:
 *  MADV_NORMAL - the default behavior is to read clusters.  This
 *		results in some read-ahead and read-behind.
 *  MADV_RANDOM - the system should read the minimum amount of data
 *		on any access, since it is unlikely that the application
 *		will need more than what it asks for.
 *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
 *		once, so they can be aggressively read ahead, and
 *		can be freed soon after they are accessed.
 *  MADV_WILLNEED - the application is notifying the system to read
 *		some pages ahead.
 *  MADV_DONTNEED - the application is finished with the given range,
 *		so the kernel can free resources associated with it.
 *  MADV_FREE - the application marks pages in the given range as lazy free,
 *		where actual purges are postponed until memory pressure happens.
 *  MADV_REMOVE - the application wants to free up the given range of
 *		pages and associated backing store.
 *  MADV_DONTFORK - omit this area from child's address space when forking:
 *		typically, to avoid COWing pages pinned by get_user_pages().
 *  MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
 *  MADV_WIPEONFORK - present the child process with zero-filled memory in this
 *		range after a fork.
 *  MADV_KEEPONFORK - undo the effect of MADV_WIPEONFORK.
 *  MADV_HWPOISON - trigger memory error handler as if the given memory range
 *		were corrupted by unrecoverable hardware memory failure.
 *  MADV_SOFT_OFFLINE - try to soft-offline the given range of memory.
 *  MADV_MERGEABLE - the application recommends that KSM try to merge pages in
 *		this area with pages of identical content from other such areas.
 *  MADV_UNMERGEABLE - cancel MADV_MERGEABLE: no longer merge pages with others.
 *  MADV_HUGEPAGE - the application wants to back the given range by transparent
 *		huge pages in the future. Existing pages might be coalesced and
 *		new pages might be allocated as THP.
 *  MADV_NOHUGEPAGE - mark the given range as not worth being backed by
 *		transparent huge pages so the existing pages will not be
 *		coalesced into THP and new pages will not be allocated as THP.
 *  MADV_DONTDUMP - the application wants to prevent pages in the given range
 *		from being included in its core dump.
 *  MADV_DODUMP - cancel MADV_DONTDUMP: no longer exclude from core dump.
 *
 * return values:
 *  zero    - success
 *  -EINVAL - start + len < 0, start is not page-aligned,
 *		"behavior" is not a valid value, or application
 *		is attempting to release locked or shared pages,
 *		or the specified address range includes file, Huge TLB,
 *		MAP_SHARED or VMPFNMAP range.
 *  -ENOMEM - addresses in the specified range are not currently
 *		mapped, or are outside the AS of the process.
 *  -EIO    - an I/O error occurred while paging in data.
 *  -EBADF  - map exists, but area maps something that isn't a file.
 *  -EAGAIN - a kernel resource was temporarily unavailable.
 */
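/*
 * Usage sketch (userspace; not part of this file, the helper is
 * illustrative): start and length must be page-aligned, and advice
 * applied over a range containing unmapped holes is applied VMA by VMA
 * but still reports -ENOMEM, per the loop in the syscall below.
 *
 *	#include <stdio.h>
 *	#include <sys/mman.h>
 *
 *	static void advise_or_warn(void *addr, size_t len, int advice)
 *	{
 *		if (madvise(addr, len, advice) != 0)
 *			perror("madvise");	// e.g. ENOMEM across a hole
 *	}
 */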
SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
{
	unsigned long end, tmp;
	struct vm_area_struct *vma, *prev;
	int unmapped_error = 0;
	int error = -EINVAL;
	int write;
	size_t len;
	struct blk_plug plug;

	start = untagged_addr(start);

	if (!madvise_behavior_valid(behavior))
		return error;

	if (start & ~PAGE_MASK)
		return error;
	len = (len_in + ~PAGE_MASK) & PAGE_MASK;

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		return error;

	end = start + len;
	if (end < start)
		return error;

	error = 0;
	if (end == start)
		return error;

#ifdef CONFIG_MEMORY_FAILURE
	if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE)
		return madvise_inject_error(behavior, start, start + len_in);
#endif

	write = madvise_need_mmap_write(behavior);
	if (write) {
		if (down_write_killable(&current->mm->mmap_sem))
			return -EINTR;
	} else {
		down_read(&current->mm->mmap_sem);
	}

	/*
	 * If the interval [start,end) covers some unmapped address
	 * ranges, just ignore them, but return -ENOMEM at the end.
	 * - different from the way of handling in mlock etc.
	 */
	vma = find_vma_prev(current->mm, start, &prev);
	if (vma && start > vma->vm_start)
		prev = vma;

	blk_start_plug(&plug);
	for (;;) {
		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			goto out;

		/* Here start < (end|vma->vm_end). */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
			if (start >= end)
				goto out;
		}

		/* Here vma->vm_start <= start < (end|vma->vm_end) */
		tmp = vma->vm_end;
		if (end < tmp)
			tmp = end;

		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
		error = madvise_vma(vma, &prev, start, tmp, behavior);
		if (error)
			goto out;
		start = tmp;
		if (prev && start < prev->vm_end)
			start = prev->vm_end;
		error = unmapped_error;
		if (start >= end)
			goto out;
		if (prev)
			vma = prev->vm_next;
		else	/* madvise_remove dropped mmap_sem */
			vma = find_vma(current->mm, start);
	}
out:
	blk_finish_plug(&plug);
	if (write)
		up_write(&current->mm->mmap_sem);
	else
		up_read(&current->mm->mmap_sem);

	return error;
}