diff --git a/mm/mlock.c b/mm/mlock.c
index c6b139a..96f0010 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
 
 #include "internal.h"
 
-int can_do_mlock(void)
+bool can_do_mlock(void)
 {
        if (rlimit(RLIMIT_MEMLOCK) != 0)
-               return 1;
+               return true;
        if (capable(CAP_IPC_LOCK))
-               return 1;
-       return 0;
+               return true;
+       return false;
 }
 EXPORT_SYMBOL(can_do_mlock);
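
The int-to-bool conversion is behavior-neutral: every caller already treats
the result as a yes/no permission check. A minimal sketch of the typical
call site, modeled on do_mlock() in this same file (illustrative, not part
of the hunk):

	/* Bail out unless the task is allowed to mlock pages at all. */
	if (!can_do_mlock())
		return -EPERM;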
 
@@ -82,6 +82,9 @@ void mlock_vma_page(struct page *page)
        /* Serialize with page migration */
        BUG_ON(!PageLocked(page));
 
+       VM_BUG_ON_PAGE(PageTail(page), page);
+       VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page);
+
        if (!TestSetPageMlocked(page)) {
                mod_zone_page_state(page_zone(page), NR_MLOCK,
                                    hpage_nr_pages(page));
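
Both new assertions compile away unless CONFIG_DEBUG_VM is set. For
reference, VM_BUG_ON_PAGE() expands to roughly the following mmdebug.h
definition (a sketch, not part of this patch):

	#define VM_BUG_ON_PAGE(cond, page)				\
		do {							\
			if (unlikely(cond)) {				\
				dump_page(page,				\
					"VM_BUG_ON_PAGE(" __stringify(cond) ")"); \
				BUG();					\
			}						\
		} while (0)

So a tail page, or a double-mapped compound page, reaching mlock_vma_page()
dumps the page state and panics on debug builds.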
@@ -172,12 +175,14 @@ static void __munlock_isolation_failed(struct page *page)
  */
 unsigned int munlock_vma_page(struct page *page)
 {
-       unsigned int nr_pages;
+       int nr_pages;
        struct zone *zone = page_zone(page);
 
        /* For try_to_munlock() and to serialize with page migration */
        BUG_ON(!PageLocked(page));
 
+       VM_BUG_ON_PAGE(PageTail(page), page);
+
        /*
         * Serialize with any parallel __split_huge_page_refcount() which
         * might otherwise copy PageMlocked to part of the tail pages before
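
For context, the value munlock_vma_page() returns becomes the caller's
page_mask; as the munlock_vma_pages_range() hunk later in this patch shows,
the caller advances by that many extra pages. Roughly:

	page_mask = munlock_vma_page(page);	/* nr_pages - 1 upstream */
	page_increm = 1 + page_mask;		/* pages covered this pass */
	start += page_increm * PAGE_SIZE;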
@@ -388,6 +393,13 @@ static unsigned long __munlock_pagevec_fill(struct pagevec *pvec,
                if (!page || page_zone_id(page) != zoneid)
                        break;
 
+               /*
+                * Do not use pagevec for PTE-mapped THP;
+                * munlock_vma_pages_range() will handle them.
+                */
+               if (PageTransCompound(page))
+                       break;
+
                get_page(page);
                /*
                 * Increase the address that will be returned *before* the
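
PageTransCompound() makes the new check cheap: with
CONFIG_TRANSPARENT_HUGEPAGE enabled it is simply PageCompound(), and without
THP it constant-folds to 0, so the added break is compiled out on non-THP
builds. A sketch of the contemporaneous page-flags.h definition:

	static inline int PageTransCompound(struct page *page)
	{
		return PageCompound(page);
	}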
@@ -443,29 +455,43 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
                page = follow_page_mask(vma, start, FOLL_GET | FOLL_DUMP,
                                &page_mask);
 
-               if (page && !IS_ERR(page) && !PageTransCompound(page)) {
-                       /*
-                        * Non-huge pages are handled in batches via
-                        * pagevec. The pin from follow_page_mask()
-                        * prevents them from collapsing by THP.
-                        */
-                       pagevec_add(&pvec, page);
-                       zone = page_zone(page);
-                       zoneid = page_zone_id(page);
+               if (page && !IS_ERR(page)) {
+                       if (PageTransTail(page)) {
+                               VM_BUG_ON_PAGE(PageMlocked(page), page);
+                               put_page(page); /* follow_page_mask() */
+                       } else if (PageTransHuge(page)) {
+                               lock_page(page);
+                               /*
+                                * Any THP page found by follow_page_mask() may
+                                * have gotten split before reaching
+                                * munlock_vma_page(), so we need to recompute
+                                * the page_mask here.
+                                */
+                               page_mask = munlock_vma_page(page);
+                               unlock_page(page);
+                               put_page(page); /* follow_page_mask() */
+                       } else {
+                               /*
+                                * Non-huge pages are handled in batches via
+                                * pagevec. The pin from follow_page_mask()
+                                * prevents khugepaged from collapsing them into a THP.
+                                */
+                               pagevec_add(&pvec, page);
+                               zone = page_zone(page);
+                               zoneid = page_zone_id(page);
 
-                       /*
-                        * Try to fill the rest of pagevec using fast
-                        * pte walk. This will also update start to
-                        * the next page to process. Then munlock the
-                        * pagevec.
-                        */
-                       start = __munlock_pagevec_fill(&pvec, vma,
-                                       zoneid, start, end);
-                       __munlock_pagevec(&pvec, zone);
-                       goto next;
+                               /*
+                                * Try to fill the rest of pagevec using fast
+                                * pte walk. This will also update start to
+                                * the next page to process. Then munlock the
+                                * pagevec.
+                                */
+                               start = __munlock_pagevec_fill(&pvec, vma,
+                                               zoneid, start, end);
+                               __munlock_pagevec(&pvec, zone);
+                               goto next;
+                       }
                }
-               /* It's a bug to munlock in the middle of a THP page */
-               VM_BUG_ON((start >> PAGE_SHIFT) & page_mask);
                page_increm = 1 + page_mask;
                start += page_increm * PAGE_SIZE;
 next:
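
For context, upstream closes the loop right after the next: label; a sketch
of the surrounding structure, outside the visible hunk:

	next:
			cond_resched();
		}	/* while (start < end) */
	}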