Merge tag 'for-linus-5.12-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git...
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a260296..905a7d5 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -79,6 +79,21 @@ DEFINE_SPINLOCK(hugetlb_lock);
 static int num_fault_mutexes;
 struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
 
+static inline bool PageHugeFreed(struct page *head)
+{
+       return page_private(head + 4) == -1UL;
+}
+
+static inline void SetPageHugeFreed(struct page *head)
+{
+       set_page_private(head + 4, -1UL);
+}
+
+static inline void ClearPageHugeFreed(struct page *head)
+{
+       set_page_private(head + 4, 0);
+}
+
 /* Forward declaration */
 static int hugetlb_acct_memory(struct hstate *h, long delta);
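
The three new helpers park a "freed" marker in the page_private field of the
head page's fifth subpage (head + 4), a field hugetlb has no other use for;
the hunks below then keep that marker in sync with free-list membership,
setting it in enqueue_huge_page() and clearing it on dequeue and on freshly
prepped pages. A minimal userspace sketch of the subpage-field trick, with
struct page mocked as a plain array (all names hypothetical):

#include <assert.h>
#include <stdbool.h>

struct page { unsigned long private; };	/* toy stand-in for struct page */

/* Same layout trick as the hunk above: the marker lives in subpage 4,
 * addressed relative to the compound head. */
static bool PageHugeFreed(struct page *head)      { return head[4].private == -1UL; }
static void SetPageHugeFreed(struct page *head)   { head[4].private = -1UL; }
static void ClearPageHugeFreed(struct page *head) { head[4].private = 0; }

int main(void)
{
	struct page hpage[8] = {{ 0 }};	/* eight subpages suffice here */

	assert(!PageHugeFreed(hpage));
	SetPageHugeFreed(hpage);	/* what enqueue_huge_page() does */
	assert(PageHugeFreed(hpage));
	ClearPageHugeFreed(hpage);	/* what the dequeue path does */
	assert(!PageHugeFreed(hpage));
	return 0;
}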
 
@@ -1028,6 +1043,7 @@ static void enqueue_huge_page(struct hstate *h, struct page *page)
        list_move(&page->lru, &h->hugepage_freelists[nid]);
        h->free_huge_pages++;
        h->free_huge_pages_node[nid]++;
+       SetPageHugeFreed(page);
 }
 
 static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
@@ -1044,6 +1060,7 @@ static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
 
                list_move(&page->lru, &h->hugepage_activelist);
                set_page_refcounted(page);
+               ClearPageHugeFreed(page);
                h->free_huge_pages--;
                h->free_huge_pages_node[nid]--;
                return page;
@@ -1344,12 +1361,11 @@ struct hstate *size_to_hstate(unsigned long size)
  */
 bool page_huge_active(struct page *page)
 {
-       VM_BUG_ON_PAGE(!PageHuge(page), page);
-       return PageHead(page) && PagePrivate(&page[1]);
+       return PageHeadHuge(page) && PagePrivate(&page[1]);
 }
 
 /* never called for tail page */
-static void set_page_huge_active(struct page *page)
+void set_page_huge_active(struct page *page)
 {
        VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
        SetPagePrivate(&page[1]);
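
For contrast with the "freed" marker above, the "active" state is kept in the
PG_private page flag of the first tail page (page[1]). page_huge_active() now
folds the old assertion into a PageHeadHuge() check so it is safe on arbitrary
pages, and set_page_huge_active() loses its static so callers outside this
file (the hugetlbfs side) can mark a page active only once it is fully set up.
A rough userspace model of the flag placement (bit value and names
hypothetical):

#include <assert.h>
#include <stdbool.h>

struct page { unsigned long flags; };	/* toy stand-in for struct page */

#define PG_private_bit	(1UL << 0)	/* hypothetical flag bit */

static bool page_huge_active(struct page *head)
{
	return head[1].flags & PG_private_bit;	/* PagePrivate(&page[1]) */
}

static void set_page_huge_active(struct page *head)
{
	head[1].flags |= PG_private_bit;	/* SetPagePrivate(&page[1]) */
}

int main(void)
{
	struct page hpage[8] = {{ 0 }};

	assert(!page_huge_active(hpage));
	set_page_huge_active(hpage);
	assert(page_huge_active(hpage));
	return 0;
}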
@@ -1505,6 +1521,7 @@ static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
        spin_lock(&hugetlb_lock);
        h->nr_huge_pages++;
        h->nr_huge_pages_node[nid]++;
+       ClearPageHugeFreed(page);
        spin_unlock(&hugetlb_lock);
 }
 
@@ -1755,6 +1772,7 @@ int dissolve_free_huge_page(struct page *page)
 {
        int rc = -EBUSY;
 
+retry:
 	/* Avoid disrupting the normal path by holding hugetlb_lock in vain */
        if (!PageHuge(page))
                return 0;
@@ -1771,6 +1789,26 @@ int dissolve_free_huge_page(struct page *page)
                int nid = page_to_nid(head);
                if (h->free_huge_pages - h->resv_huge_pages == 0)
                        goto out;
+
+		/*
+		 * Make sure the page is actually on the free list before
+		 * dissolving it; a page that is being freed concurrently
+		 * may not have been enqueued yet.
+		 */
+               if (unlikely(!PageHugeFreed(head))) {
+                       spin_unlock(&hugetlb_lock);
+                       cond_resched();
+
+			/*
+			 * Theoretically we should return -EBUSY when we hit
+			 * this race, but the window is quite small, so a
+			 * retry has a good chance of dissolving the page
+			 * after all; retrying raises the overall success
+			 * rate of dissolving pages.
+			 */
+                       goto retry;
+               }
+
                /*
                 * Move PageHWPoison flag from head page to the raw error page,
                 * which makes the subpages other than the error page reusable.
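
The retry loop added above closes a race with a hugetlb page that is still
being freed: PageHuge() and the zero refcount already look dissolvable, but
the page has not been enqueued yet, so PageHugeFreed() is false and dissolving
now would corrupt the free lists. Instead of returning -EBUSY, the code drops
the lock, yields, and re-checks. A runnable userspace sketch of that
drop-yield-retry shape, with pthreads standing in for the kernel primitives
(all names hypothetical):

#include <pthread.h>
#include <sched.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool on_freelist;	/* stands in for PageHugeFreed() */

static void *freer(void *arg)
{
	pthread_mutex_lock(&lock);
	on_freelist = true;	/* the enqueue_huge_page() side */
	pthread_mutex_unlock(&lock);
	return arg;
}

static int dissolve(void)
{
retry:
	pthread_mutex_lock(&lock);
	if (!on_freelist) {
		/* Lost the race: the freer has not enqueued yet. */
		pthread_mutex_unlock(&lock);
		sched_yield();	/* stands in for cond_resched() */
		goto retry;
	}
	on_freelist = false;	/* actually dissolve the entry */
	pthread_mutex_unlock(&lock);
	return 0;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, freer, NULL);
	printf("dissolve() -> %d\n", dissolve());	/* retries until 0 */
	pthread_join(t, NULL);
	return 0;
}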
@@ -2009,13 +2047,16 @@ retry:
 
        /* Free the needed pages to the hugetlb pool */
        list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
+               int zeroed;
+
                if ((--needed) < 0)
                        break;
                /*
                 * This page is now managed by the hugetlb allocator and has
                 * no users -- drop the buddy allocator's reference.
                 */
-               VM_BUG_ON_PAGE(!put_page_testzero(page), page);
+               zeroed = put_page_testzero(page);
+               VM_BUG_ON_PAGE(!zeroed, page);
                enqueue_huge_page(h, page);
        }
 free:
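
The put_page_testzero() change is more than style: when CONFIG_DEBUG_VM is
off, VM_BUG_ON_PAGE() compiles to a no-op that never evaluates its condition,
so a reference drop hidden inside it would silently vanish and the surplus
page would leak. Moving the side effect out leaves only the check itself
conditional. The identical trap exists in userspace with assert() under
NDEBUG, which this sketch uses as a stand-in:

#define NDEBUG			/* models CONFIG_DEBUG_VM=n */
#include <assert.h>
#include <stdio.h>

static int refcount = 1;

static int put_testzero(void)	/* stands in for put_page_testzero() */
{
	return --refcount == 0;
}

int main(void)
{
	/* Buggy pattern: the whole expression disappears under NDEBUG. */
	assert(put_testzero());
	printf("refcount = %d (the drop was compiled away)\n", refcount);

	/* Fixed pattern: the drop always runs, only the check can vanish. */
	int zeroed = put_testzero();
	assert(zeroed);
	printf("refcount = %d\n", refcount);
	return 0;
}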
@@ -3967,25 +4008,11 @@ void __unmap_hugepage_range_final(struct mmu_gather *tlb,
 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
                          unsigned long end, struct page *ref_page)
 {
-       struct mm_struct *mm;
        struct mmu_gather tlb;
-       unsigned long tlb_start = start;
-       unsigned long tlb_end = end;
-
-       /*
-        * If shared PMDs were possibly used within this vma range, adjust
-        * start/end for worst case tlb flushing.
-        * Note that we can not be sure if PMDs are shared until we try to
-        * unmap pages.  However, we want to make sure TLB flushing covers
-        * the largest possible range.
-        */
-       adjust_range_if_pmd_sharing_possible(vma, &tlb_start, &tlb_end);
-
-       mm = vma->vm_mm;
 
-       tlb_gather_mmu(&tlb, mm, tlb_start, tlb_end);
+       tlb_gather_mmu(&tlb, vma->vm_mm);
        __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
-       tlb_finish_mmu(&tlb, tlb_start, tlb_end);
+       tlb_finish_mmu(&tlb);
 }
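
The deleted block reflects an API change rather than lost logic: with the 5.12
mmu_gather rework, tlb_gather_mmu() and tlb_finish_mmu() no longer take a
start/end pair, and the gather itself tracks the range that needs flushing as
entries are unmapped, so this caller no longer pre-computes a worst case for
possibly shared PMDs. A toy model of that range-tracking design (hypothetical
names, not the kernel API):

#include <stdio.h>

struct gather { unsigned long start, end; };

static void gather_init(struct gather *g)
{
	g->start = ~0UL;	/* empty range until something is unmapped */
	g->end = 0;
}

static void gather_unmap(struct gather *g, unsigned long addr, unsigned long size)
{
	if (addr < g->start)
		g->start = addr;	/* grow the tracked span downward */
	if (addr + size > g->end)
		g->end = addr + size;	/* ...and upward */
}

static void gather_finish(struct gather *g)
{
	printf("flush [%#lx, %#lx)\n", g->start, g->end);
}

int main(void)
{
	struct gather g;

	gather_init(&g);
	gather_unmap(&g, 0x200000, 0x200000);	/* one 2 MiB huge page */
	gather_unmap(&g, 0x600000, 0x200000);	/* another, discontiguous */
	gather_finish(&g);	/* span covers both without a caller hint */
	return 0;
}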
 
 /*
@@ -4371,7 +4398,7 @@ retry:
                 * So we need to block the hugepage fault with a PG_hwpoison bit check.
                 */
                if (unlikely(PageHWPoison(page))) {
-                       ret = VM_FAULT_HWPOISON |
+                       ret = VM_FAULT_HWPOISON_LARGE |
                                VM_FAULT_SET_HINDEX(hstate_index(h));
                        goto backout_unlocked;
                }
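
Returning plain VM_FAULT_HWPOISON here lost information: VM_FAULT_SET_HINDEX()
packs the hstate index into the fault code, but only the _LARGE variant tells
the fault handler that an index is present, so without it the poison was
reported as if it hit a base page. A sketch of the bit-packing idea (the
constants are hypothetical, not the kernel's actual encoding):

#include <assert.h>

#define FAULT_HWPOISON_LARGE	0x20u		/* "poisoned huge page" bit */
#define FAULT_SET_HINDEX(x)	((unsigned int)(x) << 16)
#define FAULT_GET_HINDEX(r)	(((r) >> 16) & 0xfu)

int main(void)
{
	unsigned int ret = FAULT_HWPOISON_LARGE | FAULT_SET_HINDEX(2);

	assert(ret & FAULT_HWPOISON_LARGE);
	assert(FAULT_GET_HINDEX(ret) == 2);	/* hstate index survives */
	return 0;
}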
@@ -5555,9 +5582,9 @@ bool isolate_huge_page(struct page *page, struct list_head *list)
 {
        bool ret = true;
 
-       VM_BUG_ON_PAGE(!PageHead(page), page);
        spin_lock(&hugetlb_lock);
-       if (!page_huge_active(page) || !get_page_unless_zero(page)) {
+       if (!PageHeadHuge(page) || !page_huge_active(page) ||
+           !get_page_unless_zero(page)) {
                ret = false;
                goto unlock;
        }
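
This final hunk retires one more unlocked assertion: between the caller's
checks and the spin_lock() the page can be freed and reused, so by the time we
get here it may legitimately no longer be a compound head. The head test
therefore moves under hugetlb_lock and fails gracefully instead of crashing a
debug kernel. A userspace sketch of the locked check-then-take shape
(hypothetical names):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool is_head = true;	/* stands in for PageHeadHuge() */
static int refcount = 1;	/* 0 would mean "being freed" */

static bool isolate(void)
{
	bool ret = true;

	pthread_mutex_lock(&lock);
	/* Every precondition is re-checked under the lock, none asserted. */
	if (!is_head || refcount == 0) {
		ret = false;
		goto unlock;
	}
	refcount++;		/* the get_page_unless_zero() step */
unlock:
	pthread_mutex_unlock(&lock);
	return ret;
}

int main(void)
{
	printf("isolate() -> %d\n", isolate());	/* 1: takes a reference */
	is_head = false;	/* page meanwhile freed and reused */
	printf("isolate() -> %d\n", isolate());	/* 0: fails gracefully */
	return 0;
}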