mm/hugetlb: add missing annotation for gather_surplus_pages()
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 79f4c0f..f5fb53f 100644
@@ -276,6 +276,48 @@ static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
 #endif
 }
 
+static bool has_same_uncharge_info(struct file_region *rg,
+                                  struct file_region *org)
+{
+#ifdef CONFIG_CGROUP_HUGETLB
+       return rg && org &&
+              rg->reservation_counter == org->reservation_counter &&
+              rg->css == org->css;
+
+#else
+       return true;
+#endif
+}
+
+static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
+{
+       struct file_region *nrg = NULL, *prg = NULL;
+
+       prg = list_prev_entry(rg, link);
+       if (&prg->link != &resv->regions && prg->to == rg->from &&
+           has_same_uncharge_info(prg, rg)) {
+               prg->to = rg->to;
+
+               list_del(&rg->link);
+               kfree(rg);
+
+               coalesce_file_region(resv, prg);
+               return;
+       }
+
+       nrg = list_next_entry(rg, link);
+       if (&nrg->link != &resv->regions && nrg->from == rg->to &&
+           has_same_uncharge_info(nrg, rg)) {
+               nrg->from = rg->from;
+
+               list_del(&rg->link);
+               kfree(rg);
+
+               coalesce_file_region(resv, nrg);
+               return;
+       }
+}
+
 /* Must be called with resv->lock held. Calling this with count_only == true
  * will count the number of pages to be added but will not modify the linked
  * list. If regions_needed != NULL and count_only == true, then regions_needed
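
Editor's note: for readers unfamiliar with the pattern, the sketch below distills the coalescing logic into a self-contained userspace C program: regions are half-open intervals [from, to) kept sorted on a list, and a region is absorbed into a neighbour when one ends exactly where the other begins. All names here (region, mkregion, coalesce) are invented for illustration, not kernel API; the kernel version above additionally checks the predecessor and gates every merge on has_same_uncharge_info().

/*
 * Standalone sketch of the coalescing idea, not kernel code.
 * Merges a region forward into its successors while they abut.
 */
#include <stdio.h>
#include <stdlib.h>

struct region {
	long from, to;		/* half-open interval [from, to) */
	struct region *next;
};

static struct region *mkregion(long from, long to, struct region *next)
{
	struct region *r = malloc(sizeof(*r));

	r->from = from;
	r->to = to;
	r->next = next;
	return r;
}

/* Absorb successors while r ends exactly where the next one starts. */
static void coalesce(struct region *r)
{
	while (r->next && r->to == r->next->from) {
		struct region *n = r->next;

		r->to = n->to;
		r->next = n->next;
		free(n);
	}
}

int main(void)
{
	struct region *head =
		mkregion(0, 2, mkregion(2, 4, mkregion(4, 6, NULL)));

	coalesce(head);
	printf("[%ld, %ld)\n", head->from, head->to);	/* prints "[0, 6)" */
	free(head);
	return 0;
}
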
@@ -327,6 +369,7 @@ static long add_reservation_in_range(struct resv_map *resv, long f, long t,
                                record_hugetlb_cgroup_uncharge_info(h_cg, h,
                                                                    resv, nrg);
                                list_add(&nrg->link, rg->link.prev);
+                               coalesce_file_region(resv, nrg);
                        } else if (regions_needed)
                                *regions_needed += 1;
                }
@@ -344,6 +387,7 @@ static long add_reservation_in_range(struct resv_map *resv, long f, long t,
                                resv, last_accounted_offset, t);
                        record_hugetlb_cgroup_uncharge_info(h_cg, h, resv, nrg);
                        list_add(&nrg->link, rg->link.prev);
+                       coalesce_file_region(resv, nrg);
                } else if (regions_needed)
                        *regions_needed += 1;
        }
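
Editor's note: both call sites follow the same insert-then-coalesce pattern: every nrg that add_reservation_in_range() links into the list is immediately handed to coalesce_file_region(), so abutting entries never accumulate. Extending the toy model above with a per-region tag (a stand-in for the cgroup uncharge info compared by has_same_uncharge_info(), not a kernel field) shows why the guard matters: abutting regions charged to different cgroups must stay separate.

/* Variant of the previous sketch: merge only when the tags match. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct tregion {
	long from, to;
	int tag;			/* stand-in for cgroup uncharge info */
	struct tregion *next;
};

static bool same_tag(const struct tregion *a, const struct tregion *b)
{
	return a->tag == b->tag;
}

/* Merge forward only while intervals abut AND the tags agree. */
static void coalesce_tagged(struct tregion *r)
{
	while (r->next && r->to == r->next->from && same_tag(r, r->next)) {
		struct tregion *n = r->next;

		r->to = n->to;
		r->next = n->next;
		free(n);
	}
}

int main(void)
{
	struct tregion *c = malloc(sizeof(*c));
	struct tregion *b = malloc(sizeof(*b));
	struct tregion *a = malloc(sizeof(*a));

	*c = (struct tregion){ 4, 6, /*tag=*/2, NULL };
	*b = (struct tregion){ 2, 4, /*tag=*/2, c };
	*a = (struct tregion){ 0, 2, /*tag=*/1, b };

	coalesce_tagged(a->next);	/* b absorbs c: abutting, same tag */
	coalesce_tagged(a);		/* no merge: tags 1 and 2 differ */
	printf("[%ld,%ld) then [%ld,%ld)\n", a->from, a->to,
	       a->next->from, a->next->to);	/* [0,2) then [2,6) */
	free(a->next);
	free(a);
	return 0;
}
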
@@ -1484,7 +1528,7 @@ int PageHeadHuge(struct page *page_head)
        if (!PageHead(page_head))
                return 0;
 
-       return get_compound_page_dtor(page_head) == free_huge_page;
+       return page_head[1].compound_dtor == HUGETLB_PAGE_DTOR;
 }
 
 /*
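
Editor's note: the new test reads the destructor index that compound pages store in their first tail page (page[1].compound_dtor) and compares it with HUGETLB_PAGE_DTOR, instead of resolving the destructor to a function pointer and comparing against free_huge_page. Below is a toy model of that layout; all names (toy_page, TOY_HUGETLB_DTOR, ...) are invented for illustration.

#include <stdbool.h>
#include <stdio.h>

enum toy_dtor { TOY_NULL_DTOR, TOY_COMPOUND_DTOR, TOY_HUGETLB_DTOR };

struct toy_page {
	bool head;
	enum toy_dtor compound_dtor;	/* meaningful in page[1] only */
};

static bool toy_page_head_huge(const struct toy_page *page_head)
{
	if (!page_head->head)
		return false;
	/* compare the stored index, not a resolved function pointer */
	return page_head[1].compound_dtor == TOY_HUGETLB_DTOR;
}

int main(void)
{
	struct toy_page huge[2] = {
		{ .head = true },
		{ .head = false, .compound_dtor = TOY_HUGETLB_DTOR },
	};
	struct toy_page plain[2] = {
		{ .head = true },
		{ .head = false, .compound_dtor = TOY_COMPOUND_DTOR },
	};

	printf("%d %d\n", toy_page_head_huge(huge),
	       toy_page_head_huge(plain));	/* prints "1 0" */
	return 0;
}
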
@@ -1966,6 +2010,7 @@ struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
  * of size 'delta'.
  */
 static int gather_surplus_pages(struct hstate *h, int delta)
+       __must_hold(&hugetlb_lock)
 {
        struct list_head surplus_list;
        struct page *page, *tmp;
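
Editor's note: __must_hold() is one of the kernel's sparse lock annotations: it declares that the function is entered and exited with the named lock held, so the checker stops reporting a context imbalance when the function temporarily drops the lock, as gather_surplus_pages() does with hugetlb_lock around its allocations. The sketch below shows the mechanism in userspace; the macro bodies follow the kernel's __CHECKER__ definitions, but mylock, my_lock(), my_unlock() and do_work() are invented for illustration.

#include <pthread.h>
#include <stdio.h>

#ifdef __CHECKER__			/* defined when sparse runs */
# define __must_hold(x)	__attribute__((context(x, 1, 1)))
# define __acquires(x)	__attribute__((context(x, 0, 1)))
# define __releases(x)	__attribute__((context(x, 1, 0)))
# define __acquire(x)	__context__(x, 1)
# define __release(x)	__context__(x, -1)
#else
# define __must_hold(x)
# define __acquires(x)
# define __releases(x)
# define __acquire(x)	(void)0
# define __release(x)	(void)0
#endif

static pthread_mutex_t mylock = PTHREAD_MUTEX_INITIALIZER;
static int allocated;

static void my_lock(void) __acquires(&mylock)
{
	pthread_mutex_lock(&mylock);
	__acquire(&mylock);		/* tell sparse the context went up */
}

static void my_unlock(void) __releases(&mylock)
{
	__release(&mylock);		/* ...and back down */
	pthread_mutex_unlock(&mylock);
}

/*
 * Entered and exited with 'mylock' held, but drops it in the middle,
 * the way gather_surplus_pages() drops hugetlb_lock while allocating.
 * Without __must_hold(), sparse flags the first my_unlock() here as
 * an "unexpected unlock" context imbalance.
 */
static void do_work(void) __must_hold(&mylock)
{
	my_unlock();
	allocated++;			/* work done without the lock */
	my_lock();
}

int main(void)
{
	my_lock();
	do_work();
	my_unlock();
	printf("%d\n", allocated);	/* prints "1" */
	return 0;
}
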
@@ -5112,7 +5157,7 @@ static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
                                unsigned long *start, unsigned long *end)
 {
-       unsigned long check_addr = *start;
+       unsigned long check_addr;
 
        if (!(vma->vm_flags & VM_MAYSHARE))
                return;