diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 17178db..ac65bb5 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -244,6 +244,60 @@ struct file_region {
        long to;
 };
 
+/* Must be called with resv->lock held. Calling this with count_only == true
+ * will count the number of pages to be added but will not modify the linked
+ * list.
+ */
+static long add_reservation_in_range(struct resv_map *resv, long f, long t,
+                                    bool count_only)
+{
+       long chg = 0;
+       struct list_head *head = &resv->regions;
+       struct file_region *rg = NULL, *trg = NULL, *nrg = NULL;
+
+       /* Locate the region we are before or in. */
+       list_for_each_entry(rg, head, link)
+               if (f <= rg->to)
+                       break;
+
+       /* Round our left edge to the current segment if it encloses us. */
+       if (f > rg->from)
+               f = rg->from;
+
+       chg = t - f;
+
+       /* Check for and consume any regions we now overlap with. */
+       nrg = rg;
+       list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
+               if (&rg->link == head)
+                       break;
+               if (rg->from > t)
+                       break;
+
+               /* We overlap with this area; if it extends further than
+                * us then we must extend ourselves.  Account for its
+                * existing reservation.
+                */
+               if (rg->to > t) {
+                       chg += rg->to - t;
+                       t = rg->to;
+               }
+               chg -= rg->to - rg->from;
+
+               if (!count_only && rg != nrg) {
+                       list_del(&rg->link);
+                       kfree(rg);
+               }
+       }
+
+       if (!count_only) {
+               nrg->from = f;
+               nrg->to = t;
+       }
+
+       return chg;
+}
+
 /*
  * Add the huge page range represented by [f, t) to the reserve
  * map.  Existing regions will be expanded to accommodate the specified
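The charge computed by the new helper in the hunk above is easiest to see on a concrete case. The short userspace program below is an illustration only, not kernel code: struct region, charge_for_range() and the sample numbers are invented for the example, and it models just the count_only accounting (the coalescing of overlapped file_region entries that happens when count_only == false is not modelled). The charge for [f, t) is its length minus whatever the existing, disjoint regions already cover.

/*
 * Userspace model (illustration only) of the charge computed by
 * add_reservation_in_range(): the pages in [f, t) that the existing,
 * disjoint regions do not already cover.
 */
#include <stdio.h>

struct region { long from, to; };	/* half-open interval [from, to) */

static long charge_for_range(const struct region *regs, int nr, long f, long t)
{
	long chg = t - f;
	int i;

	for (i = 0; i < nr; i++) {
		long lo = regs[i].from > f ? regs[i].from : f;
		long hi = regs[i].to < t ? regs[i].to : t;

		if (hi > lo)		/* this part is already reserved */
			chg -= hi - lo;
	}
	return chg;
}

int main(void)
{
	/* Existing reservations: [2, 5) and [7, 9). */
	struct region regs[] = { { 2, 5 }, { 7, 9 } };

	/* Reserving [0, 10) needs 10 - 3 - 2 = 5 new pages. */
	printf("chg = %ld\n", charge_for_range(regs, 2, 0, 10));
	return 0;
}

With those regions, region_chg(resv, 0, 10) and a subsequent region_add(resv, 0, 10) would both report 5, assuming the map does not change in between, since after this patch both route through the same helper.
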
@@ -257,7 +311,7 @@ struct file_region {
 static long region_add(struct resv_map *resv, long f, long t)
 {
        struct list_head *head = &resv->regions;
-       struct file_region *rg, *nrg, *trg;
+       struct file_region *rg, *nrg;
        long add = 0;
 
        spin_lock(&resv->lock);
@@ -287,38 +341,7 @@ static long region_add(struct resv_map *resv, long f, long t)
                goto out_locked;
        }
 
-       /* Round our left edge to the current segment if it encloses us. */
-       if (f > rg->from)
-               f = rg->from;
-
-       /* Check for and consume any regions we now overlap with. */
-       nrg = rg;
-       list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
-               if (&rg->link == head)
-                       break;
-               if (rg->from > t)
-                       break;
-
-               /* If this area reaches higher then extend our area to
-                * include it completely.  If this is not the first area
-                * which we intend to reuse, free it. */
-               if (rg->to > t)
-                       t = rg->to;
-               if (rg != nrg) {
-                       /* Decrement return value by the deleted range.
-                        * Another range will span this area so that by
-                        * end of routine add will be >= zero
-                        */
-                       add -= (rg->to - rg->from);
-                       list_del(&rg->link);
-                       kfree(rg);
-               }
-       }
-
-       add += (nrg->from - f);         /* Added to beginning of region */
-       nrg->from = f;
-       add += t - nrg->to;             /* Added to end of region */
-       nrg->to = t;
+       add = add_reservation_in_range(resv, f, t, false);
 
 out_locked:
        resv->adds_in_progress--;
@@ -345,8 +368,6 @@ out_locked:
  */
 static long region_chg(struct resv_map *resv, long f, long t)
 {
-       struct list_head *head = &resv->regions;
-       struct file_region *rg;
        long chg = 0;
 
        spin_lock(&resv->lock);
@@ -375,34 +396,8 @@ retry_locked:
                goto retry_locked;
        }
 
-       /* Locate the region we are before or in. */
-       list_for_each_entry(rg, head, link)
-               if (f <= rg->to)
-                       break;
+       chg = add_reservation_in_range(resv, f, t, true);
 
-       /* Round our left edge to the current segment if it encloses us. */
-       if (f > rg->from)
-               f = rg->from;
-       chg = t - f;
-
-       /* Check for and consume any regions we now overlap with. */
-       list_for_each_entry(rg, rg->link.prev, link) {
-               if (&rg->link == head)
-                       break;
-               if (rg->from > t)
-                       goto out;
-
-               /* We overlap with this area, if it extends further than
-                * us then we must extend ourselves.  Account for its
-                * existing reservation. */
-               if (rg->to > t) {
-                       chg += rg->to - t;
-                       t = rg->to;
-               }
-               chg -= rg->to - rg->from;
-       }
-
-out:
        spin_unlock(&resv->lock);
        return chg;
 }
@@ -3801,7 +3796,7 @@ retry:
                         * handling userfault.  Reacquire after handling
                         * fault to make calling code simpler.
                         */
-                       hash = hugetlb_fault_mutex_hash(h, mapping, idx);
+                       hash = hugetlb_fault_mutex_hash(mapping, idx);
                        mutex_unlock(&hugetlb_fault_mutex_table[hash]);
                        ret = handle_userfault(&vmf, VM_UFFD_MISSING);
                        mutex_lock(&hugetlb_fault_mutex_table[hash]);
@@ -3928,8 +3923,7 @@ backout_unlocked:
 }
 
 #ifdef CONFIG_SMP
-u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
-                           pgoff_t idx)
+u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
 {
        unsigned long key[2];
        u32 hash;
@@ -3946,8 +3940,7 @@ u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
  * For uniprocessor systems we always use a single mutex, so just
  * return 0 and avoid the hashing overhead.
  */
-u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
-                           pgoff_t idx)
+u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
 {
        return 0;
 }
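The hunks above only show the head of the SMP variant, so the sketch below does not claim to reproduce the kernel's actual hash; it is an invented userspace model of the general shape after the change: only the mapping pointer and the page index feed the hash, which is then masked to an assumed power-of-two table size (NUM_FAULT_MUTEXES stands in for the kernel's num_fault_mutexes).

/*
 * Userspace sketch of the idea behind hugetlb_fault_mutex_hash() once the
 * hstate argument is gone: the mutex index depends only on (mapping, idx).
 * The mixing below is invented for the example and is not the kernel's hash.
 */
#include <stdint.h>
#include <stdio.h>

#define NUM_FAULT_MUTEXES 256u		/* assumed power of two */

static uint32_t fault_mutex_index(const void *mapping, unsigned long idx)
{
	uint64_t k0 = (uint64_t)(uintptr_t)mapping;
	uint64_t k1 = (uint64_t)idx;
	uint64_t mixed = k0 ^ (k1 * 0x9e3779b97f4a7c15ull);	/* illustrative mix only */
	uint32_t hash = (uint32_t)(mixed ^ (mixed >> 32));

	return hash & (NUM_FAULT_MUTEXES - 1);
}

int main(void)
{
	int dummy_mapping;	/* stands in for a struct address_space pointer */

	printf("mutex index = %u\n", fault_mutex_index(&dummy_mapping, 42));
	return 0;
}

Faulting threads that race on the same (mapping, idx) pair therefore still serialize on the same mutex; the hstate argument plays no part in picking it, which is why every caller in this patch simply drops it.
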
@@ -3991,7 +3984,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
         * get spurious allocation failures if two CPUs race to instantiate
         * the same page in the page cache.
         */
-       hash = hugetlb_fault_mutex_hash(h, mapping, idx);
+       hash = hugetlb_fault_mutex_hash(mapping, idx);
        mutex_lock(&hugetlb_fault_mutex_table[hash]);
 
        entry = huge_ptep_get(ptep);
@@ -4345,6 +4338,21 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                                break;
                        }
                }
+
+               /*
+                * If subpage information is not requested, update counters
+                * and skip the same_page loop below.
+                */
+               if (!pages && !vmas && !pfn_offset &&
+                   (vaddr + huge_page_size(h) < vma->vm_end) &&
+                   (remainder >= pages_per_huge_page(h))) {
+                       vaddr += huge_page_size(h);
+                       remainder -= pages_per_huge_page(h);
+                       i += pages_per_huge_page(h);
+                       spin_unlock(ptl);
+                       continue;
+               }
+
 same_page:
                if (pages) {
                        pages[i] = mem_map_offset(page, pfn_offset);
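The new fast path above only fires when the caller asked for no per-subpage output (pages and vmas are NULL and pfn_offset is 0), the next huge-page boundary is still below vma->vm_end, and at least a whole huge page's worth of the request remains; in that case the counters can jump a huge page at a time instead of walking the same_page loop once per base page. The standalone snippet below is only an illustration of that counter arithmetic, with assumed 4 KiB base pages and a 2 MiB huge page; the constants are invented for the example.

/*
 * Illustration of the bookkeeping done by one iteration of the fast path:
 * advance the user address by a whole huge page and credit all of its base
 * pages at once.  Assumes 4 KiB base pages and 2 MiB huge pages.
 */
#include <stdio.h>

#define BASE_PAGE_SIZE		4096UL
#define HUGE_PAGE_SIZE		(2UL * 1024 * 1024)
#define PAGES_PER_HUGE_PAGE	(HUGE_PAGE_SIZE / BASE_PAGE_SIZE)	/* 512 */

int main(void)
{
	unsigned long vaddr = 0x40000000UL;			/* hypothetical huge-page aligned start */
	unsigned long remainder = 3 * PAGES_PER_HUGE_PAGE;	/* caller asked for 3 huge pages' worth */
	unsigned long i = 0;

	while (remainder >= PAGES_PER_HUGE_PAGE) {
		/* mirrors: vaddr += huge_page_size(h); remainder/i adjusted by pages_per_huge_page(h) */
		vaddr += HUGE_PAGE_SIZE;
		remainder -= PAGES_PER_HUGE_PAGE;
		i += PAGES_PER_HUGE_PAGE;
	}

	printf("credited %lu base pages, next vaddr 0x%lx, remainder %lu\n",
	       i, vaddr, remainder);
	return 0;
}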