// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic hugetlb support.
 * (C) Nadia Yvette Chambers, April 2004
 */
#include <linux/list.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/compiler.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/memblock.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/mmdebug.h>
#include <linux/sched/signal.h>
#include <linux/rmap.h>
#include <linux/string_helpers.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/jhash.h>
#include <linux/numa.h>
#include <linux/llist.h>
#include <linux/cma.h>

#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>

#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/node.h>
#include <linux/page_owner.h>
#include "internal.h"

int hugetlb_max_hstate __read_mostly;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];

#ifdef CONFIG_CMA
static struct cma *hugetlb_cma[MAX_NUMNODES];
#endif
static unsigned long hugetlb_cma_size __initdata;

/*
 * Minimum page order among possible hugepage sizes, set to a proper value
 * at boot time.
 */
static unsigned int minimum_order __read_mostly = UINT_MAX;

__initdata LIST_HEAD(huge_boot_pages);

/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static bool __initdata parsed_valid_hugepagesz = true;
static bool __initdata parsed_default_hugepagesz;

/*
 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
 * free_huge_pages, and surplus_huge_pages.
 */
DEFINE_SPINLOCK(hugetlb_lock);

/*
 * Serializes faults on the same logical page.  This is used to
 * prevent spurious OOMs when the hugepage pool is fully utilized.
 */
static int num_fault_mutexes;
struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;

/* Forward declaration */
static int hugetlb_acct_memory(struct hstate *h, long delta);

static inline bool subpool_is_free(struct hugepage_subpool *spool)
{
	if (spool->count)
		return false;
	if (spool->max_hpages != -1)
		return spool->used_hpages == 0;
	if (spool->min_hpages != -1)
		return spool->rsv_hpages == spool->min_hpages;

	return true;
}

static inline void unlock_or_release_subpool(struct hugepage_subpool *spool,
						unsigned long irq_flags)
{
	spin_unlock_irqrestore(&spool->lock, irq_flags);

	/* If no pages are used, and no other handles to the subpool
	 * remain, give up any reservations based on minimum size and
	 * free the subpool */
	if (subpool_is_free(spool)) {
		if (spool->min_hpages != -1)
			hugetlb_acct_memory(spool->hstate,
						-spool->min_hpages);
		kfree(spool);
	}
}

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages)
{
	struct hugepage_subpool *spool;

	spool = kzalloc(sizeof(*spool), GFP_KERNEL);
	if (!spool)
		return NULL;

	spin_lock_init(&spool->lock);
	spool->count = 1;
	spool->max_hpages = max_hpages;
	spool->hstate = h;
	spool->min_hpages = min_hpages;

	if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
		kfree(spool);
		return NULL;
	}
	spool->rsv_hpages = min_hpages;

	return spool;
}

void hugepage_put_subpool(struct hugepage_subpool *spool)
{
	unsigned long flags;

	spin_lock_irqsave(&spool->lock, flags);
	BUG_ON(!spool->count);
	spool->count--;
	unlock_or_release_subpool(spool, flags);
}

/*
 * Subpool accounting for allocating and reserving pages.
 * Return -ENOMEM if there are not enough resources to satisfy the
 * request.  Otherwise, return the number of pages by which the
 * global pools must be adjusted (upward).  The returned value may
 * only be different from the passed value (delta) in the case where
 * a subpool minimum size must be maintained.
 */
static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
				      long delta)
{
	long ret = delta;

	if (!spool)
		return ret;

	spin_lock_irq(&spool->lock);

	if (spool->max_hpages != -1) {		/* maximum size accounting */
		if ((spool->used_hpages + delta) <= spool->max_hpages)
			spool->used_hpages += delta;
		else {
			ret = -ENOMEM;
			goto unlock_ret;
		}
	}

	/* minimum size accounting */
	if (spool->min_hpages != -1 && spool->rsv_hpages) {
		if (delta > spool->rsv_hpages) {
			/*
			 * Asking for more reserves than those already taken on
			 * behalf of subpool.  Return difference.
			 */
			ret = delta - spool->rsv_hpages;
			spool->rsv_hpages = 0;
		} else {
			ret = 0;	/* reserves already accounted for */
			spool->rsv_hpages -= delta;
		}
	}

unlock_ret:
	spin_unlock_irq(&spool->lock);
	return ret;
}
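
/*
 * Worked example (illustrative, not part of the original source): a
 * subpool created with min_hpages = 4 starts with rsv_hpages = 4.  A
 * request for delta = 6 pages returns 6 - 4 = 2, so only 2 pages must
 * be charged against the global pool, and rsv_hpages drops to 0.  A
 * later request for delta = 1 returns 1, as no subpool reserves remain.
 */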

/*
 * Subpool accounting for freeing and unreserving pages.
 * Return the number of global page reservations that must be dropped.
 * The return value may only be different from the passed value (delta)
 * in the case where a subpool minimum size must be maintained.
 */
static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
				       long delta)
{
	long ret = delta;
	unsigned long flags;

	if (!spool)
		return delta;

	spin_lock_irqsave(&spool->lock, flags);

	if (spool->max_hpages != -1)		/* maximum size accounting */
		spool->used_hpages -= delta;

	/* minimum size accounting */
	if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
		if (spool->rsv_hpages + delta <= spool->min_hpages)
			ret = 0;
		else
			ret = spool->rsv_hpages + delta - spool->min_hpages;

		spool->rsv_hpages += delta;
		if (spool->rsv_hpages > spool->min_hpages)
			spool->rsv_hpages = spool->min_hpages;
	}

	/*
	 * If hugetlbfs_put_super couldn't free spool due to an outstanding
	 * quota reference, free it now.
	 */
	unlock_or_release_subpool(spool, flags);

	return ret;
}
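
/*
 * Worked example (illustrative): continuing the get_pages example above
 * with min_hpages = 4 and rsv_hpages = 0, putting back delta = 6 pages
 * while used_hpages sits below the minimum returns 0 + 6 - 4 = 2 global
 * reservations to drop, and rsv_hpages is refilled and capped at 4.
 */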

static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
{
	return HUGETLBFS_SB(inode->i_sb)->spool;
}

static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
{
	return subpool_inode(file_inode(vma->vm_file));
}

/* Helper that removes a struct file_region from the resv_map cache and returns
 * it for use.
 */
static struct file_region *
get_file_region_entry_from_cache(struct resv_map *resv, long from, long to)
{
	struct file_region *nrg = NULL;

	VM_BUG_ON(resv->region_cache_count <= 0);

	resv->region_cache_count--;
	nrg = list_first_entry(&resv->region_cache, struct file_region, link);
	list_del(&nrg->link);

	nrg->from = from;
	nrg->to = to;

	return nrg;
}

static void copy_hugetlb_cgroup_uncharge_info(struct file_region *nrg,
					      struct file_region *rg)
{
#ifdef CONFIG_CGROUP_HUGETLB
	nrg->reservation_counter = rg->reservation_counter;
	nrg->css = rg->css;
	if (rg->css)
		css_get(rg->css);
#endif
}

/* Helper that records hugetlb_cgroup uncharge info. */
static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
						struct hstate *h,
						struct resv_map *resv,
						struct file_region *nrg)
{
#ifdef CONFIG_CGROUP_HUGETLB
	if (h_cg) {
		nrg->reservation_counter =
			&h_cg->rsvd_hugepage[hstate_index(h)];
		nrg->css = &h_cg->css;
		/*
		 * The caller will hold exactly one h_cg->css reference for the
		 * whole contiguous reservation region. But this area might be
		 * scattered when there are already some file_regions residing
		 * in it. As a result, many file_regions may share only one css
		 * reference. In order to ensure that one file_region must hold
		 * exactly one h_cg->css reference, we should do css_get for
		 * each file_region and leave the reference held by the caller
		 * untouched.
		 */
		css_get(&h_cg->css);
		if (!resv->pages_per_hpage)
			resv->pages_per_hpage = pages_per_huge_page(h);
		/* pages_per_hpage should be the same for all entries in
		 * a resv_map.
		 */
		VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h));
	} else {
		nrg->reservation_counter = NULL;
		nrg->css = NULL;
	}
#endif
}

static void put_uncharge_info(struct file_region *rg)
{
#ifdef CONFIG_CGROUP_HUGETLB
	if (rg->css)
		css_put(rg->css);
#endif
}

static bool has_same_uncharge_info(struct file_region *rg,
				   struct file_region *org)
{
#ifdef CONFIG_CGROUP_HUGETLB
	return rg && org &&
	       rg->reservation_counter == org->reservation_counter &&
	       rg->css == org->css;

#else
	return true;
#endif
}

static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
{
	struct file_region *nrg = NULL, *prg = NULL;

	prg = list_prev_entry(rg, link);
	if (&prg->link != &resv->regions && prg->to == rg->from &&
	    has_same_uncharge_info(prg, rg)) {
		prg->to = rg->to;

		list_del(&rg->link);
		put_uncharge_info(rg);
		kfree(rg);

		rg = prg;
	}

	nrg = list_next_entry(rg, link);
	if (&nrg->link != &resv->regions && nrg->from == rg->to &&
	    has_same_uncharge_info(nrg, rg)) {
		nrg->from = rg->from;

		list_del(&rg->link);
		put_uncharge_info(rg);
		kfree(rg);
	}
}
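
/*
 * Example (illustrative): if the map holds [0, 2) and [4, 6) and a new
 * region [2, 4) with matching uncharge info is linked between them,
 * coalesce_file_region() first merges [0, 2) with [2, 4) into [0, 4),
 * then merges that with [4, 6), leaving a single [0, 6) entry.
 */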

static inline long
hugetlb_resv_map_add(struct resv_map *map, struct file_region *rg, long from,
		     long to, struct hstate *h, struct hugetlb_cgroup *cg,
		     long *regions_needed)
{
	struct file_region *nrg;

	if (!regions_needed) {
		nrg = get_file_region_entry_from_cache(map, from, to);
		record_hugetlb_cgroup_uncharge_info(cg, h, map, nrg);
		list_add(&nrg->link, rg->link.prev);
		coalesce_file_region(map, nrg);
	} else
		*regions_needed += 1;

	return to - from;
}

/*
 * Must be called with resv->lock held.
 *
 * Calling this with regions_needed != NULL will count the number of pages
 * to be added but will not modify the linked list. In that case,
 * regions_needed will indicate the number of file_regions needed in the
 * cache to carry out the add for this range.
 */
static long add_reservation_in_range(struct resv_map *resv, long f, long t,
				     struct hugetlb_cgroup *h_cg,
				     struct hstate *h, long *regions_needed)
{
	long add = 0;
	struct list_head *head = &resv->regions;
	long last_accounted_offset = f;
	struct file_region *rg = NULL, *trg = NULL;

	if (regions_needed)
		*regions_needed = 0;

	/* In this loop, we essentially handle an entry for the range
	 * [last_accounted_offset, rg->from), at every iteration, with some
	 * bounds checking.
	 */
	list_for_each_entry_safe(rg, trg, head, link) {
		/* Skip irrelevant regions that start before our range. */
		if (rg->from < f) {
			/* If this region ends after the last accounted offset,
			 * then we need to update last_accounted_offset.
			 */
			if (rg->to > last_accounted_offset)
				last_accounted_offset = rg->to;
			continue;
		}

		/* When we find a region that starts beyond our range, we've
		 * finished.
		 */
		if (rg->from >= t)
			break;

		/* Add an entry for last_accounted_offset -> rg->from, and
		 * update last_accounted_offset.
		 */
		if (rg->from > last_accounted_offset)
			add += hugetlb_resv_map_add(resv, rg,
						    last_accounted_offset,
						    rg->from, h, h_cg,
						    regions_needed);

		last_accounted_offset = rg->to;
	}

	/* Handle the case where our range extends beyond
	 * last_accounted_offset.
	 */
	if (last_accounted_offset < t)
		add += hugetlb_resv_map_add(resv, rg, last_accounted_offset,
					    t, h, h_cg, regions_needed);

	VM_BUG_ON(add < 0);
	return add;
}

/* Must be called with resv->lock acquired. Will drop lock to allocate entries.
 */
static int allocate_file_region_entries(struct resv_map *resv,
					int regions_needed)
	__must_hold(&resv->lock)
{
	struct list_head allocated_regions;
	int to_allocate = 0, i = 0;
	struct file_region *trg = NULL, *rg = NULL;

	VM_BUG_ON(regions_needed < 0);

	INIT_LIST_HEAD(&allocated_regions);

	/*
	 * Check for sufficient descriptors in the cache to accommodate
	 * the number of in progress add operations plus regions_needed.
	 *
	 * This is a while loop because when we drop the lock, some other call
	 * to region_add or region_del may have consumed some region_entries,
	 * so we keep looping here until we finally have enough entries for
	 * (adds_in_progress + regions_needed).
	 */
	while (resv->region_cache_count <
	       (resv->adds_in_progress + regions_needed)) {
		to_allocate = resv->adds_in_progress + regions_needed -
			      resv->region_cache_count;

		/* At this point, we should have enough entries in the cache
		 * for all the existing adds_in_progress. We should only be
		 * needing to allocate for regions_needed.
		 */
		VM_BUG_ON(resv->region_cache_count < resv->adds_in_progress);

		spin_unlock(&resv->lock);
		for (i = 0; i < to_allocate; i++) {
			trg = kmalloc(sizeof(*trg), GFP_KERNEL);
			if (!trg)
				goto out_of_memory;
			list_add(&trg->link, &allocated_regions);
		}

		spin_lock(&resv->lock);

		list_splice(&allocated_regions, &resv->region_cache);
		resv->region_cache_count += to_allocate;
	}

	return 0;

out_of_memory:
	list_for_each_entry_safe(rg, trg, &allocated_regions, link) {
		list_del(&rg->link);
		kfree(rg);
	}
	return -ENOMEM;
}

/*
 * Add the huge page range represented by [f, t) to the reserve
 * map.  Regions will be taken from the cache to fill in this range.
 * Sufficient regions should exist in the cache due to the previous
 * call to region_chg with the same range, but in some cases the cache will not
 * have sufficient entries due to races with other code doing region_add or
 * region_del.  The extra needed entries will be allocated.
 *
 * regions_needed is the out value provided by a previous call to region_chg.
 *
 * Return the number of new huge pages added to the map.  This number is greater
 * than or equal to zero.  If file_region entries needed to be allocated for
 * this operation and we were not able to allocate, it returns -ENOMEM.
 * region_add of regions of length 1 never allocates file_regions and cannot
 * fail; region_chg will always allocate at least 1 entry and a region_add for
 * 1 page will only require at most 1 entry.
 */
static long region_add(struct resv_map *resv, long f, long t,
		       long in_regions_needed, struct hstate *h,
		       struct hugetlb_cgroup *h_cg)
{
	long add = 0, actual_regions_needed = 0;

	spin_lock(&resv->lock);
retry:

	/* Count how many regions are actually needed to execute this add. */
	add_reservation_in_range(resv, f, t, NULL, NULL,
				 &actual_regions_needed);

	/*
	 * Check for sufficient descriptors in the cache to accommodate
	 * this add operation. Note that actual_regions_needed may be greater
	 * than in_regions_needed, as the resv_map may have been modified since
	 * the region_chg call. In this case, we need to make sure that we
	 * allocate extra entries, such that we have enough for all the
	 * existing adds_in_progress, plus the excess needed for this
	 * operation.
	 */
	if (actual_regions_needed > in_regions_needed &&
	    resv->region_cache_count <
		    resv->adds_in_progress +
			    (actual_regions_needed - in_regions_needed)) {
		/* region_add operation of range 1 should never need to
		 * allocate file_region entries.
		 */
		VM_BUG_ON(t - f <= 1);

		if (allocate_file_region_entries(
			    resv, actual_regions_needed - in_regions_needed)) {
			return -ENOMEM;
		}

		goto retry;
	}

	add = add_reservation_in_range(resv, f, t, h_cg, h, NULL);

	resv->adds_in_progress -= in_regions_needed;

	spin_unlock(&resv->lock);
	return add;
}

/*
 * Examine the existing reserve map and determine how many
 * huge pages in the specified range [f, t) are NOT currently
 * represented.  This routine is called before a subsequent
 * call to region_add that will actually modify the reserve
 * map to add the specified range [f, t).  region_chg does
 * not change the number of huge pages represented by the
 * map.  A number of new file_region structures are added to the cache as
 * placeholders for the subsequent region_add call to use. At least 1
 * file_region structure is added.
 *
 * out_regions_needed is the number of regions added to the
 * resv->adds_in_progress.  This value needs to be provided to a follow up call
 * to region_add or region_abort for proper accounting.
 *
 * Returns the number of huge pages that need to be added to the existing
 * reservation map for the range [f, t).  This number is greater than or equal
 * to zero.  -ENOMEM is returned if a new file_region structure or cache entry
 * is needed and can not be allocated.
 */
static long region_chg(struct resv_map *resv, long f, long t,
		       long *out_regions_needed)
{
	long chg = 0;

	spin_lock(&resv->lock);

	/* Count how many hugepages in this range are NOT represented. */
	chg = add_reservation_in_range(resv, f, t, NULL, NULL,
				       out_regions_needed);

	if (*out_regions_needed == 0)
		*out_regions_needed = 1;

	if (allocate_file_region_entries(resv, *out_regions_needed))
		return -ENOMEM;

	resv->adds_in_progress += *out_regions_needed;

	spin_unlock(&resv->lock);
	return chg;
}

/*
 * Abort the in progress add operation.  The adds_in_progress field
 * of the resv_map keeps track of the operations in progress between
 * calls to region_chg and region_add.  Operations are sometimes
 * aborted after the call to region_chg.  In such cases, region_abort
 * is called to decrement the adds_in_progress counter. regions_needed
 * is the value returned by the region_chg call; it is used to decrement
 * the adds_in_progress counter.
 *
 * NOTE: The range arguments [f, t) are not needed or used in this
 * routine.  They are kept to make reading the calling code easier as
 * arguments will match the associated region_chg call.
 */
static void region_abort(struct resv_map *resv, long f, long t,
			 long regions_needed)
{
	spin_lock(&resv->lock);
	VM_BUG_ON(!resv->region_cache_count);
	resv->adds_in_progress -= regions_needed;
	spin_unlock(&resv->lock);
}
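
/*
 * Typical calling pattern (illustrative sketch, not from the original
 * source; the caller supplies resv, f, t and the hstate/cgroup args):
 *
 *	chg = region_chg(resv, f, t, &regions_needed);
 *	if (chg < 0)
 *		return chg;
 *	if (<the reservation cannot actually be committed>)
 *		region_abort(resv, f, t, regions_needed);
 *	else
 *		region_add(resv, f, t, regions_needed, h, h_cg);
 */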

/*
 * Delete the specified range [f, t) from the reserve map.  If the
 * t parameter is LONG_MAX, this indicates that ALL regions after f
 * should be deleted.  Locate the regions which intersect [f, t)
 * and either trim, delete or split the existing regions.
 *
 * Returns the number of huge pages deleted from the reserve map.
 * In the normal case, the return value is zero or more.  In the
 * case where a region must be split, a new region descriptor must
 * be allocated.  If the allocation fails, -ENOMEM will be returned.
 * NOTE: If the parameter t == LONG_MAX, then we will never split
 * a region and possibly return -ENOMEM.  Callers specifying
 * t == LONG_MAX do not need to check for -ENOMEM error.
 */
static long region_del(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg, *trg;
	struct file_region *nrg = NULL;
	long del = 0;

retry:
	spin_lock(&resv->lock);
	list_for_each_entry_safe(rg, trg, head, link) {
		/*
		 * Skip regions before the range to be deleted.  file_region
		 * ranges are normally of the form [from, to).  However, there
		 * may be a "placeholder" entry in the map which is of the form
		 * (from, to) with from == to.  Check for placeholder entries
		 * at the beginning of the range to be deleted.
		 */
		if (rg->to <= f && (rg->to != rg->from || rg->to != f))
			continue;

		if (rg->from >= t)
			break;

		if (f > rg->from && t < rg->to) { /* Must split region */
			/*
			 * Check for an entry in the cache before dropping
			 * lock and attempting allocation.
			 */
			if (!nrg &&
			    resv->region_cache_count > resv->adds_in_progress) {
				nrg = list_first_entry(&resv->region_cache,
							struct file_region,
							link);
				list_del(&nrg->link);
				resv->region_cache_count--;
			}

			if (!nrg) {
				spin_unlock(&resv->lock);
				nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
				if (!nrg)
					return -ENOMEM;
				goto retry;
			}

			del += t - f;
			hugetlb_cgroup_uncharge_file_region(
				resv, rg, t - f, false);

			/* New entry for end of split region */
			nrg->from = t;
			nrg->to = rg->to;

			copy_hugetlb_cgroup_uncharge_info(nrg, rg);

			INIT_LIST_HEAD(&nrg->link);

			/* Original entry is trimmed */
			rg->to = f;

			list_add(&nrg->link, &rg->link);
			nrg = NULL;
			break;
		}

		if (f <= rg->from && t >= rg->to) { /* Remove entire region */
			del += rg->to - rg->from;
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    rg->to - rg->from, true);
			list_del(&rg->link);
			kfree(rg);
			continue;
		}

		if (f <= rg->from) {	/* Trim beginning of region */
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    t - rg->from, false);

			del += t - rg->from;
			rg->from = t;
		} else {		/* Trim end of region */
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    rg->to - f, false);

			del += rg->to - f;
			rg->to = f;
		}
	}

	spin_unlock(&resv->lock);
	kfree(nrg);
	return del;
}
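
/*
 * Example (illustrative): deleting [2, 3) from a map holding [0, 5)
 * takes the split path above: the original entry is trimmed to [0, 2)
 * and a new descriptor [3, 5) is linked after it, with del = 1.
 */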

/*
 * A rare out of memory error was encountered which prevented removal of
 * the reserve map region for a page.  The huge page itself was freed
 * and removed from the page cache.  This routine will adjust the subpool
 * usage count, and the global reserve count if needed.  By incrementing
 * these counts, the reserve map entry which could not be deleted will
 * appear as a "reserved" entry instead of simply dangling with incorrect
 * counts.
 */
void hugetlb_fix_reserve_counts(struct inode *inode)
{
	struct hugepage_subpool *spool = subpool_inode(inode);
	long rsv_adjust;
	bool reserved = false;

	rsv_adjust = hugepage_subpool_get_pages(spool, 1);
	if (rsv_adjust > 0) {
		struct hstate *h = hstate_inode(inode);

		if (!hugetlb_acct_memory(h, 1))
			reserved = true;
	} else if (!rsv_adjust) {
		reserved = true;
	}

	if (!reserved)
		pr_warn("hugetlb: Huge Page Reserved count may go negative.\n");
}

/*
 * Count and return the number of huge pages in the reserve map
 * that intersect with the range [f, t).
 */
static long region_count(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg;
	long chg = 0;

	spin_lock(&resv->lock);
	/* Locate each segment we overlap with, and count that overlap. */
	list_for_each_entry(rg, head, link) {
		long seg_from;
		long seg_to;

		if (rg->to <= f)
			continue;
		if (rg->from >= t)
			break;

		seg_from = max(rg->from, f);
		seg_to = min(rg->to, t);

		chg += seg_to - seg_from;
	}
	spin_unlock(&resv->lock);

	return chg;
}
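
/*
 * Worked example (illustrative): with regions [1, 3) and [5, 8) in the
 * map, region_count(resv, 2, 6) overlaps [2, 3) and [5, 6), so it
 * returns 1 + 1 = 2.
 */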

/*
 * Convert the address within this vma to the page offset within
 * the mapping, in pagecache page units; huge pages here.
 */
static pgoff_t vma_hugecache_offset(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	return ((address - vma->vm_start) >> huge_page_shift(h)) +
			(vma->vm_pgoff >> huge_page_order(h));
}
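
/*
 * Worked example (illustrative, assuming a 4KB base page): for a 2MB
 * hstate (shift 21, order 9), an address 4MB past vm_start contributes
 * 2 huge pages, and a vm_pgoff of 1024 base pages contributes
 * 1024 >> 9 = 2 more, giving a huge-page-sized pagecache index of 4.
 */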

pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address)
{
	return vma_hugecache_offset(hstate_vma(vma), vma, address);
}
EXPORT_SYMBOL_GPL(linear_hugepage_index);

/*
 * Return the size of the pages allocated when backing a VMA. In the majority
 * of cases this will be the same size as used by the page table entries.
 */
unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	if (vma->vm_ops && vma->vm_ops->pagesize)
		return vma->vm_ops->pagesize(vma);
	return PAGE_SIZE;
}
EXPORT_SYMBOL_GPL(vma_kernel_pagesize);

/*
 * Return the page size being used by the MMU to back a VMA. In the majority
 * of cases, the page size used by the kernel matches the MMU size. On
 * architectures where it differs, an architecture-specific 'strong'
 * version of this symbol is required.
 */
__weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return vma_kernel_pagesize(vma);
}

/*
 * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
 * bits of the reservation map pointer, which are always clear due to
 * alignment.
 */
#define HPAGE_RESV_OWNER    (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)

/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
 * is guaranteed to have its future faults succeed.
 *
 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
 * the reserve counters are updated with the hugetlb_lock held. It is safe
 * to reset the VMA at fork() time as it is not in use yet and there is no
 * chance of the global counters getting corrupted as a result of the values.
 *
 * The private mapping reservation is represented in a subtly different
 * manner to a shared mapping.  A shared mapping has a region map associated
 * with the underlying file; this region map represents the backing file
 * pages which have ever had a reservation assigned, and this persists even
 * after the page is instantiated.  A private mapping has a region map
 * associated with the original mmap which is attached to all VMAs which
 * reference it; this region map represents those offsets which have consumed
 * reservation, i.e. where pages have been instantiated.
 */
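
/*
 * Example (illustrative): for a private mapping that owns its reserves,
 * vm_private_data holds ((unsigned long)resv_map | HPAGE_RESV_OWNER).
 * vma_resv_map() below masks with ~HPAGE_RESV_MASK to recover the
 * pointer, and is_vma_resv_set() tests the low flag bits.
 */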
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{
	return (unsigned long)vma->vm_private_data;
}

static void set_vma_private_data(struct vm_area_struct *vma,
							unsigned long value)
{
	vma->vm_private_data = (void *)value;
}

static void
resv_map_set_hugetlb_cgroup_uncharge_info(struct resv_map *resv_map,
					  struct hugetlb_cgroup *h_cg,
					  struct hstate *h)
{
#ifdef CONFIG_CGROUP_HUGETLB
	if (!h_cg || !h) {
		resv_map->reservation_counter = NULL;
		resv_map->pages_per_hpage = 0;
		resv_map->css = NULL;
	} else {
		resv_map->reservation_counter =
			&h_cg->rsvd_hugepage[hstate_index(h)];
		resv_map->pages_per_hpage = pages_per_huge_page(h);
		resv_map->css = &h_cg->css;
	}
#endif
}

struct resv_map *resv_map_alloc(void)
{
	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
	struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);

	if (!resv_map || !rg) {
		kfree(resv_map);
		kfree(rg);
		return NULL;
	}

	kref_init(&resv_map->refs);
	spin_lock_init(&resv_map->lock);
	INIT_LIST_HEAD(&resv_map->regions);

	resv_map->adds_in_progress = 0;
	/*
	 * Initialize these to 0. On shared mappings, 0's here indicate these
	 * fields don't do cgroup accounting. On private mappings, these will be
	 * re-initialized to the proper values, to indicate that hugetlb cgroup
	 * reservations are to be un-charged from here.
	 */
	resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, NULL, NULL);

	INIT_LIST_HEAD(&resv_map->region_cache);
	list_add(&rg->link, &resv_map->region_cache);
	resv_map->region_cache_count = 1;

	return resv_map;
}

void resv_map_release(struct kref *ref)
{
	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
	struct list_head *head = &resv_map->region_cache;
	struct file_region *rg, *trg;

	/* Clear out any active regions before we release the map. */
	region_del(resv_map, 0, LONG_MAX);

	/* ... and any entries left in the cache */
	list_for_each_entry_safe(rg, trg, head, link) {
		list_del(&rg->link);
		kfree(rg);
	}

	VM_BUG_ON(resv_map->adds_in_progress);

	kfree(resv_map);
}

static inline struct resv_map *inode_resv_map(struct inode *inode)
{
	/*
	 * At inode evict time, i_mapping may not point to the original
	 * address space within the inode.  This original address space
	 * contains the pointer to the resv_map.  So, always use the
	 * address space embedded within the inode.
	 * The VERY common case is inode->mapping == &inode->i_data, but
	 * this may not be true for device special inodes.
	 */
	return (struct resv_map *)(&inode->i_data)->private_data;
}

static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	if (vma->vm_flags & VM_MAYSHARE) {
		struct address_space *mapping = vma->vm_file->f_mapping;
		struct inode *inode = mapping->host;

		return inode_resv_map(inode);

	} else {
		return (struct resv_map *)(get_vma_private_data(vma) &
							~HPAGE_RESV_MASK);
	}
}

static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

	set_vma_private_data(vma, (get_vma_private_data(vma) &
				HPAGE_RESV_MASK) | (unsigned long)map);
}

static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}

static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);

	return (get_vma_private_data(vma) & flag) != 0;
}

/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	if (!(vma->vm_flags & VM_MAYSHARE))
		vma->vm_private_data = (void *)0;
}

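/*
 * Summary of the cases handled below (illustrative restatement of the
 * code, not part of the original source):
 *  - VM_NORESERVE: no reserves, except the shared chg == 0 work-around.
 *  - VM_MAYSHARE: reserves exist unless a hole was punched (chg != 0).
 *  - private HPAGE_RESV_OWNER: reserves exist unless consumed (chg != 0).
 *  - otherwise: no reserves.
 */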
/* Returns true if the VMA has associated reserve pages */
static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
{
	if (vma->vm_flags & VM_NORESERVE) {
		/*
		 * This address is already reserved by another process
		 * (chg == 0), so we should decrement the reserved count.
		 * Without decrementing, the reserve count remains after
		 * releasing the inode, because this allocated page will go
		 * into the page cache and be regarded as coming from the
		 * reserved pool in the releasing step.  Currently, we don't
		 * have any other solution to deal with this situation
		 * properly, so add a work-around here.
		 */
		if (vma->vm_flags & VM_MAYSHARE && chg == 0)
			return true;
		else
			return false;
	}

	/* Shared mappings always use reserves */
	if (vma->vm_flags & VM_MAYSHARE) {
		/*
		 * We know VM_NORESERVE is not set.  Therefore, there SHOULD
		 * be a region map for all pages.  The only situation where
		 * there is no region map is if a hole was punched via
		 * fallocate.  In this case, there really are no reserves to
		 * use.  This situation is indicated if chg != 0.
		 */
		if (chg)
			return false;
		else
			return true;
	}

	/*
	 * Only the process that called mmap() has reserves for
	 * private mappings.
	 */
	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		/*
		 * Like the shared case above, a hole punch or truncate
		 * could have been performed on the private mapping.
		 * Examine the value of chg to determine if reserves
		 * actually exist or were previously consumed.
		 * Very Subtle - The value of chg comes from a previous
		 * call to vma_needs_reserves().  The reserve map for
		 * private mappings has different (opposite) semantics
		 * than that of shared mappings.  vma_needs_reserves()
		 * has already taken this difference in semantics into
		 * account.  Therefore, the meaning of chg is the same
		 * as in the shared case above.  Code could easily be
		 * combined, but keeping it separate draws attention to
		 * subtle differences.
		 */
		if (chg)
			return false;
		else
			return true;
	}

	return false;
}

static void enqueue_huge_page(struct hstate *h, struct page *page)
{
	int nid = page_to_nid(page);

	lockdep_assert_held(&hugetlb_lock);
	list_move(&page->lru, &h->hugepage_freelists[nid]);
	h->free_huge_pages++;
	h->free_huge_pages_node[nid]++;
	SetHPageFreed(page);
}

static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
{
	struct page *page;
	bool pin = !!(current->flags & PF_MEMALLOC_PIN);

	lockdep_assert_held(&hugetlb_lock);
	list_for_each_entry(page, &h->hugepage_freelists[nid], lru) {
		if (pin && !is_pinnable_page(page))
			continue;

		if (PageHWPoison(page))
			continue;

		list_move(&page->lru, &h->hugepage_activelist);
		set_page_refcounted(page);
		ClearHPageFreed(page);
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
		return page;
	}

	return NULL;
}

static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid,
		nodemask_t *nmask)
{
	unsigned int cpuset_mems_cookie;
	struct zonelist *zonelist;
	struct zone *zone;
	struct zoneref *z;
	int node = NUMA_NO_NODE;

	zonelist = node_zonelist(nid, gfp_mask);

retry_cpuset:
	cpuset_mems_cookie = read_mems_allowed_begin();
	for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
		struct page *page;

		if (!cpuset_zone_allowed(zone, gfp_mask))
			continue;
		/*
		 * No need to ask again on the same node; the pool is node
		 * rather than zone aware.
		 */
		if (zone_to_nid(zone) == node)
			continue;
		node = zone_to_nid(zone);

		page = dequeue_huge_page_node_exact(h, node);
		if (page)
			return page;
	}
	if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
		goto retry_cpuset;

	return NULL;
}

static struct page *dequeue_huge_page_vma(struct hstate *h,
				struct vm_area_struct *vma,
				unsigned long address, int avoid_reserve,
				long chg)
{
	struct page *page;
	struct mempolicy *mpol;
	gfp_t gfp_mask;
	nodemask_t *nodemask;
	int nid;

	/*
	 * A child process with MAP_PRIVATE mappings created by its parent
	 * has no page reserves. This check ensures that reservations are
	 * not "stolen". The child may still get SIGKILLed.
	 */
	if (!vma_has_reserves(vma, chg) &&
			h->free_huge_pages - h->resv_huge_pages == 0)
		goto err;

	/* If reserves cannot be used, ensure enough pages are in the pool */
	if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
		goto err;

	gfp_mask = htlb_alloc_mask(h);
	nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
	page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
	if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
		SetHPageRestoreReserve(page);
		h->resv_huge_pages--;
	}

	mpol_cond_put(mpol);
	return page;

err:
	return NULL;
}

/*
 * common helper functions for hstate_next_node_to_{alloc|free}.
 * We may have allocated or freed a huge page based on a different
 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
 * be outside of *nodes_allowed.  Ensure that we use an allowed
 * node for alloc or free.
 */
static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	nid = next_node_in(nid, *nodes_allowed);
	VM_BUG_ON(nid >= MAX_NUMNODES);

	return nid;
}

static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	if (!node_isset(nid, *nodes_allowed))
		nid = next_node_allowed(nid, nodes_allowed);
	return nid;
}

/*
 * returns the previously saved node ["this node"] from which to
 * allocate a persistent huge page for the pool and advances the
 * next node from which to allocate, handling wrap at end of node
 * mask.
 */
static int hstate_next_node_to_alloc(struct hstate *h,
					nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);

	return nid;
}

/*
 * helper for remove_pool_huge_page() - return the previously saved
 * node ["this node"] from which to free a huge page.  Advance the
 * next node id whether or not we find a free huge page to free so
 * that the next attempt to free addresses the next node.
 */
static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);

	return nid;
}

#define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_alloc(hs, mask)) || 1);	\
		nr_nodes--)

#define for_each_node_mask_to_free(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_free(hs, mask)) || 1);	\
		nr_nodes--)
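
/*
 * Usage sketch (illustrative; try_alloc_on_node() is a hypothetical
 * callee standing in for the real per-node allocation helper):
 *
 *	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
 *		page = try_alloc_on_node(h, node);
 *		if (page)
 *			break;
 *	}
 *
 * This makes one attempt per allowed node, starting at the round-robin
 * position saved in h->next_nid_to_alloc.
 */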

#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
static void destroy_compound_gigantic_page(struct page *page,
					unsigned int order)
{
	int i;
	int nr_pages = 1 << order;
	struct page *p = page + 1;

	atomic_set(compound_mapcount_ptr(page), 0);
	atomic_set(compound_pincount_ptr(page), 0);

	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
		clear_compound_head(p);
		set_page_refcounted(p);
	}

	set_compound_order(page, 0);
	page[1].compound_nr = 0;
	__ClearPageHead(page);
}

static void free_gigantic_page(struct page *page, unsigned int order)
{
	/*
	 * If the page isn't allocated using the cma allocator,
	 * cma_release() returns false.
	 */
#ifdef CONFIG_CMA
	if (cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order))
		return;
#endif

	free_contig_range(page_to_pfn(page), 1 << order);
}

#ifdef CONFIG_CONTIG_ALLOC
static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
		int nid, nodemask_t *nodemask)
{
	unsigned long nr_pages = pages_per_huge_page(h);
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();

#ifdef CONFIG_CMA
	{
		struct page *page;
		int node;

		if (hugetlb_cma[nid]) {
			page = cma_alloc(hugetlb_cma[nid], nr_pages,
					huge_page_order(h), true);
			if (page)
				return page;
		}

		if (!(gfp_mask & __GFP_THISNODE)) {
			for_each_node_mask(node, *nodemask) {
				if (node == nid || !hugetlb_cma[node])
					continue;

				page = cma_alloc(hugetlb_cma[node], nr_pages,
						huge_page_order(h), true);
				if (page)
					return page;
			}
		}
	}
#endif

	return alloc_contig_pages(nr_pages, gfp_mask, nid, nodemask);
}

static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
static void prep_compound_gigantic_page(struct page *page, unsigned int order);
#else /* !CONFIG_CONTIG_ALLOC */
static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
					int nid, nodemask_t *nodemask)
{
	return NULL;
}
#endif /* CONFIG_CONTIG_ALLOC */

#else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
					int nid, nodemask_t *nodemask)
{
	return NULL;
}
static inline void free_gigantic_page(struct page *page, unsigned int order) { }
static inline void destroy_compound_gigantic_page(struct page *page,
						unsigned int order) { }
#endif

/*
 * Remove hugetlb page from lists, and update dtor so that page appears
 * as just a compound page.  A reference is held on the page.
 *
 * Must be called with hugetlb lock held.
 */
static void remove_hugetlb_page(struct hstate *h, struct page *page,
							bool adjust_surplus)
{
	int nid = page_to_nid(page);

	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page_rsvd(page), page);

	lockdep_assert_held(&hugetlb_lock);
	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
		return;

	list_del(&page->lru);

	if (HPageFreed(page)) {
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
	}
	if (adjust_surplus) {
		h->surplus_huge_pages--;
		h->surplus_huge_pages_node[nid]--;
	}

	set_page_refcounted(page);
	set_compound_page_dtor(page, NULL_COMPOUND_DTOR);

	h->nr_huge_pages--;
	h->nr_huge_pages_node[nid]--;
}

static void update_and_free_page(struct hstate *h, struct page *page)
{
	int i;
	struct page *subpage = page;

	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
		return;

	for (i = 0; i < pages_per_huge_page(h);
	     i++, subpage = mem_map_next(subpage, page, i)) {
		subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
				1 << PG_referenced | 1 << PG_dirty |
				1 << PG_active | 1 << PG_private |
				1 << PG_writeback);
	}
	if (hstate_is_gigantic(h)) {
		destroy_compound_gigantic_page(page, huge_page_order(h));
		free_gigantic_page(page, huge_page_order(h));
	} else {
		__free_pages(page, huge_page_order(h));
	}
}

static void update_and_free_pages_bulk(struct hstate *h, struct list_head *list)
{
	struct page *page, *t_page;

	list_for_each_entry_safe(page, t_page, list, lru) {
		update_and_free_page(h, page);
		cond_resched();
	}
}
1410
1411 struct hstate *size_to_hstate(unsigned long size)
1412 {
1413         struct hstate *h;
1414
1415         for_each_hstate(h) {
1416                 if (huge_page_size(h) == size)
1417                         return h;
1418         }
1419         return NULL;
1420 }
1421
1422 void free_huge_page(struct page *page)
1423 {
1424         /*
1425          * Can't pass hstate in here because it is called from the
1426          * compound page destructor.
1427          */
1428         struct hstate *h = page_hstate(page);
1429         int nid = page_to_nid(page);
1430         struct hugepage_subpool *spool = hugetlb_page_subpool(page);
1431         bool restore_reserve;
1432         unsigned long flags;
1433
1434         VM_BUG_ON_PAGE(page_count(page), page);
1435         VM_BUG_ON_PAGE(page_mapcount(page), page);
1436
1437         hugetlb_set_page_subpool(page, NULL);
1438         page->mapping = NULL;
1439         restore_reserve = HPageRestoreReserve(page);
1440         ClearHPageRestoreReserve(page);
1441
1442         /*
1443          * If HPageRestoreReserve was set on page, page allocation consumed a
1444          * reservation.  If the page was associated with a subpool, there
1445          * would have been a page reserved in the subpool before allocation
1446          * via hugepage_subpool_get_pages().  Since we are 'restoring' the
1447          * reservation, do not call hugepage_subpool_put_pages() as this will
1448          * remove the reserved page from the subpool.
1449          */
1450         if (!restore_reserve) {
1451                 /*
1452                  * A return code of zero implies that the subpool will be
1453                  * under its minimum size if the reservation is not restored
1454                  * after the page is freed.  Therefore, force the
1455                  * restore_reserve operation.
1456                  */
1457                 if (hugepage_subpool_put_pages(spool, 1) == 0)
1458                         restore_reserve = true;
1459         }
1460
1461         spin_lock_irqsave(&hugetlb_lock, flags);
1462         ClearHPageMigratable(page);
1463         hugetlb_cgroup_uncharge_page(hstate_index(h),
1464                                      pages_per_huge_page(h), page);
1465         hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
1466                                           pages_per_huge_page(h), page);
1467         if (restore_reserve)
1468                 h->resv_huge_pages++;
1469
1470         if (HPageTemporary(page)) {
1471                 remove_hugetlb_page(h, page, false);
1472                 spin_unlock_irqrestore(&hugetlb_lock, flags);
1473                 update_and_free_page(h, page);
1474         } else if (h->surplus_huge_pages_node[nid]) {
1475                 /* remove the page from active list */
1476                 remove_hugetlb_page(h, page, true);
1477                 spin_unlock_irqrestore(&hugetlb_lock, flags);
1478                 update_and_free_page(h, page);
1479         } else {
1480                 arch_clear_hugepage_flags(page);
1481                 enqueue_huge_page(h, page);
1482                 spin_unlock_irqrestore(&hugetlb_lock, flags);
1483         }
1484 }
1485
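/*
 * Illustrative sketch (hypothetical caller, not from this file):
 * free_huge_page() is not called directly; it runs as the compound page
 * destructor when the last reference to a hugetlb page is dropped:
 *
 *	page = alloc_huge_page(vma, addr, 0);
 *	...
 *	put_page(page);		final put dispatches to free_huge_page()
 */
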
1486 /*
1487  * Must be called with the hugetlb lock held
1488  */
1489 static void __prep_account_new_huge_page(struct hstate *h, int nid)
1490 {
1491         lockdep_assert_held(&hugetlb_lock);
1492         h->nr_huge_pages++;
1493         h->nr_huge_pages_node[nid]++;
1494 }
1495
1496 static void __prep_new_huge_page(struct page *page)
1497 {
1498         INIT_LIST_HEAD(&page->lru);
1499         set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1500         hugetlb_set_page_subpool(page, NULL);
1501         set_hugetlb_cgroup(page, NULL);
1502         set_hugetlb_cgroup_rsvd(page, NULL);
1503 }
1504
1505 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
1506 {
1507         __prep_new_huge_page(page);
1508         spin_lock_irq(&hugetlb_lock);
1509         __prep_account_new_huge_page(h, nid);
1510         spin_unlock_irq(&hugetlb_lock);
1511 }
1512
1513 static void prep_compound_gigantic_page(struct page *page, unsigned int order)
1514 {
1515         int i;
1516         int nr_pages = 1 << order;
1517         struct page *p = page + 1;
1518
1519         /* we rely on prep_new_huge_page to set the destructor */
1520         set_compound_order(page, order);
1521         __ClearPageReserved(page);
1522         __SetPageHead(page);
1523         for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1524                 /*
1525                  * For gigantic hugepages allocated through bootmem at
1526                  * boot, it's safer to be consistent with the not-gigantic
1527                  * hugepages and clear the PG_reserved bit from all tail pages
1528                  * too.  Otherwise drivers using get_user_pages() to access tail
1529                  * pages may get the reference counting wrong if they see
1530                  * PG_reserved set on a tail page (despite the head page not
1531                  * having PG_reserved set).  Enforcing this consistency between
1532                  * head and tail pages allows drivers to optimize away a check
1533                  * on the head page when they need to know if put_page() is needed
1534                  * after get_user_pages().
1535                  */
1536                 __ClearPageReserved(p);
1537                 set_page_count(p, 0);
1538                 set_compound_head(p, page);
1539         }
1540         atomic_set(compound_mapcount_ptr(page), -1);
1541         atomic_set(compound_pincount_ptr(page), 0);
1542 }
1543
1544 /*
1545  * PageHuge() only returns true for hugetlbfs pages, but not for normal or
1546  * transparent huge pages.  See the PageTransHuge() documentation for more
1547  * details.
1548  */
1549 int PageHuge(struct page *page)
1550 {
1551         if (!PageCompound(page))
1552                 return 0;
1553
1554         page = compound_head(page);
1555         return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
1556 }
1557 EXPORT_SYMBOL_GPL(PageHuge);
1558
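/*
 * Illustrative sketch: code that must treat the two kinds of huge pages
 * differently can combine this with PageTransHuge(), e.g.:
 *
 *	if (PageHuge(page))
 *		handle the hugetlbfs page
 *	else if (PageTransHuge(compound_head(page)))
 *		handle the THP
 */
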
1559 /*
1560  * PageHeadHuge() only returns true for hugetlbfs head page, but not for
1561  * normal or transparent huge pages.
1562  */
1563 int PageHeadHuge(struct page *page_head)
1564 {
1565         if (!PageHead(page_head))
1566                 return 0;
1567
1568         return page_head[1].compound_dtor == HUGETLB_PAGE_DTOR;
1569 }
1570
1571 /*
1572  * Find and lock address space (mapping) in write mode.
1573  *
1574  * Upon entry, the page is locked which means that page_mapping() is
1575  * stable.  Due to locking order, we can only trylock_write.  If we
1576  * cannot get the lock, simply return NULL to the caller.
1577  */
1578 struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage)
1579 {
1580         struct address_space *mapping = page_mapping(hpage);
1581
1582         if (!mapping)
1583                 return mapping;
1584
1585         if (i_mmap_trylock_write(mapping))
1586                 return mapping;
1587
1588         return NULL;
1589 }
1590
1591 pgoff_t __basepage_index(struct page *page)
1592 {
1593         struct page *page_head = compound_head(page);
1594         pgoff_t index = page_index(page_head);
1595         unsigned long compound_idx;
1596
1597         if (!PageHuge(page_head))
1598                 return page_index(page);
1599
1600         if (compound_order(page_head) >= MAX_ORDER)
1601                 compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
1602         else
1603                 compound_idx = page - page_head;
1604
1605         return (index << compound_order(page_head)) + compound_idx;
1606 }
1607
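/*
 * Worked example (assuming a 2MB hugetlb page, i.e. 512 base pages): for
 * the base page at offset 3 inside a head page whose index is 5, the
 * result is (5 << 9) + 3 = 2563.
 */
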
1608 static struct page *alloc_buddy_huge_page(struct hstate *h,
1609                 gfp_t gfp_mask, int nid, nodemask_t *nmask,
1610                 nodemask_t *node_alloc_noretry)
1611 {
1612         int order = huge_page_order(h);
1613         struct page *page;
1614         bool alloc_try_hard = true;
1615
1616         /*
1617          * By default we always try hard to allocate the page with the
1618          * __GFP_RETRY_MAYFAIL flag.  However, if we are allocating pages in
1619          * a loop (to adjust global huge page counts) and previous allocation
1620          * failed, do not continue to try hard on the same node.  Use the
1621          * node_alloc_noretry bitmap to manage this state information.
1622          */
1623         if (node_alloc_noretry && node_isset(nid, *node_alloc_noretry))
1624                 alloc_try_hard = false;
1625         gfp_mask |= __GFP_COMP|__GFP_NOWARN;
1626         if (alloc_try_hard)
1627                 gfp_mask |= __GFP_RETRY_MAYFAIL;
1628         if (nid == NUMA_NO_NODE)
1629                 nid = numa_mem_id();
1630         page = __alloc_pages(gfp_mask, order, nid, nmask);
1631         if (page)
1632                 __count_vm_event(HTLB_BUDDY_PGALLOC);
1633         else
1634                 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1635
1636         /*
1637          * If we did not specify __GFP_RETRY_MAYFAIL but still got a page, this
1638          * indicates an overall state change.  Clear the bit so that we resume
1639          * normal 'try hard' allocations.
1640          */
1641         if (node_alloc_noretry && page && !alloc_try_hard)
1642                 node_clear(nid, *node_alloc_noretry);
1643
1644         /*
1645          * If we tried hard to get a page but failed, set the bit so that
1646          * subsequent attempts will not try as hard until there is an
1647          * overall state change.
1648          */
1649         if (node_alloc_noretry && !page && alloc_try_hard)
1650                 node_set(nid, *node_alloc_noretry);
1651
1652         return page;
1653 }
1654
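/*
 * Illustrative sketch of the noretry protocol (mirroring the callers
 * later in this file): a loop that grows the pool passes one nodemask
 * across iterations so that a node which just failed a 'try hard'
 * allocation is not hammered again until some allocation succeeds:
 *
 *	NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL);
 *	if (node_alloc_noretry)
 *		nodes_clear(*node_alloc_noretry);
 *	while (more pages are wanted)
 *		alloc_pool_huge_page(h, nodes_allowed, node_alloc_noretry);
 */
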
1655 /*
1656  * Common helper to allocate a fresh hugetlb page. All specific allocators
1657  * should use this function to get new hugetlb pages.
1658  */
1659 static struct page *alloc_fresh_huge_page(struct hstate *h,
1660                 gfp_t gfp_mask, int nid, nodemask_t *nmask,
1661                 nodemask_t *node_alloc_noretry)
1662 {
1663         struct page *page;
1664
1665         if (hstate_is_gigantic(h))
1666                 page = alloc_gigantic_page(h, gfp_mask, nid, nmask);
1667         else
1668                 page = alloc_buddy_huge_page(h, gfp_mask,
1669                                 nid, nmask, node_alloc_noretry);
1670         if (!page)
1671                 return NULL;
1672
1673         if (hstate_is_gigantic(h))
1674                 prep_compound_gigantic_page(page, huge_page_order(h));
1675         prep_new_huge_page(h, page, page_to_nid(page));
1676
1677         return page;
1678 }
1679
1680 /*
1681  * Allocates a fresh page to the hugetlb allocator pool in a node-interleaved
1682  * manner.
1683  */
1684 static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
1685                                 nodemask_t *node_alloc_noretry)
1686 {
1687         struct page *page;
1688         int nr_nodes, node;
1689         gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
1690
1691         for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1692                 page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed,
1693                                                 node_alloc_noretry);
1694                 if (page)
1695                         break;
1696         }
1697
1698         if (!page)
1699                 return 0;
1700
1701         put_page(page); /* free it into the hugepage allocator */
1702
1703         return 1;
1704 }
1705
1706 /*
1707  * Remove a huge page from the pool on the next node to free.  Attempt to keep
1708  * persistent huge pages more or less balanced over allowed nodes.
1709  * This routine only 'removes' the hugetlb page.  The caller must make
1710  * an additional call to free the page to low level allocators.
1711  * Called with hugetlb_lock locked.
1712  */
1713 static struct page *remove_pool_huge_page(struct hstate *h,
1714                                                 nodemask_t *nodes_allowed,
1715                                                bool acct_surplus)
1716 {
1717         int nr_nodes, node;
1718         struct page *page = NULL;
1719
1720         lockdep_assert_held(&hugetlb_lock);
1721         for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1722                 /*
1723                  * If we're returning unused surplus pages, only examine
1724                  * nodes with surplus pages.
1725                  */
1726                 if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
1727                     !list_empty(&h->hugepage_freelists[node])) {
1728                         page = list_entry(h->hugepage_freelists[node].next,
1729                                           struct page, lru);
1730                         remove_hugetlb_page(h, page, acct_surplus);
1731                         break;
1732                 }
1733         }
1734
1735         return page;
1736 }
1737
1738 /*
1739  * Dissolve a given free hugepage into free buddy pages. This function does
1740  * nothing for in-use hugepages and non-hugepages.
1741  * This function returns the following values:
1742  *
1743  *  -EBUSY: failed to dissolve the free hugepage or the hugepage is in use
1744  *          (allocated or reserved).
1745  *       0: successfully dissolved the free hugepage or the page is not a
1746  *          hugepage (considered already dissolved).
1747  */
1748 int dissolve_free_huge_page(struct page *page)
1749 {
1750         int rc = -EBUSY;
1751
1752 retry:
1753         /* Don't disrupt the normal path by needlessly holding hugetlb_lock */
1754         if (!PageHuge(page))
1755                 return 0;
1756
1757         spin_lock_irq(&hugetlb_lock);
1758         if (!PageHuge(page)) {
1759                 rc = 0;
1760                 goto out;
1761         }
1762
1763         if (!page_count(page)) {
1764                 struct page *head = compound_head(page);
1765                 struct hstate *h = page_hstate(head);
1766                 if (h->free_huge_pages - h->resv_huge_pages == 0)
1767                         goto out;
1768
1769                 /*
1770                  * We should make sure that the page is already on the free list
1771                  * when it is dissolved.
1772                  */
1773                 if (unlikely(!HPageFreed(head))) {
1774                         spin_unlock_irq(&hugetlb_lock);
1775                         cond_resched();
1776
1777                         /*
1778                          * Theoretically, we should return -EBUSY when we
1779                          * encounter this race.  In fact, we have a chance
1780                          * to successfully dissolve the page if we retry,
1781                          * because the race window is quite small.  Seizing
1782                          * this opportunity increases the success rate of
1783                          * dissolving the page.
1784                          */
1785                         goto retry;
1786                 }
1787
1788                 /*
1789                  * Move the PageHWPoison flag from the head page to the raw error
1790                  * page, which makes any subpage other than the error page reusable.
1791                  */
1792                 if (PageHWPoison(head) && page != head) {
1793                         SetPageHWPoison(page);
1794                         ClearPageHWPoison(head);
1795                 }
1796                 remove_hugetlb_page(h, head, false);
1797                 h->max_huge_pages--;
1798                 spin_unlock_irq(&hugetlb_lock);
1799                 update_and_free_page(h, head);
1800                 return 0;
1801         }
1802 out:
1803         spin_unlock_irq(&hugetlb_lock);
1804         return rc;
1805 }
1806
1807 /*
1808  * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
1809  * make specified memory blocks removable from the system.
1810  * Note that this will dissolve a free gigantic hugepage completely, if any
1811  * part of it lies within the given range.
1812  * Also note that if dissolve_free_huge_page() returns with an error, all
1813  * free hugepages that were dissolved before that error are lost.
1814  */
1815 int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
1816 {
1817         unsigned long pfn;
1818         struct page *page;
1819         int rc = 0;
1820
1821         if (!hugepages_supported())
1822                 return rc;
1823
1824         for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) {
1825                 page = pfn_to_page(pfn);
1826                 rc = dissolve_free_huge_page(page);
1827                 if (rc)
1828                         break;
1829         }
1830
1831         return rc;
1832 }
1833
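/*
 * Worked example: with a minimum hugepage order of 9 (2MB pages made of
 * 512 base pages), the scan above advances the pfn by 512 per iteration,
 * probing one minimum-order-aligned page per step.
 */
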
1834 /*
1835  * Allocates a fresh surplus page from the page allocator.
1836  */
1837 static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
1838                 int nid, nodemask_t *nmask)
1839 {
1840         struct page *page = NULL;
1841
1842         if (hstate_is_gigantic(h))
1843                 return NULL;
1844
1845         spin_lock_irq(&hugetlb_lock);
1846         if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
1847                 goto out_unlock;
1848         spin_unlock_irq(&hugetlb_lock);
1849
1850         page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL);
1851         if (!page)
1852                 return NULL;
1853
1854         spin_lock_irq(&hugetlb_lock);
1855         /*
1856          * We could have raced with the pool size change.
1857          * Double check that and simply deallocate the new page
1858          * if we would end up overcommitting the surpluses.  Abuse the
1859          * temporary page to work around the nasty free_huge_page
1860          * code flow.
1861          */
1862         if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
1863                 SetHPageTemporary(page);
1864                 spin_unlock_irq(&hugetlb_lock);
1865                 put_page(page);
1866                 return NULL;
1867         } else {
1868                 h->surplus_huge_pages++;
1869                 h->surplus_huge_pages_node[page_to_nid(page)]++;
1870         }
1871
1872 out_unlock:
1873         spin_unlock_irq(&hugetlb_lock);
1874
1875         return page;
1876 }
1877
1878 static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
1879                                      int nid, nodemask_t *nmask)
1880 {
1881         struct page *page;
1882
1883         if (hstate_is_gigantic(h))
1884                 return NULL;
1885
1886         page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL);
1887         if (!page)
1888                 return NULL;
1889
1890         /*
1891          * We do not account these pages as surplus because they are only
1892          * temporary and will be released properly on the last reference.
1893          */
1894         SetHPageTemporary(page);
1895
1896         return page;
1897 }
1898
1899 /*
1900  * Use the VMA's mpolicy to allocate a huge page from the buddy.
1901  */
1902 static
1903 struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
1904                 struct vm_area_struct *vma, unsigned long addr)
1905 {
1906         struct page *page;
1907         struct mempolicy *mpol;
1908         gfp_t gfp_mask = htlb_alloc_mask(h);
1909         int nid;
1910         nodemask_t *nodemask;
1911
1912         nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
1913         page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask);
1914         mpol_cond_put(mpol);
1915
1916         return page;
1917 }
1918
1919 /* page migration callback function */
1920 struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
1921                 nodemask_t *nmask, gfp_t gfp_mask)
1922 {
1923         spin_lock_irq(&hugetlb_lock);
1924         if (h->free_huge_pages - h->resv_huge_pages > 0) {
1925                 struct page *page;
1926
1927                 page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask);
1928                 if (page) {
1929                         spin_unlock_irq(&hugetlb_lock);
1930                         return page;
1931                 }
1932         }
1933         spin_unlock_irq(&hugetlb_lock);
1934
1935         return alloc_migrate_huge_page(h, gfp_mask, preferred_nid, nmask);
1936 }
1937
1938 /* mempolicy aware migration callback */
1939 struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
1940                 unsigned long address)
1941 {
1942         struct mempolicy *mpol;
1943         nodemask_t *nodemask;
1944         struct page *page;
1945         gfp_t gfp_mask;
1946         int node;
1947
1948         gfp_mask = htlb_alloc_mask(h);
1949         node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
1950         page = alloc_huge_page_nodemask(h, node, nodemask, gfp_mask);
1951         mpol_cond_put(mpol);
1952
1953         return page;
1954 }
1955
1956 /*
1957  * Increase the hugetlb pool such that it can accommodate a reservation
1958  * of size 'delta'.
1959  */
1960 static int gather_surplus_pages(struct hstate *h, long delta)
1961         __must_hold(&hugetlb_lock)
1962 {
1963         struct list_head surplus_list;
1964         struct page *page, *tmp;
1965         int ret;
1966         long i;
1967         long needed, allocated;
1968         bool alloc_ok = true;
1969
1970         lockdep_assert_held(&hugetlb_lock);
1971         needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
1972         if (needed <= 0) {
1973                 h->resv_huge_pages += delta;
1974                 return 0;
1975         }
1976
1977         allocated = 0;
1978         INIT_LIST_HEAD(&surplus_list);
1979
1980         ret = -ENOMEM;
1981 retry:
1982         spin_unlock_irq(&hugetlb_lock);
1983         for (i = 0; i < needed; i++) {
1984                 page = alloc_surplus_huge_page(h, htlb_alloc_mask(h),
1985                                 NUMA_NO_NODE, NULL);
1986                 if (!page) {
1987                         alloc_ok = false;
1988                         break;
1989                 }
1990                 list_add(&page->lru, &surplus_list);
1991                 cond_resched();
1992         }
1993         allocated += i;
1994
1995         /*
1996          * After retaking hugetlb_lock, we need to recalculate 'needed'
1997          * because either resv_huge_pages or free_huge_pages may have changed.
1998          */
1999         spin_lock_irq(&hugetlb_lock);
2000         needed = (h->resv_huge_pages + delta) -
2001                         (h->free_huge_pages + allocated);
2002         if (needed > 0) {
2003                 if (alloc_ok)
2004                         goto retry;
2005                 /*
2006                  * We were not able to allocate enough pages to
2007                  * satisfy the entire reservation so we free what
2008                  * we've allocated so far.
2009                  */
2010                 goto free;
2011         }
2012         /*
2013          * The surplus_list now contains _at_least_ the number of extra pages
2014          * needed to accommodate the reservation.  Add the appropriate number
2015          * of pages to the hugetlb pool and free the extras back to the buddy
2016          * allocator.  Commit the entire reservation here to prevent another
2017          * process from stealing the pages as they are added to the pool but
2018          * before they are reserved.
2019          */
2020         needed += allocated;
2021         h->resv_huge_pages += delta;
2022         ret = 0;
2023
2024         /* Free the needed pages to the hugetlb pool */
2025         list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
2026                 int zeroed;
2027
2028                 if ((--needed) < 0)
2029                         break;
2030                 /*
2031                  * This page is now managed by the hugetlb allocator and has
2032                  * no users -- drop the buddy allocator's reference.
2033                  */
2034                 zeroed = put_page_testzero(page);
2035                 VM_BUG_ON_PAGE(!zeroed, page);
2036                 enqueue_huge_page(h, page);
2037         }
2038 free:
2039         spin_unlock_irq(&hugetlb_lock);
2040
2041         /* Free unnecessary surplus pages to the buddy allocator */
2042         list_for_each_entry_safe(page, tmp, &surplus_list, lru)
2043                 put_page(page);
2044         spin_lock_irq(&hugetlb_lock);
2045
2046         return ret;
2047 }
2048
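/*
 * Worked example: with resv_huge_pages = 10, free_huge_pages = 4 and
 * delta = 2, the first pass computes needed = (10 + 2) - 4 = 8 and
 * allocates 8 surplus pages with the lock dropped.  If free_huge_pages
 * grew to 6 in the meantime, the recheck gives
 * needed = 12 - (6 + 8) = -2, so only 6 of the 8 pages are enqueued into
 * the pool and the remaining 2 are handed back to the buddy allocator.
 */
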
2049 /*
2050  * This routine has two main purposes:
2051  * 1) Decrement the reservation count (resv_huge_pages) by the value passed
2052  *    in unused_resv_pages.  This corresponds to the prior adjustments made
2053  *    to the associated reservation map.
2054  * 2) Free any unused surplus pages that may have been allocated to satisfy
2055  *    the reservation.  As many as unused_resv_pages may be freed.
2056  */
2057 static void return_unused_surplus_pages(struct hstate *h,
2058                                         unsigned long unused_resv_pages)
2059 {
2060         unsigned long nr_pages;
2061         struct page *page;
2062         LIST_HEAD(page_list);
2063
2064         lockdep_assert_held(&hugetlb_lock);
2065         /* Uncommit the reservation */
2066         h->resv_huge_pages -= unused_resv_pages;
2067
2068         /* Cannot return gigantic pages currently */
2069         if (hstate_is_gigantic(h))
2070                 goto out;
2071
2072         /*
2073          * Part (or even all) of the reservation could have been backed
2074          * by pre-allocated pages. Only free surplus pages.
2075          */
2076         nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
2077
2078         /*
2079          * We want to release as many surplus pages as possible, spread
2080          * evenly across all nodes with memory. Iterate across these nodes
2081          * until we can no longer free unreserved surplus pages. This occurs
2082          * when the nodes with surplus pages have no free pages.
2083          * remove_pool_huge_page() will balance the freed pages across the
2084          * on-line nodes with memory and will handle the hstate accounting.
2085          */
2086         while (nr_pages--) {
2087                 page = remove_pool_huge_page(h, &node_states[N_MEMORY], true);
2088                 if (!page)
2089                         goto out;
2090
2091                 list_add(&page->lru, &page_list);
2092         }
2093
2094 out:
2095         spin_unlock_irq(&hugetlb_lock);
2096         update_and_free_pages_bulk(h, &page_list);
2097         spin_lock_irq(&hugetlb_lock);
2098 }
2099
2100
2101 /*
2102  * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
2103  * are used by the huge page allocation routines to manage reservations.
2104  *
2105  * vma_needs_reservation is called to determine if the huge page at addr
2106  * within the vma has an associated reservation.  If a reservation is
2107  * needed, the value 1 is returned.  The caller is then responsible for
2108  * managing the global reservation and subpool usage counts.  After
2109  * the huge page has been allocated, vma_commit_reservation is called
2110  * to add the page to the reservation map.  If the page allocation fails,
2111  * the reservation must be ended instead of committed.  vma_end_reservation
2112  * is called in such cases.
2113  *
2114  * In the normal case, vma_commit_reservation returns the same value
2115  * as the preceding vma_needs_reservation call.  The only time this
2116  * is not the case is if a reserve map was changed between calls.  It
2117  * is the responsibility of the caller to notice the difference and
2118  * take appropriate action.
2119  *
2120  * vma_add_reservation is used in error paths where a reservation must
2121  * be restored when a newly allocated huge page must be freed.  It is
2122  * to be called after calling vma_needs_reservation to determine if a
2123  * reservation exists.
2124  *
2125  * vma_del_reservation is used in error paths where an entry in the reserve
2126  * map was created during huge page allocation and must be removed.  It is to
2127  * be called after calling vma_needs_reservation to determine if a reservation
2128  * exists.
2129  */
2130 enum vma_resv_mode {
2131         VMA_NEEDS_RESV,
2132         VMA_COMMIT_RESV,
2133         VMA_END_RESV,
2134         VMA_ADD_RESV,
2135         VMA_DEL_RESV,
2136 };
2137 static long __vma_reservation_common(struct hstate *h,
2138                                 struct vm_area_struct *vma, unsigned long addr,
2139                                 enum vma_resv_mode mode)
2140 {
2141         struct resv_map *resv;
2142         pgoff_t idx;
2143         long ret;
2144         long dummy_out_regions_needed;
2145
2146         resv = vma_resv_map(vma);
2147         if (!resv)
2148                 return 1;
2149
2150         idx = vma_hugecache_offset(h, vma, addr);
2151         switch (mode) {
2152         case VMA_NEEDS_RESV:
2153                 ret = region_chg(resv, idx, idx + 1, &dummy_out_regions_needed);
2154                 /* We assume that vma_reservation_* routines always operate on
2155                  * 1 page, and that adding a 1 page entry to the resv map can only
2156                  * ever require 1 region.
2157                  */
2158                 VM_BUG_ON(dummy_out_regions_needed != 1);
2159                 break;
2160         case VMA_COMMIT_RESV:
2161                 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2162                 /* region_add calls of range 1 should never fail. */
2163                 VM_BUG_ON(ret < 0);
2164                 break;
2165         case VMA_END_RESV:
2166                 region_abort(resv, idx, idx + 1, 1);
2167                 ret = 0;
2168                 break;
2169         case VMA_ADD_RESV:
2170                 if (vma->vm_flags & VM_MAYSHARE) {
2171                         ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2172                         /* region_add calls of range 1 should never fail. */
2173                         VM_BUG_ON(ret < 0);
2174                 } else {
2175                         region_abort(resv, idx, idx + 1, 1);
2176                         ret = region_del(resv, idx, idx + 1);
2177                 }
2178                 break;
2179         case VMA_DEL_RESV:
2180                 if (vma->vm_flags & VM_MAYSHARE) {
2181                         region_abort(resv, idx, idx + 1, 1);
2182                         ret = region_del(resv, idx, idx + 1);
2183                 } else {
2184                         ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2185                         /* region_add calls of range 1 should never fail. */
2186                         VM_BUG_ON(ret < 0);
2187                 }
2188                 break;
2189         default:
2190                 BUG();
2191         }
2192
2193         if (vma->vm_flags & VM_MAYSHARE || mode == VMA_DEL_RESV)
2194                 return ret;
2195         /*
2196          * We know the private mapping must have HPAGE_RESV_OWNER set.
2197          *
2198          * In most cases, reserves always exist for private mappings.
2199          * However, a file associated with the mapping could have been
2200          * hole punched or truncated after reserves were consumed.
2201          * A subsequent fault on such a range will not use reserves.
2202          * Subtle - The reserve map for private mappings has the
2203          * opposite meaning from that of shared mappings.  If NO
2204          * entry is in the reserve map, it means a reservation exists.
2205          * If an entry exists in the reserve map, it means the
2206          * reservation has already been consumed.  As a result, the
2207          * return value of this routine is the opposite of the
2208          * value returned from reserve map manipulation routines above.
2209          */
2210         if (ret > 0)
2211                 return 0;
2212         if (ret == 0)
2213                 return 1;
2214         return ret;
2215 }
2216
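/*
 * Worked example of the inverted private-mapping semantics: for a
 * private mapping, region_chg() returns 1 when no entry exists, which
 * means the reservation has NOT yet been consumed, so
 * vma_needs_reservation() returns 0 (no new reservation needed).
 * Conversely, region_chg() returning 0 (entry present, reservation
 * already consumed) makes vma_needs_reservation() return 1.
 */
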
2217 static long vma_needs_reservation(struct hstate *h,
2218                         struct vm_area_struct *vma, unsigned long addr)
2219 {
2220         return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
2221 }
2222
2223 static long vma_commit_reservation(struct hstate *h,
2224                         struct vm_area_struct *vma, unsigned long addr)
2225 {
2226         return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
2227 }
2228
2229 static void vma_end_reservation(struct hstate *h,
2230                         struct vm_area_struct *vma, unsigned long addr)
2231 {
2232         (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
2233 }
2234
2235 static long vma_add_reservation(struct hstate *h,
2236                         struct vm_area_struct *vma, unsigned long addr)
2237 {
2238         return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
2239 }
2240
2241 static long vma_del_reservation(struct hstate *h,
2242                         struct vm_area_struct *vma, unsigned long addr)
2243 {
2244         return __vma_reservation_common(h, vma, addr, VMA_DEL_RESV);
2245 }
2246
2247 /*
2248  * This routine is called to restore reservation information on error paths.
2249  * It should ONLY be called for pages allocated via alloc_huge_page(), and
2250  * the hugetlb mutex should remain held when calling this routine.
2251  *
2252  * It handles two specific cases:
2253  * 1) A reservation was in place and the page consumed the reservation.
2254  *    HPageRestoreReserve is set in the page.
2255  * 2) No reservation was in place for the page, so HPageRestoreReserve is
2256  *    not set.  However, alloc_huge_page always updates the reserve map.
2257  *
2258  * In case 1, free_huge_page later in the error path will increment the
2259  * global reserve count.  But, free_huge_page does not have enough context
2260  * to adjust the reservation map.  This case deals primarily with private
2261  * mappings.  Adjust the reserve map here to be consistent with global
2262  * reserve count adjustments to be made by free_huge_page.  Make sure the
2263  * reserve map indicates there is a reservation present.
2264  *
2265  * In case 2, simply undo reserve map modifications done by alloc_huge_page.
2266  */
2267 void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
2268                         unsigned long address, struct page *page)
2269 {
2270         long rc = vma_needs_reservation(h, vma, address);
2271
2272         if (HPageRestoreReserve(page)) {
2273                 if (unlikely(rc < 0))
2274                         /*
2275                          * Rare out of memory condition in reserve map
2276                          * manipulation.  Clear HPageRestoreReserve so that
2277                          * global reserve count will not be incremented
2278                          * by free_huge_page.  This will make it appear
2279                          * as though the reservation for this page was
2280                          * consumed.  This may prevent the task from
2281                          * faulting in the page at a later time.  This
2282                          * is better than inconsistent global huge page
2283                          * accounting of reserve counts.
2284                          */
2285                         ClearHPageRestoreReserve(page);
2286                 else if (rc)
2287                         (void)vma_add_reservation(h, vma, address);
2288                 else
2289                         vma_end_reservation(h, vma, address);
2290         } else {
2291                 if (!rc) {
2292                         /*
2293                          * This indicates there is an entry in the reserve map
2294                          * added by alloc_huge_page.  We know it was added
2295                          * before the alloc_huge_page call, otherwise
2296                          * HPageRestoreReserve would be set on the page.
2297                          * Remove the entry so that a subsequent allocation
2298                          * does not consume a reservation.
2299                          */
2300                         rc = vma_del_reservation(h, vma, address);
2301                         if (rc < 0)
2302                                 /*
2303                                  * VERY rare out of memory condition.  Since
2304                                  * we cannot delete the entry, set
2305                                  * HPageRestoreReserve so that the reserve
2306                                  * count will be incremented when the page
2307                                  * is freed.  This reserve will be consumed
2308                                  * on a subsequent allocation.
2309                                  */
2310                                 SetHPageRestoreReserve(page);
2311                 } else if (rc < 0) {
2312                         /*
2313                          * Rare out of memory condition from
2314                          * vma_needs_reservation call.  Memory allocation is
2315                          * only attempted if a new entry is needed.  Therefore,
2316                          * this implies there is not an entry in the
2317                          * reserve map.
2318                          *
2319                          * For shared mappings, no entry in the map indicates
2320                          * no reservation.  We are done.
2321                          */
2322                         if (!(vma->vm_flags & VM_MAYSHARE))
2323                                 /*
2324                                  * For private mappings, no entry indicates
2325                                  * a reservation is present.  Since we
2326                                  * cannot add an entry, set HPageRestoreReserve
2327                                  * on the page so reserve count will be
2328                                  * incremented when freed.  This reserve will
2329                                  * be consumed on a subsequent allocation.
2330                                  */
2331                                 SetHPageRestoreReserve(page);
2332                 } else
2333                         /*
2334                          * No reservation present, do nothing
2335                          */
2336                          vma_end_reservation(h, vma, address);
2337         }
2338 }
2339
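/*
 * Illustrative sketch (hypothetical caller, not from this file): an
 * error path that must back out a freshly allocated page looks roughly
 * like
 *
 *	page = alloc_huge_page(vma, addr, 0);
 *	if (copy_or_map_failed) {
 *		restore_reserve_on_error(h, vma, addr, page);
 *		put_page(page);
 *	}
 *
 * where copy_or_map_failed stands in for whatever operation failed.
 */
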
2340 /*
2341  * alloc_and_dissolve_huge_page - Allocate a new page and dissolve the old one
2342  * @h: struct hstate old page belongs to
2343  * @old_page: Old page to dissolve
2344  * @list: List to isolate the page in case we need to
2345  * Returns 0 on success, otherwise a negated error code.
2346  */
2347 static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page,
2348                                         struct list_head *list)
2349 {
2350         gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
2351         int nid = page_to_nid(old_page);
2352         struct page *new_page;
2353         int ret = 0;
2354
2355         /*
2356          * Before dissolving the page, we need to allocate a new one for the
2357          * pool to remain stable.  Using alloc_buddy_huge_page() lets us avoid
2358          * prep_new_huge_page() and touching any counters.  This simplifies
2359          * things and lets us do the whole thing under the
2360          * lock.
2361          */
2362         new_page = alloc_buddy_huge_page(h, gfp_mask, nid, NULL, NULL);
2363         if (!new_page)
2364                 return -ENOMEM;
2365
2366 retry:
2367         spin_lock_irq(&hugetlb_lock);
2368         if (!PageHuge(old_page)) {
2369                 /*
2370                  * Freed from under us. Drop new_page too.
2371                  */
2372                 goto free_new;
2373         } else if (page_count(old_page)) {
2374                 /*
2375                  * Someone has grabbed the page, try to isolate it here.
2376                  * Fail with -EBUSY if not possible.
2377                  */
2378                 spin_unlock_irq(&hugetlb_lock);
2379                 if (!isolate_huge_page(old_page, list))
2380                         ret = -EBUSY;
2381                 spin_lock_irq(&hugetlb_lock);
2382                 goto free_new;
2383         } else if (!HPageFreed(old_page)) {
2384                 /*
2385                  * Page's refcount is 0 but it has not been enqueued in the
2386                  * freelist yet. Race window is small, so we can succeed here if
2387                  * we retry.
2388                  */
2389                 spin_unlock_irq(&hugetlb_lock);
2390                 cond_resched();
2391                 goto retry;
2392         } else {
2393                 /*
2394                  * Ok, old_page is still a genuine free hugepage. Remove it from
2395                  * the freelist and decrease the counters. These will be
2396                  * incremented again when calling __prep_account_new_huge_page()
2397                  * and enqueue_huge_page() for new_page. The counters will remain
2398                  * stable since this happens under the lock.
2399                  */
2400                 remove_hugetlb_page(h, old_page, false);
2401
2402                 /*
2403                  * new_page needs to be initialized with the standard hugetlb
2404                  * state. This is normally done by prep_new_huge_page() but
2405                  * that takes hugetlb_lock which is already held so we need to
2406                  * open code it here.
2407                  * The reference count trick is needed because the allocator gives
2408                  * us a referenced page but the pool requires pages with 0 refcount.
2409                  */
2410                 __prep_new_huge_page(new_page);
2411                 __prep_account_new_huge_page(h, nid);
2412                 page_ref_dec(new_page);
2413                 enqueue_huge_page(h, new_page);
2414
2415                 /*
2416                  * Pages have been replaced, we can safely free the old one.
2417                  */
2418                 spin_unlock_irq(&hugetlb_lock);
2419                 update_and_free_page(h, old_page);
2420         }
2421
2422         return ret;
2423
2424 free_new:
2425         spin_unlock_irq(&hugetlb_lock);
2426         __free_pages(new_page, huge_page_order(h));
2427
2428         return ret;
2429 }
2430
2431 int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list)
2432 {
2433         struct hstate *h;
2434         struct page *head;
2435         int ret = -EBUSY;
2436
2437         /*
2438          * The page might have been dissolved from under our feet, so make sure
2439          * to carefully check the state under the lock.
2440          * Return success when racing, as if we had dissolved the page ourselves.
2441          */
2442         spin_lock_irq(&hugetlb_lock);
2443         if (PageHuge(page)) {
2444                 head = compound_head(page);
2445                 h = page_hstate(head);
2446         } else {
2447                 spin_unlock_irq(&hugetlb_lock);
2448                 return 0;
2449         }
2450         spin_unlock_irq(&hugetlb_lock);
2451
2452         /*
2453          * Fence off gigantic pages as there is a cyclic dependency between
2454          * alloc_contig_range and them. Return -ENOMEM as this has the effect
2455          * of bailing out right away without further retrying.
2456          */
2457         if (hstate_is_gigantic(h))
2458                 return -ENOMEM;
2459
2460         if (page_count(head) && isolate_huge_page(head, list))
2461                 ret = 0;
2462         else if (!page_count(head))
2463                 ret = alloc_and_dissolve_huge_page(h, head, list);
2464
2465         return ret;
2466 }
2467
2468 struct page *alloc_huge_page(struct vm_area_struct *vma,
2469                                     unsigned long addr, int avoid_reserve)
2470 {
2471         struct hugepage_subpool *spool = subpool_vma(vma);
2472         struct hstate *h = hstate_vma(vma);
2473         struct page *page;
2474         long map_chg, map_commit;
2475         long gbl_chg;
2476         int ret, idx;
2477         struct hugetlb_cgroup *h_cg;
2478         bool deferred_reserve;
2479
2480         idx = hstate_index(h);
2481         /*
2482          * Examine the region/reserve map to determine if the process
2483          * has a reservation for the page to be allocated.  A return
2484          * code of zero indicates a reservation exists (no change).
2485          */
2486         map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
2487         if (map_chg < 0)
2488                 return ERR_PTR(-ENOMEM);
2489
2490         /*
2491          * Processes that did not create the mapping will have no
2492          * reserves as indicated by the region/reserve map. Check
2493          * that the allocation will not exceed the subpool limit.
2494          * Allocations for MAP_NORESERVE mappings also need to be
2495          * checked against any subpool limit.
2496          */
2497         if (map_chg || avoid_reserve) {
2498                 gbl_chg = hugepage_subpool_get_pages(spool, 1);
2499                 if (gbl_chg < 0) {
2500                         vma_end_reservation(h, vma, addr);
2501                         return ERR_PTR(-ENOSPC);
2502                 }
2503
2504                 /*
2505                  * Even though there was no reservation in the region/reserve
2506                  * map, there could be reservations associated with the
2507                  * subpool that can be used.  This would be indicated if the
2508                  * return value of hugepage_subpool_get_pages() is zero.
2509                  * However, if avoid_reserve is specified we still avoid even
2510                  * the subpool reservations.
2511                  */
2512                 if (avoid_reserve)
2513                         gbl_chg = 1;
2514         }
2515
2516         /* If this allocation is not consuming a reservation, charge it now */
2518         deferred_reserve = map_chg || avoid_reserve;
2519         if (deferred_reserve) {
2520                 ret = hugetlb_cgroup_charge_cgroup_rsvd(
2521                         idx, pages_per_huge_page(h), &h_cg);
2522                 if (ret)
2523                         goto out_subpool_put;
2524         }
2525
2526         ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
2527         if (ret)
2528                 goto out_uncharge_cgroup_reservation;
2529
2530         spin_lock_irq(&hugetlb_lock);
2531         /*
2532          * gbl_chg is passed to indicate whether or not a page must be taken
2533          * from the global free pool (global change).  gbl_chg == 0 indicates
2534          * a reservation exists for the allocation.
2535          */
2536         page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
2537         if (!page) {
2538                 spin_unlock_irq(&hugetlb_lock);
2539                 page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
2540                 if (!page)
2541                         goto out_uncharge_cgroup;
2542                 if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
2543                         SetHPageRestoreReserve(page);
2544                         h->resv_huge_pages--;
2545                 }
2546                 spin_lock_irq(&hugetlb_lock);
2547                 list_add(&page->lru, &h->hugepage_activelist);
2548                 /* Fall through */
2549         }
2550         hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
2551         /* If allocation is not consuming a reservation, also store the
2552          * hugetlb_cgroup pointer on the page.
2553          */
2554         if (deferred_reserve) {
2555                 hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h),
2556                                                   h_cg, page);
2557         }
2558
2559         spin_unlock_irq(&hugetlb_lock);
2560
2561         hugetlb_set_page_subpool(page, spool);
2562
2563         map_commit = vma_commit_reservation(h, vma, addr);
2564         if (unlikely(map_chg > map_commit)) {
2565                 /*
2566                  * The page was added to the reservation map between
2567                  * vma_needs_reservation and vma_commit_reservation.
2568                  * This indicates a race with hugetlb_reserve_pages.
2569                  * Adjust for the subpool count incremented above AND
2570                  * in hugetlb_reserve_pages for the same page.  Also,
2571                  * the reservation count added in hugetlb_reserve_pages
2572                  * no longer applies.
2573                  */
2574                 long rsv_adjust;
2575
2576                 rsv_adjust = hugepage_subpool_put_pages(spool, 1);
2577                 hugetlb_acct_memory(h, -rsv_adjust);
2578                 if (deferred_reserve)
2579                         hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
2580                                         pages_per_huge_page(h), page);
2581         }
2582         return page;
2583
2584 out_uncharge_cgroup:
2585         hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
2586 out_uncharge_cgroup_reservation:
2587         if (deferred_reserve)
2588                 hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h),
2589                                                     h_cg);
2590 out_subpool_put:
2591         if (map_chg || avoid_reserve)
2592                 hugepage_subpool_put_pages(spool, 1);
2593         vma_end_reservation(h, vma, addr);
2594         return ERR_PTR(-ENOSPC);
2595 }
2596
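/*
 * Worked example of the two charge values: with an existing reservation,
 * vma_needs_reservation() returns 0, so map_chg == gbl_chg == 0 and
 * neither the subpool nor the global counts are charged here.  Without a
 * reservation (map_chg == 1), one page is charged to the subpool first;
 * if hugepage_subpool_get_pages() returns 0, the subpool's own reserves
 * cover the page and gbl_chg stays 0, so the dequeue may still draw from
 * the globally reserved pool.
 */
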
2597 int alloc_bootmem_huge_page(struct hstate *h)
2598         __attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
2599 int __alloc_bootmem_huge_page(struct hstate *h)
2600 {
2601         struct huge_bootmem_page *m;
2602         int nr_nodes, node;
2603
2604         for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
2605                 void *addr;
2606
2607                 addr = memblock_alloc_try_nid_raw(
2608                                 huge_page_size(h), huge_page_size(h),
2609                                 0, MEMBLOCK_ALLOC_ACCESSIBLE, node);
2610                 if (addr) {
2611                         /*
2612                          * Use the beginning of the huge page to store the
2613                          * huge_bootmem_page struct (until gather_bootmem
2614                          * puts them into the mem_map).
2615                          */
2616                         m = addr;
2617                         goto found;
2618                 }
2619         }
2620         return 0;
2621
2622 found:
2623         BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
2624         /* Put them into a private list first because mem_map is not up yet */
2625         INIT_LIST_HEAD(&m->list);
2626         list_add(&m->list, &huge_boot_pages);
2627         m->hstate = h;
2628         return 1;
2629 }
2630
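/*
 * Illustrative note: this path backs boot-time requests such as
 *
 *	hugepagesz=1G hugepages=4
 *
 * on the kernel command line, where gigantic pages must come from
 * memblock because the buddy allocator cannot serve them yet.
 */
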
2631 static void __init prep_compound_huge_page(struct page *page,
2632                 unsigned int order)
2633 {
2634         if (unlikely(order > (MAX_ORDER - 1)))
2635                 prep_compound_gigantic_page(page, order);
2636         else
2637                 prep_compound_page(page, order);
2638 }
2639
2640 /* Put bootmem huge pages into the standard lists after mem_map is up */
2641 static void __init gather_bootmem_prealloc(void)
2642 {
2643         struct huge_bootmem_page *m;
2644
2645         list_for_each_entry(m, &huge_boot_pages, list) {
2646                 struct page *page = virt_to_page(m);
2647                 struct hstate *h = m->hstate;
2648
2649                 WARN_ON(page_count(page) != 1);
2650                 prep_compound_huge_page(page, huge_page_order(h));
2651                 WARN_ON(PageReserved(page));
2652                 prep_new_huge_page(h, page, page_to_nid(page));
2653                 put_page(page); /* free it into the hugepage allocator */
2654
2655                 /*
2656                  * If we had gigantic hugepages allocated at boot time, we need
2657                  * to restore the 'stolen' pages to totalram_pages in order to
2658                  * fix confusing memory reports from free(1) and other
2659                  * side effects, like CommitLimit going negative.
2660                  */
2661                 if (hstate_is_gigantic(h))
2662                         adjust_managed_page_count(page, pages_per_huge_page(h));
2663                 cond_resched();
2664         }
2665 }
2666
2667 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
2668 {
2669         unsigned long i;
2670         nodemask_t *node_alloc_noretry;
2671
2672         if (!hstate_is_gigantic(h)) {
2673                 /*
2674                  * Bit mask controlling how hard we retry per-node allocations.
2675                  * Ignore errors as lower level routines can deal with
2676                  * node_alloc_noretry == NULL.  If this kmalloc fails at boot
2677                  * time, we are likely in bigger trouble.
2678                  */
2679                 node_alloc_noretry = kmalloc(sizeof(*node_alloc_noretry),
2680                                                 GFP_KERNEL);
2681         } else {
2682                 /* allocations done at boot time */
2683                 node_alloc_noretry = NULL;
2684         }
2685
2686         /* bit mask controlling how hard we retry per-node allocations */
2687         if (node_alloc_noretry)
2688                 nodes_clear(*node_alloc_noretry);
2689
2690         for (i = 0; i < h->max_huge_pages; ++i) {
2691                 if (hstate_is_gigantic(h)) {
2692                         if (hugetlb_cma_size) {
2693                                 pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n");
2694                                 goto free;
2695                         }
2696                         if (!alloc_bootmem_huge_page(h))
2697                                 break;
2698                 } else if (!alloc_pool_huge_page(h,
2699                                          &node_states[N_MEMORY],
2700                                          node_alloc_noretry))
2701                         break;
2702                 cond_resched();
2703         }
2704         if (i < h->max_huge_pages) {
2705                 char buf[32];
2706
2707                 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
2708                 pr_warn("HugeTLB: allocating %lu of page size %s failed.  Only allocated %lu hugepages.\n",
2709                         h->max_huge_pages, buf, i);
2710                 h->max_huge_pages = i;
2711         }
2712 free:
2713         kfree(node_alloc_noretry);
2714 }
2715
2716 static void __init hugetlb_init_hstates(void)
2717 {
2718         struct hstate *h;
2719
2720         for_each_hstate(h) {
2721                 if (minimum_order > huge_page_order(h))
2722                         minimum_order = huge_page_order(h);
2723
2724                 /* oversize hugepages were init'ed in early boot */
2725                 if (!hstate_is_gigantic(h))
2726                         hugetlb_hstate_alloc_pages(h);
2727         }
2728         VM_BUG_ON(minimum_order == UINT_MAX);
2729 }
2730
2731 static void __init report_hugepages(void)
2732 {
2733         struct hstate *h;
2734
2735         for_each_hstate(h) {
2736                 char buf[32];
2737
2738                 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
2739                 pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
2740                         buf, h->free_huge_pages);
2741         }
2742 }
2743
2744 #ifdef CONFIG_HIGHMEM
2745 static void try_to_free_low(struct hstate *h, unsigned long count,
2746                                                 nodemask_t *nodes_allowed)
2747 {
2748         int i;
2749         LIST_HEAD(page_list);
2750
2751         lockdep_assert_held(&hugetlb_lock);
2752         if (hstate_is_gigantic(h))
2753                 return;
2754
2755         /*
2756          * Collect pages to be freed on a list, and free after dropping lock
2757          */
2758         for_each_node_mask(i, *nodes_allowed) {
2759                 struct page *page, *next;
2760                 struct list_head *freel = &h->hugepage_freelists[i];
2761                 list_for_each_entry_safe(page, next, freel, lru) {
2762                         if (count >= h->nr_huge_pages)
2763                                 goto out;
2764                         if (PageHighMem(page))
2765                                 continue;
2766                         remove_hugetlb_page(h, page, false);
2767                         list_add(&page->lru, &page_list);
2768                 }
2769         }
2770
2771 out:
2772         spin_unlock_irq(&hugetlb_lock);
2773         update_and_free_pages_bulk(h, &page_list);
2774         spin_lock_irq(&hugetlb_lock);
2775 }
2776 #else
2777 static inline void try_to_free_low(struct hstate *h, unsigned long count,
2778                                                 nodemask_t *nodes_allowed)
2779 {
2780 }
2781 #endif
2782
2783 /*
2784  * Increment or decrement surplus_huge_pages.  Keep node-specific counters
2785  * balanced by operating on them in a round-robin fashion.
2786  * Returns 1 if an adjustment was made.
2787  */
2788 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
2789                                 int delta)
2790 {
2791         int nr_nodes, node;
2792
2793         lockdep_assert_held(&hugetlb_lock);
2794         VM_BUG_ON(delta != -1 && delta != 1);
2795
2796         if (delta < 0) {
2797                 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
2798                         if (h->surplus_huge_pages_node[node])
2799                                 goto found;
2800                 }
2801         } else {
2802                 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
2803                         if (h->surplus_huge_pages_node[node] <
2804                                         h->nr_huge_pages_node[node])
2805                                 goto found;
2806                 }
2807         }
2808         return 0;
2809
2810 found:
2811         h->surplus_huge_pages += delta;
2812         h->surplus_huge_pages_node[node] += delta;
2813         return 1;
2814 }
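
/*
 * Illustrative use of adjust_pool_surplus(): when growing the pool, a
 * delta of -1 converts one existing surplus page back into a persistent
 * page so no fresh allocation is needed; when shrinking, a delta of +1
 * marks an in-use page as surplus so it is freed to buddy once its
 * current user releases it.
 */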
2815
2816 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
2817 static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
2818                               nodemask_t *nodes_allowed)
2819 {
2820         unsigned long min_count, ret;
2821         struct page *page;
2822         LIST_HEAD(page_list);
2823         NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL);
2824
2825         /*
2826          * Bit mask controlling how hard we retry per-node allocations.
2827          * If we cannot allocate the bit mask, do not attempt to allocate
2828          * the requested huge pages.
2829          */
2830         if (node_alloc_noretry)
2831                 nodes_clear(*node_alloc_noretry);
2832         else
2833                 return -ENOMEM;
2834
2835         /*
2836          * The resize_lock mutex prevents concurrent adjustments to the
2837          * number of pages in the hstate via the proc/sysfs interfaces.
2838          */
2839         mutex_lock(&h->resize_lock);
2840         spin_lock_irq(&hugetlb_lock);
2841
2842         /*
2843          * Check for a node specific request.
2844          * Changing node specific huge page count may require a corresponding
2845          * change to the global count.  In any case, the passed node mask
2846          * (nodes_allowed) will restrict alloc/free to the specified node.
2847          */
2848         if (nid != NUMA_NO_NODE) {
2849                 unsigned long old_count = count;
2850
2851                 count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
2852                 /*
2853                  * User may have specified a large count value which caused the
2854                  * above calculation to overflow.  In this case, they wanted
2855                  * to allocate as many huge pages as possible.  Set count to
2856                  * largest possible value to align with their intention.
2857                  */
2858                 if (count < old_count)
2859                         count = ULONG_MAX;
2860         }
2861
2862         /*
2863          * Runtime allocation of gigantic pages depends on the capability to
2864          * allocate large, contiguous page ranges.
2865          * If the system does not provide this feature, return an error when
2866          * the user tries to allocate gigantic pages, but still let the user
2867          * free boot-time allocated gigantic pages.
2868          */
2869         if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) {
2870                 if (count > persistent_huge_pages(h)) {
2871                         spin_unlock_irq(&hugetlb_lock);
2872                         mutex_unlock(&h->resize_lock);
2873                         NODEMASK_FREE(node_alloc_noretry);
2874                         return -EINVAL;
2875                 }
2876                 /* Fall through to decrease pool */
2877         }
2878
2879         /*
2880          * Increase the pool size
2881          * First take pages out of surplus state.  Then make up the
2882          * remaining difference by allocating fresh huge pages.
2883          *
2884          * We might race with alloc_surplus_huge_page() here and be unable
2885          * to convert a surplus huge page to a normal huge page. That is
2886          * not critical, though, it just means the overall size of the
2887          * pool might be one hugepage larger than it needs to be, but
2888          * within all the constraints specified by the sysctls.
2889          */
2890         while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
2891                 if (!adjust_pool_surplus(h, nodes_allowed, -1))
2892                         break;
2893         }
2894
2895         while (count > persistent_huge_pages(h)) {
2896                 /*
2897                  * If this allocation races such that we no longer need the
2898                  * page, free_huge_page will handle it by freeing the page
2899                  * and reducing the surplus.
2900                  */
2901                 spin_unlock_irq(&hugetlb_lock);
2902
2903                 /* yield cpu to avoid soft lockup */
2904                 cond_resched();
2905
2906                 ret = alloc_pool_huge_page(h, nodes_allowed,
2907                                                 node_alloc_noretry);
2908                 spin_lock_irq(&hugetlb_lock);
2909                 if (!ret)
2910                         goto out;
2911
2912                 /* Bail for signals. Probably ctrl-c from user */
2913                 if (signal_pending(current))
2914                         goto out;
2915         }
2916
2917         /*
2918          * Decrease the pool size
2919          * First return free pages to the buddy allocator (being careful
2920          * to keep enough around to satisfy reservations).  Then place
2921          * pages into surplus state as needed so the pool will shrink
2922          * to the desired size as pages become free.
2923          *
2924          * By placing pages into the surplus state independent of the
2925          * overcommit value, we are allowing the surplus pool size to
2926          * exceed overcommit. There are few sane options here. Since
2927          * alloc_surplus_huge_page() is checking the global counter,
2928          * though, we'll note that we're not allowed to exceed surplus
2929          * and won't grow the pool anywhere else, not until one of the
2930          * sysctls is changed or the surplus pages go out of use.
2931          */
2932         min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
2933         min_count = max(count, min_count);
2934         try_to_free_low(h, min_count, nodes_allowed);
2935
2936         /*
2937          * Collect pages to be removed on list without dropping lock
2938          */
2939         while (min_count < persistent_huge_pages(h)) {
2940                 page = remove_pool_huge_page(h, nodes_allowed, 0);
2941                 if (!page)
2942                         break;
2943
2944                 list_add(&page->lru, &page_list);
2945         }
2946         /* free the pages after dropping lock */
2947         spin_unlock_irq(&hugetlb_lock);
2948         update_and_free_pages_bulk(h, &page_list);
2949         spin_lock_irq(&hugetlb_lock);
2950
2951         while (count < persistent_huge_pages(h)) {
2952                 if (!adjust_pool_surplus(h, nodes_allowed, 1))
2953                         break;
2954         }
2955 out:
2956         h->max_huge_pages = persistent_huge_pages(h);
2957         spin_unlock_irq(&hugetlb_lock);
2958         mutex_unlock(&h->resize_lock);
2959
2960         NODEMASK_FREE(node_alloc_noretry);
2961
2962         return 0;
2963 }
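
/*
 * Illustrative usage (path assumes a 2MB hstate): this function backs
 * the sysfs and sysctl pool-size knobs, e.g.
 *
 *   echo 512 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 *
 * reaches here as set_max_huge_pages(h, 512, NUMA_NO_NODE, n_mask),
 * where n_mask defaults to node_states[N_MEMORY] unless a mempolicy or
 * a node-specific file narrows it.
 */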
2964
2965 #define HSTATE_ATTR_RO(_name) \
2966         static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
2967
2968 #define HSTATE_ATTR(_name) \
2969         static struct kobj_attribute _name##_attr = \
2970                 __ATTR(_name, 0644, _name##_show, _name##_store)
2971
2972 static struct kobject *hugepages_kobj;
2973 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
2974
2975 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
2976
2977 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
2978 {
2979         int i;
2980
2981         for (i = 0; i < HUGE_MAX_HSTATE; i++)
2982                 if (hstate_kobjs[i] == kobj) {
2983                         if (nidp)
2984                                 *nidp = NUMA_NO_NODE;
2985                         return &hstates[i];
2986                 }
2987
2988         return kobj_to_node_hstate(kobj, nidp);
2989 }
2990
2991 static ssize_t nr_hugepages_show_common(struct kobject *kobj,
2992                                         struct kobj_attribute *attr, char *buf)
2993 {
2994         struct hstate *h;
2995         unsigned long nr_huge_pages;
2996         int nid;
2997
2998         h = kobj_to_hstate(kobj, &nid);
2999         if (nid == NUMA_NO_NODE)
3000                 nr_huge_pages = h->nr_huge_pages;
3001         else
3002                 nr_huge_pages = h->nr_huge_pages_node[nid];
3003
3004         return sysfs_emit(buf, "%lu\n", nr_huge_pages);
3005 }
3006
3007 static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
3008                                            struct hstate *h, int nid,
3009                                            unsigned long count, size_t len)
3010 {
3011         int err;
3012         nodemask_t nodes_allowed, *n_mask;
3013
3014         if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
3015                 return -EINVAL;
3016
3017         if (nid == NUMA_NO_NODE) {
3018                 /*
3019                  * global hstate attribute
3020                  */
3021                 if (!(obey_mempolicy &&
3022                                 init_nodemask_of_mempolicy(&nodes_allowed)))
3023                         n_mask = &node_states[N_MEMORY];
3024                 else
3025                         n_mask = &nodes_allowed;
3026         } else {
3027                 /*
3028                  * Node specific request.  count adjustment happens in
3029                  * set_max_huge_pages() after acquiring hugetlb_lock.
3030                  */
3031                 init_nodemask_of_node(&nodes_allowed, nid);
3032                 n_mask = &nodes_allowed;
3033         }
3034
3035         err = set_max_huge_pages(h, count, nid, n_mask);
3036
3037         return err ? err : len;
3038 }
3039
3040 static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
3041                                          struct kobject *kobj, const char *buf,
3042                                          size_t len)
3043 {
3044         struct hstate *h;
3045         unsigned long count;
3046         int nid;
3047         int err;
3048
3049         err = kstrtoul(buf, 10, &count);
3050         if (err)
3051                 return err;
3052
3053         h = kobj_to_hstate(kobj, &nid);
3054         return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
3055 }
3056
3057 static ssize_t nr_hugepages_show(struct kobject *kobj,
3058                                        struct kobj_attribute *attr, char *buf)
3059 {
3060         return nr_hugepages_show_common(kobj, attr, buf);
3061 }
3062
3063 static ssize_t nr_hugepages_store(struct kobject *kobj,
3064                struct kobj_attribute *attr, const char *buf, size_t len)
3065 {
3066         return nr_hugepages_store_common(false, kobj, buf, len);
3067 }
3068 HSTATE_ATTR(nr_hugepages);
3069
3070 #ifdef CONFIG_NUMA
3071
3072 /*
3073  * hstate attribute for optionally mempolicy-based constraint on persistent
3074  * huge page alloc/free.
3075  */
3076 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
3077                                            struct kobj_attribute *attr,
3078                                            char *buf)
3079 {
3080         return nr_hugepages_show_common(kobj, attr, buf);
3081 }
3082
3083 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
3084                struct kobj_attribute *attr, const char *buf, size_t len)
3085 {
3086         return nr_hugepages_store_common(true, kobj, buf, len);
3087 }
3088 HSTATE_ATTR(nr_hugepages_mempolicy);
3089 #endif
3090
3092 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
3093                                         struct kobj_attribute *attr, char *buf)
3094 {
3095         struct hstate *h = kobj_to_hstate(kobj, NULL);
3096         return sysfs_emit(buf, "%lu\n", h->nr_overcommit_huge_pages);
3097 }
3098
3099 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
3100                 struct kobj_attribute *attr, const char *buf, size_t count)
3101 {
3102         int err;
3103         unsigned long input;
3104         struct hstate *h = kobj_to_hstate(kobj, NULL);
3105
3106         if (hstate_is_gigantic(h))
3107                 return -EINVAL;
3108
3109         err = kstrtoul(buf, 10, &input);
3110         if (err)
3111                 return err;
3112
3113         spin_lock_irq(&hugetlb_lock);
3114         h->nr_overcommit_huge_pages = input;
3115         spin_unlock_irq(&hugetlb_lock);
3116
3117         return count;
3118 }
3119 HSTATE_ATTR(nr_overcommit_hugepages);
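
/*
 * Illustrative usage (path assumes a 2MB hstate): nr_overcommit_hugepages
 * bounds how many surplus pages may be allocated beyond the persistent
 * pool, e.g.
 *
 *   echo 64 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_overcommit_hugepages
 *
 * permits up to 64 extra pages to be taken from buddy on demand and
 * returned once they fall out of use.
 */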
3120
3121 static ssize_t free_hugepages_show(struct kobject *kobj,
3122                                         struct kobj_attribute *attr, char *buf)
3123 {
3124         struct hstate *h;
3125         unsigned long free_huge_pages;
3126         int nid;
3127
3128         h = kobj_to_hstate(kobj, &nid);
3129         if (nid == NUMA_NO_NODE)
3130                 free_huge_pages = h->free_huge_pages;
3131         else
3132                 free_huge_pages = h->free_huge_pages_node[nid];
3133
3134         return sysfs_emit(buf, "%lu\n", free_huge_pages);
3135 }
3136 HSTATE_ATTR_RO(free_hugepages);
3137
3138 static ssize_t resv_hugepages_show(struct kobject *kobj,
3139                                         struct kobj_attribute *attr, char *buf)
3140 {
3141         struct hstate *h = kobj_to_hstate(kobj, NULL);
3142         return sysfs_emit(buf, "%lu\n", h->resv_huge_pages);
3143 }
3144 HSTATE_ATTR_RO(resv_hugepages);
3145
3146 static ssize_t surplus_hugepages_show(struct kobject *kobj,
3147                                         struct kobj_attribute *attr, char *buf)
3148 {
3149         struct hstate *h;
3150         unsigned long surplus_huge_pages;
3151         int nid;
3152
3153         h = kobj_to_hstate(kobj, &nid);
3154         if (nid == NUMA_NO_NODE)
3155                 surplus_huge_pages = h->surplus_huge_pages;
3156         else
3157                 surplus_huge_pages = h->surplus_huge_pages_node[nid];
3158
3159         return sysfs_emit(buf, "%lu\n", surplus_huge_pages);
3160 }
3161 HSTATE_ATTR_RO(surplus_hugepages);
3162
3163 static struct attribute *hstate_attrs[] = {
3164         &nr_hugepages_attr.attr,
3165         &nr_overcommit_hugepages_attr.attr,
3166         &free_hugepages_attr.attr,
3167         &resv_hugepages_attr.attr,
3168         &surplus_hugepages_attr.attr,
3169 #ifdef CONFIG_NUMA
3170         &nr_hugepages_mempolicy_attr.attr,
3171 #endif
3172         NULL,
3173 };
3174
3175 static const struct attribute_group hstate_attr_group = {
3176         .attrs = hstate_attrs,
3177 };
3178
3179 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
3180                                     struct kobject **hstate_kobjs,
3181                                     const struct attribute_group *hstate_attr_group)
3182 {
3183         int retval;
3184         int hi = hstate_index(h);
3185
3186         hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
3187         if (!hstate_kobjs[hi])
3188                 return -ENOMEM;
3189
3190         retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
3191         if (retval) {
3192                 kobject_put(hstate_kobjs[hi]);
3193                 hstate_kobjs[hi] = NULL;
3194         }
3195
3196         return retval;
3197 }
3198
3199 static void __init hugetlb_sysfs_init(void)
3200 {
3201         struct hstate *h;
3202         int err;
3203
3204         hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
3205         if (!hugepages_kobj)
3206                 return;
3207
3208         for_each_hstate(h) {
3209                 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
3210                                          hstate_kobjs, &hstate_attr_group);
3211                 if (err)
3212                         pr_err("HugeTLB: Unable to add hstate %s\n", h->name);
3213         }
3214 }
3215
3216 #ifdef CONFIG_NUMA
3217
3218 /*
3219  * node_hstate/s - associate per node hstate attributes, via their kobjects,
3220  * with node devices in node_devices[] using a parallel array.  The array
3221  * index of a node device or node_hstate equals the node id.
3222  * This is here to avoid any static dependency of the node device driver, in
3223  * the base kernel, on the hugetlb module.
3224  */
3225 struct node_hstate {
3226         struct kobject          *hugepages_kobj;
3227         struct kobject          *hstate_kobjs[HUGE_MAX_HSTATE];
3228 };
3229 static struct node_hstate node_hstates[MAX_NUMNODES];
3230
3231 /*
3232  * A subset of global hstate attributes for node devices
3233  */
3234 static struct attribute *per_node_hstate_attrs[] = {
3235         &nr_hugepages_attr.attr,
3236         &free_hugepages_attr.attr,
3237         &surplus_hugepages_attr.attr,
3238         NULL,
3239 };
3240
3241 static const struct attribute_group per_node_hstate_attr_group = {
3242         .attrs = per_node_hstate_attrs,
3243 };
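
/*
 * Illustrative layout: the subset above appears under each node device,
 * e.g. /sys/devices/system/node/node0/hugepages/hugepages-2048kB/,
 * mirroring the global files but scoped to that node.  The overcommit
 * and reserved counters are global only, hence their absence here.
 */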
3244
3245 /*
3246  * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
3247  * Returns node id via non-NULL nidp.
3248  */
3249 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
3250 {
3251         int nid;
3252
3253         for (nid = 0; nid < nr_node_ids; nid++) {
3254                 struct node_hstate *nhs = &node_hstates[nid];
3255                 int i;
3256                 for (i = 0; i < HUGE_MAX_HSTATE; i++)
3257                         if (nhs->hstate_kobjs[i] == kobj) {
3258                                 if (nidp)
3259                                         *nidp = nid;
3260                                 return &hstates[i];
3261                         }
3262         }
3263
3264         BUG();
3265         return NULL;
3266 }
3267
3268 /*
3269  * Unregister hstate attributes from a single node device.
3270  * No-op if no hstate attributes attached.
3271  */
3272 static void hugetlb_unregister_node(struct node *node)
3273 {
3274         struct hstate *h;
3275         struct node_hstate *nhs = &node_hstates[node->dev.id];
3276
3277         if (!nhs->hugepages_kobj)
3278                 return;         /* no hstate attributes */
3279
3280         for_each_hstate(h) {
3281                 int idx = hstate_index(h);
3282                 if (nhs->hstate_kobjs[idx]) {
3283                         kobject_put(nhs->hstate_kobjs[idx]);
3284                         nhs->hstate_kobjs[idx] = NULL;
3285                 }
3286         }
3287
3288         kobject_put(nhs->hugepages_kobj);
3289         nhs->hugepages_kobj = NULL;
3290 }
3291
3292
3293 /*
3294  * Register hstate attributes for a single node device.
3295  * No-op if attributes already registered.
3296  */
3297 static void hugetlb_register_node(struct node *node)
3298 {
3299         struct hstate *h;
3300         struct node_hstate *nhs = &node_hstates[node->dev.id];
3301         int err;
3302
3303         if (nhs->hugepages_kobj)
3304                 return;         /* already allocated */
3305
3306         nhs->hugepages_kobj = kobject_create_and_add("hugepages",
3307                                                         &node->dev.kobj);
3308         if (!nhs->hugepages_kobj)
3309                 return;
3310
3311         for_each_hstate(h) {
3312                 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
3313                                                 nhs->hstate_kobjs,
3314                                                 &per_node_hstate_attr_group);
3315                 if (err) {
3316                         pr_err("HugeTLB: Unable to add hstate %s for node %d\n",
3317                                 h->name, node->dev.id);
3318                         hugetlb_unregister_node(node);
3319                         break;
3320                 }
3321         }
3322 }
3323
3324 /*
3325  * hugetlb init time:  register hstate attributes for all registered node
3326  * devices of nodes that have memory.  All on-line nodes should have
3327  * registered their associated device by this time.
3328  */
3329 static void __init hugetlb_register_all_nodes(void)
3330 {
3331         int nid;
3332
3333         for_each_node_state(nid, N_MEMORY) {
3334                 struct node *node = node_devices[nid];
3335                 if (node->dev.id == nid)
3336                         hugetlb_register_node(node);
3337         }
3338
3339         /*
3340          * Let the node device driver know we're here so it can
3341          * [un]register hstate attributes on node hotplug.
3342          */
3343         register_hugetlbfs_with_node(hugetlb_register_node,
3344                                      hugetlb_unregister_node);
3345 }
3346 #else   /* !CONFIG_NUMA */
3347
3348 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
3349 {
3350         BUG();
3351         if (nidp)
3352                 *nidp = NUMA_NO_NODE;
3353         return NULL;
3354 }
3355
3356 static void hugetlb_register_all_nodes(void) { }
3357
3358 #endif
3359
3360 static int __init hugetlb_init(void)
3361 {
3362         int i;
3363
3364         BUILD_BUG_ON(sizeof_field(struct page, private) * BITS_PER_BYTE <
3365                         __NR_HPAGEFLAGS);
3366
3367         if (!hugepages_supported()) {
3368                 if (hugetlb_max_hstate || default_hstate_max_huge_pages)
3369                         pr_warn("HugeTLB: huge pages not supported, ignoring associated command-line parameters\n");
3370                 return 0;
3371         }
3372
3373         /*
3374          * Make sure HPAGE_SIZE (HUGETLB_PAGE_ORDER) hstate exists.  Some
3375          * architectures depend on setup being done here.
3376          */
3377         hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
3378         if (!parsed_default_hugepagesz) {
3379                 /*
3380                  * If we did not parse a default huge page size, set
3381                  * default_hstate_idx to HPAGE_SIZE hstate. And, if the
3382                  * number of huge pages for this default size was implicitly
3383                  * specified, set that here as well.
3384                  * Note that the implicit setting will overwrite an explicit
3385                  * setting.  A warning will be printed in this case.
3386                  */
3387                 default_hstate_idx = hstate_index(size_to_hstate(HPAGE_SIZE));
3388                 if (default_hstate_max_huge_pages) {
3389                         if (default_hstate.max_huge_pages) {
3390                                 char buf[32];
3391
3392                                 string_get_size(huge_page_size(&default_hstate),
3393                                         1, STRING_UNITS_2, buf, 32);
3394                                 pr_warn("HugeTLB: Ignoring hugepages=%lu associated with %s page size\n",
3395                                         default_hstate.max_huge_pages, buf);
3396                                 pr_warn("HugeTLB: Using hugepages=%lu for number of default huge pages\n",
3397                                         default_hstate_max_huge_pages);
3398                         }
3399                         default_hstate.max_huge_pages =
3400                                 default_hstate_max_huge_pages;
3401                 }
3402         }
3403
3404         hugetlb_cma_check();
3405         hugetlb_init_hstates();
3406         gather_bootmem_prealloc();
3407         report_hugepages();
3408
3409         hugetlb_sysfs_init();
3410         hugetlb_register_all_nodes();
3411         hugetlb_cgroup_file_init();
3412
3413 #ifdef CONFIG_SMP
3414         num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
3415 #else
3416         num_fault_mutexes = 1;
3417 #endif
3418         hugetlb_fault_mutex_table =
3419                 kmalloc_array(num_fault_mutexes, sizeof(struct mutex),
3420                               GFP_KERNEL);
3421         BUG_ON(!hugetlb_fault_mutex_table);
3422
3423         for (i = 0; i < num_fault_mutexes; i++)
3424                 mutex_init(&hugetlb_fault_mutex_table[i]);
3425         return 0;
3426 }
3427 subsys_initcall(hugetlb_init);
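
/*
 * Illustrative sizing: on an SMP kernel with 4 possible CPUs,
 * num_fault_mutexes = roundup_pow_of_two(8 * 4) = 32, so faults on up
 * to 32 distinct logical pages can proceed without contending on the
 * same mutex.
 */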
3428
3429 /* Overwritten by architectures with more huge page sizes */
3430 bool __init __attribute((weak)) arch_hugetlb_valid_size(unsigned long size)
3431 {
3432         return size == HPAGE_SIZE;
3433 }
3434
3435 void __init hugetlb_add_hstate(unsigned int order)
3436 {
3437         struct hstate *h;
3438         unsigned long i;
3439
3440         if (size_to_hstate(PAGE_SIZE << order))
3441                 return;
3443         BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
3444         BUG_ON(order == 0);
3445         h = &hstates[hugetlb_max_hstate++];
3446         mutex_init(&h->resize_lock);
3447         h->order = order;
3448         h->mask = ~(huge_page_size(h) - 1);
3449         for (i = 0; i < MAX_NUMNODES; ++i)
3450                 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
3451         INIT_LIST_HEAD(&h->hugepage_activelist);
3452         h->next_nid_to_alloc = first_memory_node;
3453         h->next_nid_to_free = first_memory_node;
3454         snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
3455                                         huge_page_size(h)/1024);
3456
3457         parsed_hstate = h;
3458 }
3459
3460 /*
3461  * hugepages command line processing
3462  * hugepages normally follows a valid hugepagesz or default_hugepagesz
3463  * specification.  If not, ignore the hugepages value.  hugepages can also
3464  * be the first huge page command line option, in which case it implicitly
3465  * specifies the number of huge pages for the default size.
3466  */
3467 static int __init hugepages_setup(char *s)
3468 {
3469         unsigned long *mhp;
3470         static unsigned long *last_mhp;
3471
3472         if (!parsed_valid_hugepagesz) {
3473                 pr_warn("HugeTLB: hugepages=%s does not follow a valid hugepagesz, ignoring\n", s);
3474                 parsed_valid_hugepagesz = true;
3475                 return 0;
3476         }
3477
3478         /*
3479          * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter
3480          * yet, so this hugepages= parameter goes to the "default hstate".
3481          * Otherwise, it goes with the previously parsed hugepagesz or
3482          * default_hugepagesz.
3483          */
3484         else if (!hugetlb_max_hstate)
3485                 mhp = &default_hstate_max_huge_pages;
3486         else
3487                 mhp = &parsed_hstate->max_huge_pages;
3488
3489         if (mhp == last_mhp) {
3490                 pr_warn("HugeTLB: hugepages= specified twice without interleaving hugepagesz=, ignoring hugepages=%s\n", s);
3491                 return 0;
3492         }
3493
3494         if (sscanf(s, "%lu", mhp) <= 0)
3495                 *mhp = 0;
3496
3497         /*
3498          * Global state is always initialized later in hugetlb_init.
3499          * But we need to allocate gigantic hstates here early to still
3500          * use the bootmem allocator.
3501          */
3502         if (hugetlb_max_hstate && hstate_is_gigantic(parsed_hstate))
3503                 hugetlb_hstate_alloc_pages(parsed_hstate);
3504
3505         last_mhp = mhp;
3506
3507         return 1;
3508 }
3509 __setup("hugepages=", hugepages_setup);
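
/*
 * Illustrative command lines:
 *
 *   hugepages=512
 *       512 pages of the default huge page size.
 *
 *   hugepagesz=2M hugepages=512 hugepagesz=1G hugepages=4
 *       Per-size pools; each hugepages= applies to the hugepagesz=
 *       immediately preceding it.
 *
 * Repeating hugepages= for the same size without an intervening
 * hugepagesz= triggers the "specified twice" warning above.
 */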
3510
3511 /*
3512  * hugepagesz command line processing
3513  * A specific huge page size can only be specified once with hugepagesz.
3514  * hugepagesz is followed by hugepages on the command line.  The global
3515  * variable 'parsed_valid_hugepagesz' is used to determine if prior
3516  * hugepagesz argument was valid.
3517  */
3518 static int __init hugepagesz_setup(char *s)
3519 {
3520         unsigned long size;
3521         struct hstate *h;
3522
3523         parsed_valid_hugepagesz = false;
3524         size = (unsigned long)memparse(s, NULL);
3525
3526         if (!arch_hugetlb_valid_size(size)) {
3527                 pr_err("HugeTLB: unsupported hugepagesz=%s\n", s);
3528                 return 0;
3529         }
3530
3531         h = size_to_hstate(size);
3532         if (h) {
3533                 /*
3534                  * hstate for this size already exists.  This is normally
3535                  * an error, but is allowed if the existing hstate is the
3536                  * default hstate.  More specifically, it is only allowed if
3537                  * the number of huge pages for the default hstate was not
3538                  * previously specified.
3539                  */
3540                 if (!parsed_default_hugepagesz || h != &default_hstate ||
3541                     default_hstate.max_huge_pages) {
3542                         pr_warn("HugeTLB: hugepagesz=%s specified twice, ignoring\n", s);
3543                         return 0;
3544                 }
3545
3546                 /*
3547                  * No need to call hugetlb_add_hstate() as hstate already
3548                  * exists.  But, do set parsed_hstate so that a following
3549                  * hugepages= parameter will be applied to this hstate.
3550                  */
3551                 parsed_hstate = h;
3552                 parsed_valid_hugepagesz = true;
3553                 return 1;
3554         }
3555
3556         hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
3557         parsed_valid_hugepagesz = true;
3558         return 1;
3559 }
3560 __setup("hugepagesz=", hugepagesz_setup);
3561
3562 /*
3563  * default_hugepagesz command line input
3564  * Only one instance of default_hugepagesz allowed on command line.
3565  */
3566 static int __init default_hugepagesz_setup(char *s)
3567 {
3568         unsigned long size;
3569
3570         parsed_valid_hugepagesz = false;
3571         if (parsed_default_hugepagesz) {
3572                 pr_err("HugeTLB: default_hugepagesz previously specified, ignoring %s\n", s);
3573                 return 0;
3574         }
3575
3576         size = (unsigned long)memparse(s, NULL);
3577
3578         if (!arch_hugetlb_valid_size(size)) {
3579                 pr_err("HugeTLB: unsupported default_hugepagesz=%s\n", s);
3580                 return 0;
3581         }
3582
3583         hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
3584         parsed_valid_hugepagesz = true;
3585         parsed_default_hugepagesz = true;
3586         default_hstate_idx = hstate_index(size_to_hstate(size));
3587
3588         /*
3589          * The number of default huge pages (for this size) could have been
3590          * specified as the first hugetlb parameter: hugepages=X.  If so,
3591          * then default_hstate_max_huge_pages is set.  If the default huge
3592          * page size is gigantic (>= MAX_ORDER), then the pages must be
3593          * allocated here from bootmem allocator.
3594          */
3595         if (default_hstate_max_huge_pages) {
3596                 default_hstate.max_huge_pages = default_hstate_max_huge_pages;
3597                 if (hstate_is_gigantic(&default_hstate))
3598                         hugetlb_hstate_alloc_pages(&default_hstate);
3599                 default_hstate_max_huge_pages = 0;
3600         }
3601
3602         return 1;
3603 }
3604 __setup("default_hugepagesz=", default_hugepagesz_setup);
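
/*
 * Illustrative command line:
 *
 *   hugepages=16 default_hugepagesz=1G
 *
 * The early hugepages=16 is held in default_hstate_max_huge_pages and
 * applied here; because 1G pages are gigantic, they are allocated from
 * bootmem immediately rather than later in hugetlb_init().
 */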
3605
3606 static unsigned int allowed_mems_nr(struct hstate *h)
3607 {
3608         int node;
3609         unsigned int nr = 0;
3610         nodemask_t *mpol_allowed;
3611         unsigned int *array = h->free_huge_pages_node;
3612         gfp_t gfp_mask = htlb_alloc_mask(h);
3613
3614         mpol_allowed = policy_nodemask_current(gfp_mask);
3615
3616         for_each_node_mask(node, cpuset_current_mems_allowed) {
3617                 if (!mpol_allowed || node_isset(node, *mpol_allowed))
3618                         nr += array[node];
3619         }
3620
3621         return nr;
3622 }
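
/*
 * Illustrative example: if the current cpuset allows nodes {0,1} but
 * the task's memory policy only allows node 1, only node 1's
 * free_huge_pages_node count contributes to the returned total.
 */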
3623
3624 #ifdef CONFIG_SYSCTL
3625 static int proc_hugetlb_doulongvec_minmax(struct ctl_table *table, int write,
3626                                           void *buffer, size_t *length,
3627                                           loff_t *ppos, unsigned long *out)
3628 {
3629         struct ctl_table dup_table;
3630
3631         /*
3632          * To avoid races with __do_proc_doulongvec_minmax(), duplicate
3633          * @table and alter the duplicate rather than the original.
3634          */
3635         dup_table = *table;
3636         dup_table.data = out;
3637
3638         return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos);
3639 }
3640
3641 static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
3642                          struct ctl_table *table, int write,
3643                          void *buffer, size_t *length, loff_t *ppos)
3644 {
3645         struct hstate *h = &default_hstate;
3646         unsigned long tmp = h->max_huge_pages;
3647         int ret;
3648
3649         if (!hugepages_supported())
3650                 return -EOPNOTSUPP;
3651
3652         ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
3653                                              &tmp);
3654         if (ret)
3655                 goto out;
3656
3657         if (write)
3658                 ret = __nr_hugepages_store_common(obey_mempolicy, h,
3659                                                   NUMA_NO_NODE, tmp, *length);
3660 out:
3661         return ret;
3662 }
3663
3664 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
3665                           void *buffer, size_t *length, loff_t *ppos)
3666 {
3667
3668         return hugetlb_sysctl_handler_common(false, table, write,
3669                                                         buffer, length, ppos);
3670 }
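
/*
 * Illustrative usage: this handler backs the vm.nr_hugepages sysctl, so
 *
 *   sysctl vm.nr_hugepages=128
 *
 * resizes the default hstate's pool just like a write to the global
 * nr_hugepages sysfs file.
 */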
3671
3672 #ifdef CONFIG_NUMA
3673 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
3674                           void *buffer, size_t *length, loff_t *ppos)
3675 {
3676         return hugetlb_sysctl_handler_common(true, table, write,
3677                                                         buffer, length, ppos);
3678 }
3679 #endif /* CONFIG_NUMA */
3680
3681 int hugetlb_overcommit_handler(struct ctl_table *table, int write,
3682                 void *buffer, size_t *length, loff_t *ppos)
3683 {
3684         struct hstate *h = &default_hstate;
3685         unsigned long tmp;
3686         int ret;
3687
3688         if (!hugepages_supported())
3689                 return -EOPNOTSUPP;
3690
3691         tmp = h->nr_overcommit_huge_pages;
3692
3693         if (write && hstate_is_gigantic(h))
3694                 return -EINVAL;
3695
3696         ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
3697                                              &tmp);
3698         if (ret)
3699                 goto out;
3700
3701         if (write) {
3702                 spin_lock_irq(&hugetlb_lock);
3703                 h->nr_overcommit_huge_pages = tmp;
3704                 spin_unlock_irq(&hugetlb_lock);
3705         }
3706 out:
3707         return ret;
3708 }
3709
3710 #endif /* CONFIG_SYSCTL */
3711
3712 void hugetlb_report_meminfo(struct seq_file *m)
3713 {
3714         struct hstate *h;
3715         unsigned long total = 0;
3716
3717         if (!hugepages_supported())
3718                 return;
3719
3720         for_each_hstate(h) {
3721                 unsigned long count = h->nr_huge_pages;
3722
3723                 total += huge_page_size(h) * count;
3724
3725                 if (h == &default_hstate)
3726                         seq_printf(m,
3727                                    "HugePages_Total:   %5lu\n"
3728                                    "HugePages_Free:    %5lu\n"
3729                                    "HugePages_Rsvd:    %5lu\n"
3730                                    "HugePages_Surp:    %5lu\n"
3731                                    "Hugepagesize:   %8lu kB\n",
3732                                    count,
3733                                    h->free_huge_pages,
3734                                    h->resv_huge_pages,
3735                                    h->surplus_huge_pages,
3736                                    huge_page_size(h) / SZ_1K);
3737         }
3738
3739         seq_printf(m, "Hugetlb:        %8lu kB\n", total / SZ_1K);
3740 }
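
/*
 * Illustrative /proc/meminfo excerpt (values assumed: 512 x 2MB pages):
 *
 *   HugePages_Total:     512
 *   HugePages_Free:      384
 *   HugePages_Rsvd:       16
 *   HugePages_Surp:        0
 *   Hugepagesize:       2048 kB
 *   Hugetlb:         1048576 kB
 *
 * The per-pool lines describe only the default hstate; "Hugetlb:" sums
 * memory across all huge page sizes.
 */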
3741
3742 int hugetlb_report_node_meminfo(char *buf, int len, int nid)
3743 {
3744         struct hstate *h = &default_hstate;
3745
3746         if (!hugepages_supported())
3747                 return 0;
3748
3749         return sysfs_emit_at(buf, len,
3750                              "Node %d HugePages_Total: %5u\n"
3751                              "Node %d HugePages_Free:  %5u\n"
3752                              "Node %d HugePages_Surp:  %5u\n",
3753                              nid, h->nr_huge_pages_node[nid],
3754                              nid, h->free_huge_pages_node[nid],
3755                              nid, h->surplus_huge_pages_node[nid]);
3756 }
3757
3758 void hugetlb_show_meminfo(void)
3759 {
3760         struct hstate *h;
3761         int nid;
3762
3763         if (!hugepages_supported())
3764                 return;
3765
3766         for_each_node_state(nid, N_MEMORY)
3767                 for_each_hstate(h)
3768                         pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
3769                                 nid,
3770                                 h->nr_huge_pages_node[nid],
3771                                 h->free_huge_pages_node[nid],
3772                                 h->surplus_huge_pages_node[nid],
3773                                 huge_page_size(h) / SZ_1K);
3774 }
3775
3776 void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
3777 {
3778         seq_printf(m, "HugetlbPages:\t%8lu kB\n",
3779                    atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10));
3780 }
3781
3782 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
3783 unsigned long hugetlb_total_pages(void)
3784 {
3785         struct hstate *h;
3786         unsigned long nr_total_pages = 0;
3787
3788         for_each_hstate(h)
3789                 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
3790         return nr_total_pages;
3791 }
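
/*
 * Illustrative arithmetic (assuming a 4KB base page): a pool of
 * 512 x 2MB huge pages reports 512 * 512 = 262144 PAGE_SIZE units.
 */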
3792
3793 static int hugetlb_acct_memory(struct hstate *h, long delta)
3794 {
3795         int ret = -ENOMEM;
3796
3797         if (!delta)
3798                 return 0;
3799
3800         spin_lock_irq(&hugetlb_lock);
3801         /*
3802          * When cpuset is configured, it breaks the strict hugetlb page
3803          * reservation as the accounting is done on a global variable. Such
3804          * reservation is completely rubbish in the presence of cpuset because
3805          * the reservation is not checked against page availability for the
3806          * current cpuset. An application can still be OOM'ed by the kernel
3807          * if there is a lack of free hugetlb pages in the cpuset that the
3808          * task is in. Attempting to enforce strict accounting with cpuset
3809          * is almost impossible (or too ugly) because cpusets are so fluid
3810          * that a task or memory node can be dynamically moved between them.
3811          *
3812          * The change of semantics for shared hugetlb mappings with cpuset is
3813          * undesirable. However, in order to preserve some of the semantics,
3814          * we fall back to checking against current free page availability as
3815          * a best attempt, hopefully minimizing the impact of the semantic
3816          * change that cpuset introduces.
3817          *
3818          * Apart from cpuset, the memory policy mechanism also determines
3819          * from which node the kernel will allocate memory in a NUMA
3820          * system. So, similar to cpuset, we should also consider the
3821          * memory policy of the current task, for the same reasons
3822          * described above.
3823          */
3824         if (delta > 0) {
3825                 if (gather_surplus_pages(h, delta) < 0)
3826                         goto out;
3827
3828                 if (delta > allowed_mems_nr(h)) {
3829                         return_unused_surplus_pages(h, delta);
3830                         goto out;
3831                 }
3832         }
3833
3834         ret = 0;
3835         if (delta < 0)
3836                 return_unused_surplus_pages(h, (unsigned long) -delta);
3837
3838 out:
3839         spin_unlock_irq(&hugetlb_lock);
3840         return ret;
3841 }
3842
3843 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
3844 {
3845         struct resv_map *resv = vma_resv_map(vma);
3846
3847         /*
3848          * This new VMA should share its sibling's reservation map if present.
3849          * The VMA will only ever have a valid reservation map pointer where
3850          * it is being copied for another still existing VMA.  As that VMA
3851          * has a reference to the reservation map it cannot disappear until
3852          * after this open call completes.  It is therefore safe to take a
3853          * new reference here without additional locking.
3854          */
3855         if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3856                 kref_get(&resv->refs);
3857 }
3858
3859 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
3860 {
3861         struct hstate *h = hstate_vma(vma);
3862         struct resv_map *resv = vma_resv_map(vma);
3863         struct hugepage_subpool *spool = subpool_vma(vma);
3864         unsigned long reserve, start, end;
3865         long gbl_reserve;
3866
3867         if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3868                 return;
3869
3870         start = vma_hugecache_offset(h, vma, vma->vm_start);
3871         end = vma_hugecache_offset(h, vma, vma->vm_end);
3872
3873         reserve = (end - start) - region_count(resv, start, end);
3874         hugetlb_cgroup_uncharge_counter(resv, start, end);
3875         if (reserve) {
3876                 /*
3877                  * Decrement reserve counts.  The global reserve count may be
3878                  * adjusted if the subpool has a minimum size.
3879                  */
3880                 gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
3881                 hugetlb_acct_memory(h, -gbl_reserve);
3882         }
3883
3884         kref_put(&resv->refs, resv_map_release);
3885 }
3886
3887 static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
3888 {
3889         if (addr & ~(huge_page_mask(hstate_vma(vma))))
3890                 return -EINVAL;
3891         return 0;
3892 }
3893
3894 static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
3895 {
3896         return huge_page_size(hstate_vma(vma));
3897 }
3898
3899 /*
3900  * We cannot handle pagefaults against hugetlb pages at all.  They cause
3901  * handle_mm_fault() to try to instantiate regular-sized pages in the
3902  * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
3903  * this far.
3904  */
3905 static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
3906 {
3907         BUG();
3908         return 0;
3909 }
3910
3911 /*
3912  * When a new function is introduced to vm_operations_struct and added
3913  * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
3914  * This is because under System V memory model, mappings created via
3915  * shmget/shmat with "huge page" specified are backed by hugetlbfs files,
3916  * their original vm_ops are overwritten with shm_vm_ops.
3917  */
3918 const struct vm_operations_struct hugetlb_vm_ops = {
3919         .fault = hugetlb_vm_op_fault,
3920         .open = hugetlb_vm_op_open,
3921         .close = hugetlb_vm_op_close,
3922         .may_split = hugetlb_vm_op_split,
3923         .pagesize = hugetlb_vm_op_pagesize,
3924 };
3925
3926 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
3927                                 int writable)
3928 {
3929         pte_t entry;
3930
3931         if (writable) {
3932                 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
3933                                          vma->vm_page_prot)));
3934         } else {
3935                 entry = huge_pte_wrprotect(mk_huge_pte(page,
3936                                            vma->vm_page_prot));
3937         }
3938         entry = pte_mkyoung(entry);
3939         entry = pte_mkhuge(entry);
3940         entry = arch_make_huge_pte(entry, vma, page, writable);
3941
3942         return entry;
3943 }
3944
3945 static void set_huge_ptep_writable(struct vm_area_struct *vma,
3946                                    unsigned long address, pte_t *ptep)
3947 {
3948         pte_t entry;
3949
3950         entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
3951         if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
3952                 update_mmu_cache(vma, address, ptep);
3953 }
3954
3955 bool is_hugetlb_entry_migration(pte_t pte)
3956 {
3957         swp_entry_t swp;
3958
3959         if (huge_pte_none(pte) || pte_present(pte))
3960                 return false;
3961         swp = pte_to_swp_entry(pte);
3962         return is_migration_entry(swp);
3966 }
3967
3968 static bool is_hugetlb_entry_hwpoisoned(pte_t pte)
3969 {
3970         swp_entry_t swp;
3971
3972         if (huge_pte_none(pte) || pte_present(pte))
3973                 return false;
3974         swp = pte_to_swp_entry(pte);
3975         return is_hwpoison_entry(swp);
3979 }
3980
3981 static void
3982 hugetlb_install_page(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr,
3983                      struct page *new_page)
3984 {
3985         __SetPageUptodate(new_page);
3986         set_huge_pte_at(vma->vm_mm, addr, ptep, make_huge_pte(vma, new_page, 1));
3987         hugepage_add_new_anon_rmap(new_page, vma, addr);
3988         hugetlb_count_add(pages_per_huge_page(hstate_vma(vma)), vma->vm_mm);
3989         ClearHPageRestoreReserve(new_page);
3990         SetHPageMigratable(new_page);
3991 }
3992
3993 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
3994                             struct vm_area_struct *vma)
3995 {
3996         pte_t *src_pte, *dst_pte, entry, dst_entry;
3997         struct page *ptepage;
3998         unsigned long addr;
3999         bool cow = is_cow_mapping(vma->vm_flags);
4000         struct hstate *h = hstate_vma(vma);
4001         unsigned long sz = huge_page_size(h);
4002         unsigned long npages = pages_per_huge_page(h);
4003         struct address_space *mapping = vma->vm_file->f_mapping;
4004         struct mmu_notifier_range range;
4005         int ret = 0;
4006
4007         if (cow) {
4008                 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, src,
4009                                         vma->vm_start,
4010                                         vma->vm_end);
4011                 mmu_notifier_invalidate_range_start(&range);
4012         } else {
4013                 /*
4014                  * For shared mappings i_mmap_rwsem must be held to call
4015                  * huge_pte_alloc, otherwise the returned ptep could go
4016                  * away if part of a shared pmd and another thread calls
4017                  * huge_pmd_unshare.
4018                  */
4019                 i_mmap_lock_read(mapping);
4020         }
4021
4022         for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
4023                 spinlock_t *src_ptl, *dst_ptl;
4024                 src_pte = huge_pte_offset(src, addr, sz);
4025                 if (!src_pte)
4026                         continue;
4027                 dst_pte = huge_pte_alloc(dst, vma, addr, sz);
4028                 if (!dst_pte) {
4029                         ret = -ENOMEM;
4030                         break;
4031                 }
4032
4033                 /*
4034                  * If the pagetables are shared don't copy or take references.
4035                  * dst_pte == src_pte is the common case of src/dest sharing.
4036                  *
4037                  * However, src could have 'unshared' and dst shares with
4038                  * another vma.  If dst_pte !none, this implies sharing.
4039                  * Check here before taking page table lock, and once again
4040                  * after taking the lock below.
4041                  */
4042                 dst_entry = huge_ptep_get(dst_pte);
4043                 if ((dst_pte == src_pte) || !huge_pte_none(dst_entry))
4044                         continue;
4045
4046                 dst_ptl = huge_pte_lock(h, dst, dst_pte);
4047                 src_ptl = huge_pte_lockptr(h, src, src_pte);
4048                 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
4049                 entry = huge_ptep_get(src_pte);
4050                 dst_entry = huge_ptep_get(dst_pte);
4051 again:
4052                 if (huge_pte_none(entry) || !huge_pte_none(dst_entry)) {
4053                         /*
4054                          * Skip if src entry none.  Also, skip in the
4055                          * unlikely case dst entry !none as this implies
4056                          * sharing with another vma.
4057                          */
4058                         ;
4059                 } else if (unlikely(is_hugetlb_entry_migration(entry) ||
4060                                     is_hugetlb_entry_hwpoisoned(entry))) {
4061                         swp_entry_t swp_entry = pte_to_swp_entry(entry);
4062
4063                         if (is_write_migration_entry(swp_entry) && cow) {
4064                                 /*
4065                                  * COW mappings require pages in both
4066                                  * parent and child to be set to read.
4067                                  */
4068                                 make_migration_entry_read(&swp_entry);
4069                                 entry = swp_entry_to_pte(swp_entry);
4070                                 set_huge_swap_pte_at(src, addr, src_pte,
4071                                                      entry, sz);
4072                         }
4073                         set_huge_swap_pte_at(dst, addr, dst_pte, entry, sz);
4074                 } else {
4075                         entry = huge_ptep_get(src_pte);
4076                         ptepage = pte_page(entry);
4077                         get_page(ptepage);
4078
4079                         /*
4080                          * This is a rare case where we see pinned hugetlb
4081                          * pages while they're prone to COW.  We need to do the
4082                          * COW earlier during fork.
4083                          *
4084                          * When pre-allocating the page or copying data, we
4085                          * need to be without the pgtable locks since we could
4086                          * sleep during the process.
4087                          */
4088                         if (unlikely(page_needs_cow_for_dma(vma, ptepage))) {
4089                                 pte_t src_pte_old = entry;
4090                                 struct page *new;
4091
4092                                 spin_unlock(src_ptl);
4093                                 spin_unlock(dst_ptl);
4094                                 /* Do not use the reserve as it is privately owned */
4095                                 new = alloc_huge_page(vma, addr, 1);
4096                                 if (IS_ERR(new)) {
4097                                         put_page(ptepage);
4098                                         ret = PTR_ERR(new);
4099                                         break;
4100                                 }
4101                                 copy_user_huge_page(new, ptepage, addr, vma,
4102                                                     npages);
4103                                 put_page(ptepage);
4104
4105                                 /* Install the new huge page if src pte stable */
4106                                 dst_ptl = huge_pte_lock(h, dst, dst_pte);
4107                                 src_ptl = huge_pte_lockptr(h, src, src_pte);
4108                                 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
4109                                 entry = huge_ptep_get(src_pte);
4110                                 if (!pte_same(src_pte_old, entry)) {
4111                                         restore_reserve_on_error(h, vma, addr,
4112                                                                 new);
4113                                         put_page(new);
4114                                         /* dst_entry won't change: the child mm isn't live yet */
4115                                         goto again;
4116                                 }
4117                                 hugetlb_install_page(vma, dst_pte, addr, new);
4118                                 spin_unlock(src_ptl);
4119                                 spin_unlock(dst_ptl);
4120                                 continue;
4121                         }
4122
4123                         if (cow) {
4124                                 /*
4125                                  * No need to notify as we are downgrading page
4126                                  * table protection not changing it to point
4127                                  * to a new page.
4128                                  *
4129                                  * See Documentation/vm/mmu_notifier.rst
4130                                  */
4131                                 huge_ptep_set_wrprotect(src, addr, src_pte);
4132                                 entry = huge_pte_wrprotect(entry);
4133                         }
4134
4135                         page_dup_rmap(ptepage, true);
4136                         set_huge_pte_at(dst, addr, dst_pte, entry);
4137                         hugetlb_count_add(npages, dst);
4138                 }
4139                 spin_unlock(src_ptl);
4140                 spin_unlock(dst_ptl);
4141         }
4142
4143         if (cow)
4144                 mmu_notifier_invalidate_range_end(&range);
4145         else
4146                 i_mmap_unlock_read(mapping);
4147
4148         return ret;
4149 }
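
/*
 * Note: this is the hugetlb counterpart of copy_page_range() and runs
 * at fork() time for hugetlb VMAs.  Only private (COW) mappings take
 * the mmu notifier path above; shared mappings only take i_mmap_rwsem
 * so that any shared page tables remain stable while copying.
 */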
4150
4151 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
4152                             unsigned long start, unsigned long end,
4153                             struct page *ref_page)
4154 {
4155         struct mm_struct *mm = vma->vm_mm;
4156         unsigned long address;
4157         pte_t *ptep;
4158         pte_t pte;
4159         spinlock_t *ptl;
4160         struct page *page;
4161         struct hstate *h = hstate_vma(vma);
4162         unsigned long sz = huge_page_size(h);
4163         struct mmu_notifier_range range;
4164
4165         WARN_ON(!is_vm_hugetlb_page(vma));
4166         BUG_ON(start & ~huge_page_mask(h));
4167         BUG_ON(end & ~huge_page_mask(h));
4168
4169         /*
4170          * This is a hugetlb vma: all the pte entries should point
4171          * to huge pages.
4172          */
4173         tlb_change_page_size(tlb, sz);
4174         tlb_start_vma(tlb, vma);
4175
4176         /*
4177          * If sharing is possible, alert mmu notifiers of the worst case.
4178          */
4179         mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, mm, start,
4180                                 end);
4181         adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
4182         mmu_notifier_invalidate_range_start(&range);
4183         address = start;
4184         for (; address < end; address += sz) {
4185                 ptep = huge_pte_offset(mm, address, sz);
4186                 if (!ptep)
4187                         continue;
4188
4189                 ptl = huge_pte_lock(h, mm, ptep);
4190                 if (huge_pmd_unshare(mm, vma, &address, ptep)) {
4191                         spin_unlock(ptl);
4192                         /*
4193                          * We just unmapped a page of PMDs by clearing a PUD.
4194                          * The caller's TLB flush range should cover this area.
4195                          */
4196                         continue;
4197                 }
4198
4199                 pte = huge_ptep_get(ptep);
4200                 if (huge_pte_none(pte)) {
4201                         spin_unlock(ptl);
4202                         continue;
4203                 }
4204
4205                 /*
4206          * A migrating or HWPoisoned hugepage has already been
4207          * unmapped and its refcount dropped, so just clear the pte here.
4208                  */
4209                 if (unlikely(!pte_present(pte))) {
4210                         huge_pte_clear(mm, address, ptep, sz);
4211                         spin_unlock(ptl);
4212                         continue;
4213                 }
4214
4215                 page = pte_page(pte);
4216                 /*
4217                  * If a reference page is supplied, it is because a specific
4218                  * page is being unmapped, not a range. Ensure the page we
4219                  * are about to unmap is the actual page of interest.
4220                  */
4221                 if (ref_page) {
4222                         if (page != ref_page) {
4223                                 spin_unlock(ptl);
4224                                 continue;
4225                         }
4226                         /*
4227                          * Mark the VMA as having unmapped its page so that
4228                          * future faults in this VMA will fail rather than
4229                          * looking like data was lost
4230                          */
4231                         set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
4232                 }
4233
4234                 pte = huge_ptep_get_and_clear(mm, address, ptep);
4235                 tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
4236                 if (huge_pte_dirty(pte))
4237                         set_page_dirty(page);
4238
4239                 hugetlb_count_sub(pages_per_huge_page(h), mm);
4240                 page_remove_rmap(page, true);
4241
4242                 spin_unlock(ptl);
4243                 tlb_remove_page_size(tlb, page, huge_page_size(h));
4244                 /*
4245                  * Bail out after unmapping reference page if supplied
4246                  */
4247                 if (ref_page)
4248                         break;
4249         }
4250         mmu_notifier_invalidate_range_end(&range);
4251         tlb_end_vma(tlb, vma);
4252 }
4253
4254 void __unmap_hugepage_range_final(struct mmu_gather *tlb,
4255                           struct vm_area_struct *vma, unsigned long start,
4256                           unsigned long end, struct page *ref_page)
4257 {
4258         __unmap_hugepage_range(tlb, vma, start, end, ref_page);
4259
4260         /*
4261          * Clear this flag so that x86's huge_pmd_share page_table_shareable
4262          * test will fail on a vma being torn down, and not grab a page table
4263          * on its way out.  We're lucky that the flag has such an appropriate
4264          * name, and can in fact be safely cleared here. We could clear it
4265          * before the __unmap_hugepage_range above, but all that's necessary
4266          * is to clear it before releasing the i_mmap_rwsem. This works
4267          * because in the context this is called, the VMA is about to be
4268          * destroyed and the i_mmap_rwsem is held.
4269          */
4270         vma->vm_flags &= ~VM_MAYSHARE;
4271 }
4272
4273 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
4274                           unsigned long end, struct page *ref_page)
4275 {
4276         struct mmu_gather tlb;
4277
4278         tlb_gather_mmu(&tlb, vma->vm_mm);
4279         __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
4280         tlb_finish_mmu(&tlb);
4281 }
4282
4283 /*
4284  * This is called when the original mapper is failing to COW a MAP_PRIVATE
4285  * mapping it owns the reserve page for. The intention is to unmap the page
4286  * from other VMAs and let the children be SIGKILLed if they are faulting the
4287  * same region.
4288  */
4289 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
4290                               struct page *page, unsigned long address)
4291 {
4292         struct hstate *h = hstate_vma(vma);
4293         struct vm_area_struct *iter_vma;
4294         struct address_space *mapping;
4295         pgoff_t pgoff;
4296
4297         /*
4298          * vm_pgoff is in PAGE_SIZE units, hence the different calculation
4299          * from page cache lookup which is in HPAGE_SIZE units.
4300          */
4301         address = address & huge_page_mask(h);
4302         pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
4303                         vma->vm_pgoff;
4304         mapping = vma->vm_file->f_mapping;
4305
4306         /*
4307          * Take the mapping lock for the duration of the table walk. As
4308          * this mapping is shared between all the VMAs,
4309          * unmap_hugepage_range() is called with the lock already held.
4310          */
4311         i_mmap_lock_write(mapping);
4312         vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
4313                 /* Do not unmap the current VMA */
4314                 if (iter_vma == vma)
4315                         continue;
4316
4317                 /*
4318                  * Shared VMAs have their own reserves and do not affect
4319                  * MAP_PRIVATE accounting but it is possible that a shared
4320                  * VMA is using the same page so check and skip such VMAs.
4321                  */
4322                 if (iter_vma->vm_flags & VM_MAYSHARE)
4323                         continue;
4324
4325                 /*
4326                  * Unmap the page from other VMAs without their own reserves.
4327                  * They get marked to be SIGKILLed if they fault in these
4328                  * areas. This is because a future no-page fault on this VMA
4329                  * could insert a zeroed page instead of the data existing
4330                  * from the time of fork. This would look like data corruption
4331                  */
4332                 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
4333                         unmap_hugepage_range(iter_vma, address,
4334                                              address + huge_page_size(h), page);
4335         }
4336         i_mmap_unlock_write(mapping);
4337 }
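/*
 * Worked example for the index math above (illustrative, assuming a 2MB
 * hstate): for a VMA with vm_pgoff 0 and a fault at vm_start + 4MB, the
 * computed pgoff is (4MB >> PAGE_SHIFT) + 0 = 1024 base-page units.  The
 * i_mmap interval tree is indexed in PAGE_SIZE units, so the point lookup
 * over [pgoff, pgoff] visits every VMA that maps that file offset.
 */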
4338
4339 /*
4340  * hugetlb_cow() should be called with the page lock of the original hugepage
4341  * held.  Called with the hugetlb fault mutex held and pte_page locked so we
4342  * cannot race with other handlers or page migration.
4343  * Keep the pte_same checks anyway to make the transition from the mutex easier.
4344  */
4345 static vm_fault_t hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
4346                        unsigned long address, pte_t *ptep,
4347                        struct page *pagecache_page, spinlock_t *ptl)
4348 {
4349         pte_t pte;
4350         struct hstate *h = hstate_vma(vma);
4351         struct page *old_page, *new_page;
4352         int outside_reserve = 0;
4353         vm_fault_t ret = 0;
4354         unsigned long haddr = address & huge_page_mask(h);
4355         struct mmu_notifier_range range;
4356
4357         pte = huge_ptep_get(ptep);
4358         old_page = pte_page(pte);
4359
4360 retry_avoidcopy:
4361         /* If no-one else is actually using this page, avoid the copy
4362          * and just make the page writable */
4363         if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
4364                 page_move_anon_rmap(old_page, vma);
4365                 set_huge_ptep_writable(vma, haddr, ptep);
4366                 return 0;
4367         }
4368
4369         /*
4370          * If the process that created a MAP_PRIVATE mapping is about to
4371          * perform a COW due to a shared page count, attempt to satisfy
4372          * the allocation without using the existing reserves. The pagecache
4373          * page is used to determine if the reserve at this address was
4374          * consumed or not. If reserves were used, a partially faulted mapping
4375          * at the time of fork() could consume its reserves on COW instead
4376          * of the full address range.
4377          */
4378         if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
4379                         old_page != pagecache_page)
4380                 outside_reserve = 1;
4381
4382         get_page(old_page);
4383
4384         /*
4385          * Drop page table lock as buddy allocator may be called. It will
4386          * be acquired again before returning to the caller, as expected.
4387          */
4388         spin_unlock(ptl);
4389         new_page = alloc_huge_page(vma, haddr, outside_reserve);
4390
4391         if (IS_ERR(new_page)) {
4392                 /*
4393                  * If a process owning a MAP_PRIVATE mapping fails to COW,
4394                  * it is due to references held by a child and an insufficient
4395                  * huge page pool. To guarantee the original mapper's
4396                  * reliability, unmap the page from child processes. The child
4397                  * may get SIGKILLed if it later faults.
4398                  */
4399                 if (outside_reserve) {
4400                         struct address_space *mapping = vma->vm_file->f_mapping;
4401                         pgoff_t idx;
4402                         u32 hash;
4403
4404                         put_page(old_page);
4405                         BUG_ON(huge_pte_none(pte));
4406                         /*
4407                          * Drop hugetlb_fault_mutex and i_mmap_rwsem before
4408                          * unmapping.  unmapping needs to hold i_mmap_rwsem
4409                          * in write mode.  Dropping i_mmap_rwsem in read mode
4410                          * here is OK as COW mappings do not interact with
4411                          * PMD sharing.
4412                          *
4413                          * Reacquire both after unmap operation.
4414                          */
4415                         idx = vma_hugecache_offset(h, vma, haddr);
4416                         hash = hugetlb_fault_mutex_hash(mapping, idx);
4417                         mutex_unlock(&hugetlb_fault_mutex_table[hash]);
4418                         i_mmap_unlock_read(mapping);
4419
4420                         unmap_ref_private(mm, vma, old_page, haddr);
4421
4422                         i_mmap_lock_read(mapping);
4423                         mutex_lock(&hugetlb_fault_mutex_table[hash]);
4424                         spin_lock(ptl);
4425                         ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
4426                         if (likely(ptep &&
4427                                    pte_same(huge_ptep_get(ptep), pte)))
4428                                 goto retry_avoidcopy;
4429                         /*
4430                          * A race occurred while re-acquiring the page
4431                          * table lock; someone else handled it, our job is done.
4432                          */
4433                         return 0;
4434                 }
4435
4436                 ret = vmf_error(PTR_ERR(new_page));
4437                 goto out_release_old;
4438         }
4439
4440         /*
4441          * When the original hugepage is a shared one, it does not have
4442          * an anon_vma prepared.
4443          */
4444         if (unlikely(anon_vma_prepare(vma))) {
4445                 ret = VM_FAULT_OOM;
4446                 goto out_release_all;
4447         }
4448
4449         copy_user_huge_page(new_page, old_page, address, vma,
4450                             pages_per_huge_page(h));
4451         __SetPageUptodate(new_page);
4452
4453         mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, haddr,
4454                                 haddr + huge_page_size(h));
4455         mmu_notifier_invalidate_range_start(&range);
4456
4457         /*
4458          * Retake the page table lock to check for racing updates
4459          * before the page tables are altered
4460          */
4461         spin_lock(ptl);
4462         ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
4463         if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
4464                 ClearHPageRestoreReserve(new_page);
4465
4466                 /* Break COW */
4467                 huge_ptep_clear_flush(vma, haddr, ptep);
4468                 mmu_notifier_invalidate_range(mm, range.start, range.end);
4469                 set_huge_pte_at(mm, haddr, ptep,
4470                                 make_huge_pte(vma, new_page, 1));
4471                 page_remove_rmap(old_page, true);
4472                 hugepage_add_new_anon_rmap(new_page, vma, haddr);
4473                 SetHPageMigratable(new_page);
4474                 /* Make the old page be freed below */
4475                 new_page = old_page;
4476         }
4477         spin_unlock(ptl);
4478         mmu_notifier_invalidate_range_end(&range);
4479 out_release_all:
4480         restore_reserve_on_error(h, vma, haddr, new_page);
4481         put_page(new_page);
4482 out_release_old:
4483         put_page(old_page);
4484
4485         spin_lock(ptl); /* Caller expects lock to be held */
4486         return ret;
4487 }
4488
4489 /* Return the pagecache page at a given address within a VMA */
4490 static struct page *hugetlbfs_pagecache_page(struct hstate *h,
4491                         struct vm_area_struct *vma, unsigned long address)
4492 {
4493         struct address_space *mapping;
4494         pgoff_t idx;
4495
4496         mapping = vma->vm_file->f_mapping;
4497         idx = vma_hugecache_offset(h, vma, address);
4498
4499         return find_lock_page(mapping, idx);
4500 }
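/*
 * Illustrative arithmetic for vma_hugecache_offset() (assuming a 2MB
 * hstate): the hugetlbfs page cache is indexed in huge-page units, so
 * with vm_pgoff 0 an address of vma->vm_start + 4MB maps to idx
 * 4MB >> 21 = 2, the third huge page of the file.
 */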
4501
4502 /*
4503  * Return whether there is a pagecache page to back the given address within the
4504  * VMA.  Caller follow_hugetlb_page() holds page_table_lock, so we cannot lock_page.
4505  */
4506 static bool hugetlbfs_pagecache_present(struct hstate *h,
4507                         struct vm_area_struct *vma, unsigned long address)
4508 {
4509         struct address_space *mapping;
4510         pgoff_t idx;
4511         struct page *page;
4512
4513         mapping = vma->vm_file->f_mapping;
4514         idx = vma_hugecache_offset(h, vma, address);
4515
4516         page = find_get_page(mapping, idx);
4517         if (page)
4518                 put_page(page);
4519         return page != NULL;
4520 }
4521
4522 int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
4523                            pgoff_t idx)
4524 {
4525         struct inode *inode = mapping->host;
4526         struct hstate *h = hstate_inode(inode);
4527         int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
4528
4529         if (err)
4530                 return err;
4531         ClearHPageRestoreReserve(page);
4532
4533         /*
4534          * set page dirty so that it will not be removed from cache/file
4535          * by non-hugetlbfs specific code paths.
4536          */
4537         set_page_dirty(page);
4538
4539         spin_lock(&inode->i_lock);
4540         inode->i_blocks += blocks_per_huge_page(h);
4541         spin_unlock(&inode->i_lock);
4542         return 0;
4543 }
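/*
 * A note on the accounting above: i_blocks counts 512-byte sectors, and
 * blocks_per_huge_page() is huge_page_size(h) / 512.  As a sketch of the
 * arithmetic, assuming a 2MB huge page, each page added to the cache
 * bumps i_blocks by 2MB / 512 = 4096.
 */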
4544
4545 static inline vm_fault_t hugetlb_handle_userfault(struct vm_area_struct *vma,
4546                                                   struct address_space *mapping,
4547                                                   pgoff_t idx,
4548                                                   unsigned int flags,
4549                                                   unsigned long haddr,
4550                                                   unsigned long reason)
4551 {
4552         vm_fault_t ret;
4553         u32 hash;
4554         struct vm_fault vmf = {
4555                 .vma = vma,
4556                 .address = haddr,
4557                 .flags = flags,
4558
4559                 /*
4560                  * Hard to debug if it ends up being
4561                  * used by a callee that assumes
4562                  * something about the other
4563                  * uninitialized fields... same as in
4564                  * memory.c
4565                  */
4566         };
4567
4568         /*
4569          * hugetlb_fault_mutex and i_mmap_rwsem must be
4570          * dropped before handling userfault.  Reacquire
4571          * after handling fault to make calling code simpler.
4572          */
4573         hash = hugetlb_fault_mutex_hash(mapping, idx);
4574         mutex_unlock(&hugetlb_fault_mutex_table[hash]);
4575         i_mmap_unlock_read(mapping);
4576         ret = handle_userfault(&vmf, reason);
4577         i_mmap_lock_read(mapping);
4578         mutex_lock(&hugetlb_fault_mutex_table[hash]);
4579
4580         return ret;
4581 }
4582
4583 static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
4584                         struct vm_area_struct *vma,
4585                         struct address_space *mapping, pgoff_t idx,
4586                         unsigned long address, pte_t *ptep, unsigned int flags)
4587 {
4588         struct hstate *h = hstate_vma(vma);
4589         vm_fault_t ret = VM_FAULT_SIGBUS;
4590         int anon_rmap = 0;
4591         unsigned long size;
4592         struct page *page;
4593         pte_t new_pte;
4594         spinlock_t *ptl;
4595         unsigned long haddr = address & huge_page_mask(h);
4596         bool new_page = false;
4597
4598         /*
4599          * Currently, we are forced to kill the process in the event the
4600          * original mapper has unmapped pages from the child due to a failed
4601          * COW. Warn that such a situation has occurred as it may not be obvious
4602          */
4603         if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
4604                 pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
4605                            current->pid);
4606                 return ret;
4607         }
4608
4609         /*
4610          * We cannot race with truncation due to holding i_mmap_rwsem.
4611          * i_size is modified when holding i_mmap_rwsem, so check here
4612          * once for faults beyond end of file.
4613          */
4614         size = i_size_read(mapping->host) >> huge_page_shift(h);
4615         if (idx >= size)
4616                 goto out;
4617
4618 retry:
4619         page = find_lock_page(mapping, idx);
4620         if (!page) {
4621                 /* Check for page in userfault range */
4622                 if (userfaultfd_missing(vma)) {
4623                         ret = hugetlb_handle_userfault(vma, mapping, idx,
4624                                                        flags, haddr,
4625                                                        VM_UFFD_MISSING);
4626                         goto out;
4627                 }
4628
4629                 page = alloc_huge_page(vma, haddr, 0);
4630                 if (IS_ERR(page)) {
4631                         /*
4632                          * Returning error will result in faulting task being
4633                          * sent SIGBUS.  The hugetlb fault mutex prevents two
4634                          * tasks from racing to fault in the same page which
4635                          * could result in false unable to allocate errors.
4636                  * could result in spurious "unable to allocate" errors.
4637                          * does a clear then write of pte's under page table
4638                          * lock.  Page fault code could race with migration,
4639                          * notice the clear pte and try to allocate a page
4640                          * here.  Before returning error, get ptl and make
4641                          * sure there really is no pte entry.
4642                          */
4643                         ptl = huge_pte_lock(h, mm, ptep);
4644                         ret = 0;
4645                         if (huge_pte_none(huge_ptep_get(ptep)))
4646                                 ret = vmf_error(PTR_ERR(page));
4647                         spin_unlock(ptl);
4648                         goto out;
4649                 }
4650                 clear_huge_page(page, address, pages_per_huge_page(h));
4651                 __SetPageUptodate(page);
4652                 new_page = true;
4653
4654                 if (vma->vm_flags & VM_MAYSHARE) {
4655                         int err = huge_add_to_page_cache(page, mapping, idx);
4656                         if (err) {
4657                                 put_page(page);
4658                                 if (err == -EEXIST)
4659                                         goto retry;
4660                                 goto out;
4661                         }
4662                 } else {
4663                         lock_page(page);
4664                         if (unlikely(anon_vma_prepare(vma))) {
4665                                 ret = VM_FAULT_OOM;
4666                                 goto backout_unlocked;
4667                         }
4668                         anon_rmap = 1;
4669                 }
4670         } else {
4671                 /*
4672                  * If a memory error occurs between mmap() and fault, some processes
4673                  * won't have a hwpoisoned swap entry for the errored virtual address.
4674                  * So we need to block the hugepage fault with a PG_hwpoison bit check.
4675                  */
4676                 if (unlikely(PageHWPoison(page))) {
4677                         ret = VM_FAULT_HWPOISON_LARGE |
4678                                 VM_FAULT_SET_HINDEX(hstate_index(h));
4679                         goto backout_unlocked;
4680                 }
4681
4682                 /* Check for page in userfault range. */
4683                 if (userfaultfd_minor(vma)) {
4684                         unlock_page(page);
4685                         put_page(page);
4686                         ret = hugetlb_handle_userfault(vma, mapping, idx,
4687                                                        flags, haddr,
4688                                                        VM_UFFD_MINOR);
4689                         goto out;
4690                 }
4691         }
4692
4693         /*
4694          * If we are going to COW a private mapping later, we examine the
4695          * pending reservations for this page now. This will ensure that
4696          * any allocations necessary to record that reservation occur outside
4697          * the spinlock.
4698          */
4699         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
4700                 if (vma_needs_reservation(h, vma, haddr) < 0) {
4701                         ret = VM_FAULT_OOM;
4702                         goto backout_unlocked;
4703                 }
4704                 /* Just decrements count, does not deallocate */
4705                 vma_end_reservation(h, vma, haddr);
4706         }
4707
4708         ptl = huge_pte_lock(h, mm, ptep);
4709         ret = 0;
4710         if (!huge_pte_none(huge_ptep_get(ptep)))
4711                 goto backout;
4712
4713         if (anon_rmap) {
4714                 ClearHPageRestoreReserve(page);
4715                 hugepage_add_new_anon_rmap(page, vma, haddr);
4716         } else
4717                 page_dup_rmap(page, true);
4718         new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
4719                                 && (vma->vm_flags & VM_SHARED)));
4720         set_huge_pte_at(mm, haddr, ptep, new_pte);
4721
4722         hugetlb_count_add(pages_per_huge_page(h), mm);
4723         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
4724                 /* Optimization, do the COW without a second fault */
4725                 ret = hugetlb_cow(mm, vma, address, ptep, page, ptl);
4726         }
4727
4728         spin_unlock(ptl);
4729
4730         /*
4731          * Only set HPageMigratable in newly allocated pages.  Existing pages
4732          * found in the pagecache may not have HPageMigratable set if they have
4733          * been isolated for migration.
4734          */
4735         if (new_page)
4736                 SetHPageMigratable(page);
4737
4738         unlock_page(page);
4739 out:
4740         return ret;
4741
4742 backout:
4743         spin_unlock(ptl);
4744 backout_unlocked:
4745         unlock_page(page);
4746         restore_reserve_on_error(h, vma, haddr, page);
4747         put_page(page);
4748         goto out;
4749 }
4750
4751 #ifdef CONFIG_SMP
4752 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
4753 {
4754         unsigned long key[2];
4755         u32 hash;
4756
4757         key[0] = (unsigned long) mapping;
4758         key[1] = idx;
4759
4760         hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0);
4761
4762         return hash & (num_fault_mutexes - 1);
4763 }
4764 #else
4765 /*
4766  * For uniprocessor systems we always use a single mutex, so just
4767  * return 0 and avoid the hashing overhead.
4768  */
4769 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
4770 {
4771         return 0;
4772 }
4773 #endif
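/*
 * A minimal sketch of the caller pattern for the hash above (cf.
 * hugetlb_cow() earlier and hugetlb_fault() below):
 *
 *	idx = vma_hugecache_offset(h, vma, haddr);
 *	hash = hugetlb_fault_mutex_hash(mapping, idx);
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	...instantiate or fault in the page...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 *
 * Faults on the same (mapping, index) pair always hash to the same mutex
 * and are serialized; num_fault_mutexes is sized to a power of two, so
 * the "& (num_fault_mutexes - 1)" reduction yields a valid table index.
 */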
4774
4775 vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
4776                         unsigned long address, unsigned int flags)
4777 {
4778         pte_t *ptep, entry;
4779         spinlock_t *ptl;
4780         vm_fault_t ret;
4781         u32 hash;
4782         pgoff_t idx;
4783         struct page *page = NULL;
4784         struct page *pagecache_page = NULL;
4785         struct hstate *h = hstate_vma(vma);
4786         struct address_space *mapping;
4787         int need_wait_lock = 0;
4788         unsigned long haddr = address & huge_page_mask(h);
4789
4790         ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
4791         if (ptep) {
4792                 /*
4793                  * Since we hold no locks, ptep could be stale.  That is
4794                  * OK as we are only making decisions based on content and
4795                  * not actually modifying content here.
4796                  */
4797                 entry = huge_ptep_get(ptep);
4798                 if (unlikely(is_hugetlb_entry_migration(entry))) {
4799                         migration_entry_wait_huge(vma, mm, ptep);
4800                         return 0;
4801                 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
4802                         return VM_FAULT_HWPOISON_LARGE |
4803                                 VM_FAULT_SET_HINDEX(hstate_index(h));
4804         }
4805
4806         /*
4807          * Acquire i_mmap_rwsem before calling huge_pte_alloc and hold
4808          * until finished with ptep.  This serves two purposes:
4809          * 1) It prevents huge_pmd_unshare from being called elsewhere
4810          *    and making the ptep no longer valid.
4811          * 2) It synchronizes us with i_size modifications during truncation.
4812          *
4813          * ptep could have already been assigned via huge_pte_offset.  That
4814          * is OK, as huge_pte_alloc will return the same value unless
4815          * something has changed.
4816          */
4817         mapping = vma->vm_file->f_mapping;
4818         i_mmap_lock_read(mapping);
4819         ptep = huge_pte_alloc(mm, vma, haddr, huge_page_size(h));
4820         if (!ptep) {
4821                 i_mmap_unlock_read(mapping);
4822                 return VM_FAULT_OOM;
4823         }
4824
4825         /*
4826          * Serialize hugepage allocation and instantiation, so that we don't
4827          * get spurious allocation failures if two CPUs race to instantiate
4828          * the same page in the page cache.
4829          */
4830         idx = vma_hugecache_offset(h, vma, haddr);
4831         hash = hugetlb_fault_mutex_hash(mapping, idx);
4832         mutex_lock(&hugetlb_fault_mutex_table[hash]);
4833
4834         entry = huge_ptep_get(ptep);
4835         if (huge_pte_none(entry)) {
4836                 ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
4837                 goto out_mutex;
4838         }
4839
4840         ret = 0;
4841
4842         /*
4843          * entry could be a migration/hwpoison entry at this point, so this
4844          * check prevents the kernel from going below, assuming that we have
4845          * an active hugepage in the pagecache. This goto expects a second page
4846          * fault, whose is_hugetlb_entry_(migration|hwpoisoned) check will
4847          * handle it properly.
4848          */
4849         if (!pte_present(entry))
4850                 goto out_mutex;
4851
4852         /*
4853          * If we are going to COW the mapping later, we examine the pending
4854          * reservations for this page now. This will ensure that any
4855          * allocations necessary to record that reservation occur outside the
4856          * spinlock. For private mappings, we also lookup the pagecache
4857          * page now as it is used to determine if a reservation has been
4858          * consumed.
4859          */
4860         if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
4861                 if (vma_needs_reservation(h, vma, haddr) < 0) {
4862                         ret = VM_FAULT_OOM;
4863                         goto out_mutex;
4864                 }
4865                 /* Just decrements count, does not deallocate */
4866                 vma_end_reservation(h, vma, haddr);
4867
4868                 if (!(vma->vm_flags & VM_MAYSHARE))
4869                         pagecache_page = hugetlbfs_pagecache_page(h,
4870                                                                 vma, haddr);
4871         }
4872
4873         ptl = huge_pte_lock(h, mm, ptep);
4874
4875         /* Check for a racing update before calling hugetlb_cow */
4876         if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
4877                 goto out_ptl;
4878
4879         /*
4880          * hugetlb_cow() requires page locks of pte_page(entry) and
4881          * pagecache_page, so here we need to take the former one
4882          * when page != pagecache_page or !pagecache_page.
4883          */
4884         page = pte_page(entry);
4885         if (page != pagecache_page)
4886                 if (!trylock_page(page)) {
4887                         need_wait_lock = 1;
4888                         goto out_ptl;
4889                 }
4890
4891         get_page(page);
4892
4893         if (flags & FAULT_FLAG_WRITE) {
4894                 if (!huge_pte_write(entry)) {
4895                         ret = hugetlb_cow(mm, vma, address, ptep,
4896                                           pagecache_page, ptl);
4897                         goto out_put_page;
4898                 }
4899                 entry = huge_pte_mkdirty(entry);
4900         }
4901         entry = pte_mkyoung(entry);
4902         if (huge_ptep_set_access_flags(vma, haddr, ptep, entry,
4903                                                 flags & FAULT_FLAG_WRITE))
4904                 update_mmu_cache(vma, haddr, ptep);
4905 out_put_page:
4906         if (page != pagecache_page)
4907                 unlock_page(page);
4908         put_page(page);
4909 out_ptl:
4910         spin_unlock(ptl);
4911
4912         if (pagecache_page) {
4913                 unlock_page(pagecache_page);
4914                 put_page(pagecache_page);
4915         }
4916 out_mutex:
4917         mutex_unlock(&hugetlb_fault_mutex_table[hash]);
4918         i_mmap_unlock_read(mapping);
4919         /*
4920          * Generally it's safe to hold a refcount while waiting on a page lock. But
4921          * here we just wait to defer the next page fault and avoid a busy loop;
4922          * the page is not used after being unlocked before returning from the current
4923          * page fault. So we are safe from accessing a freed page, even though we wait
4924          * here without taking a refcount.
4925          */
4926         if (need_wait_lock)
4927                 wait_on_page_locked(page);
4928         return ret;
4929 }
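/*
 * A sketch of the lock ordering hugetlb_fault() establishes above:
 *
 *	i_mmap_rwsem (read)
 *	  hugetlb_fault_mutex_table[hash]
 *	    page lock (trylock; on failure, wait after everything is dropped)
 *	      page table lock (ptl)
 *
 * hugetlb_handle_userfault() and the unmap_ref_private() path in
 * hugetlb_cow() temporarily drop the outer two in this same order when
 * they must sleep or unmap.
 */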
4930
4931 #ifdef CONFIG_USERFAULTFD
4932 /*
4933  * Used by userfaultfd UFFDIO_COPY.  Based on mcopy_atomic_pte with
4934  * modifications for huge pages.
4935  */
4936 int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
4937                             pte_t *dst_pte,
4938                             struct vm_area_struct *dst_vma,
4939                             unsigned long dst_addr,
4940                             unsigned long src_addr,
4941                             enum mcopy_atomic_mode mode,
4942                             struct page **pagep)
4943 {
4944         bool is_continue = (mode == MCOPY_ATOMIC_CONTINUE);
4945         struct address_space *mapping;
4946         pgoff_t idx;
4947         unsigned long size;
4948         int vm_shared = dst_vma->vm_flags & VM_SHARED;
4949         struct hstate *h = hstate_vma(dst_vma);
4950         pte_t _dst_pte;
4951         spinlock_t *ptl;
4952         int ret;
4953         struct page *page;
4954         int writable;
4955
4956         mapping = dst_vma->vm_file->f_mapping;
4957         idx = vma_hugecache_offset(h, dst_vma, dst_addr);
4958
4959         if (is_continue) {
4960                 ret = -EFAULT;
4961                 page = find_lock_page(mapping, idx);
4962                 if (!page)
4963                         goto out;
4964         } else if (!*pagep) {
4965                 /* If a page already exists, then it's UFFDIO_COPY for
4966                  * a non-missing case. Return -EEXIST.
4967                  */
4968                 if (vm_shared &&
4969                     hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
4970                         ret = -EEXIST;
4971                         goto out;
4972                 }
4973
4974                 page = alloc_huge_page(dst_vma, dst_addr, 0);
4975                 if (IS_ERR(page)) {
4976                         ret = -ENOMEM;
4977                         goto out;
4978                 }
4979
4980                 ret = copy_huge_page_from_user(page,
4981                                                 (const void __user *) src_addr,
4982                                                 pages_per_huge_page(h), false);
4983
4984                 /* fallback to copy_from_user outside mmap_lock */
4985                 if (unlikely(ret)) {
4986                         ret = -ENOENT;
4987                         *pagep = page;
4988                         /* don't free the page */
4989                         goto out;
4990                 }
4991         } else {
4992                 page = *pagep;
4993                 *pagep = NULL;
4994         }
4995
4996         /*
4997          * The memory barrier inside __SetPageUptodate makes sure that
4998          * preceding stores to the page contents become visible before
4999          * the set_pte_at() write.
5000          */
5001         __SetPageUptodate(page);
5002
5003         /* Add shared, newly allocated pages to the page cache. */
5004         if (vm_shared && !is_continue) {
5005                 size = i_size_read(mapping->host) >> huge_page_shift(h);
5006                 ret = -EFAULT;
5007                 if (idx >= size)
5008                         goto out_release_nounlock;
5009
5010                 /*
5011                  * Serialization between remove_inode_hugepages() and
5012                  * huge_add_to_page_cache() below happens through the
5013                  * hugetlb_fault_mutex_table, which must be held here by
5014                  * the caller.
5015                  */
5016                 ret = huge_add_to_page_cache(page, mapping, idx);
5017                 if (ret)
5018                         goto out_release_nounlock;
5019         }
5020
5021         ptl = huge_pte_lockptr(h, dst_mm, dst_pte);
5022         spin_lock(ptl);
5023
5024         /*
5025          * Recheck the i_size after holding PT lock to make sure not
5026          * to leave any page mapped (as page_mapped()) beyond the end
5027          * of the i_size (remove_inode_hugepages() is strict about
5028          * enforcing that). If we bail out here, we'll also leave a
5029          * page in the radix tree in the vm_shared case beyond the end
5030          * of the i_size, but remove_inode_hugepages() will take care
5031          * of it as soon as we drop the hugetlb_fault_mutex_table.
5032          */
5033         size = i_size_read(mapping->host) >> huge_page_shift(h);
5034         ret = -EFAULT;
5035         if (idx >= size)
5036                 goto out_release_unlock;
5037
5038         ret = -EEXIST;
5039         if (!huge_pte_none(huge_ptep_get(dst_pte)))
5040                 goto out_release_unlock;
5041
5042         if (vm_shared) {
5043                 page_dup_rmap(page, true);
5044         } else {
5045                 ClearHPageRestoreReserve(page);
5046                 hugepage_add_new_anon_rmap(page, dst_vma, dst_addr);
5047         }
5048
5049         /* For CONTINUE on a non-shared VMA, don't set VM_WRITE for CoW. */
5050         if (is_continue && !vm_shared)
5051                 writable = 0;
5052         else
5053                 writable = dst_vma->vm_flags & VM_WRITE;
5054
5055         _dst_pte = make_huge_pte(dst_vma, page, writable);
5056         if (writable)
5057                 _dst_pte = huge_pte_mkdirty(_dst_pte);
5058         _dst_pte = pte_mkyoung(_dst_pte);
5059
5060         set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
5061
5062         (void)huge_ptep_set_access_flags(dst_vma, dst_addr, dst_pte, _dst_pte,
5063                                         dst_vma->vm_flags & VM_WRITE);
5064         hugetlb_count_add(pages_per_huge_page(h), dst_mm);
5065
5066         /* No need to invalidate - it was non-present before */
5067         update_mmu_cache(dst_vma, dst_addr, dst_pte);
5068
5069         spin_unlock(ptl);
5070         if (!is_continue)
5071                 SetHPageMigratable(page);
5072         if (vm_shared || is_continue)
5073                 unlock_page(page);
5074         ret = 0;
5075 out:
5076         return ret;
5077 out_release_unlock:
5078         spin_unlock(ptl);
5079         if (vm_shared || is_continue)
5080                 unlock_page(page);
5081 out_release_nounlock:
5082         restore_reserve_on_error(h, dst_vma, dst_addr, page);
5083         put_page(page);
5084         goto out;
5085 }
5086 #endif /* CONFIG_USERFAULTFD */
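/*
 * A sketch of the -ENOENT contract above: if copy_huge_page_from_user()
 * faults while mmap_lock is held, the allocated page is stashed in *pagep
 * and -ENOENT is returned.  The caller is expected to retry the copy
 * outside mmap_lock (the "fallback to copy_from_user" noted above) and
 * call back in with *pagep still set, at which point the "page = *pagep"
 * branch installs the already-copied page.
 */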
5087
5088 static void record_subpages_vmas(struct page *page, struct vm_area_struct *vma,
5089                                  int refs, struct page **pages,
5090                                  struct vm_area_struct **vmas)
5091 {
5092         int nr;
5093
5094         for (nr = 0; nr < refs; nr++) {
5095                 if (likely(pages))
5096                         pages[nr] = mem_map_offset(page, nr);
5097                 if (vmas)
5098                         vmas[nr] = vma;
5099         }
5100 }
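/*
 * Illustrative example: with refs == 3 and "page" already advanced to the
 * wanted pfn_offset, pages[0..2] receive three consecutive subpages and
 * vmas[0..2] all point at the same VMA.  mem_map_offset() is used rather
 * than plain "page + nr" so that stepping across a MAX_ORDER boundary
 * still works when mem_map is not virtually contiguous.
 */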
5101
5102 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
5103                          struct page **pages, struct vm_area_struct **vmas,
5104                          unsigned long *position, unsigned long *nr_pages,
5105                          long i, unsigned int flags, int *locked)
5106 {
5107         unsigned long pfn_offset;
5108         unsigned long vaddr = *position;
5109         unsigned long remainder = *nr_pages;
5110         struct hstate *h = hstate_vma(vma);
5111         int err = -EFAULT, refs;
5112
5113         while (vaddr < vma->vm_end && remainder) {
5114                 pte_t *pte;
5115                 spinlock_t *ptl = NULL;
5116                 int absent;
5117                 struct page *page;
5118
5119                 /*
5120                  * If we have a pending SIGKILL, don't keep faulting pages and
5121                  * potentially allocating memory.
5122                  */
5123                 if (fatal_signal_pending(current)) {
5124                         remainder = 0;
5125                         break;
5126                 }
5127
5128                 /*
5129                  * Some archs (sparc64, sh*) have multiple pte_t entries
5130                  * for each hugepage.  We have to make sure we get the
5131                  * first, for the page indexing below to work.
5132                  *
5133                  * Note that page table lock is not held when pte is null.
5134                  */
5135                 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h),
5136                                       huge_page_size(h));
5137                 if (pte)
5138                         ptl = huge_pte_lock(h, mm, pte);
5139                 absent = !pte || huge_pte_none(huge_ptep_get(pte));
5140
5141                 /*
5142                  * When coredumping, it suits get_dump_page if we just return
5143                  * an error where there's an empty slot with no huge pagecache
5144                  * to back it.  This way, we avoid allocating a hugepage, and
5145                  * the sparse dumpfile avoids allocating disk blocks, but its
5146                  * huge holes still show up with zeroes where they need to be.
5147                  */
5148                 if (absent && (flags & FOLL_DUMP) &&
5149                     !hugetlbfs_pagecache_present(h, vma, vaddr)) {
5150                         if (pte)
5151                                 spin_unlock(ptl);
5152                         remainder = 0;
5153                         break;
5154                 }
5155
5156                 /*
5157                  * We need to call hugetlb_fault for both hugepages under migration
5158                  * (in which case hugetlb_fault waits for the migration) and
5159                  * hwpoisoned hugepages (in which case we need to prevent the
5160                  * caller from accessing them). In order to do this, we use
5161                  * is_swap_pte here instead of is_hugetlb_entry_migration and
5162                  * is_hugetlb_entry_hwpoisoned. This is because it simply covers
5163                  * both cases, and because we can't follow correct pages
5164                  * directly from any kind of swap entry.
5165                  */
5166                 if (absent || is_swap_pte(huge_ptep_get(pte)) ||
5167                     ((flags & FOLL_WRITE) &&
5168                       !huge_pte_write(huge_ptep_get(pte)))) {
5169                         vm_fault_t ret;
5170                         unsigned int fault_flags = 0;
5171
5172                         if (pte)
5173                                 spin_unlock(ptl);
5174                         if (flags & FOLL_WRITE)
5175                                 fault_flags |= FAULT_FLAG_WRITE;
5176                         if (locked)
5177                                 fault_flags |= FAULT_FLAG_ALLOW_RETRY |
5178                                         FAULT_FLAG_KILLABLE;
5179                         if (flags & FOLL_NOWAIT)
5180                                 fault_flags |= FAULT_FLAG_ALLOW_RETRY |
5181                                         FAULT_FLAG_RETRY_NOWAIT;
5182                         if (flags & FOLL_TRIED) {
5183                                 /*
5184                                  * Note: FAULT_FLAG_ALLOW_RETRY and
5185                                  * FAULT_FLAG_TRIED can co-exist
5186                                  */
5187                                 fault_flags |= FAULT_FLAG_TRIED;
5188                         }
5189                         ret = hugetlb_fault(mm, vma, vaddr, fault_flags);
5190                         if (ret & VM_FAULT_ERROR) {
5191                                 err = vm_fault_to_errno(ret, flags);
5192                                 remainder = 0;
5193                                 break;
5194                         }
5195                         if (ret & VM_FAULT_RETRY) {
5196                                 if (locked &&
5197                                     !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
5198                                         *locked = 0;
5199                                 *nr_pages = 0;
5200                                 /*
5201                                  * VM_FAULT_RETRY must not return an
5202                                  * error, it will return zero
5203                                  * instead.
5204                                  *
5205                                  * No need to update "position" as the
5206                                  * caller will not check it after
5207                                  * *nr_pages is set to 0.
5208                                  */
5209                                 return i;
5210                         }
5211                         continue;
5212                 }
5213
5214                 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
5215                 page = pte_page(huge_ptep_get(pte));
5216
5217                 /*
5218                  * If subpage information is not requested, update counters
5219                  * and skip the per-subpage recording below.
5220                  */
5221                 if (!pages && !vmas && !pfn_offset &&
5222                     (vaddr + huge_page_size(h) < vma->vm_end) &&
5223                     (remainder >= pages_per_huge_page(h))) {
5224                         vaddr += huge_page_size(h);
5225                         remainder -= pages_per_huge_page(h);
5226                         i += pages_per_huge_page(h);
5227                         spin_unlock(ptl);
5228                         continue;
5229                 }
5230
5231                 refs = min3(pages_per_huge_page(h) - pfn_offset,
5232                             (vma->vm_end - vaddr) >> PAGE_SHIFT, remainder);
5233
5234                 if (pages || vmas)
5235                         record_subpages_vmas(mem_map_offset(page, pfn_offset),
5236                                              vma, refs,
5237                                              likely(pages) ? pages + i : NULL,
5238                                              vmas ? vmas + i : NULL);
5239
5240                 if (pages) {
5241                         /*
5242                          * try_grab_compound_head() should always succeed here,
5243                          * because: a) we hold the ptl lock, and b) we've just
5244                          * checked that the huge page is present in the page
5245                          * tables. If the huge page is present, then the tail
5246                          * pages must also be present. The ptl prevents the
5247                          * head page and tail pages from being rearranged in
5248                          * any way. So this page must be available at this
5249                          * point, unless the page refcount overflowed:
5250                          */
5251                         if (WARN_ON_ONCE(!try_grab_compound_head(pages[i],
5252                                                                  refs,
5253                                                                  flags))) {
5254                                 spin_unlock(ptl);
5255                                 remainder = 0;
5256                                 err = -ENOMEM;
5257                                 break;
5258                         }
5259                 }
5260
5261                 vaddr += (refs << PAGE_SHIFT);
5262                 remainder -= refs;
5263                 i += refs;
5264
5265                 spin_unlock(ptl);
5266         }
5267         *nr_pages = remainder;
5268         /*
5269          * setting position is actually required only if remainder is
5270          * not zero, but it's faster not to add an "if (remainder)"
5271          * branch.
5272          */
5273         *position = vaddr;
5274
5275         return i ? i : err;
5276 }
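/*
 * Worked example of the refs batching above (assuming 4KB base pages and
 * a 2MB hstate, i.e. pages_per_huge_page() == 512): a request for 1024
 * pages starting at pfn_offset 256 computes refs = min3(512 - 256, ...,
 * 1024) = 256 on the first iteration, pins all 256 subpages with one
 * try_grab_compound_head() call, then advances vaddr by 256 << PAGE_SHIFT
 * = 1MB and continues with the next huge page.
 */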
5277
5278 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
5279                 unsigned long address, unsigned long end, pgprot_t newprot)
5280 {
5281         struct mm_struct *mm = vma->vm_mm;
5282         unsigned long start = address;
5283         pte_t *ptep;
5284         pte_t pte;
5285         struct hstate *h = hstate_vma(vma);
5286         unsigned long pages = 0;
5287         bool shared_pmd = false;
5288         struct mmu_notifier_range range;
5289
5290         /*
5291          * In the case of shared PMDs, the area to flush could be beyond
5292          * start/end.  Set range.start/range.end to cover the maximum possible
5293          * range if PMD sharing is possible.
5294          */
5295         mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA,
5296                                 0, vma, mm, start, end);
5297         adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
5298
5299         BUG_ON(address >= end);
5300         flush_cache_range(vma, range.start, range.end);
5301
5302         mmu_notifier_invalidate_range_start(&range);
5303         i_mmap_lock_write(vma->vm_file->f_mapping);
5304         for (; address < end; address += huge_page_size(h)) {
5305                 spinlock_t *ptl;
5306                 ptep = huge_pte_offset(mm, address, huge_page_size(h));
5307                 if (!ptep)
5308                         continue;
5309                 ptl = huge_pte_lock(h, mm, ptep);
5310                 if (huge_pmd_unshare(mm, vma, &address, ptep)) {
5311                         pages++;
5312                         spin_unlock(ptl);
5313                         shared_pmd = true;
5314                         continue;
5315                 }
5316                 pte = huge_ptep_get(ptep);
5317                 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
5318                         spin_unlock(ptl);
5319                         continue;
5320                 }
5321                 if (unlikely(is_hugetlb_entry_migration(pte))) {
5322                         swp_entry_t entry = pte_to_swp_entry(pte);
5323
5324                         if (is_write_migration_entry(entry)) {
5325                                 pte_t newpte;
5326
5327                                 make_migration_entry_read(&entry);
5328                                 newpte = swp_entry_to_pte(entry);
5329                                 set_huge_swap_pte_at(mm, address, ptep,
5330                                                      newpte, huge_page_size(h));
5331                                 pages++;
5332                         }
5333                         spin_unlock(ptl);
5334                         continue;
5335                 }
5336                 if (!huge_pte_none(pte)) {
5337                         pte_t old_pte;
5338
5339                         old_pte = huge_ptep_modify_prot_start(vma, address, ptep);
5340                         pte = pte_mkhuge(huge_pte_modify(old_pte, newprot));
5341                         pte = arch_make_huge_pte(pte, vma, NULL, 0);
5342                         huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte);
5343                         pages++;
5344                 }
5345                 spin_unlock(ptl);
5346         }
5347         /*
5348          * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
5349          * may have cleared our pud entry and done put_page on the page table:
5350          * once we release i_mmap_rwsem, another task can do the final put_page
5351          * and that page table be reused and filled with junk.  If we actually
5352          * did unshare a page of pmds, flush the range corresponding to the pud.
5353          */
5354         if (shared_pmd)
5355                 flush_hugetlb_tlb_range(vma, range.start, range.end);
5356         else
5357                 flush_hugetlb_tlb_range(vma, start, end);
5358         /*
5359          * No need to call mmu_notifier_invalidate_range(): we are downgrading
5360          * page table protection, not changing it to point to a new page.
5361          *
5362          * See Documentation/vm/mmu_notifier.rst
5363          */
5364         i_mmap_unlock_write(vma->vm_file->f_mapping);
5365         mmu_notifier_invalidate_range_end(&range);
5366
5367         return pages << h->order;
5368 }
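/*
 * The return value above is in base pages: "pages << h->order".  As a
 * sketch of the arithmetic (assuming 2MB huge pages, h->order == 9),
 * changing the protection of 3 huge pages reports 3 << 9 = 1536 base
 * pages, consistent with what change_protection() reports for ordinary
 * mappings.
 */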
5369
5370 /* Return true if reservation was successful, false otherwise.  */
5371 bool hugetlb_reserve_pages(struct inode *inode,
5372                                         long from, long to,
5373                                         struct vm_area_struct *vma,
5374                                         vm_flags_t vm_flags)
5375 {
5376         long chg, add = -1;
5377         struct hstate *h = hstate_inode(inode);
5378         struct hugepage_subpool *spool = subpool_inode(inode);
5379         struct resv_map *resv_map;
5380         struct hugetlb_cgroup *h_cg = NULL;
5381         long gbl_reserve, regions_needed = 0;
5382
5383         /* This should never happen */
5384         if (from > to) {
5385                 VM_WARN(1, "%s called with a negative range\n", __func__);
5386                 return false;
5387         }
5388
5389         /*
5390          * Only apply hugepage reservation if asked. At fault time, an
5391          * attempt will be made for VM_NORESERVE to allocate a page
5392          * without using reserves
5393          */
5394         if (vm_flags & VM_NORESERVE)
5395                 return true;
5396
5397         /*
5398          * Shared mappings base their reservation on the number of pages that
5399          * are already allocated on behalf of the file. Private mappings need
5400          * to reserve the full area even if read-only as mprotect() may be
5401          * called to make the mapping read-write. Assume !vma is a shm mapping
5402          */
5403         if (!vma || vma->vm_flags & VM_MAYSHARE) {
5404                 /*
5405                  * resv_map cannot be NULL as hugetlb_reserve_pages is only
5406                  * called for inodes for which resv_maps were created (see
5407                  * hugetlbfs_get_inode).
5408                  */
5409                 resv_map = inode_resv_map(inode);
5410
5411                 chg = region_chg(resv_map, from, to, &regions_needed);
5412
5413         } else {
5414                 /* Private mapping. */
5415                 resv_map = resv_map_alloc();
5416                 if (!resv_map)
5417                         return false;
5418
5419                 chg = to - from;
5420
5421                 set_vma_resv_map(vma, resv_map);
5422                 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
5423         }
5424
5425         if (chg < 0)
5426                 goto out_err;
5427
5428         if (hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h),
5429                                 chg * pages_per_huge_page(h), &h_cg) < 0)
5430                 goto out_err;
5431
5432         if (vma && !(vma->vm_flags & VM_MAYSHARE) && h_cg) {
5433                 /* For private mappings, the hugetlb_cgroup uncharge info hangs
5434                  * off the resv_map.
5435                  */
5436                 resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, h_cg, h);
5437         }
5438
5439         /*
5440          * There must be enough pages in the subpool for the mapping. If
5441          * the subpool has a minimum size, there may be some global
5442          * reservations already in place (gbl_reserve).
5443          */
5444         gbl_reserve = hugepage_subpool_get_pages(spool, chg);
5445         if (gbl_reserve < 0)
5446                 goto out_uncharge_cgroup;
5447
5448         /*
5449          * Check that enough hugepages are available for the reservation.
5450          * Hand the pages back to the subpool if there are not
5451          */
5452         if (hugetlb_acct_memory(h, gbl_reserve) < 0)
5453                 goto out_put_pages;
5454
5455         /*
5456          * Account for the reservations made. Shared mappings record regions
5457          * that have reservations as they are shared by multiple VMAs.
5458          * When the last VMA disappears, the region map says how much
5459          * the reservation was and the page cache tells how much of
5460          * the reservation was consumed. Private mappings are per-VMA and
5461          * only the consumed reservations are tracked. When the VMA
5462          * disappears, the original reservation is the VMA size and the
5463          * consumed reservations are stored in the map. Hence, nothing
5464          * else has to be done for private mappings here
5465          */
5466         if (!vma || vma->vm_flags & VM_MAYSHARE) {
5467                 add = region_add(resv_map, from, to, regions_needed, h, h_cg);
5468
5469                 if (unlikely(add < 0)) {
5470                         hugetlb_acct_memory(h, -gbl_reserve);
5471                         goto out_put_pages;
5472                 } else if (unlikely(chg > add)) {
5473                         /*
5474                          * pages in this range were added to the reserve
5475                          * map between region_chg and region_add.  This
5476                          * indicates a race with alloc_huge_page.  Adjust
5477                          * the subpool and reserve counts modified above
5478                          * based on the difference.
5479                          */
5480                         long rsv_adjust;
5481
5482                         /*
5483                          * hugetlb_cgroup_uncharge_cgroup_rsvd() will put the
5484                          * reference to h_cg->css. See comment below for detail.
5485                          */
5486                         hugetlb_cgroup_uncharge_cgroup_rsvd(
5487                                 hstate_index(h),
5488                                 (chg - add) * pages_per_huge_page(h), h_cg);
5489
5490                         rsv_adjust = hugepage_subpool_put_pages(spool,
5491                                                                 chg - add);
5492                         hugetlb_acct_memory(h, -rsv_adjust);
5493                 } else if (h_cg) {
5494                         /*
5495                          * The file_regions will hold their own reference to
5496                          * h_cg->css. So we should release the reference held
5497                          * via hugetlb_cgroup_charge_cgroup_rsvd() when we are
5498                          * done.
5499                          */
5500                         hugetlb_cgroup_put_rsvd_cgroup(h_cg);
5501                 }
5502         }
5503         return true;
5504
5505 out_put_pages:
5506         /* put back original number of pages, chg */
5507         (void)hugepage_subpool_put_pages(spool, chg);
5508 out_uncharge_cgroup:
5509         hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h),
5510                                             chg * pages_per_huge_page(h), h_cg);
5511 out_err:
5512         if (!vma || vma->vm_flags & VM_MAYSHARE)
5513                 /* Only call region_abort if the region_chg succeeded but the
5514                  * region_add failed or didn't run.
5515                  */
5516                 if (chg >= 0 && add < 0)
5517                         region_abort(resv_map, from, to, regions_needed);
5518         if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
5519                 kref_put(&resv_map->refs, resv_map_release);
5520         return false;
5521 }
5522
5523 long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
5524                                                                 long freed)
5525 {
5526         struct hstate *h = hstate_inode(inode);
5527         struct resv_map *resv_map = inode_resv_map(inode);
5528         long chg = 0;
5529         struct hugepage_subpool *spool = subpool_inode(inode);
5530         long gbl_reserve;
5531
5532         /*
5533          * Since this routine can be called in the evict inode path for all
5534          * hugetlbfs inodes, resv_map could be NULL.
5535          */
5536         if (resv_map) {
5537                 chg = region_del(resv_map, start, end);
5538                 /*
5539                  * region_del() can fail in the rare case where a region
5540          * must be split and another region descriptor cannot be
5541                  * allocated.  If end == LONG_MAX, it will not fail.
5542                  */
5543                 if (chg < 0)
5544                         return chg;
5545         }
5546
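             /* Keep the inode's block count in sync with the pages just freed. */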
5547         spin_lock(&inode->i_lock);
5548         inode->i_blocks -= (blocks_per_huge_page(h) * freed);
5549         spin_unlock(&inode->i_lock);
5550
5551         /*
5552          * If the subpool has a minimum size, the number of global
5553          * reservations to be released may be adjusted.
5554          *
5555          * Note that !resv_map implies freed == 0. So (chg - freed)
5556          * won't go negative.
5557          */
5558         gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
5559         hugetlb_acct_memory(h, -gbl_reserve);
5560
5561         return 0;
5562 }
5563
5564 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
5565 static unsigned long page_table_shareable(struct vm_area_struct *svma,
5566                                 struct vm_area_struct *vma,
5567                                 unsigned long addr, pgoff_t idx)
5568 {
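             /* Translate the file page offset @idx into svma's address space. */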
5569         unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
5570                                 svma->vm_start;
5571         unsigned long sbase = saddr & PUD_MASK;
5572         unsigned long s_end = sbase + PUD_SIZE;
5573
5574         /* Allow segments to share even if only one is marked locked. */
5575         unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
5576         unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK;
5577
5578         /*
5579          * Match the virtual addresses, permissions, and the alignment of the
5580          * page table page.
5581          */
5582         if (pmd_index(addr) != pmd_index(saddr) ||
5583             vm_flags != svm_flags ||
5584             !range_in_vma(svma, sbase, s_end))
5585                 return 0;
5586
5587         return saddr;
5588 }
5589
5590 static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
5591 {
5592         unsigned long base = addr & PUD_MASK;
5593         unsigned long end = base + PUD_SIZE;
5594
5595         /*
5596          * Check for proper vm_flags and page table alignment.
5597          */
5598         if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end))
5599                 return true;
5600         return false;
5601 }
5602
5603 bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
5604 {
5605 #ifdef CONFIG_USERFAULTFD
5606         if (uffd_disable_huge_pmd_share(vma))
5607                 return false;
5608 #endif
5609         return vma_shareable(vma, addr);
5610 }
5611
5612 /*
5613  * Determine if start,end range within vma could be mapped by shared pmd.
5614  * If yes, adjust start and end to cover range associated with possible
5615  * shared pmd mappings.
5616  */
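     /*
      * For example, with a 1 GiB PUD_SIZE, a range [0x40100000, 0x40200000)
      * inside a shared VMA spanning [0x40000000, 0x80000000) is widened to
      * [0x40000000, 0x80000000), covering every PUD that could hold a
      * shared pmd page for this mapping.
      */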
5617 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
5618                                 unsigned long *start, unsigned long *end)
5619 {
5620         unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE),
5621                 v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
5622
5623         /*
5624          * vma needs to span at least one aligned PUD size, and the range
5625          * must be at least partially within it.
5626          */
5627         if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) ||
5628                 (*end <= v_start) || (*start >= v_end))
5629                 return;
5630
5631         /* Extend the range to be PUD aligned for a worst case scenario */
5632         if (*start > v_start)
5633                 *start = ALIGN_DOWN(*start, PUD_SIZE);
5634
5635         if (*end < v_end)
5636                 *end = ALIGN(*end, PUD_SIZE);
5637 }
5638
5639 /*
5640  * Search for a shareable pmd page for hugetlb. In any case it calls
5641  * pmd_alloc() and returns the corresponding pte. While this is not necessary
5642  * for the !shared pmd case, because we can allocate the pmd later as well,
5643  * it makes the code much cleaner.
5644  *
5645  * This routine must be called with i_mmap_rwsem held in at least read mode if
5646  * sharing is possible.  For hugetlbfs, this prevents removal of any page
5647  * table entries associated with the address space.  This is important as we
5648  * are setting up sharing based on existing page table entries (mappings).
5649  *
5650  * NOTE: This routine is only called from huge_pte_alloc.  Some callers of
5651  * huge_pte_alloc know that sharing is not possible and do not take
5652  * i_mmap_rwsem as a performance optimization.  This is handled by the
5653  * !vma_shareable() check at the beginning of the routine. i_mmap_rwsem is
5654  * only required for subsequent processing.
5655  */
5656 pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
5657                       unsigned long addr, pud_t *pud)
5658 {
5659         struct address_space *mapping = vma->vm_file->f_mapping;
5660         pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
5661                         vma->vm_pgoff;
5662         struct vm_area_struct *svma;
5663         unsigned long saddr;
5664         pte_t *spte = NULL;
5665         pte_t *pte;
5666         spinlock_t *ptl;
5667
5668         i_mmap_assert_locked(mapping);
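             /*
              * Scan the other VMAs mapping this file offset; if one already
              * has a populated pmd page for the matching PUD range, take a
              * reference on it so it can be installed below.
              */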
5669         vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
5670                 if (svma == vma)
5671                         continue;
5672
5673                 saddr = page_table_shareable(svma, vma, addr, idx);
5674                 if (saddr) {
5675                         spte = huge_pte_offset(svma->vm_mm, saddr,
5676                                                vma_mmu_pagesize(svma));
5677                         if (spte) {
5678                                 get_page(virt_to_page(spte));
5679                                 break;
5680                         }
5681                 }
5682         }
5683
5684         if (!spte)
5685                 goto out;
5686
5687         ptl = huge_pte_lock(hstate_vma(vma), mm, spte);
5688         if (pud_none(*pud)) {
5689                 pud_populate(mm, pud,
5690                                 (pmd_t *)((unsigned long)spte & PAGE_MASK));
5691                 mm_inc_nr_pmds(mm);
5692         } else {
5693                 put_page(virt_to_page(spte));
5694         }
5695         spin_unlock(ptl);
5696 out:
5697         pte = (pte_t *)pmd_alloc(mm, pud, addr);
5698         return pte;
5699 }
5700
5701 /*
5702  * Unmap a huge page backed by a shared pte.
5703  *
5704  * The hugetlb pte page is ref counted at the time of mapping.  If the pte is
5705  * shared, as indicated by page_count > 1, unmap is achieved by clearing the
5706  * pud and decrementing the ref count. If count == 1, the pte page is not shared.
5707  *
5708  * Called with page table lock held and i_mmap_rwsem held in write mode.
5709  *
5710  * returns: 1 successfully unmapped a shared pte page
5711  *          0 the underlying pte page is not shared, or it is the last user
5712  */
5713 int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
5714                                         unsigned long *addr, pte_t *ptep)
5715 {
5716         pgd_t *pgd = pgd_offset(mm, *addr);
5717         p4d_t *p4d = p4d_offset(pgd, *addr);
5718         pud_t *pud = pud_offset(p4d, *addr);
5719
5720         i_mmap_assert_write_locked(vma->vm_file->f_mapping);
5721         BUG_ON(page_count(virt_to_page(ptep)) == 0);
5722         if (page_count(virt_to_page(ptep)) == 1)
5723                 return 0;
5724
5725         pud_clear(pud);
5726         put_page(virt_to_page(ptep));
5727         mm_dec_nr_pmds(mm);
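             /*
              * Set *addr to one huge page before the end of this PUD range,
              * so that after the caller advances by huge_page_size() it
              * continues at the next PUD boundary, skipping the range that
              * was just unshared.
              */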
5728         *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
5729         return 1;
5730 }
5731
5732 #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
5733 pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
5734                       unsigned long addr, pud_t *pud)
5735 {
5736         return NULL;
5737 }
5738
5739 int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
5740                                 unsigned long *addr, pte_t *ptep)
5741 {
5742         return 0;
5743 }
5744
5745 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
5746                                 unsigned long *start, unsigned long *end)
5747 {
5748 }
5749
5750 bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
5751 {
5752         return false;
5753 }
5754 #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
5755
5756 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
5757 pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
5758                         unsigned long addr, unsigned long sz)
5759 {
5760         pgd_t *pgd;
5761         p4d_t *p4d;
5762         pud_t *pud;
5763         pte_t *pte = NULL;
5764
5765         pgd = pgd_offset(mm, addr);
5766         p4d = p4d_alloc(mm, pgd, addr);
5767         if (!p4d)
5768                 return NULL;
5769         pud = pud_alloc(mm, p4d, addr);
5770         if (pud) {
5771                 if (sz == PUD_SIZE) {
5772                         pte = (pte_t *)pud;
5773                 } else {
5774                         BUG_ON(sz != PMD_SIZE);
5775                         if (want_pmd_share(vma, addr) && pud_none(*pud))
5776                                 pte = huge_pmd_share(mm, vma, addr, pud);
5777                         else
5778                                 pte = (pte_t *)pmd_alloc(mm, pud, addr);
5779                 }
5780         }
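             /*
              * Any present entry returned here must map a huge page;
              * anything else indicates page table corruption.
              */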
5781         BUG_ON(pte && pte_present(*pte) && !pte_huge(*pte));
5782
5783         return pte;
5784 }
5785
5786 /*
5787  * huge_pte_offset() - Walk the page table to resolve the hugepage
5788  * entry at address @addr
5789  *
5790  * Return: Pointer to page table entry (PUD or PMD) for
5791  * address @addr, or NULL if a !p*d_present() entry is encountered and the
5792  * size @sz doesn't match the hugepage size at this level of the page
5793  * table.
5794  */
5795 pte_t *huge_pte_offset(struct mm_struct *mm,
5796                        unsigned long addr, unsigned long sz)
5797 {
5798         pgd_t *pgd;
5799         p4d_t *p4d;
5800         pud_t *pud;
5801         pmd_t *pmd;
5802
5803         pgd = pgd_offset(mm, addr);
5804         if (!pgd_present(*pgd))
5805                 return NULL;
5806         p4d = p4d_offset(pgd, addr);
5807         if (!p4d_present(*p4d))
5808                 return NULL;
5809
5810         pud = pud_offset(p4d, addr);
5811         if (sz == PUD_SIZE)
5812                 /* must be pud huge, non-present or none */
5813                 return (pte_t *)pud;
5814         if (!pud_present(*pud))
5815                 return NULL;
5816         /* must have a valid entry and size to go further */
5817
5818         pmd = pmd_offset(pud, addr);
5819         /* must be pmd huge, non-present or none */
5820         return (pte_t *)pmd;
5821 }
5822
5823 #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
5824
5825 /*
5826  * These functions are overridable if your architecture needs its own
5827  * behavior.
5828  */
5829 struct page * __weak
5830 follow_huge_addr(struct mm_struct *mm, unsigned long address,
5831                               int write)
5832 {
5833         return ERR_PTR(-EINVAL);
5834 }
5835
5836 struct page * __weak
5837 follow_huge_pd(struct vm_area_struct *vma,
5838                unsigned long address, hugepd_t hpd, int flags, int pdshift)
5839 {
5840         WARN(1, "hugepd follow called with no support for hugepage directory format\n");
5841         return NULL;
5842 }
5843
5844 struct page * __weak
5845 follow_huge_pmd(struct mm_struct *mm, unsigned long address,
5846                 pmd_t *pmd, int flags)
5847 {
5848         struct page *page = NULL;
5849         spinlock_t *ptl;
5850         pte_t pte;
5851
5852         /* FOLL_GET and FOLL_PIN are mutually exclusive. */
5853         if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
5854                          (FOLL_PIN | FOLL_GET)))
5855                 return NULL;
5856
5857 retry:
5858         ptl = pmd_lockptr(mm, pmd);
5859         spin_lock(ptl);
5860         /*
5861          * Make sure that the address range covered by this pmd is not
5862          * unmapped by other threads.
5863          */
5864         if (!pmd_huge(*pmd))
5865                 goto out;
5866         pte = huge_ptep_get((pte_t *)pmd);
5867         if (pte_present(pte)) {
5868                 page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
5869                 /*
5870                  * try_grab_page() should always succeed here, because: a) we
5871                  * hold the pmd (ptl) lock, and b) we've just checked that the
5872                  * huge pmd (head) page is present in the page tables. The ptl
5873                  * prevents the head page and tail pages from being rearranged
5874                  * in any way. So this page must be available at this point,
5875                  * unless the page refcount overflowed:
5876                  */
5877                 if (WARN_ON_ONCE(!try_grab_page(page, flags))) {
5878                         page = NULL;
5879                         goto out;
5880                 }
5881         } else {
5882                 if (is_hugetlb_entry_migration(pte)) {
5883                         spin_unlock(ptl);
5884                         __migration_entry_wait(mm, (pte_t *)pmd, ptl);
5885                         goto retry;
5886                 }
5887                 /*
5888                  * hwpoisoned entry is treated as no_page_table in
5889                  * follow_page_mask().
5890                  */
5891         }
5892 out:
5893         spin_unlock(ptl);
5894         return page;
5895 }
5896
5897 struct page * __weak
5898 follow_huge_pud(struct mm_struct *mm, unsigned long address,
5899                 pud_t *pud, int flags)
5900 {
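             /*
              * Taking a reference on the page is not implemented for
              * PUD-mapped huge pages here, so refuse requests that need
              * FOLL_GET or FOLL_PIN.
              */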
5901         if (flags & (FOLL_GET | FOLL_PIN))
5902                 return NULL;
5903
5904         return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
5905 }
5906
5907 struct page * __weak
5908 follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int flags)
5909 {
5910         if (flags & (FOLL_GET | FOLL_PIN))
5911                 return NULL;
5912
5913         return pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT);
5914 }
5915
5916 bool isolate_huge_page(struct page *page, struct list_head *list)
5917 {
5918         bool ret = true;
5919
5920         spin_lock_irq(&hugetlb_lock);
5921         if (!PageHeadHuge(page) ||
5922             !HPageMigratable(page) ||
5923             !get_page_unless_zero(page)) {
5924                 ret = false;
5925                 goto unlock;
5926         }
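             /*
              * Clearing HPageMigratable prevents the page from being
              * isolated a second time while it sits on the caller's list.
              */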
5927         ClearHPageMigratable(page);
5928         list_move_tail(&page->lru, list);
5929 unlock:
5930         spin_unlock_irq(&hugetlb_lock);
5931         return ret;
5932 }
5933
5934 int get_hwpoison_huge_page(struct page *page, bool *hugetlb)
5935 {
5936         int ret = 0;
5937
5938         *hugetlb = false;
5939         spin_lock_irq(&hugetlb_lock);
5940         if (PageHeadHuge(page)) {
5941                 *hugetlb = true;
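                     /*
                      * Take a reference only while the page is free or
                      * migratable; otherwise just report that it is a
                      * hugetlb page.
                      */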
5942                 if (HPageFreed(page) || HPageMigratable(page))
5943                         ret = get_page_unless_zero(page);
5944         }
5945         spin_unlock_irq(&hugetlb_lock);
5946         return ret;
5947 }
5948
5949 void putback_active_hugepage(struct page *page)
5950 {
5951         spin_lock_irq(&hugetlb_lock);
5952         SetHPageMigratable(page);
5953         list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
5954         spin_unlock_irq(&hugetlb_lock);
5955         put_page(page);
5956 }
5957
5958 void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
5959 {
5960         struct hstate *h = page_hstate(oldpage);
5961
5962         hugetlb_cgroup_migrate(oldpage, newpage);
5963         set_page_owner_migrate_reason(newpage, reason);
5964
5965         /*
5966          * Transfer the temporary state of the new huge page. This is
5967          * the reverse of other transitions because the new page is going
5968          * to be final while the old one will be freed, so the new page
5969          * takes over the temporary status.
5970          *
5971          * Also note that we have to transfer the per-node surplus state
5972          * here as well, otherwise the global surplus count will not match
5973          * the per-node counts.
5974          */
5975         if (HPageTemporary(newpage)) {
5976                 int old_nid = page_to_nid(oldpage);
5977                 int new_nid = page_to_nid(newpage);
5978
5979                 SetHPageTemporary(oldpage);
5980                 ClearHPageTemporary(newpage);
5981
5982                 /*
5983                  * There is no need to transfer the per-node surplus state
5984                  * when we do not cross the node.
5985                  */
5986                 if (new_nid == old_nid)
5987                         return;
5988                 spin_lock_irq(&hugetlb_lock);
5989                 if (h->surplus_huge_pages_node[old_nid]) {
5990                         h->surplus_huge_pages_node[old_nid]--;
5991                         h->surplus_huge_pages_node[new_nid]++;
5992                 }
5993                 spin_unlock_irq(&hugetlb_lock);
5994         }
5995 }
5996
5997 /*
5998  * This function will unconditionally remove all the shared pmd pgtable entries
5999  * within the specific vma for a hugetlbfs memory range.
6000  */
6001 void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
6002 {
6003         struct hstate *h = hstate_vma(vma);
6004         unsigned long sz = huge_page_size(h);
6005         struct mm_struct *mm = vma->vm_mm;
6006         struct mmu_notifier_range range;
6007         unsigned long address, start, end;
6008         spinlock_t *ptl;
6009         pte_t *ptep;
6010
6011         if (!(vma->vm_flags & VM_MAYSHARE))
6012                 return;
6013
6014         start = ALIGN(vma->vm_start, PUD_SIZE);
6015         end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
6016
6017         if (start >= end)
6018                 return;
6019
6020         /*
6021          * No need to call adjust_range_if_pmd_sharing_possible(), because
6022          * we have already done the PUD_SIZE alignment.
6023          */
6024         mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
6025                                 start, end);
6026         mmu_notifier_invalidate_range_start(&range);
6027         i_mmap_lock_write(vma->vm_file->f_mapping);
6028         for (address = start; address < end; address += PUD_SIZE) {
6029                 unsigned long tmp = address;
6030
6031                 ptep = huge_pte_offset(mm, address, sz);
6032                 if (!ptep)
6033                         continue;
6034                 ptl = huge_pte_lock(h, mm, ptep);
6035                 /* We don't want 'address' to be changed */
6036                 huge_pmd_unshare(mm, vma, &tmp, ptep);
6037                 spin_unlock(ptl);
6038         }
6039         flush_hugetlb_tlb_range(vma, start, end);
6040         i_mmap_unlock_write(vma->vm_file->f_mapping);
6041         /*
6042          * No need to call mmu_notifier_invalidate_range(), see
6043          * Documentation/vm/mmu_notifier.rst.
6044          */
6045         mmu_notifier_invalidate_range_end(&range);
6046 }
6047
6048 #ifdef CONFIG_CMA
6049 static bool cma_reserve_called __initdata;
6050
6051 static int __init cmdline_parse_hugetlb_cma(char *p)
6052 {
6053         hugetlb_cma_size = memparse(p, &p);
6054         return 0;
6055 }
6056
6057 early_param("hugetlb_cma", cmdline_parse_hugetlb_cma);
6058
6059 void __init hugetlb_cma_reserve(int order)
6060 {
6061         unsigned long size, reserved, per_node;
6062         int nid;
6063
6064         cma_reserve_called = true;
6065
6066         if (!hugetlb_cma_size)
6067                 return;
6068
6069         if (hugetlb_cma_size < (PAGE_SIZE << order)) {
6070                 pr_warn("hugetlb_cma: cma area should be at least %lu MiB\n",
6071                         (PAGE_SIZE << order) / SZ_1M);
6072                 return;
6073         }
6074
6075         /*
6076          * If a 3 GB area is requested on a machine with 4 NUMA nodes,
6077          * allocate 1 GB on the first three nodes and ignore the last one.
6078          */
6079         per_node = DIV_ROUND_UP(hugetlb_cma_size, nr_online_nodes);
6080         pr_info("hugetlb_cma: reserve %lu MiB, up to %lu MiB per node\n",
6081                 hugetlb_cma_size / SZ_1M, per_node / SZ_1M);
6082
6083         reserved = 0;
6084         for_each_node_state(nid, N_ONLINE) {
6085                 int res;
6086                 char name[CMA_MAX_NAME];
6087
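                     /*
                      * Cap this node's share at whatever is still missing
                      * and round it up to a whole number of huge pages.
                      */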
6088                 size = min(per_node, hugetlb_cma_size - reserved);
6089                 size = round_up(size, PAGE_SIZE << order);
6090
6091                 snprintf(name, sizeof(name), "hugetlb%d", nid);
6092                 res = cma_declare_contiguous_nid(0, size, 0, PAGE_SIZE << order,
6093                                                  0, false, name,
6094                                                  &hugetlb_cma[nid], nid);
6095                 if (res) {
6096                         pr_warn("hugetlb_cma: reservation failed: err %d, node %d",
6097                                 res, nid);
6098                         continue;
6099                 }
6100
6101                 reserved += size;
6102                 pr_info("hugetlb_cma: reserved %lu MiB on node %d\n",
6103                         size / SZ_1M, nid);
6104
6105                 if (reserved >= hugetlb_cma_size)
6106                         break;
6107         }
6108 }
6109
6110 void __init hugetlb_cma_check(void)
6111 {
6112         if (!hugetlb_cma_size || cma_reserve_called)
6113                 return;
6114
6115         pr_warn("hugetlb_cma: the option isn't supported by current arch\n");
6116 }
6117
6118 #endif /* CONFIG_CMA */