drm/amd/display: Refactor clk_mgr functions
[linux-2.6-microblaze.git] / mm / hugetlb.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Generic hugetlb support.
4  * (C) Nadia Yvette Chambers, April 2004
5  */
6 #include <linux/list.h>
7 #include <linux/init.h>
8 #include <linux/mm.h>
9 #include <linux/seq_file.h>
10 #include <linux/sysctl.h>
11 #include <linux/highmem.h>
12 #include <linux/mmu_notifier.h>
13 #include <linux/nodemask.h>
14 #include <linux/pagemap.h>
15 #include <linux/mempolicy.h>
16 #include <linux/compiler.h>
17 #include <linux/cpuset.h>
18 #include <linux/mutex.h>
19 #include <linux/memblock.h>
20 #include <linux/sysfs.h>
21 #include <linux/slab.h>
22 #include <linux/mmdebug.h>
23 #include <linux/sched/signal.h>
24 #include <linux/rmap.h>
25 #include <linux/string_helpers.h>
26 #include <linux/swap.h>
27 #include <linux/swapops.h>
28 #include <linux/jhash.h>
29 #include <linux/numa.h>
30
31 #include <asm/page.h>
32 #include <asm/pgtable.h>
33 #include <asm/tlb.h>
34
35 #include <linux/io.h>
36 #include <linux/hugetlb.h>
37 #include <linux/hugetlb_cgroup.h>
38 #include <linux/node.h>
39 #include <linux/userfaultfd_k.h>
40 #include <linux/page_owner.h>
41 #include "internal.h"
42
43 int hugetlb_max_hstate __read_mostly;
44 unsigned int default_hstate_idx;
45 struct hstate hstates[HUGE_MAX_HSTATE];
46 /*
47  * Minimum page order among possible hugepage sizes, set to a proper value
48  * at boot time.
49  */
50 static unsigned int minimum_order __read_mostly = UINT_MAX;
51
52 __initdata LIST_HEAD(huge_boot_pages);
53
54 /* for command line parsing */
55 static struct hstate * __initdata parsed_hstate;
56 static unsigned long __initdata default_hstate_max_huge_pages;
57 static unsigned long __initdata default_hstate_size;
58 static bool __initdata parsed_valid_hugepagesz = true;
59
60 /*
61  * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
62  * free_huge_pages, and surplus_huge_pages.
63  */
64 DEFINE_SPINLOCK(hugetlb_lock);
65
66 /*
67  * Serializes faults on the same logical page.  This is used to
68  * prevent spurious OOMs when the hugepage pool is fully utilized.
69  */
70 static int num_fault_mutexes;
71 struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
72
73 /* Forward declaration */
74 static int hugetlb_acct_memory(struct hstate *h, long delta);
75
76 static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
77 {
78         bool free = (spool->count == 0) && (spool->used_hpages == 0);
79
80         spin_unlock(&spool->lock);
81
82         /* If no pages are used, and no other handles to the subpool
83          * remain, give up any reservations mased on minimum size and
84          * free the subpool */
85         if (free) {
86                 if (spool->min_hpages != -1)
87                         hugetlb_acct_memory(spool->hstate,
88                                                 -spool->min_hpages);
89                 kfree(spool);
90         }
91 }
92
93 struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
94                                                 long min_hpages)
95 {
96         struct hugepage_subpool *spool;
97
98         spool = kzalloc(sizeof(*spool), GFP_KERNEL);
99         if (!spool)
100                 return NULL;
101
102         spin_lock_init(&spool->lock);
103         spool->count = 1;
104         spool->max_hpages = max_hpages;
105         spool->hstate = h;
106         spool->min_hpages = min_hpages;
107
108         if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
109                 kfree(spool);
110                 return NULL;
111         }
112         spool->rsv_hpages = min_hpages;
113
114         return spool;
115 }
116
117 void hugepage_put_subpool(struct hugepage_subpool *spool)
118 {
119         spin_lock(&spool->lock);
120         BUG_ON(!spool->count);
121         spool->count--;
122         unlock_or_release_subpool(spool);
123 }
124
125 /*
126  * Subpool accounting for allocating and reserving pages.
127  * Return -ENOMEM if there are not enough resources to satisfy the
128  * the request.  Otherwise, return the number of pages by which the
129  * global pools must be adjusted (upward).  The returned value may
130  * only be different than the passed value (delta) in the case where
131  * a subpool minimum size must be manitained.
132  */
133 static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
134                                       long delta)
135 {
136         long ret = delta;
137
138         if (!spool)
139                 return ret;
140
141         spin_lock(&spool->lock);
142
143         if (spool->max_hpages != -1) {          /* maximum size accounting */
144                 if ((spool->used_hpages + delta) <= spool->max_hpages)
145                         spool->used_hpages += delta;
146                 else {
147                         ret = -ENOMEM;
148                         goto unlock_ret;
149                 }
150         }
151
152         /* minimum size accounting */
153         if (spool->min_hpages != -1 && spool->rsv_hpages) {
154                 if (delta > spool->rsv_hpages) {
155                         /*
156                          * Asking for more reserves than those already taken on
157                          * behalf of subpool.  Return difference.
158                          */
159                         ret = delta - spool->rsv_hpages;
160                         spool->rsv_hpages = 0;
161                 } else {
162                         ret = 0;        /* reserves already accounted for */
163                         spool->rsv_hpages -= delta;
164                 }
165         }
166
167 unlock_ret:
168         spin_unlock(&spool->lock);
169         return ret;
170 }
171
172 /*
173  * Subpool accounting for freeing and unreserving pages.
174  * Return the number of global page reservations that must be dropped.
175  * The return value may only be different than the passed value (delta)
176  * in the case where a subpool minimum size must be maintained.
177  */
178 static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
179                                        long delta)
180 {
181         long ret = delta;
182
183         if (!spool)
184                 return delta;
185
186         spin_lock(&spool->lock);
187
188         if (spool->max_hpages != -1)            /* maximum size accounting */
189                 spool->used_hpages -= delta;
190
191          /* minimum size accounting */
192         if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
193                 if (spool->rsv_hpages + delta <= spool->min_hpages)
194                         ret = 0;
195                 else
196                         ret = spool->rsv_hpages + delta - spool->min_hpages;
197
198                 spool->rsv_hpages += delta;
199                 if (spool->rsv_hpages > spool->min_hpages)
200                         spool->rsv_hpages = spool->min_hpages;
201         }
202
203         /*
204          * If hugetlbfs_put_super couldn't free spool due to an outstanding
205          * quota reference, free it now.
206          */
207         unlock_or_release_subpool(spool);
208
209         return ret;
210 }
211
212 static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
213 {
214         return HUGETLBFS_SB(inode->i_sb)->spool;
215 }
216
217 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
218 {
219         return subpool_inode(file_inode(vma->vm_file));
220 }
221
222 /*
223  * Region tracking -- allows tracking of reservations and instantiated pages
224  *                    across the pages in a mapping.
225  *
226  * The region data structures are embedded into a resv_map and protected
227  * by a resv_map's lock.  The set of regions within the resv_map represent
228  * reservations for huge pages, or huge pages that have already been
229  * instantiated within the map.  The from and to elements are huge page
230  * indicies into the associated mapping.  from indicates the starting index
231  * of the region.  to represents the first index past the end of  the region.
232  *
233  * For example, a file region structure with from == 0 and to == 4 represents
234  * four huge pages in a mapping.  It is important to note that the to element
235  * represents the first element past the end of the region. This is used in
236  * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
237  *
238  * Interval notation of the form [from, to) will be used to indicate that
239  * the endpoint from is inclusive and to is exclusive.
240  */
241 struct file_region {
242         struct list_head link;
243         long from;
244         long to;
245 };
246
247 /*
248  * Add the huge page range represented by [f, t) to the reserve
249  * map.  In the normal case, existing regions will be expanded
250  * to accommodate the specified range.  Sufficient regions should
251  * exist for expansion due to the previous call to region_chg
252  * with the same range.  However, it is possible that region_del
253  * could have been called after region_chg and modifed the map
254  * in such a way that no region exists to be expanded.  In this
255  * case, pull a region descriptor from the cache associated with
256  * the map and use that for the new range.
257  *
258  * Return the number of new huge pages added to the map.  This
259  * number is greater than or equal to zero.
260  */
261 static long region_add(struct resv_map *resv, long f, long t)
262 {
263         struct list_head *head = &resv->regions;
264         struct file_region *rg, *nrg, *trg;
265         long add = 0;
266
267         spin_lock(&resv->lock);
268         /* Locate the region we are either in or before. */
269         list_for_each_entry(rg, head, link)
270                 if (f <= rg->to)
271                         break;
272
273         /*
274          * If no region exists which can be expanded to include the
275          * specified range, the list must have been modified by an
276          * interleving call to region_del().  Pull a region descriptor
277          * from the cache and use it for this range.
278          */
279         if (&rg->link == head || t < rg->from) {
280                 VM_BUG_ON(resv->region_cache_count <= 0);
281
282                 resv->region_cache_count--;
283                 nrg = list_first_entry(&resv->region_cache, struct file_region,
284                                         link);
285                 list_del(&nrg->link);
286
287                 nrg->from = f;
288                 nrg->to = t;
289                 list_add(&nrg->link, rg->link.prev);
290
291                 add += t - f;
292                 goto out_locked;
293         }
294
295         /* Round our left edge to the current segment if it encloses us. */
296         if (f > rg->from)
297                 f = rg->from;
298
299         /* Check for and consume any regions we now overlap with. */
300         nrg = rg;
301         list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
302                 if (&rg->link == head)
303                         break;
304                 if (rg->from > t)
305                         break;
306
307                 /* If this area reaches higher then extend our area to
308                  * include it completely.  If this is not the first area
309                  * which we intend to reuse, free it. */
310                 if (rg->to > t)
311                         t = rg->to;
312                 if (rg != nrg) {
313                         /* Decrement return value by the deleted range.
314                          * Another range will span this area so that by
315                          * end of routine add will be >= zero
316                          */
317                         add -= (rg->to - rg->from);
318                         list_del(&rg->link);
319                         kfree(rg);
320                 }
321         }
322
323         add += (nrg->from - f);         /* Added to beginning of region */
324         nrg->from = f;
325         add += t - nrg->to;             /* Added to end of region */
326         nrg->to = t;
327
328 out_locked:
329         resv->adds_in_progress--;
330         spin_unlock(&resv->lock);
331         VM_BUG_ON(add < 0);
332         return add;
333 }
334
335 /*
336  * Examine the existing reserve map and determine how many
337  * huge pages in the specified range [f, t) are NOT currently
338  * represented.  This routine is called before a subsequent
339  * call to region_add that will actually modify the reserve
340  * map to add the specified range [f, t).  region_chg does
341  * not change the number of huge pages represented by the
342  * map.  However, if the existing regions in the map can not
343  * be expanded to represent the new range, a new file_region
344  * structure is added to the map as a placeholder.  This is
345  * so that the subsequent region_add call will have all the
346  * regions it needs and will not fail.
347  *
348  * Upon entry, region_chg will also examine the cache of region descriptors
349  * associated with the map.  If there are not enough descriptors cached, one
350  * will be allocated for the in progress add operation.
351  *
352  * Returns the number of huge pages that need to be added to the existing
353  * reservation map for the range [f, t).  This number is greater or equal to
354  * zero.  -ENOMEM is returned if a new file_region structure or cache entry
355  * is needed and can not be allocated.
356  */
357 static long region_chg(struct resv_map *resv, long f, long t)
358 {
359         struct list_head *head = &resv->regions;
360         struct file_region *rg, *nrg = NULL;
361         long chg = 0;
362
363 retry:
364         spin_lock(&resv->lock);
365 retry_locked:
366         resv->adds_in_progress++;
367
368         /*
369          * Check for sufficient descriptors in the cache to accommodate
370          * the number of in progress add operations.
371          */
372         if (resv->adds_in_progress > resv->region_cache_count) {
373                 struct file_region *trg;
374
375                 VM_BUG_ON(resv->adds_in_progress - resv->region_cache_count > 1);
376                 /* Must drop lock to allocate a new descriptor. */
377                 resv->adds_in_progress--;
378                 spin_unlock(&resv->lock);
379
380                 trg = kmalloc(sizeof(*trg), GFP_KERNEL);
381                 if (!trg) {
382                         kfree(nrg);
383                         return -ENOMEM;
384                 }
385
386                 spin_lock(&resv->lock);
387                 list_add(&trg->link, &resv->region_cache);
388                 resv->region_cache_count++;
389                 goto retry_locked;
390         }
391
392         /* Locate the region we are before or in. */
393         list_for_each_entry(rg, head, link)
394                 if (f <= rg->to)
395                         break;
396
397         /* If we are below the current region then a new region is required.
398          * Subtle, allocate a new region at the position but make it zero
399          * size such that we can guarantee to record the reservation. */
400         if (&rg->link == head || t < rg->from) {
401                 if (!nrg) {
402                         resv->adds_in_progress--;
403                         spin_unlock(&resv->lock);
404                         nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
405                         if (!nrg)
406                                 return -ENOMEM;
407
408                         nrg->from = f;
409                         nrg->to   = f;
410                         INIT_LIST_HEAD(&nrg->link);
411                         goto retry;
412                 }
413
414                 list_add(&nrg->link, rg->link.prev);
415                 chg = t - f;
416                 goto out_nrg;
417         }
418
419         /* Round our left edge to the current segment if it encloses us. */
420         if (f > rg->from)
421                 f = rg->from;
422         chg = t - f;
423
424         /* Check for and consume any regions we now overlap with. */
425         list_for_each_entry(rg, rg->link.prev, link) {
426                 if (&rg->link == head)
427                         break;
428                 if (rg->from > t)
429                         goto out;
430
431                 /* We overlap with this area, if it extends further than
432                  * us then we must extend ourselves.  Account for its
433                  * existing reservation. */
434                 if (rg->to > t) {
435                         chg += rg->to - t;
436                         t = rg->to;
437                 }
438                 chg -= rg->to - rg->from;
439         }
440
441 out:
442         spin_unlock(&resv->lock);
443         /*  We already know we raced and no longer need the new region */
444         kfree(nrg);
445         return chg;
446 out_nrg:
447         spin_unlock(&resv->lock);
448         return chg;
449 }
450
451 /*
452  * Abort the in progress add operation.  The adds_in_progress field
453  * of the resv_map keeps track of the operations in progress between
454  * calls to region_chg and region_add.  Operations are sometimes
455  * aborted after the call to region_chg.  In such cases, region_abort
456  * is called to decrement the adds_in_progress counter.
457  *
458  * NOTE: The range arguments [f, t) are not needed or used in this
459  * routine.  They are kept to make reading the calling code easier as
460  * arguments will match the associated region_chg call.
461  */
462 static void region_abort(struct resv_map *resv, long f, long t)
463 {
464         spin_lock(&resv->lock);
465         VM_BUG_ON(!resv->region_cache_count);
466         resv->adds_in_progress--;
467         spin_unlock(&resv->lock);
468 }
469
470 /*
471  * Delete the specified range [f, t) from the reserve map.  If the
472  * t parameter is LONG_MAX, this indicates that ALL regions after f
473  * should be deleted.  Locate the regions which intersect [f, t)
474  * and either trim, delete or split the existing regions.
475  *
476  * Returns the number of huge pages deleted from the reserve map.
477  * In the normal case, the return value is zero or more.  In the
478  * case where a region must be split, a new region descriptor must
479  * be allocated.  If the allocation fails, -ENOMEM will be returned.
480  * NOTE: If the parameter t == LONG_MAX, then we will never split
481  * a region and possibly return -ENOMEM.  Callers specifying
482  * t == LONG_MAX do not need to check for -ENOMEM error.
483  */
484 static long region_del(struct resv_map *resv, long f, long t)
485 {
486         struct list_head *head = &resv->regions;
487         struct file_region *rg, *trg;
488         struct file_region *nrg = NULL;
489         long del = 0;
490
491 retry:
492         spin_lock(&resv->lock);
493         list_for_each_entry_safe(rg, trg, head, link) {
494                 /*
495                  * Skip regions before the range to be deleted.  file_region
496                  * ranges are normally of the form [from, to).  However, there
497                  * may be a "placeholder" entry in the map which is of the form
498                  * (from, to) with from == to.  Check for placeholder entries
499                  * at the beginning of the range to be deleted.
500                  */
501                 if (rg->to <= f && (rg->to != rg->from || rg->to != f))
502                         continue;
503
504                 if (rg->from >= t)
505                         break;
506
507                 if (f > rg->from && t < rg->to) { /* Must split region */
508                         /*
509                          * Check for an entry in the cache before dropping
510                          * lock and attempting allocation.
511                          */
512                         if (!nrg &&
513                             resv->region_cache_count > resv->adds_in_progress) {
514                                 nrg = list_first_entry(&resv->region_cache,
515                                                         struct file_region,
516                                                         link);
517                                 list_del(&nrg->link);
518                                 resv->region_cache_count--;
519                         }
520
521                         if (!nrg) {
522                                 spin_unlock(&resv->lock);
523                                 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
524                                 if (!nrg)
525                                         return -ENOMEM;
526                                 goto retry;
527                         }
528
529                         del += t - f;
530
531                         /* New entry for end of split region */
532                         nrg->from = t;
533                         nrg->to = rg->to;
534                         INIT_LIST_HEAD(&nrg->link);
535
536                         /* Original entry is trimmed */
537                         rg->to = f;
538
539                         list_add(&nrg->link, &rg->link);
540                         nrg = NULL;
541                         break;
542                 }
543
544                 if (f <= rg->from && t >= rg->to) { /* Remove entire region */
545                         del += rg->to - rg->from;
546                         list_del(&rg->link);
547                         kfree(rg);
548                         continue;
549                 }
550
551                 if (f <= rg->from) {    /* Trim beginning of region */
552                         del += t - rg->from;
553                         rg->from = t;
554                 } else {                /* Trim end of region */
555                         del += rg->to - f;
556                         rg->to = f;
557                 }
558         }
559
560         spin_unlock(&resv->lock);
561         kfree(nrg);
562         return del;
563 }
564
565 /*
566  * A rare out of memory error was encountered which prevented removal of
567  * the reserve map region for a page.  The huge page itself was free'ed
568  * and removed from the page cache.  This routine will adjust the subpool
569  * usage count, and the global reserve count if needed.  By incrementing
570  * these counts, the reserve map entry which could not be deleted will
571  * appear as a "reserved" entry instead of simply dangling with incorrect
572  * counts.
573  */
574 void hugetlb_fix_reserve_counts(struct inode *inode)
575 {
576         struct hugepage_subpool *spool = subpool_inode(inode);
577         long rsv_adjust;
578
579         rsv_adjust = hugepage_subpool_get_pages(spool, 1);
580         if (rsv_adjust) {
581                 struct hstate *h = hstate_inode(inode);
582
583                 hugetlb_acct_memory(h, 1);
584         }
585 }
586
587 /*
588  * Count and return the number of huge pages in the reserve map
589  * that intersect with the range [f, t).
590  */
591 static long region_count(struct resv_map *resv, long f, long t)
592 {
593         struct list_head *head = &resv->regions;
594         struct file_region *rg;
595         long chg = 0;
596
597         spin_lock(&resv->lock);
598         /* Locate each segment we overlap with, and count that overlap. */
599         list_for_each_entry(rg, head, link) {
600                 long seg_from;
601                 long seg_to;
602
603                 if (rg->to <= f)
604                         continue;
605                 if (rg->from >= t)
606                         break;
607
608                 seg_from = max(rg->from, f);
609                 seg_to = min(rg->to, t);
610
611                 chg += seg_to - seg_from;
612         }
613         spin_unlock(&resv->lock);
614
615         return chg;
616 }
617
618 /*
619  * Convert the address within this vma to the page offset within
620  * the mapping, in pagecache page units; huge pages here.
621  */
622 static pgoff_t vma_hugecache_offset(struct hstate *h,
623                         struct vm_area_struct *vma, unsigned long address)
624 {
625         return ((address - vma->vm_start) >> huge_page_shift(h)) +
626                         (vma->vm_pgoff >> huge_page_order(h));
627 }
628
629 pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
630                                      unsigned long address)
631 {
632         return vma_hugecache_offset(hstate_vma(vma), vma, address);
633 }
634 EXPORT_SYMBOL_GPL(linear_hugepage_index);
635
636 /*
637  * Return the size of the pages allocated when backing a VMA. In the majority
638  * cases this will be same size as used by the page table entries.
639  */
640 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
641 {
642         if (vma->vm_ops && vma->vm_ops->pagesize)
643                 return vma->vm_ops->pagesize(vma);
644         return PAGE_SIZE;
645 }
646 EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
647
648 /*
649  * Return the page size being used by the MMU to back a VMA. In the majority
650  * of cases, the page size used by the kernel matches the MMU size. On
651  * architectures where it differs, an architecture-specific 'strong'
652  * version of this symbol is required.
653  */
654 __weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
655 {
656         return vma_kernel_pagesize(vma);
657 }
658
659 /*
660  * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
661  * bits of the reservation map pointer, which are always clear due to
662  * alignment.
663  */
664 #define HPAGE_RESV_OWNER    (1UL << 0)
665 #define HPAGE_RESV_UNMAPPED (1UL << 1)
666 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
667
668 /*
669  * These helpers are used to track how many pages are reserved for
670  * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
671  * is guaranteed to have their future faults succeed.
672  *
673  * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
674  * the reserve counters are updated with the hugetlb_lock held. It is safe
675  * to reset the VMA at fork() time as it is not in use yet and there is no
676  * chance of the global counters getting corrupted as a result of the values.
677  *
678  * The private mapping reservation is represented in a subtly different
679  * manner to a shared mapping.  A shared mapping has a region map associated
680  * with the underlying file, this region map represents the backing file
681  * pages which have ever had a reservation assigned which this persists even
682  * after the page is instantiated.  A private mapping has a region map
683  * associated with the original mmap which is attached to all VMAs which
684  * reference it, this region map represents those offsets which have consumed
685  * reservation ie. where pages have been instantiated.
686  */
687 static unsigned long get_vma_private_data(struct vm_area_struct *vma)
688 {
689         return (unsigned long)vma->vm_private_data;
690 }
691
692 static void set_vma_private_data(struct vm_area_struct *vma,
693                                                         unsigned long value)
694 {
695         vma->vm_private_data = (void *)value;
696 }
697
698 struct resv_map *resv_map_alloc(void)
699 {
700         struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
701         struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);
702
703         if (!resv_map || !rg) {
704                 kfree(resv_map);
705                 kfree(rg);
706                 return NULL;
707         }
708
709         kref_init(&resv_map->refs);
710         spin_lock_init(&resv_map->lock);
711         INIT_LIST_HEAD(&resv_map->regions);
712
713         resv_map->adds_in_progress = 0;
714
715         INIT_LIST_HEAD(&resv_map->region_cache);
716         list_add(&rg->link, &resv_map->region_cache);
717         resv_map->region_cache_count = 1;
718
719         return resv_map;
720 }
721
722 void resv_map_release(struct kref *ref)
723 {
724         struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
725         struct list_head *head = &resv_map->region_cache;
726         struct file_region *rg, *trg;
727
728         /* Clear out any active regions before we release the map. */
729         region_del(resv_map, 0, LONG_MAX);
730
731         /* ... and any entries left in the cache */
732         list_for_each_entry_safe(rg, trg, head, link) {
733                 list_del(&rg->link);
734                 kfree(rg);
735         }
736
737         VM_BUG_ON(resv_map->adds_in_progress);
738
739         kfree(resv_map);
740 }
741
742 static inline struct resv_map *inode_resv_map(struct inode *inode)
743 {
744         /*
745          * At inode evict time, i_mapping may not point to the original
746          * address space within the inode.  This original address space
747          * contains the pointer to the resv_map.  So, always use the
748          * address space embedded within the inode.
749          * The VERY common case is inode->mapping == &inode->i_data but,
750          * this may not be true for device special inodes.
751          */
752         return (struct resv_map *)(&inode->i_data)->private_data;
753 }
754
755 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
756 {
757         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
758         if (vma->vm_flags & VM_MAYSHARE) {
759                 struct address_space *mapping = vma->vm_file->f_mapping;
760                 struct inode *inode = mapping->host;
761
762                 return inode_resv_map(inode);
763
764         } else {
765                 return (struct resv_map *)(get_vma_private_data(vma) &
766                                                         ~HPAGE_RESV_MASK);
767         }
768 }
769
770 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
771 {
772         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
773         VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
774
775         set_vma_private_data(vma, (get_vma_private_data(vma) &
776                                 HPAGE_RESV_MASK) | (unsigned long)map);
777 }
778
779 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
780 {
781         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
782         VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
783
784         set_vma_private_data(vma, get_vma_private_data(vma) | flags);
785 }
786
787 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
788 {
789         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
790
791         return (get_vma_private_data(vma) & flag) != 0;
792 }
793
794 /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
795 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
796 {
797         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
798         if (!(vma->vm_flags & VM_MAYSHARE))
799                 vma->vm_private_data = (void *)0;
800 }
801
802 /* Returns true if the VMA has associated reserve pages */
803 static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
804 {
805         if (vma->vm_flags & VM_NORESERVE) {
806                 /*
807                  * This address is already reserved by other process(chg == 0),
808                  * so, we should decrement reserved count. Without decrementing,
809                  * reserve count remains after releasing inode, because this
810                  * allocated page will go into page cache and is regarded as
811                  * coming from reserved pool in releasing step.  Currently, we
812                  * don't have any other solution to deal with this situation
813                  * properly, so add work-around here.
814                  */
815                 if (vma->vm_flags & VM_MAYSHARE && chg == 0)
816                         return true;
817                 else
818                         return false;
819         }
820
821         /* Shared mappings always use reserves */
822         if (vma->vm_flags & VM_MAYSHARE) {
823                 /*
824                  * We know VM_NORESERVE is not set.  Therefore, there SHOULD
825                  * be a region map for all pages.  The only situation where
826                  * there is no region map is if a hole was punched via
827                  * fallocate.  In this case, there really are no reverves to
828                  * use.  This situation is indicated if chg != 0.
829                  */
830                 if (chg)
831                         return false;
832                 else
833                         return true;
834         }
835
836         /*
837          * Only the process that called mmap() has reserves for
838          * private mappings.
839          */
840         if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
841                 /*
842                  * Like the shared case above, a hole punch or truncate
843                  * could have been performed on the private mapping.
844                  * Examine the value of chg to determine if reserves
845                  * actually exist or were previously consumed.
846                  * Very Subtle - The value of chg comes from a previous
847                  * call to vma_needs_reserves().  The reserve map for
848                  * private mappings has different (opposite) semantics
849                  * than that of shared mappings.  vma_needs_reserves()
850                  * has already taken this difference in semantics into
851                  * account.  Therefore, the meaning of chg is the same
852                  * as in the shared case above.  Code could easily be
853                  * combined, but keeping it separate draws attention to
854                  * subtle differences.
855                  */
856                 if (chg)
857                         return false;
858                 else
859                         return true;
860         }
861
862         return false;
863 }
864
865 static void enqueue_huge_page(struct hstate *h, struct page *page)
866 {
867         int nid = page_to_nid(page);
868         list_move(&page->lru, &h->hugepage_freelists[nid]);
869         h->free_huge_pages++;
870         h->free_huge_pages_node[nid]++;
871 }
872
873 static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
874 {
875         struct page *page;
876
877         list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
878                 if (!PageHWPoison(page))
879                         break;
880         /*
881          * if 'non-isolated free hugepage' not found on the list,
882          * the allocation fails.
883          */
884         if (&h->hugepage_freelists[nid] == &page->lru)
885                 return NULL;
886         list_move(&page->lru, &h->hugepage_activelist);
887         set_page_refcounted(page);
888         h->free_huge_pages--;
889         h->free_huge_pages_node[nid]--;
890         return page;
891 }
892
893 static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid,
894                 nodemask_t *nmask)
895 {
896         unsigned int cpuset_mems_cookie;
897         struct zonelist *zonelist;
898         struct zone *zone;
899         struct zoneref *z;
900         int node = NUMA_NO_NODE;
901
902         zonelist = node_zonelist(nid, gfp_mask);
903
904 retry_cpuset:
905         cpuset_mems_cookie = read_mems_allowed_begin();
906         for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
907                 struct page *page;
908
909                 if (!cpuset_zone_allowed(zone, gfp_mask))
910                         continue;
911                 /*
912                  * no need to ask again on the same node. Pool is node rather than
913                  * zone aware
914                  */
915                 if (zone_to_nid(zone) == node)
916                         continue;
917                 node = zone_to_nid(zone);
918
919                 page = dequeue_huge_page_node_exact(h, node);
920                 if (page)
921                         return page;
922         }
923         if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
924                 goto retry_cpuset;
925
926         return NULL;
927 }
928
929 /* Movability of hugepages depends on migration support. */
930 static inline gfp_t htlb_alloc_mask(struct hstate *h)
931 {
932         if (hugepage_movable_supported(h))
933                 return GFP_HIGHUSER_MOVABLE;
934         else
935                 return GFP_HIGHUSER;
936 }
937
938 static struct page *dequeue_huge_page_vma(struct hstate *h,
939                                 struct vm_area_struct *vma,
940                                 unsigned long address, int avoid_reserve,
941                                 long chg)
942 {
943         struct page *page;
944         struct mempolicy *mpol;
945         gfp_t gfp_mask;
946         nodemask_t *nodemask;
947         int nid;
948
949         /*
950          * A child process with MAP_PRIVATE mappings created by their parent
951          * have no page reserves. This check ensures that reservations are
952          * not "stolen". The child may still get SIGKILLed
953          */
954         if (!vma_has_reserves(vma, chg) &&
955                         h->free_huge_pages - h->resv_huge_pages == 0)
956                 goto err;
957
958         /* If reserves cannot be used, ensure enough pages are in the pool */
959         if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
960                 goto err;
961
962         gfp_mask = htlb_alloc_mask(h);
963         nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
964         page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
965         if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
966                 SetPagePrivate(page);
967                 h->resv_huge_pages--;
968         }
969
970         mpol_cond_put(mpol);
971         return page;
972
973 err:
974         return NULL;
975 }
976
977 /*
978  * common helper functions for hstate_next_node_to_{alloc|free}.
979  * We may have allocated or freed a huge page based on a different
980  * nodes_allowed previously, so h->next_node_to_{alloc|free} might
981  * be outside of *nodes_allowed.  Ensure that we use an allowed
982  * node for alloc or free.
983  */
984 static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
985 {
986         nid = next_node_in(nid, *nodes_allowed);
987         VM_BUG_ON(nid >= MAX_NUMNODES);
988
989         return nid;
990 }
991
992 static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
993 {
994         if (!node_isset(nid, *nodes_allowed))
995                 nid = next_node_allowed(nid, nodes_allowed);
996         return nid;
997 }
998
999 /*
1000  * returns the previously saved node ["this node"] from which to
1001  * allocate a persistent huge page for the pool and advance the
1002  * next node from which to allocate, handling wrap at end of node
1003  * mask.
1004  */
1005 static int hstate_next_node_to_alloc(struct hstate *h,
1006                                         nodemask_t *nodes_allowed)
1007 {
1008         int nid;
1009
1010         VM_BUG_ON(!nodes_allowed);
1011
1012         nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
1013         h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
1014
1015         return nid;
1016 }
1017
1018 /*
1019  * helper for free_pool_huge_page() - return the previously saved
1020  * node ["this node"] from which to free a huge page.  Advance the
1021  * next node id whether or not we find a free huge page to free so
1022  * that the next attempt to free addresses the next node.
1023  */
1024 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
1025 {
1026         int nid;
1027
1028         VM_BUG_ON(!nodes_allowed);
1029
1030         nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
1031         h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
1032
1033         return nid;
1034 }
1035
1036 #define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)           \
1037         for (nr_nodes = nodes_weight(*mask);                            \
1038                 nr_nodes > 0 &&                                         \
1039                 ((node = hstate_next_node_to_alloc(hs, mask)) || 1);    \
1040                 nr_nodes--)
1041
1042 #define for_each_node_mask_to_free(hs, nr_nodes, node, mask)            \
1043         for (nr_nodes = nodes_weight(*mask);                            \
1044                 nr_nodes > 0 &&                                         \
1045                 ((node = hstate_next_node_to_free(hs, mask)) || 1);     \
1046                 nr_nodes--)
1047
1048 #ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
1049 static void destroy_compound_gigantic_page(struct page *page,
1050                                         unsigned int order)
1051 {
1052         int i;
1053         int nr_pages = 1 << order;
1054         struct page *p = page + 1;
1055
1056         atomic_set(compound_mapcount_ptr(page), 0);
1057         for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1058                 clear_compound_head(p);
1059                 set_page_refcounted(p);
1060         }
1061
1062         set_compound_order(page, 0);
1063         __ClearPageHead(page);
1064 }
1065
1066 static void free_gigantic_page(struct page *page, unsigned int order)
1067 {
1068         free_contig_range(page_to_pfn(page), 1 << order);
1069 }
1070
1071 #ifdef CONFIG_CONTIG_ALLOC
1072 static int __alloc_gigantic_page(unsigned long start_pfn,
1073                                 unsigned long nr_pages, gfp_t gfp_mask)
1074 {
1075         unsigned long end_pfn = start_pfn + nr_pages;
1076         return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
1077                                   gfp_mask);
1078 }
1079
1080 static bool pfn_range_valid_gigantic(struct zone *z,
1081                         unsigned long start_pfn, unsigned long nr_pages)
1082 {
1083         unsigned long i, end_pfn = start_pfn + nr_pages;
1084         struct page *page;
1085
1086         for (i = start_pfn; i < end_pfn; i++) {
1087                 if (!pfn_valid(i))
1088                         return false;
1089
1090                 page = pfn_to_page(i);
1091
1092                 if (page_zone(page) != z)
1093                         return false;
1094
1095                 if (PageReserved(page))
1096                         return false;
1097
1098                 if (page_count(page) > 0)
1099                         return false;
1100
1101                 if (PageHuge(page))
1102                         return false;
1103         }
1104
1105         return true;
1106 }
1107
1108 static bool zone_spans_last_pfn(const struct zone *zone,
1109                         unsigned long start_pfn, unsigned long nr_pages)
1110 {
1111         unsigned long last_pfn = start_pfn + nr_pages - 1;
1112         return zone_spans_pfn(zone, last_pfn);
1113 }
1114
1115 static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
1116                 int nid, nodemask_t *nodemask)
1117 {
1118         unsigned int order = huge_page_order(h);
1119         unsigned long nr_pages = 1 << order;
1120         unsigned long ret, pfn, flags;
1121         struct zonelist *zonelist;
1122         struct zone *zone;
1123         struct zoneref *z;
1124
1125         zonelist = node_zonelist(nid, gfp_mask);
1126         for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nodemask) {
1127                 spin_lock_irqsave(&zone->lock, flags);
1128
1129                 pfn = ALIGN(zone->zone_start_pfn, nr_pages);
1130                 while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
1131                         if (pfn_range_valid_gigantic(zone, pfn, nr_pages)) {
1132                                 /*
1133                                  * We release the zone lock here because
1134                                  * alloc_contig_range() will also lock the zone
1135                                  * at some point. If there's an allocation
1136                                  * spinning on this lock, it may win the race
1137                                  * and cause alloc_contig_range() to fail...
1138                                  */
1139                                 spin_unlock_irqrestore(&zone->lock, flags);
1140                                 ret = __alloc_gigantic_page(pfn, nr_pages, gfp_mask);
1141                                 if (!ret)
1142                                         return pfn_to_page(pfn);
1143                                 spin_lock_irqsave(&zone->lock, flags);
1144                         }
1145                         pfn += nr_pages;
1146                 }
1147
1148                 spin_unlock_irqrestore(&zone->lock, flags);
1149         }
1150
1151         return NULL;
1152 }
1153
1154 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
1155 static void prep_compound_gigantic_page(struct page *page, unsigned int order);
1156 #else /* !CONFIG_CONTIG_ALLOC */
1157 static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
1158                                         int nid, nodemask_t *nodemask)
1159 {
1160         return NULL;
1161 }
1162 #endif /* CONFIG_CONTIG_ALLOC */
1163
1164 #else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
1165 static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
1166                                         int nid, nodemask_t *nodemask)
1167 {
1168         return NULL;
1169 }
1170 static inline void free_gigantic_page(struct page *page, unsigned int order) { }
1171 static inline void destroy_compound_gigantic_page(struct page *page,
1172                                                 unsigned int order) { }
1173 #endif
1174
1175 static void update_and_free_page(struct hstate *h, struct page *page)
1176 {
1177         int i;
1178
1179         if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
1180                 return;
1181
1182         h->nr_huge_pages--;
1183         h->nr_huge_pages_node[page_to_nid(page)]--;
1184         for (i = 0; i < pages_per_huge_page(h); i++) {
1185                 page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
1186                                 1 << PG_referenced | 1 << PG_dirty |
1187                                 1 << PG_active | 1 << PG_private |
1188                                 1 << PG_writeback);
1189         }
1190         VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
1191         set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
1192         set_page_refcounted(page);
1193         if (hstate_is_gigantic(h)) {
1194                 destroy_compound_gigantic_page(page, huge_page_order(h));
1195                 free_gigantic_page(page, huge_page_order(h));
1196         } else {
1197                 __free_pages(page, huge_page_order(h));
1198         }
1199 }
1200
1201 struct hstate *size_to_hstate(unsigned long size)
1202 {
1203         struct hstate *h;
1204
1205         for_each_hstate(h) {
1206                 if (huge_page_size(h) == size)
1207                         return h;
1208         }
1209         return NULL;
1210 }
1211
1212 /*
1213  * Test to determine whether the hugepage is "active/in-use" (i.e. being linked
1214  * to hstate->hugepage_activelist.)
1215  *
1216  * This function can be called for tail pages, but never returns true for them.
1217  */
1218 bool page_huge_active(struct page *page)
1219 {
1220         VM_BUG_ON_PAGE(!PageHuge(page), page);
1221         return PageHead(page) && PagePrivate(&page[1]);
1222 }
1223
1224 /* never called for tail page */
1225 static void set_page_huge_active(struct page *page)
1226 {
1227         VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1228         SetPagePrivate(&page[1]);
1229 }
1230
1231 static void clear_page_huge_active(struct page *page)
1232 {
1233         VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1234         ClearPagePrivate(&page[1]);
1235 }
1236
1237 /*
1238  * Internal hugetlb specific page flag. Do not use outside of the hugetlb
1239  * code
1240  */
1241 static inline bool PageHugeTemporary(struct page *page)
1242 {
1243         if (!PageHuge(page))
1244                 return false;
1245
1246         return (unsigned long)page[2].mapping == -1U;
1247 }
1248
1249 static inline void SetPageHugeTemporary(struct page *page)
1250 {
1251         page[2].mapping = (void *)-1U;
1252 }
1253
1254 static inline void ClearPageHugeTemporary(struct page *page)
1255 {
1256         page[2].mapping = NULL;
1257 }
1258
1259 void free_huge_page(struct page *page)
1260 {
1261         /*
1262          * Can't pass hstate in here because it is called from the
1263          * compound page destructor.
1264          */
1265         struct hstate *h = page_hstate(page);
1266         int nid = page_to_nid(page);
1267         struct hugepage_subpool *spool =
1268                 (struct hugepage_subpool *)page_private(page);
1269         bool restore_reserve;
1270
1271         VM_BUG_ON_PAGE(page_count(page), page);
1272         VM_BUG_ON_PAGE(page_mapcount(page), page);
1273
1274         set_page_private(page, 0);
1275         page->mapping = NULL;
1276         restore_reserve = PagePrivate(page);
1277         ClearPagePrivate(page);
1278
1279         /*
1280          * If PagePrivate() was set on page, page allocation consumed a
1281          * reservation.  If the page was associated with a subpool, there
1282          * would have been a page reserved in the subpool before allocation
1283          * via hugepage_subpool_get_pages().  Since we are 'restoring' the
1284          * reservtion, do not call hugepage_subpool_put_pages() as this will
1285          * remove the reserved page from the subpool.
1286          */
1287         if (!restore_reserve) {
1288                 /*
1289                  * A return code of zero implies that the subpool will be
1290                  * under its minimum size if the reservation is not restored
1291                  * after page is free.  Therefore, force restore_reserve
1292                  * operation.
1293                  */
1294                 if (hugepage_subpool_put_pages(spool, 1) == 0)
1295                         restore_reserve = true;
1296         }
1297
1298         spin_lock(&hugetlb_lock);
1299         clear_page_huge_active(page);
1300         hugetlb_cgroup_uncharge_page(hstate_index(h),
1301                                      pages_per_huge_page(h), page);
1302         if (restore_reserve)
1303                 h->resv_huge_pages++;
1304
1305         if (PageHugeTemporary(page)) {
1306                 list_del(&page->lru);
1307                 ClearPageHugeTemporary(page);
1308                 update_and_free_page(h, page);
1309         } else if (h->surplus_huge_pages_node[nid]) {
1310                 /* remove the page from active list */
1311                 list_del(&page->lru);
1312                 update_and_free_page(h, page);
1313                 h->surplus_huge_pages--;
1314                 h->surplus_huge_pages_node[nid]--;
1315         } else {
1316                 arch_clear_hugepage_flags(page);
1317                 enqueue_huge_page(h, page);
1318         }
1319         spin_unlock(&hugetlb_lock);
1320 }
1321
1322 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
1323 {
1324         INIT_LIST_HEAD(&page->lru);
1325         set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1326         spin_lock(&hugetlb_lock);
1327         set_hugetlb_cgroup(page, NULL);
1328         h->nr_huge_pages++;
1329         h->nr_huge_pages_node[nid]++;
1330         spin_unlock(&hugetlb_lock);
1331 }
1332
1333 static void prep_compound_gigantic_page(struct page *page, unsigned int order)
1334 {
1335         int i;
1336         int nr_pages = 1 << order;
1337         struct page *p = page + 1;
1338
1339         /* we rely on prep_new_huge_page to set the destructor */
1340         set_compound_order(page, order);
1341         __ClearPageReserved(page);
1342         __SetPageHead(page);
1343         for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1344                 /*
1345                  * For gigantic hugepages allocated through bootmem at
1346                  * boot, it's safer to be consistent with the not-gigantic
1347                  * hugepages and clear the PG_reserved bit from all tail pages
1348                  * too.  Otherwse drivers using get_user_pages() to access tail
1349                  * pages may get the reference counting wrong if they see
1350                  * PG_reserved set on a tail page (despite the head page not
1351                  * having PG_reserved set).  Enforcing this consistency between
1352                  * head and tail pages allows drivers to optimize away a check
1353                  * on the head page when they need know if put_page() is needed
1354                  * after get_user_pages().
1355                  */
1356                 __ClearPageReserved(p);
1357                 set_page_count(p, 0);
1358                 set_compound_head(p, page);
1359         }
1360         atomic_set(compound_mapcount_ptr(page), -1);
1361 }
1362
1363 /*
1364  * PageHuge() only returns true for hugetlbfs pages, but not for normal or
1365  * transparent huge pages.  See the PageTransHuge() documentation for more
1366  * details.
1367  */
1368 int PageHuge(struct page *page)
1369 {
1370         if (!PageCompound(page))
1371                 return 0;
1372
1373         page = compound_head(page);
1374         return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
1375 }
1376 EXPORT_SYMBOL_GPL(PageHuge);
1377
1378 /*
1379  * PageHeadHuge() only returns true for hugetlbfs head page, but not for
1380  * normal or transparent huge pages.
1381  */
1382 int PageHeadHuge(struct page *page_head)
1383 {
1384         if (!PageHead(page_head))
1385                 return 0;
1386
1387         return get_compound_page_dtor(page_head) == free_huge_page;
1388 }
1389
1390 pgoff_t __basepage_index(struct page *page)
1391 {
1392         struct page *page_head = compound_head(page);
1393         pgoff_t index = page_index(page_head);
1394         unsigned long compound_idx;
1395
1396         if (!PageHuge(page_head))
1397                 return page_index(page);
1398
1399         if (compound_order(page_head) >= MAX_ORDER)
1400                 compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
1401         else
1402                 compound_idx = page - page_head;
1403
1404         return (index << compound_order(page_head)) + compound_idx;
1405 }
1406
1407 static struct page *alloc_buddy_huge_page(struct hstate *h,
1408                 gfp_t gfp_mask, int nid, nodemask_t *nmask)
1409 {
1410         int order = huge_page_order(h);
1411         struct page *page;
1412
1413         gfp_mask |= __GFP_COMP|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
1414         if (nid == NUMA_NO_NODE)
1415                 nid = numa_mem_id();
1416         page = __alloc_pages_nodemask(gfp_mask, order, nid, nmask);
1417         if (page)
1418                 __count_vm_event(HTLB_BUDDY_PGALLOC);
1419         else
1420                 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1421
1422         return page;
1423 }
1424
1425 /*
1426  * Common helper to allocate a fresh hugetlb page. All specific allocators
1427  * should use this function to get new hugetlb pages
1428  */
1429 static struct page *alloc_fresh_huge_page(struct hstate *h,
1430                 gfp_t gfp_mask, int nid, nodemask_t *nmask)
1431 {
1432         struct page *page;
1433
1434         if (hstate_is_gigantic(h))
1435                 page = alloc_gigantic_page(h, gfp_mask, nid, nmask);
1436         else
1437                 page = alloc_buddy_huge_page(h, gfp_mask,
1438                                 nid, nmask);
1439         if (!page)
1440                 return NULL;
1441
1442         if (hstate_is_gigantic(h))
1443                 prep_compound_gigantic_page(page, huge_page_order(h));
1444         prep_new_huge_page(h, page, page_to_nid(page));
1445
1446         return page;
1447 }
1448
1449 /*
1450  * Allocates a fresh page to the hugetlb allocator pool in the node interleaved
1451  * manner.
1452  */
1453 static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
1454 {
1455         struct page *page;
1456         int nr_nodes, node;
1457         gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
1458
1459         for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1460                 page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed);
1461                 if (page)
1462                         break;
1463         }
1464
1465         if (!page)
1466                 return 0;
1467
1468         put_page(page); /* free it into the hugepage allocator */
1469
1470         return 1;
1471 }
1472
1473 /*
1474  * Free huge page from pool from next node to free.
1475  * Attempt to keep persistent huge pages more or less
1476  * balanced over allowed nodes.
1477  * Called with hugetlb_lock locked.
1478  */
1479 static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
1480                                                          bool acct_surplus)
1481 {
1482         int nr_nodes, node;
1483         int ret = 0;
1484
1485         for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1486                 /*
1487                  * If we're returning unused surplus pages, only examine
1488                  * nodes with surplus pages.
1489                  */
1490                 if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
1491                     !list_empty(&h->hugepage_freelists[node])) {
1492                         struct page *page =
1493                                 list_entry(h->hugepage_freelists[node].next,
1494                                           struct page, lru);
1495                         list_del(&page->lru);
1496                         h->free_huge_pages--;
1497                         h->free_huge_pages_node[node]--;
1498                         if (acct_surplus) {
1499                                 h->surplus_huge_pages--;
1500                                 h->surplus_huge_pages_node[node]--;
1501                         }
1502                         update_and_free_page(h, page);
1503                         ret = 1;
1504                         break;
1505                 }
1506         }
1507
1508         return ret;
1509 }
1510
1511 /*
1512  * Dissolve a given free hugepage into free buddy pages. This function does
1513  * nothing for in-use (including surplus) hugepages. Returns -EBUSY if the
1514  * dissolution fails because a give page is not a free hugepage, or because
1515  * free hugepages are fully reserved.
1516  */
1517 int dissolve_free_huge_page(struct page *page)
1518 {
1519         int rc = -EBUSY;
1520
1521         spin_lock(&hugetlb_lock);
1522         if (PageHuge(page) && !page_count(page)) {
1523                 struct page *head = compound_head(page);
1524                 struct hstate *h = page_hstate(head);
1525                 int nid = page_to_nid(head);
1526                 if (h->free_huge_pages - h->resv_huge_pages == 0)
1527                         goto out;
1528                 /*
1529                  * Move PageHWPoison flag from head page to the raw error page,
1530                  * which makes any subpages rather than the error page reusable.
1531                  */
1532                 if (PageHWPoison(head) && page != head) {
1533                         SetPageHWPoison(page);
1534                         ClearPageHWPoison(head);
1535                 }
1536                 list_del(&head->lru);
1537                 h->free_huge_pages--;
1538                 h->free_huge_pages_node[nid]--;
1539                 h->max_huge_pages--;
1540                 update_and_free_page(h, head);
1541                 rc = 0;
1542         }
1543 out:
1544         spin_unlock(&hugetlb_lock);
1545         return rc;
1546 }
1547
1548 /*
1549  * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
1550  * make specified memory blocks removable from the system.
1551  * Note that this will dissolve a free gigantic hugepage completely, if any
1552  * part of it lies within the given range.
1553  * Also note that if dissolve_free_huge_page() returns with an error, all
1554  * free hugepages that were dissolved before that error are lost.
1555  */
1556 int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
1557 {
1558         unsigned long pfn;
1559         struct page *page;
1560         int rc = 0;
1561
1562         if (!hugepages_supported())
1563                 return rc;
1564
1565         for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) {
1566                 page = pfn_to_page(pfn);
1567                 if (PageHuge(page) && !page_count(page)) {
1568                         rc = dissolve_free_huge_page(page);
1569                         if (rc)
1570                                 break;
1571                 }
1572         }
1573
1574         return rc;
1575 }
1576
1577 /*
1578  * Allocates a fresh surplus page from the page allocator.
1579  */
1580 static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
1581                 int nid, nodemask_t *nmask)
1582 {
1583         struct page *page = NULL;
1584
1585         if (hstate_is_gigantic(h))
1586                 return NULL;
1587
1588         spin_lock(&hugetlb_lock);
1589         if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
1590                 goto out_unlock;
1591         spin_unlock(&hugetlb_lock);
1592
1593         page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask);
1594         if (!page)
1595                 return NULL;
1596
1597         spin_lock(&hugetlb_lock);
1598         /*
1599          * We could have raced with the pool size change.
1600          * Double check that and simply deallocate the new page
1601          * if we would end up overcommiting the surpluses. Abuse
1602          * temporary page to workaround the nasty free_huge_page
1603          * codeflow
1604          */
1605         if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
1606                 SetPageHugeTemporary(page);
1607                 spin_unlock(&hugetlb_lock);
1608                 put_page(page);
1609                 return NULL;
1610         } else {
1611                 h->surplus_huge_pages++;
1612                 h->surplus_huge_pages_node[page_to_nid(page)]++;
1613         }
1614
1615 out_unlock:
1616         spin_unlock(&hugetlb_lock);
1617
1618         return page;
1619 }
1620
1621 struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
1622                                      int nid, nodemask_t *nmask)
1623 {
1624         struct page *page;
1625
1626         if (hstate_is_gigantic(h))
1627                 return NULL;
1628
1629         page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask);
1630         if (!page)
1631                 return NULL;
1632
1633         /*
1634          * We do not account these pages as surplus because they are only
1635          * temporary and will be released properly on the last reference
1636          */
1637         SetPageHugeTemporary(page);
1638
1639         return page;
1640 }
1641
1642 /*
1643  * Use the VMA's mpolicy to allocate a huge page from the buddy.
1644  */
1645 static
1646 struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
1647                 struct vm_area_struct *vma, unsigned long addr)
1648 {
1649         struct page *page;
1650         struct mempolicy *mpol;
1651         gfp_t gfp_mask = htlb_alloc_mask(h);
1652         int nid;
1653         nodemask_t *nodemask;
1654
1655         nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
1656         page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask);
1657         mpol_cond_put(mpol);
1658
1659         return page;
1660 }
1661
1662 /* page migration callback function */
1663 struct page *alloc_huge_page_node(struct hstate *h, int nid)
1664 {
1665         gfp_t gfp_mask = htlb_alloc_mask(h);
1666         struct page *page = NULL;
1667
1668         if (nid != NUMA_NO_NODE)
1669                 gfp_mask |= __GFP_THISNODE;
1670
1671         spin_lock(&hugetlb_lock);
1672         if (h->free_huge_pages - h->resv_huge_pages > 0)
1673                 page = dequeue_huge_page_nodemask(h, gfp_mask, nid, NULL);
1674         spin_unlock(&hugetlb_lock);
1675
1676         if (!page)
1677                 page = alloc_migrate_huge_page(h, gfp_mask, nid, NULL);
1678
1679         return page;
1680 }
1681
1682 /* page migration callback function */
1683 struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
1684                 nodemask_t *nmask)
1685 {
1686         gfp_t gfp_mask = htlb_alloc_mask(h);
1687
1688         spin_lock(&hugetlb_lock);
1689         if (h->free_huge_pages - h->resv_huge_pages > 0) {
1690                 struct page *page;
1691
1692                 page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask);
1693                 if (page) {
1694                         spin_unlock(&hugetlb_lock);
1695                         return page;
1696                 }
1697         }
1698         spin_unlock(&hugetlb_lock);
1699
1700         return alloc_migrate_huge_page(h, gfp_mask, preferred_nid, nmask);
1701 }
1702
1703 /* mempolicy aware migration callback */
1704 struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
1705                 unsigned long address)
1706 {
1707         struct mempolicy *mpol;
1708         nodemask_t *nodemask;
1709         struct page *page;
1710         gfp_t gfp_mask;
1711         int node;
1712
1713         gfp_mask = htlb_alloc_mask(h);
1714         node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
1715         page = alloc_huge_page_nodemask(h, node, nodemask);
1716         mpol_cond_put(mpol);
1717
1718         return page;
1719 }
1720
1721 /*
1722  * Increase the hugetlb pool such that it can accommodate a reservation
1723  * of size 'delta'.
1724  */
1725 static int gather_surplus_pages(struct hstate *h, int delta)
1726 {
1727         struct list_head surplus_list;
1728         struct page *page, *tmp;
1729         int ret, i;
1730         int needed, allocated;
1731         bool alloc_ok = true;
1732
1733         needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
1734         if (needed <= 0) {
1735                 h->resv_huge_pages += delta;
1736                 return 0;
1737         }
1738
1739         allocated = 0;
1740         INIT_LIST_HEAD(&surplus_list);
1741
1742         ret = -ENOMEM;
1743 retry:
1744         spin_unlock(&hugetlb_lock);
1745         for (i = 0; i < needed; i++) {
1746                 page = alloc_surplus_huge_page(h, htlb_alloc_mask(h),
1747                                 NUMA_NO_NODE, NULL);
1748                 if (!page) {
1749                         alloc_ok = false;
1750                         break;
1751                 }
1752                 list_add(&page->lru, &surplus_list);
1753                 cond_resched();
1754         }
1755         allocated += i;
1756
1757         /*
1758          * After retaking hugetlb_lock, we need to recalculate 'needed'
1759          * because either resv_huge_pages or free_huge_pages may have changed.
1760          */
1761         spin_lock(&hugetlb_lock);
1762         needed = (h->resv_huge_pages + delta) -
1763                         (h->free_huge_pages + allocated);
1764         if (needed > 0) {
1765                 if (alloc_ok)
1766                         goto retry;
1767                 /*
1768                  * We were not able to allocate enough pages to
1769                  * satisfy the entire reservation so we free what
1770                  * we've allocated so far.
1771                  */
1772                 goto free;
1773         }
1774         /*
1775          * The surplus_list now contains _at_least_ the number of extra pages
1776          * needed to accommodate the reservation.  Add the appropriate number
1777          * of pages to the hugetlb pool and free the extras back to the buddy
1778          * allocator.  Commit the entire reservation here to prevent another
1779          * process from stealing the pages as they are added to the pool but
1780          * before they are reserved.
1781          */
1782         needed += allocated;
1783         h->resv_huge_pages += delta;
1784         ret = 0;
1785
1786         /* Free the needed pages to the hugetlb pool */
1787         list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
1788                 if ((--needed) < 0)
1789                         break;
1790                 /*
1791                  * This page is now managed by the hugetlb allocator and has
1792                  * no users -- drop the buddy allocator's reference.
1793                  */
1794                 put_page_testzero(page);
1795                 VM_BUG_ON_PAGE(page_count(page), page);
1796                 enqueue_huge_page(h, page);
1797         }
1798 free:
1799         spin_unlock(&hugetlb_lock);
1800
1801         /* Free unnecessary surplus pages to the buddy allocator */
1802         list_for_each_entry_safe(page, tmp, &surplus_list, lru)
1803                 put_page(page);
1804         spin_lock(&hugetlb_lock);
1805
1806         return ret;
1807 }
1808
1809 /*
1810  * This routine has two main purposes:
1811  * 1) Decrement the reservation count (resv_huge_pages) by the value passed
1812  *    in unused_resv_pages.  This corresponds to the prior adjustments made
1813  *    to the associated reservation map.
1814  * 2) Free any unused surplus pages that may have been allocated to satisfy
1815  *    the reservation.  As many as unused_resv_pages may be freed.
1816  *
1817  * Called with hugetlb_lock held.  However, the lock could be dropped (and
1818  * reacquired) during calls to cond_resched_lock.  Whenever dropping the lock,
1819  * we must make sure nobody else can claim pages we are in the process of
1820  * freeing.  Do this by ensuring resv_huge_page always is greater than the
1821  * number of huge pages we plan to free when dropping the lock.
1822  */
1823 static void return_unused_surplus_pages(struct hstate *h,
1824                                         unsigned long unused_resv_pages)
1825 {
1826         unsigned long nr_pages;
1827
1828         /* Cannot return gigantic pages currently */
1829         if (hstate_is_gigantic(h))
1830                 goto out;
1831
1832         /*
1833          * Part (or even all) of the reservation could have been backed
1834          * by pre-allocated pages. Only free surplus pages.
1835          */
1836         nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
1837
1838         /*
1839          * We want to release as many surplus pages as possible, spread
1840          * evenly across all nodes with memory. Iterate across these nodes
1841          * until we can no longer free unreserved surplus pages. This occurs
1842          * when the nodes with surplus pages have no free pages.
1843          * free_pool_huge_page() will balance the the freed pages across the
1844          * on-line nodes with memory and will handle the hstate accounting.
1845          *
1846          * Note that we decrement resv_huge_pages as we free the pages.  If
1847          * we drop the lock, resv_huge_pages will still be sufficiently large
1848          * to cover subsequent pages we may free.
1849          */
1850         while (nr_pages--) {
1851                 h->resv_huge_pages--;
1852                 unused_resv_pages--;
1853                 if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
1854                         goto out;
1855                 cond_resched_lock(&hugetlb_lock);
1856         }
1857
1858 out:
1859         /* Fully uncommit the reservation */
1860         h->resv_huge_pages -= unused_resv_pages;
1861 }
1862
1863
1864 /*
1865  * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
1866  * are used by the huge page allocation routines to manage reservations.
1867  *
1868  * vma_needs_reservation is called to determine if the huge page at addr
1869  * within the vma has an associated reservation.  If a reservation is
1870  * needed, the value 1 is returned.  The caller is then responsible for
1871  * managing the global reservation and subpool usage counts.  After
1872  * the huge page has been allocated, vma_commit_reservation is called
1873  * to add the page to the reservation map.  If the page allocation fails,
1874  * the reservation must be ended instead of committed.  vma_end_reservation
1875  * is called in such cases.
1876  *
1877  * In the normal case, vma_commit_reservation returns the same value
1878  * as the preceding vma_needs_reservation call.  The only time this
1879  * is not the case is if a reserve map was changed between calls.  It
1880  * is the responsibility of the caller to notice the difference and
1881  * take appropriate action.
1882  *
1883  * vma_add_reservation is used in error paths where a reservation must
1884  * be restored when a newly allocated huge page must be freed.  It is
1885  * to be called after calling vma_needs_reservation to determine if a
1886  * reservation exists.
1887  */
1888 enum vma_resv_mode {
1889         VMA_NEEDS_RESV,
1890         VMA_COMMIT_RESV,
1891         VMA_END_RESV,
1892         VMA_ADD_RESV,
1893 };
1894 static long __vma_reservation_common(struct hstate *h,
1895                                 struct vm_area_struct *vma, unsigned long addr,
1896                                 enum vma_resv_mode mode)
1897 {
1898         struct resv_map *resv;
1899         pgoff_t idx;
1900         long ret;
1901
1902         resv = vma_resv_map(vma);
1903         if (!resv)
1904                 return 1;
1905
1906         idx = vma_hugecache_offset(h, vma, addr);
1907         switch (mode) {
1908         case VMA_NEEDS_RESV:
1909                 ret = region_chg(resv, idx, idx + 1);
1910                 break;
1911         case VMA_COMMIT_RESV:
1912                 ret = region_add(resv, idx, idx + 1);
1913                 break;
1914         case VMA_END_RESV:
1915                 region_abort(resv, idx, idx + 1);
1916                 ret = 0;
1917                 break;
1918         case VMA_ADD_RESV:
1919                 if (vma->vm_flags & VM_MAYSHARE)
1920                         ret = region_add(resv, idx, idx + 1);
1921                 else {
1922                         region_abort(resv, idx, idx + 1);
1923                         ret = region_del(resv, idx, idx + 1);
1924                 }
1925                 break;
1926         default:
1927                 BUG();
1928         }
1929
1930         if (vma->vm_flags & VM_MAYSHARE)
1931                 return ret;
1932         else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && ret >= 0) {
1933                 /*
1934                  * In most cases, reserves always exist for private mappings.
1935                  * However, a file associated with mapping could have been
1936                  * hole punched or truncated after reserves were consumed.
1937                  * As subsequent fault on such a range will not use reserves.
1938                  * Subtle - The reserve map for private mappings has the
1939                  * opposite meaning than that of shared mappings.  If NO
1940                  * entry is in the reserve map, it means a reservation exists.
1941                  * If an entry exists in the reserve map, it means the
1942                  * reservation has already been consumed.  As a result, the
1943                  * return value of this routine is the opposite of the
1944                  * value returned from reserve map manipulation routines above.
1945                  */
1946                 if (ret)
1947                         return 0;
1948                 else
1949                         return 1;
1950         }
1951         else
1952                 return ret < 0 ? ret : 0;
1953 }
1954
1955 static long vma_needs_reservation(struct hstate *h,
1956                         struct vm_area_struct *vma, unsigned long addr)
1957 {
1958         return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
1959 }
1960
1961 static long vma_commit_reservation(struct hstate *h,
1962                         struct vm_area_struct *vma, unsigned long addr)
1963 {
1964         return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
1965 }
1966
1967 static void vma_end_reservation(struct hstate *h,
1968                         struct vm_area_struct *vma, unsigned long addr)
1969 {
1970         (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
1971 }
1972
1973 static long vma_add_reservation(struct hstate *h,
1974                         struct vm_area_struct *vma, unsigned long addr)
1975 {
1976         return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
1977 }
1978
1979 /*
1980  * This routine is called to restore a reservation on error paths.  In the
1981  * specific error paths, a huge page was allocated (via alloc_huge_page)
1982  * and is about to be freed.  If a reservation for the page existed,
1983  * alloc_huge_page would have consumed the reservation and set PagePrivate
1984  * in the newly allocated page.  When the page is freed via free_huge_page,
1985  * the global reservation count will be incremented if PagePrivate is set.
1986  * However, free_huge_page can not adjust the reserve map.  Adjust the
1987  * reserve map here to be consistent with global reserve count adjustments
1988  * to be made by free_huge_page.
1989  */
1990 static void restore_reserve_on_error(struct hstate *h,
1991                         struct vm_area_struct *vma, unsigned long address,
1992                         struct page *page)
1993 {
1994         if (unlikely(PagePrivate(page))) {
1995                 long rc = vma_needs_reservation(h, vma, address);
1996
1997                 if (unlikely(rc < 0)) {
1998                         /*
1999                          * Rare out of memory condition in reserve map
2000                          * manipulation.  Clear PagePrivate so that
2001                          * global reserve count will not be incremented
2002                          * by free_huge_page.  This will make it appear
2003                          * as though the reservation for this page was
2004                          * consumed.  This may prevent the task from
2005                          * faulting in the page at a later time.  This
2006                          * is better than inconsistent global huge page
2007                          * accounting of reserve counts.
2008                          */
2009                         ClearPagePrivate(page);
2010                 } else if (rc) {
2011                         rc = vma_add_reservation(h, vma, address);
2012                         if (unlikely(rc < 0))
2013                                 /*
2014                                  * See above comment about rare out of
2015                                  * memory condition.
2016                                  */
2017                                 ClearPagePrivate(page);
2018                 } else
2019                         vma_end_reservation(h, vma, address);
2020         }
2021 }
2022
2023 struct page *alloc_huge_page(struct vm_area_struct *vma,
2024                                     unsigned long addr, int avoid_reserve)
2025 {
2026         struct hugepage_subpool *spool = subpool_vma(vma);
2027         struct hstate *h = hstate_vma(vma);
2028         struct page *page;
2029         long map_chg, map_commit;
2030         long gbl_chg;
2031         int ret, idx;
2032         struct hugetlb_cgroup *h_cg;
2033
2034         idx = hstate_index(h);
2035         /*
2036          * Examine the region/reserve map to determine if the process
2037          * has a reservation for the page to be allocated.  A return
2038          * code of zero indicates a reservation exists (no change).
2039          */
2040         map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
2041         if (map_chg < 0)
2042                 return ERR_PTR(-ENOMEM);
2043
2044         /*
2045          * Processes that did not create the mapping will have no
2046          * reserves as indicated by the region/reserve map. Check
2047          * that the allocation will not exceed the subpool limit.
2048          * Allocations for MAP_NORESERVE mappings also need to be
2049          * checked against any subpool limit.
2050          */
2051         if (map_chg || avoid_reserve) {
2052                 gbl_chg = hugepage_subpool_get_pages(spool, 1);
2053                 if (gbl_chg < 0) {
2054                         vma_end_reservation(h, vma, addr);
2055                         return ERR_PTR(-ENOSPC);
2056                 }
2057
2058                 /*
2059                  * Even though there was no reservation in the region/reserve
2060                  * map, there could be reservations associated with the
2061                  * subpool that can be used.  This would be indicated if the
2062                  * return value of hugepage_subpool_get_pages() is zero.
2063                  * However, if avoid_reserve is specified we still avoid even
2064                  * the subpool reservations.
2065                  */
2066                 if (avoid_reserve)
2067                         gbl_chg = 1;
2068         }
2069
2070         ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
2071         if (ret)
2072                 goto out_subpool_put;
2073
2074         spin_lock(&hugetlb_lock);
2075         /*
2076          * glb_chg is passed to indicate whether or not a page must be taken
2077          * from the global free pool (global change).  gbl_chg == 0 indicates
2078          * a reservation exists for the allocation.
2079          */
2080         page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
2081         if (!page) {
2082                 spin_unlock(&hugetlb_lock);
2083                 page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
2084                 if (!page)
2085                         goto out_uncharge_cgroup;
2086                 if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
2087                         SetPagePrivate(page);
2088                         h->resv_huge_pages--;
2089                 }
2090                 spin_lock(&hugetlb_lock);
2091                 list_move(&page->lru, &h->hugepage_activelist);
2092                 /* Fall through */
2093         }
2094         hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
2095         spin_unlock(&hugetlb_lock);
2096
2097         set_page_private(page, (unsigned long)spool);
2098
2099         map_commit = vma_commit_reservation(h, vma, addr);
2100         if (unlikely(map_chg > map_commit)) {
2101                 /*
2102                  * The page was added to the reservation map between
2103                  * vma_needs_reservation and vma_commit_reservation.
2104                  * This indicates a race with hugetlb_reserve_pages.
2105                  * Adjust for the subpool count incremented above AND
2106                  * in hugetlb_reserve_pages for the same page.  Also,
2107                  * the reservation count added in hugetlb_reserve_pages
2108                  * no longer applies.
2109                  */
2110                 long rsv_adjust;
2111
2112                 rsv_adjust = hugepage_subpool_put_pages(spool, 1);
2113                 hugetlb_acct_memory(h, -rsv_adjust);
2114         }
2115         return page;
2116
2117 out_uncharge_cgroup:
2118         hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
2119 out_subpool_put:
2120         if (map_chg || avoid_reserve)
2121                 hugepage_subpool_put_pages(spool, 1);
2122         vma_end_reservation(h, vma, addr);
2123         return ERR_PTR(-ENOSPC);
2124 }
2125
2126 int alloc_bootmem_huge_page(struct hstate *h)
2127         __attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
2128 int __alloc_bootmem_huge_page(struct hstate *h)
2129 {
2130         struct huge_bootmem_page *m;
2131         int nr_nodes, node;
2132
2133         for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
2134                 void *addr;
2135
2136                 addr = memblock_alloc_try_nid_raw(
2137                                 huge_page_size(h), huge_page_size(h),
2138                                 0, MEMBLOCK_ALLOC_ACCESSIBLE, node);
2139                 if (addr) {
2140                         /*
2141                          * Use the beginning of the huge page to store the
2142                          * huge_bootmem_page struct (until gather_bootmem
2143                          * puts them into the mem_map).
2144                          */
2145                         m = addr;
2146                         goto found;
2147                 }
2148         }
2149         return 0;
2150
2151 found:
2152         BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
2153         /* Put them into a private list first because mem_map is not up yet */
2154         INIT_LIST_HEAD(&m->list);
2155         list_add(&m->list, &huge_boot_pages);
2156         m->hstate = h;
2157         return 1;
2158 }
2159
2160 static void __init prep_compound_huge_page(struct page *page,
2161                 unsigned int order)
2162 {
2163         if (unlikely(order > (MAX_ORDER - 1)))
2164                 prep_compound_gigantic_page(page, order);
2165         else
2166                 prep_compound_page(page, order);
2167 }
2168
2169 /* Put bootmem huge pages into the standard lists after mem_map is up */
2170 static void __init gather_bootmem_prealloc(void)
2171 {
2172         struct huge_bootmem_page *m;
2173
2174         list_for_each_entry(m, &huge_boot_pages, list) {
2175                 struct page *page = virt_to_page(m);
2176                 struct hstate *h = m->hstate;
2177
2178                 WARN_ON(page_count(page) != 1);
2179                 prep_compound_huge_page(page, h->order);
2180                 WARN_ON(PageReserved(page));
2181                 prep_new_huge_page(h, page, page_to_nid(page));
2182                 put_page(page); /* free it into the hugepage allocator */
2183
2184                 /*
2185                  * If we had gigantic hugepages allocated at boot time, we need
2186                  * to restore the 'stolen' pages to totalram_pages in order to
2187                  * fix confusing memory reports from free(1) and another
2188                  * side-effects, like CommitLimit going negative.
2189                  */
2190                 if (hstate_is_gigantic(h))
2191                         adjust_managed_page_count(page, 1 << h->order);
2192                 cond_resched();
2193         }
2194 }
2195
2196 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
2197 {
2198         unsigned long i;
2199
2200         for (i = 0; i < h->max_huge_pages; ++i) {
2201                 if (hstate_is_gigantic(h)) {
2202                         if (!alloc_bootmem_huge_page(h))
2203                                 break;
2204                 } else if (!alloc_pool_huge_page(h,
2205                                          &node_states[N_MEMORY]))
2206                         break;
2207                 cond_resched();
2208         }
2209         if (i < h->max_huge_pages) {
2210                 char buf[32];
2211
2212                 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
2213                 pr_warn("HugeTLB: allocating %lu of page size %s failed.  Only allocated %lu hugepages.\n",
2214                         h->max_huge_pages, buf, i);
2215                 h->max_huge_pages = i;
2216         }
2217 }
2218
2219 static void __init hugetlb_init_hstates(void)
2220 {
2221         struct hstate *h;
2222
2223         for_each_hstate(h) {
2224                 if (minimum_order > huge_page_order(h))
2225                         minimum_order = huge_page_order(h);
2226
2227                 /* oversize hugepages were init'ed in early boot */
2228                 if (!hstate_is_gigantic(h))
2229                         hugetlb_hstate_alloc_pages(h);
2230         }
2231         VM_BUG_ON(minimum_order == UINT_MAX);
2232 }
2233
2234 static void __init report_hugepages(void)
2235 {
2236         struct hstate *h;
2237
2238         for_each_hstate(h) {
2239                 char buf[32];
2240
2241                 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
2242                 pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
2243                         buf, h->free_huge_pages);
2244         }
2245 }
2246
2247 #ifdef CONFIG_HIGHMEM
2248 static void try_to_free_low(struct hstate *h, unsigned long count,
2249                                                 nodemask_t *nodes_allowed)
2250 {
2251         int i;
2252
2253         if (hstate_is_gigantic(h))
2254                 return;
2255
2256         for_each_node_mask(i, *nodes_allowed) {
2257                 struct page *page, *next;
2258                 struct list_head *freel = &h->hugepage_freelists[i];
2259                 list_for_each_entry_safe(page, next, freel, lru) {
2260                         if (count >= h->nr_huge_pages)
2261                                 return;
2262                         if (PageHighMem(page))
2263                                 continue;
2264                         list_del(&page->lru);
2265                         update_and_free_page(h, page);
2266                         h->free_huge_pages--;
2267                         h->free_huge_pages_node[page_to_nid(page)]--;
2268                 }
2269         }
2270 }
2271 #else
2272 static inline void try_to_free_low(struct hstate *h, unsigned long count,
2273                                                 nodemask_t *nodes_allowed)
2274 {
2275 }
2276 #endif
2277
2278 /*
2279  * Increment or decrement surplus_huge_pages.  Keep node-specific counters
2280  * balanced by operating on them in a round-robin fashion.
2281  * Returns 1 if an adjustment was made.
2282  */
2283 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
2284                                 int delta)
2285 {
2286         int nr_nodes, node;
2287
2288         VM_BUG_ON(delta != -1 && delta != 1);
2289
2290         if (delta < 0) {
2291                 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
2292                         if (h->surplus_huge_pages_node[node])
2293                                 goto found;
2294                 }
2295         } else {
2296                 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
2297                         if (h->surplus_huge_pages_node[node] <
2298                                         h->nr_huge_pages_node[node])
2299                                 goto found;
2300                 }
2301         }
2302         return 0;
2303
2304 found:
2305         h->surplus_huge_pages += delta;
2306         h->surplus_huge_pages_node[node] += delta;
2307         return 1;
2308 }
2309
2310 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
2311 static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
2312                               nodemask_t *nodes_allowed)
2313 {
2314         unsigned long min_count, ret;
2315
2316         spin_lock(&hugetlb_lock);
2317
2318         /*
2319          * Check for a node specific request.
2320          * Changing node specific huge page count may require a corresponding
2321          * change to the global count.  In any case, the passed node mask
2322          * (nodes_allowed) will restrict alloc/free to the specified node.
2323          */
2324         if (nid != NUMA_NO_NODE) {
2325                 unsigned long old_count = count;
2326
2327                 count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
2328                 /*
2329                  * User may have specified a large count value which caused the
2330                  * above calculation to overflow.  In this case, they wanted
2331                  * to allocate as many huge pages as possible.  Set count to
2332                  * largest possible value to align with their intention.
2333                  */
2334                 if (count < old_count)
2335                         count = ULONG_MAX;
2336         }
2337
2338         /*
2339          * Gigantic pages runtime allocation depend on the capability for large
2340          * page range allocation.
2341          * If the system does not provide this feature, return an error when
2342          * the user tries to allocate gigantic pages but let the user free the
2343          * boottime allocated gigantic pages.
2344          */
2345         if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) {
2346                 if (count > persistent_huge_pages(h)) {
2347                         spin_unlock(&hugetlb_lock);
2348                         return -EINVAL;
2349                 }
2350                 /* Fall through to decrease pool */
2351         }
2352
2353         /*
2354          * Increase the pool size
2355          * First take pages out of surplus state.  Then make up the
2356          * remaining difference by allocating fresh huge pages.
2357          *
2358          * We might race with alloc_surplus_huge_page() here and be unable
2359          * to convert a surplus huge page to a normal huge page. That is
2360          * not critical, though, it just means the overall size of the
2361          * pool might be one hugepage larger than it needs to be, but
2362          * within all the constraints specified by the sysctls.
2363          */
2364         while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
2365                 if (!adjust_pool_surplus(h, nodes_allowed, -1))
2366                         break;
2367         }
2368
2369         while (count > persistent_huge_pages(h)) {
2370                 /*
2371                  * If this allocation races such that we no longer need the
2372                  * page, free_huge_page will handle it by freeing the page
2373                  * and reducing the surplus.
2374                  */
2375                 spin_unlock(&hugetlb_lock);
2376
2377                 /* yield cpu to avoid soft lockup */
2378                 cond_resched();
2379
2380                 ret = alloc_pool_huge_page(h, nodes_allowed);
2381                 spin_lock(&hugetlb_lock);
2382                 if (!ret)
2383                         goto out;
2384
2385                 /* Bail for signals. Probably ctrl-c from user */
2386                 if (signal_pending(current))
2387                         goto out;
2388         }
2389
2390         /*
2391          * Decrease the pool size
2392          * First return free pages to the buddy allocator (being careful
2393          * to keep enough around to satisfy reservations).  Then place
2394          * pages into surplus state as needed so the pool will shrink
2395          * to the desired size as pages become free.
2396          *
2397          * By placing pages into the surplus state independent of the
2398          * overcommit value, we are allowing the surplus pool size to
2399          * exceed overcommit. There are few sane options here. Since
2400          * alloc_surplus_huge_page() is checking the global counter,
2401          * though, we'll note that we're not allowed to exceed surplus
2402          * and won't grow the pool anywhere else. Not until one of the
2403          * sysctls are changed, or the surplus pages go out of use.
2404          */
2405         min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
2406         min_count = max(count, min_count);
2407         try_to_free_low(h, min_count, nodes_allowed);
2408         while (min_count < persistent_huge_pages(h)) {
2409                 if (!free_pool_huge_page(h, nodes_allowed, 0))
2410                         break;
2411                 cond_resched_lock(&hugetlb_lock);
2412         }
2413         while (count < persistent_huge_pages(h)) {
2414                 if (!adjust_pool_surplus(h, nodes_allowed, 1))
2415                         break;
2416         }
2417 out:
2418         h->max_huge_pages = persistent_huge_pages(h);
2419         spin_unlock(&hugetlb_lock);
2420
2421         return 0;
2422 }
2423
2424 #define HSTATE_ATTR_RO(_name) \
2425         static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
2426
2427 #define HSTATE_ATTR(_name) \
2428         static struct kobj_attribute _name##_attr = \
2429                 __ATTR(_name, 0644, _name##_show, _name##_store)
2430
2431 static struct kobject *hugepages_kobj;
2432 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
2433
2434 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
2435
2436 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
2437 {
2438         int i;
2439
2440         for (i = 0; i < HUGE_MAX_HSTATE; i++)
2441                 if (hstate_kobjs[i] == kobj) {
2442                         if (nidp)
2443                                 *nidp = NUMA_NO_NODE;
2444                         return &hstates[i];
2445                 }
2446
2447         return kobj_to_node_hstate(kobj, nidp);
2448 }
2449
2450 static ssize_t nr_hugepages_show_common(struct kobject *kobj,
2451                                         struct kobj_attribute *attr, char *buf)
2452 {
2453         struct hstate *h;
2454         unsigned long nr_huge_pages;
2455         int nid;
2456
2457         h = kobj_to_hstate(kobj, &nid);
2458         if (nid == NUMA_NO_NODE)
2459                 nr_huge_pages = h->nr_huge_pages;
2460         else
2461                 nr_huge_pages = h->nr_huge_pages_node[nid];
2462
2463         return sprintf(buf, "%lu\n", nr_huge_pages);
2464 }
2465
2466 static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
2467                                            struct hstate *h, int nid,
2468                                            unsigned long count, size_t len)
2469 {
2470         int err;
2471         nodemask_t nodes_allowed, *n_mask;
2472
2473         if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
2474                 return -EINVAL;
2475
2476         if (nid == NUMA_NO_NODE) {
2477                 /*
2478                  * global hstate attribute
2479                  */
2480                 if (!(obey_mempolicy &&
2481                                 init_nodemask_of_mempolicy(&nodes_allowed)))
2482                         n_mask = &node_states[N_MEMORY];
2483                 else
2484                         n_mask = &nodes_allowed;
2485         } else {
2486                 /*
2487                  * Node specific request.  count adjustment happens in
2488                  * set_max_huge_pages() after acquiring hugetlb_lock.
2489                  */
2490                 init_nodemask_of_node(&nodes_allowed, nid);
2491                 n_mask = &nodes_allowed;
2492         }
2493
2494         err = set_max_huge_pages(h, count, nid, n_mask);
2495
2496         return err ? err : len;
2497 }
2498
2499 static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
2500                                          struct kobject *kobj, const char *buf,
2501                                          size_t len)
2502 {
2503         struct hstate *h;
2504         unsigned long count;
2505         int nid;
2506         int err;
2507
2508         err = kstrtoul(buf, 10, &count);
2509         if (err)
2510                 return err;
2511
2512         h = kobj_to_hstate(kobj, &nid);
2513         return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
2514 }
2515
2516 static ssize_t nr_hugepages_show(struct kobject *kobj,
2517                                        struct kobj_attribute *attr, char *buf)
2518 {
2519         return nr_hugepages_show_common(kobj, attr, buf);
2520 }
2521
2522 static ssize_t nr_hugepages_store(struct kobject *kobj,
2523                struct kobj_attribute *attr, const char *buf, size_t len)
2524 {
2525         return nr_hugepages_store_common(false, kobj, buf, len);
2526 }
2527 HSTATE_ATTR(nr_hugepages);
2528
2529 #ifdef CONFIG_NUMA
2530
2531 /*
2532  * hstate attribute for optionally mempolicy-based constraint on persistent
2533  * huge page alloc/free.
2534  */
2535 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
2536                                        struct kobj_attribute *attr, char *buf)
2537 {
2538         return nr_hugepages_show_common(kobj, attr, buf);
2539 }
2540
2541 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
2542                struct kobj_attribute *attr, const char *buf, size_t len)
2543 {
2544         return nr_hugepages_store_common(true, kobj, buf, len);
2545 }
2546 HSTATE_ATTR(nr_hugepages_mempolicy);
2547 #endif
2548
2549
2550 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
2551                                         struct kobj_attribute *attr, char *buf)
2552 {
2553         struct hstate *h = kobj_to_hstate(kobj, NULL);
2554         return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
2555 }
2556
2557 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
2558                 struct kobj_attribute *attr, const char *buf, size_t count)
2559 {
2560         int err;
2561         unsigned long input;
2562         struct hstate *h = kobj_to_hstate(kobj, NULL);
2563
2564         if (hstate_is_gigantic(h))
2565                 return -EINVAL;
2566
2567         err = kstrtoul(buf, 10, &input);
2568         if (err)
2569                 return err;
2570
2571         spin_lock(&hugetlb_lock);
2572         h->nr_overcommit_huge_pages = input;
2573         spin_unlock(&hugetlb_lock);
2574
2575         return count;
2576 }
2577 HSTATE_ATTR(nr_overcommit_hugepages);
2578
2579 static ssize_t free_hugepages_show(struct kobject *kobj,
2580                                         struct kobj_attribute *attr, char *buf)
2581 {
2582         struct hstate *h;
2583         unsigned long free_huge_pages;
2584         int nid;
2585
2586         h = kobj_to_hstate(kobj, &nid);
2587         if (nid == NUMA_NO_NODE)
2588                 free_huge_pages = h->free_huge_pages;
2589         else
2590                 free_huge_pages = h->free_huge_pages_node[nid];
2591
2592         return sprintf(buf, "%lu\n", free_huge_pages);
2593 }
2594 HSTATE_ATTR_RO(free_hugepages);
2595
2596 static ssize_t resv_hugepages_show(struct kobject *kobj,
2597                                         struct kobj_attribute *attr, char *buf)
2598 {
2599         struct hstate *h = kobj_to_hstate(kobj, NULL);
2600         return sprintf(buf, "%lu\n", h->resv_huge_pages);
2601 }
2602 HSTATE_ATTR_RO(resv_hugepages);
2603
2604 static ssize_t surplus_hugepages_show(struct kobject *kobj,
2605                                         struct kobj_attribute *attr, char *buf)
2606 {
2607         struct hstate *h;
2608         unsigned long surplus_huge_pages;
2609         int nid;
2610
2611         h = kobj_to_hstate(kobj, &nid);
2612         if (nid == NUMA_NO_NODE)
2613                 surplus_huge_pages = h->surplus_huge_pages;
2614         else
2615                 surplus_huge_pages = h->surplus_huge_pages_node[nid];
2616
2617         return sprintf(buf, "%lu\n", surplus_huge_pages);
2618 }
2619 HSTATE_ATTR_RO(surplus_hugepages);
2620
2621 static struct attribute *hstate_attrs[] = {
2622         &nr_hugepages_attr.attr,
2623         &nr_overcommit_hugepages_attr.attr,
2624         &free_hugepages_attr.attr,
2625         &resv_hugepages_attr.attr,
2626         &surplus_hugepages_attr.attr,
2627 #ifdef CONFIG_NUMA
2628         &nr_hugepages_mempolicy_attr.attr,
2629 #endif
2630         NULL,
2631 };
2632
2633 static const struct attribute_group hstate_attr_group = {
2634         .attrs = hstate_attrs,
2635 };
2636
2637 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
2638                                     struct kobject **hstate_kobjs,
2639                                     const struct attribute_group *hstate_attr_group)
2640 {
2641         int retval;
2642         int hi = hstate_index(h);
2643
2644         hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
2645         if (!hstate_kobjs[hi])
2646                 return -ENOMEM;
2647
2648         retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
2649         if (retval)
2650                 kobject_put(hstate_kobjs[hi]);
2651
2652         return retval;
2653 }
2654
2655 static void __init hugetlb_sysfs_init(void)
2656 {
2657         struct hstate *h;
2658         int err;
2659
2660         hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
2661         if (!hugepages_kobj)
2662                 return;
2663
2664         for_each_hstate(h) {
2665                 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
2666                                          hstate_kobjs, &hstate_attr_group);
2667                 if (err)
2668                         pr_err("Hugetlb: Unable to add hstate %s", h->name);
2669         }
2670 }
2671
2672 #ifdef CONFIG_NUMA
2673
2674 /*
2675  * node_hstate/s - associate per node hstate attributes, via their kobjects,
2676  * with node devices in node_devices[] using a parallel array.  The array
2677  * index of a node device or _hstate == node id.
2678  * This is here to avoid any static dependency of the node device driver, in
2679  * the base kernel, on the hugetlb module.
2680  */
2681 struct node_hstate {
2682         struct kobject          *hugepages_kobj;
2683         struct kobject          *hstate_kobjs[HUGE_MAX_HSTATE];
2684 };
2685 static struct node_hstate node_hstates[MAX_NUMNODES];
2686
2687 /*
2688  * A subset of global hstate attributes for node devices
2689  */
2690 static struct attribute *per_node_hstate_attrs[] = {
2691         &nr_hugepages_attr.attr,
2692         &free_hugepages_attr.attr,
2693         &surplus_hugepages_attr.attr,
2694         NULL,
2695 };
2696
2697 static const struct attribute_group per_node_hstate_attr_group = {
2698         .attrs = per_node_hstate_attrs,
2699 };
2700
2701 /*
2702  * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
2703  * Returns node id via non-NULL nidp.
2704  */
2705 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2706 {
2707         int nid;
2708
2709         for (nid = 0; nid < nr_node_ids; nid++) {
2710                 struct node_hstate *nhs = &node_hstates[nid];
2711                 int i;
2712                 for (i = 0; i < HUGE_MAX_HSTATE; i++)
2713                         if (nhs->hstate_kobjs[i] == kobj) {
2714                                 if (nidp)
2715                                         *nidp = nid;
2716                                 return &hstates[i];
2717                         }
2718         }
2719
2720         BUG();
2721         return NULL;
2722 }
2723
2724 /*
2725  * Unregister hstate attributes from a single node device.
2726  * No-op if no hstate attributes attached.
2727  */
2728 static void hugetlb_unregister_node(struct node *node)
2729 {
2730         struct hstate *h;
2731         struct node_hstate *nhs = &node_hstates[node->dev.id];
2732
2733         if (!nhs->hugepages_kobj)
2734                 return;         /* no hstate attributes */
2735
2736         for_each_hstate(h) {
2737                 int idx = hstate_index(h);
2738                 if (nhs->hstate_kobjs[idx]) {
2739                         kobject_put(nhs->hstate_kobjs[idx]);
2740                         nhs->hstate_kobjs[idx] = NULL;
2741                 }
2742         }
2743
2744         kobject_put(nhs->hugepages_kobj);
2745         nhs->hugepages_kobj = NULL;
2746 }
2747
2748
2749 /*
2750  * Register hstate attributes for a single node device.
2751  * No-op if attributes already registered.
2752  */
2753 static void hugetlb_register_node(struct node *node)
2754 {
2755         struct hstate *h;
2756         struct node_hstate *nhs = &node_hstates[node->dev.id];
2757         int err;
2758
2759         if (nhs->hugepages_kobj)
2760                 return;         /* already allocated */
2761
2762         nhs->hugepages_kobj = kobject_create_and_add("hugepages",
2763                                                         &node->dev.kobj);
2764         if (!nhs->hugepages_kobj)
2765                 return;
2766
2767         for_each_hstate(h) {
2768                 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
2769                                                 nhs->hstate_kobjs,
2770                                                 &per_node_hstate_attr_group);
2771                 if (err) {
2772                         pr_err("Hugetlb: Unable to add hstate %s for node %d\n",
2773                                 h->name, node->dev.id);
2774                         hugetlb_unregister_node(node);
2775                         break;
2776                 }
2777         }
2778 }
2779
2780 /*
2781  * hugetlb init time:  register hstate attributes for all registered node
2782  * devices of nodes that have memory.  All on-line nodes should have
2783  * registered their associated device by this time.
2784  */
2785 static void __init hugetlb_register_all_nodes(void)
2786 {
2787         int nid;
2788
2789         for_each_node_state(nid, N_MEMORY) {
2790                 struct node *node = node_devices[nid];
2791                 if (node->dev.id == nid)
2792                         hugetlb_register_node(node);
2793         }
2794
2795         /*
2796          * Let the node device driver know we're here so it can
2797          * [un]register hstate attributes on node hotplug.
2798          */
2799         register_hugetlbfs_with_node(hugetlb_register_node,
2800                                      hugetlb_unregister_node);
2801 }
2802 #else   /* !CONFIG_NUMA */
2803
2804 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2805 {
2806         BUG();
2807         if (nidp)
2808                 *nidp = -1;
2809         return NULL;
2810 }
2811
2812 static void hugetlb_register_all_nodes(void) { }
2813
2814 #endif
2815
2816 static int __init hugetlb_init(void)
2817 {
2818         int i;
2819
2820         if (!hugepages_supported())
2821                 return 0;
2822
2823         if (!size_to_hstate(default_hstate_size)) {
2824                 if (default_hstate_size != 0) {
2825                         pr_err("HugeTLB: unsupported default_hugepagesz %lu. Reverting to %lu\n",
2826                                default_hstate_size, HPAGE_SIZE);
2827                 }
2828
2829                 default_hstate_size = HPAGE_SIZE;
2830                 if (!size_to_hstate(default_hstate_size))
2831                         hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
2832         }
2833         default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
2834         if (default_hstate_max_huge_pages) {
2835                 if (!default_hstate.max_huge_pages)
2836                         default_hstate.max_huge_pages = default_hstate_max_huge_pages;
2837         }
2838
2839         hugetlb_init_hstates();
2840         gather_bootmem_prealloc();
2841         report_hugepages();
2842
2843         hugetlb_sysfs_init();
2844         hugetlb_register_all_nodes();
2845         hugetlb_cgroup_file_init();
2846
2847 #ifdef CONFIG_SMP
2848         num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
2849 #else
2850         num_fault_mutexes = 1;
2851 #endif
2852         hugetlb_fault_mutex_table =
2853                 kmalloc_array(num_fault_mutexes, sizeof(struct mutex),
2854                               GFP_KERNEL);
2855         BUG_ON(!hugetlb_fault_mutex_table);
2856
2857         for (i = 0; i < num_fault_mutexes; i++)
2858                 mutex_init(&hugetlb_fault_mutex_table[i]);
2859         return 0;
2860 }
2861 subsys_initcall(hugetlb_init);
2862
2863 /* Should be called on processing a hugepagesz=... option */
2864 void __init hugetlb_bad_size(void)
2865 {
2866         parsed_valid_hugepagesz = false;
2867 }
2868
2869 void __init hugetlb_add_hstate(unsigned int order)
2870 {
2871         struct hstate *h;
2872         unsigned long i;
2873
2874         if (size_to_hstate(PAGE_SIZE << order)) {
2875                 pr_warn("hugepagesz= specified twice, ignoring\n");
2876                 return;
2877         }
2878         BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
2879         BUG_ON(order == 0);
2880         h = &hstates[hugetlb_max_hstate++];
2881         h->order = order;
2882         h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
2883         h->nr_huge_pages = 0;
2884         h->free_huge_pages = 0;
2885         for (i = 0; i < MAX_NUMNODES; ++i)
2886                 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
2887         INIT_LIST_HEAD(&h->hugepage_activelist);
2888         h->next_nid_to_alloc = first_memory_node;
2889         h->next_nid_to_free = first_memory_node;
2890         snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
2891                                         huge_page_size(h)/1024);
2892
2893         parsed_hstate = h;
2894 }
2895
2896 static int __init hugetlb_nrpages_setup(char *s)
2897 {
2898         unsigned long *mhp;
2899         static unsigned long *last_mhp;
2900
2901         if (!parsed_valid_hugepagesz) {
2902                 pr_warn("hugepages = %s preceded by "
2903                         "an unsupported hugepagesz, ignoring\n", s);
2904                 parsed_valid_hugepagesz = true;
2905                 return 1;
2906         }
2907         /*
2908          * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet,
2909          * so this hugepages= parameter goes to the "default hstate".
2910          */
2911         else if (!hugetlb_max_hstate)
2912                 mhp = &default_hstate_max_huge_pages;
2913         else
2914                 mhp = &parsed_hstate->max_huge_pages;
2915
2916         if (mhp == last_mhp) {
2917                 pr_warn("hugepages= specified twice without interleaving hugepagesz=, ignoring\n");
2918                 return 1;
2919         }
2920
2921         if (sscanf(s, "%lu", mhp) <= 0)
2922                 *mhp = 0;
2923
2924         /*
2925          * Global state is always initialized later in hugetlb_init.
2926          * But we need to allocate >= MAX_ORDER hstates here early to still
2927          * use the bootmem allocator.
2928          */
2929         if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
2930                 hugetlb_hstate_alloc_pages(parsed_hstate);
2931
2932         last_mhp = mhp;
2933
2934         return 1;
2935 }
2936 __setup("hugepages=", hugetlb_nrpages_setup);
2937
2938 static int __init hugetlb_default_setup(char *s)
2939 {
2940         default_hstate_size = memparse(s, &s);
2941         return 1;
2942 }
2943 __setup("default_hugepagesz=", hugetlb_default_setup);
2944
2945 static unsigned int cpuset_mems_nr(unsigned int *array)
2946 {
2947         int node;
2948         unsigned int nr = 0;
2949
2950         for_each_node_mask(node, cpuset_current_mems_allowed)
2951                 nr += array[node];
2952
2953         return nr;
2954 }
2955
2956 #ifdef CONFIG_SYSCTL
2957 static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
2958                          struct ctl_table *table, int write,
2959                          void __user *buffer, size_t *length, loff_t *ppos)
2960 {
2961         struct hstate *h = &default_hstate;
2962         unsigned long tmp = h->max_huge_pages;
2963         int ret;
2964
2965         if (!hugepages_supported())
2966                 return -EOPNOTSUPP;
2967
2968         table->data = &tmp;
2969         table->maxlen = sizeof(unsigned long);
2970         ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2971         if (ret)
2972                 goto out;
2973
2974         if (write)
2975                 ret = __nr_hugepages_store_common(obey_mempolicy, h,
2976                                                   NUMA_NO_NODE, tmp, *length);
2977 out:
2978         return ret;
2979 }
2980
2981 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
2982                           void __user *buffer, size_t *length, loff_t *ppos)
2983 {
2984
2985         return hugetlb_sysctl_handler_common(false, table, write,
2986                                                         buffer, length, ppos);
2987 }
2988
2989 #ifdef CONFIG_NUMA
2990 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
2991                           void __user *buffer, size_t *length, loff_t *ppos)
2992 {
2993         return hugetlb_sysctl_handler_common(true, table, write,
2994                                                         buffer, length, ppos);
2995 }
2996 #endif /* CONFIG_NUMA */
2997
2998 int hugetlb_overcommit_handler(struct ctl_table *table, int write,
2999                         void __user *buffer,
3000                         size_t *length, loff_t *ppos)
3001 {
3002         struct hstate *h = &default_hstate;
3003         unsigned long tmp;
3004         int ret;
3005
3006         if (!hugepages_supported())
3007                 return -EOPNOTSUPP;
3008
3009         tmp = h->nr_overcommit_huge_pages;
3010
3011         if (write && hstate_is_gigantic(h))
3012                 return -EINVAL;
3013
3014         table->data = &tmp;
3015         table->maxlen = sizeof(unsigned long);
3016         ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
3017         if (ret)
3018                 goto out;
3019
3020         if (write) {
3021                 spin_lock(&hugetlb_lock);
3022                 h->nr_overcommit_huge_pages = tmp;
3023                 spin_unlock(&hugetlb_lock);
3024         }
3025 out:
3026         return ret;
3027 }
3028
3029 #endif /* CONFIG_SYSCTL */
3030
3031 void hugetlb_report_meminfo(struct seq_file *m)
3032 {
3033         struct hstate *h;
3034         unsigned long total = 0;
3035
3036         if (!hugepages_supported())
3037                 return;
3038
3039         for_each_hstate(h) {
3040                 unsigned long count = h->nr_huge_pages;
3041
3042                 total += (PAGE_SIZE << huge_page_order(h)) * count;
3043
3044                 if (h == &default_hstate)
3045                         seq_printf(m,
3046                                    "HugePages_Total:   %5lu\n"
3047                                    "HugePages_Free:    %5lu\n"
3048                                    "HugePages_Rsvd:    %5lu\n"
3049                                    "HugePages_Surp:    %5lu\n"
3050                                    "Hugepagesize:   %8lu kB\n",
3051                                    count,
3052                                    h->free_huge_pages,
3053                                    h->resv_huge_pages,
3054                                    h->surplus_huge_pages,
3055                                    (PAGE_SIZE << huge_page_order(h)) / 1024);
3056         }
3057
3058         seq_printf(m, "Hugetlb:        %8lu kB\n", total / 1024);
3059 }
3060
3061 int hugetlb_report_node_meminfo(int nid, char *buf)
3062 {
3063         struct hstate *h = &default_hstate;
3064         if (!hugepages_supported())
3065                 return 0;
3066         return sprintf(buf,
3067                 "Node %d HugePages_Total: %5u\n"
3068                 "Node %d HugePages_Free:  %5u\n"
3069                 "Node %d HugePages_Surp:  %5u\n",
3070                 nid, h->nr_huge_pages_node[nid],
3071                 nid, h->free_huge_pages_node[nid],
3072                 nid, h->surplus_huge_pages_node[nid]);
3073 }
3074
3075 void hugetlb_show_meminfo(void)
3076 {
3077         struct hstate *h;
3078         int nid;
3079
3080         if (!hugepages_supported())
3081                 return;
3082
3083         for_each_node_state(nid, N_MEMORY)
3084                 for_each_hstate(h)
3085                         pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
3086                                 nid,
3087                                 h->nr_huge_pages_node[nid],
3088                                 h->free_huge_pages_node[nid],
3089                                 h->surplus_huge_pages_node[nid],
3090                                 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
3091 }
3092
3093 void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
3094 {
3095         seq_printf(m, "HugetlbPages:\t%8lu kB\n",
3096                    atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10));
3097 }
3098
3099 /* Return the number pages of memory we physically have, in PAGE_SIZE units. */
3100 unsigned long hugetlb_total_pages(void)
3101 {
3102         struct hstate *h;
3103         unsigned long nr_total_pages = 0;
3104
3105         for_each_hstate(h)
3106                 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
3107         return nr_total_pages;
3108 }
3109
3110 static int hugetlb_acct_memory(struct hstate *h, long delta)
3111 {
3112         int ret = -ENOMEM;
3113
3114         spin_lock(&hugetlb_lock);
3115         /*
3116          * When cpuset is configured, it breaks the strict hugetlb page
3117          * reservation as the accounting is done on a global variable. Such
3118          * reservation is completely rubbish in the presence of cpuset because
3119          * the reservation is not checked against page availability for the
3120          * current cpuset. Application can still potentially OOM'ed by kernel
3121          * with lack of free htlb page in cpuset that the task is in.
3122          * Attempt to enforce strict accounting with cpuset is almost
3123          * impossible (or too ugly) because cpuset is too fluid that
3124          * task or memory node can be dynamically moved between cpusets.
3125          *
3126          * The change of semantics for shared hugetlb mapping with cpuset is
3127          * undesirable. However, in order to preserve some of the semantics,
3128          * we fall back to check against current free page availability as
3129          * a best attempt and hopefully to minimize the impact of changing
3130          * semantics that cpuset has.
3131          */
3132         if (delta > 0) {
3133                 if (gather_surplus_pages(h, delta) < 0)
3134                         goto out;
3135
3136                 if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
3137                         return_unused_surplus_pages(h, delta);
3138                         goto out;
3139                 }
3140         }
3141
3142         ret = 0;
3143         if (delta < 0)
3144                 return_unused_surplus_pages(h, (unsigned long) -delta);
3145
3146 out:
3147         spin_unlock(&hugetlb_lock);
3148         return ret;
3149 }
3150
3151 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
3152 {
3153         struct resv_map *resv = vma_resv_map(vma);
3154
3155         /*
3156          * This new VMA should share its siblings reservation map if present.
3157          * The VMA will only ever have a valid reservation map pointer where
3158          * it is being copied for another still existing VMA.  As that VMA
3159          * has a reference to the reservation map it cannot disappear until
3160          * after this open call completes.  It is therefore safe to take a
3161          * new reference here without additional locking.
3162          */
3163         if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3164                 kref_get(&resv->refs);
3165 }
3166
3167 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
3168 {
3169         struct hstate *h = hstate_vma(vma);
3170         struct resv_map *resv = vma_resv_map(vma);
3171         struct hugepage_subpool *spool = subpool_vma(vma);
3172         unsigned long reserve, start, end;
3173         long gbl_reserve;
3174
3175         if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3176                 return;
3177
3178         start = vma_hugecache_offset(h, vma, vma->vm_start);
3179         end = vma_hugecache_offset(h, vma, vma->vm_end);
3180
3181         reserve = (end - start) - region_count(resv, start, end);
3182
3183         kref_put(&resv->refs, resv_map_release);
3184
3185         if (reserve) {
3186                 /*
3187                  * Decrement reserve counts.  The global reserve count may be
3188                  * adjusted if the subpool has a minimum size.
3189                  */
3190                 gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
3191                 hugetlb_acct_memory(h, -gbl_reserve);
3192         }
3193 }
3194
3195 static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
3196 {
3197         if (addr & ~(huge_page_mask(hstate_vma(vma))))
3198                 return -EINVAL;
3199         return 0;
3200 }
3201
3202 static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
3203 {
3204         struct hstate *hstate = hstate_vma(vma);
3205
3206         return 1UL << huge_page_shift(hstate);
3207 }
3208
3209 /*
3210  * We cannot handle pagefaults against hugetlb pages at all.  They cause
3211  * handle_mm_fault() to try to instantiate regular-sized pages in the
3212  * hugegpage VMA.  do_page_fault() is supposed to trap this, so BUG is we get
3213  * this far.
3214  */
3215 static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
3216 {
3217         BUG();
3218         return 0;
3219 }
3220
3221 /*
3222  * When a new function is introduced to vm_operations_struct and added
3223  * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
3224  * This is because under System V memory model, mappings created via
3225  * shmget/shmat with "huge page" specified are backed by hugetlbfs files,
3226  * their original vm_ops are overwritten with shm_vm_ops.
3227  */
3228 const struct vm_operations_struct hugetlb_vm_ops = {
3229         .fault = hugetlb_vm_op_fault,
3230         .open = hugetlb_vm_op_open,
3231         .close = hugetlb_vm_op_close,
3232         .split = hugetlb_vm_op_split,
3233         .pagesize = hugetlb_vm_op_pagesize,
3234 };
3235
3236 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
3237                                 int writable)
3238 {
3239         pte_t entry;
3240
3241         if (writable) {
3242                 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
3243                                          vma->vm_page_prot)));
3244         } else {
3245                 entry = huge_pte_wrprotect(mk_huge_pte(page,
3246                                            vma->vm_page_prot));
3247         }
3248         entry = pte_mkyoung(entry);
3249         entry = pte_mkhuge(entry);
3250         entry = arch_make_huge_pte(entry, vma, page, writable);
3251
3252         return entry;
3253 }
3254
3255 static void set_huge_ptep_writable(struct vm_area_struct *vma,
3256                                    unsigned long address, pte_t *ptep)
3257 {
3258         pte_t entry;
3259
3260         entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
3261         if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
3262                 update_mmu_cache(vma, address, ptep);
3263 }
3264
3265 bool is_hugetlb_entry_migration(pte_t pte)
3266 {
3267         swp_entry_t swp;
3268
3269         if (huge_pte_none(pte) || pte_present(pte))
3270                 return false;
3271         swp = pte_to_swp_entry(pte);
3272         if (non_swap_entry(swp) && is_migration_entry(swp))
3273                 return true;
3274         else
3275                 return false;
3276 }
3277
3278 static int is_hugetlb_entry_hwpoisoned(pte_t pte)
3279 {
3280         swp_entry_t swp;
3281
3282         if (huge_pte_none(pte) || pte_present(pte))
3283                 return 0;
3284         swp = pte_to_swp_entry(pte);
3285         if (non_swap_entry(swp) && is_hwpoison_entry(swp))
3286                 return 1;
3287         else
3288                 return 0;
3289 }
3290
3291 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
3292                             struct vm_area_struct *vma)
3293 {
3294         pte_t *src_pte, *dst_pte, entry, dst_entry;
3295         struct page *ptepage;
3296         unsigned long addr;
3297         int cow;
3298         struct hstate *h = hstate_vma(vma);
3299         unsigned long sz = huge_page_size(h);
3300         struct mmu_notifier_range range;
3301         int ret = 0;
3302
3303         cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
3304
3305         if (cow) {
3306                 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, src,
3307                                         vma->vm_start,
3308                                         vma->vm_end);
3309                 mmu_notifier_invalidate_range_start(&range);
3310         }
3311
3312         for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
3313                 spinlock_t *src_ptl, *dst_ptl;
3314                 src_pte = huge_pte_offset(src, addr, sz);
3315                 if (!src_pte)
3316                         continue;
3317                 dst_pte = huge_pte_alloc(dst, addr, sz);
3318                 if (!dst_pte) {
3319                         ret = -ENOMEM;
3320                         break;
3321                 }
3322
3323                 /*
3324                  * If the pagetables are shared don't copy or take references.
3325                  * dst_pte == src_pte is the common case of src/dest sharing.
3326                  *
3327                  * However, src could have 'unshared' and dst shares with
3328                  * another vma.  If dst_pte !none, this implies sharing.
3329                  * Check here before taking page table lock, and once again
3330                  * after taking the lock below.
3331                  */
3332                 dst_entry = huge_ptep_get(dst_pte);
3333                 if ((dst_pte == src_pte) || !huge_pte_none(dst_entry))
3334                         continue;
3335
3336                 dst_ptl = huge_pte_lock(h, dst, dst_pte);
3337                 src_ptl = huge_pte_lockptr(h, src, src_pte);
3338                 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
3339                 entry = huge_ptep_get(src_pte);
3340                 dst_entry = huge_ptep_get(dst_pte);
3341                 if (huge_pte_none(entry) || !huge_pte_none(dst_entry)) {
3342                         /*
3343                          * Skip if src entry none.  Also, skip in the
3344                          * unlikely case dst entry !none as this implies
3345                          * sharing with another vma.
3346                          */
3347                         ;
3348                 } else if (unlikely(is_hugetlb_entry_migration(entry) ||
3349                                     is_hugetlb_entry_hwpoisoned(entry))) {
3350                         swp_entry_t swp_entry = pte_to_swp_entry(entry);
3351
3352                         if (is_write_migration_entry(swp_entry) && cow) {
3353                                 /*
3354                                  * COW mappings require pages in both
3355                                  * parent and child to be set to read.
3356                                  */
3357                                 make_migration_entry_read(&swp_entry);
3358                                 entry = swp_entry_to_pte(swp_entry);
3359                                 set_huge_swap_pte_at(src, addr, src_pte,
3360                                                      entry, sz);
3361                         }
3362                         set_huge_swap_pte_at(dst, addr, dst_pte, entry, sz);
3363                 } else {
3364                         if (cow) {
3365                                 /*
3366                                  * No need to notify as we are downgrading page
3367                                  * table protection not changing it to point
3368                                  * to a new page.
3369                                  *
3370                                  * See Documentation/vm/mmu_notifier.rst
3371                                  */
3372                                 huge_ptep_set_wrprotect(src, addr, src_pte);
3373                         }
3374                         entry = huge_ptep_get(src_pte);
3375                         ptepage = pte_page(entry);
3376                         get_page(ptepage);
3377                         page_dup_rmap(ptepage, true);
3378                         set_huge_pte_at(dst, addr, dst_pte, entry);
3379                         hugetlb_count_add(pages_per_huge_page(h), dst);
3380                 }
3381                 spin_unlock(src_ptl);
3382                 spin_unlock(dst_ptl);
3383         }
3384
3385         if (cow)
3386                 mmu_notifier_invalidate_range_end(&range);
3387
3388         return ret;
3389 }
3390
3391 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
3392                             unsigned long start, unsigned long end,
3393                             struct page *ref_page)
3394 {
3395         struct mm_struct *mm = vma->vm_mm;
3396         unsigned long address;
3397         pte_t *ptep;
3398         pte_t pte;
3399         spinlock_t *ptl;
3400         struct page *page;
3401         struct hstate *h = hstate_vma(vma);
3402         unsigned long sz = huge_page_size(h);
3403         struct mmu_notifier_range range;
3404
3405         WARN_ON(!is_vm_hugetlb_page(vma));
3406         BUG_ON(start & ~huge_page_mask(h));
3407         BUG_ON(end & ~huge_page_mask(h));
3408
3409         /*
3410          * This is a hugetlb vma, all the pte entries should point
3411          * to huge page.
3412          */
3413         tlb_change_page_size(tlb, sz);
3414         tlb_start_vma(tlb, vma);
3415
3416         /*
3417          * If sharing possible, alert mmu notifiers of worst case.
3418          */
3419         mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, mm, start,
3420                                 end);
3421         adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
3422         mmu_notifier_invalidate_range_start(&range);
3423         address = start;
3424         for (; address < end; address += sz) {
3425                 ptep = huge_pte_offset(mm, address, sz);
3426                 if (!ptep)
3427                         continue;
3428
3429                 ptl = huge_pte_lock(h, mm, ptep);
3430                 if (huge_pmd_unshare(mm, &address, ptep)) {
3431                         spin_unlock(ptl);
3432                         /*
3433                          * We just unmapped a page of PMDs by clearing a PUD.
3434                          * The caller's TLB flush range should cover this area.
3435                          */
3436                         continue;
3437                 }
3438
3439                 pte = huge_ptep_get(ptep);
3440                 if (huge_pte_none(pte)) {
3441                         spin_unlock(ptl);
3442                         continue;
3443                 }
3444
3445                 /*
3446                  * Migrating hugepage or HWPoisoned hugepage is already
3447                  * unmapped and its refcount is dropped, so just clear pte here.
3448                  */
3449                 if (unlikely(!pte_present(pte))) {
3450                         huge_pte_clear(mm, address, ptep, sz);
3451                         spin_unlock(ptl);
3452                         continue;
3453                 }
3454
3455                 page = pte_page(pte);
3456                 /*
3457                  * If a reference page is supplied, it is because a specific
3458                  * page is being unmapped, not a range. Ensure the page we
3459                  * are about to unmap is the actual page of interest.
3460                  */
3461                 if (ref_page) {
3462                         if (page != ref_page) {
3463                                 spin_unlock(ptl);
3464                                 continue;
3465                         }
3466                         /*
3467                          * Mark the VMA as having unmapped its page so that
3468                          * future faults in this VMA will fail rather than
3469                          * looking like data was lost
3470                          */
3471                         set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
3472                 }
3473
3474                 pte = huge_ptep_get_and_clear(mm, address, ptep);
3475                 tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
3476                 if (huge_pte_dirty(pte))
3477                         set_page_dirty(page);
3478
3479                 hugetlb_count_sub(pages_per_huge_page(h), mm);
3480                 page_remove_rmap(page, true);
3481
3482                 spin_unlock(ptl);
3483                 tlb_remove_page_size(tlb, page, huge_page_size(h));
3484                 /*
3485                  * Bail out after unmapping reference page if supplied
3486                  */
3487                 if (ref_page)
3488                         break;
3489         }
3490         mmu_notifier_invalidate_range_end(&range);
3491         tlb_end_vma(tlb, vma);
3492 }
3493
3494 void __unmap_hugepage_range_final(struct mmu_gather *tlb,
3495                           struct vm_area_struct *vma, unsigned long start,
3496                           unsigned long end, struct page *ref_page)
3497 {
3498         __unmap_hugepage_range(tlb, vma, start, end, ref_page);
3499
3500         /*
3501          * Clear this flag so that x86's huge_pmd_share page_table_shareable
3502          * test will fail on a vma being torn down, and not grab a page table
3503          * on its way out.  We're lucky that the flag has such an appropriate
3504          * name, and can in fact be safely cleared here. We could clear it
3505          * before the __unmap_hugepage_range above, but all that's necessary
3506          * is to clear it before releasing the i_mmap_rwsem. This works
3507          * because in the context this is called, the VMA is about to be
3508          * destroyed and the i_mmap_rwsem is held.
3509          */
3510         vma->vm_flags &= ~VM_MAYSHARE;
3511 }
3512
3513 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
3514                           unsigned long end, struct page *ref_page)
3515 {
3516         struct mm_struct *mm;
3517         struct mmu_gather tlb;
3518         unsigned long tlb_start = start;
3519         unsigned long tlb_end = end;
3520
3521         /*
3522          * If shared PMDs were possibly used within this vma range, adjust
3523          * start/end for worst case tlb flushing.
3524          * Note that we can not be sure if PMDs are shared until we try to
3525          * unmap pages.  However, we want to make sure TLB flushing covers
3526          * the largest possible range.
3527          */
3528         adjust_range_if_pmd_sharing_possible(vma, &tlb_start, &tlb_end);
3529
3530         mm = vma->vm_mm;
3531
3532         tlb_gather_mmu(&tlb, mm, tlb_start, tlb_end);
3533         __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
3534         tlb_finish_mmu(&tlb, tlb_start, tlb_end);
3535 }
3536
3537 /*
3538  * This is called when the original mapper is failing to COW a MAP_PRIVATE
3539  * mappping it owns the reserve page for. The intention is to unmap the page
3540  * from other VMAs and let the children be SIGKILLed if they are faulting the
3541  * same region.
3542  */
3543 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
3544                               struct page *page, unsigned long address)
3545 {
3546         struct hstate *h = hstate_vma(vma);
3547         struct vm_area_struct *iter_vma;
3548         struct address_space *mapping;
3549         pgoff_t pgoff;
3550
3551         /*
3552          * vm_pgoff is in PAGE_SIZE units, hence the different calculation
3553          * from page cache lookup which is in HPAGE_SIZE units.
3554          */
3555         address = address & huge_page_mask(h);
3556         pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
3557                         vma->vm_pgoff;
3558         mapping = vma->vm_file->f_mapping;
3559
3560         /*
3561          * Take the mapping lock for the duration of the table walk. As
3562          * this mapping should be shared between all the VMAs,
3563          * __unmap_hugepage_range() is called as the lock is already held
3564          */
3565         i_mmap_lock_write(mapping);
3566         vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
3567                 /* Do not unmap the current VMA */
3568                 if (iter_vma == vma)
3569                         continue;
3570
3571                 /*
3572                  * Shared VMAs have their own reserves and do not affect
3573                  * MAP_PRIVATE accounting but it is possible that a shared
3574                  * VMA is using the same page so check and skip such VMAs.
3575                  */
3576                 if (iter_vma->vm_flags & VM_MAYSHARE)
3577                         continue;
3578
3579                 /*
3580                  * Unmap the page from other VMAs without their own reserves.
3581                  * They get marked to be SIGKILLed if they fault in these
3582                  * areas. This is because a future no-page fault on this VMA
3583                  * could insert a zeroed page instead of the data existing
3584                  * from the time of fork. This would look like data corruption
3585                  */
3586                 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
3587                         unmap_hugepage_range(iter_vma, address,
3588                                              address + huge_page_size(h), page);
3589         }
3590         i_mmap_unlock_write(mapping);
3591 }
3592
3593 /*
3594  * Hugetlb_cow() should be called with page lock of the original hugepage held.
3595  * Called with hugetlb_instantiation_mutex held and pte_page locked so we
3596  * cannot race with other handlers or page migration.
3597  * Keep the pte_same checks anyway to make transition from the mutex easier.
3598  */
3599 static vm_fault_t hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
3600                        unsigned long address, pte_t *ptep,
3601                        struct page *pagecache_page, spinlock_t *ptl)
3602 {
3603         pte_t pte;
3604         struct hstate *h = hstate_vma(vma);
3605         struct page *old_page, *new_page;
3606         int outside_reserve = 0;
3607         vm_fault_t ret = 0;
3608         unsigned long haddr = address & huge_page_mask(h);
3609         struct mmu_notifier_range range;
3610
3611         pte = huge_ptep_get(ptep);
3612         old_page = pte_page(pte);
3613
3614 retry_avoidcopy:
3615         /* If no-one else is actually using this page, avoid the copy
3616          * and just make the page writable */
3617         if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
3618                 page_move_anon_rmap(old_page, vma);
3619                 set_huge_ptep_writable(vma, haddr, ptep);
3620                 return 0;
3621         }
3622
3623         /*
3624          * If the process that created a MAP_PRIVATE mapping is about to
3625          * perform a COW due to a shared page count, attempt to satisfy
3626          * the allocation without using the existing reserves. The pagecache
3627          * page is used to determine if the reserve at this address was
3628          * consumed or not. If reserves were used, a partial faulted mapping
3629          * at the time of fork() could consume its reserves on COW instead
3630          * of the full address range.
3631          */
3632         if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
3633                         old_page != pagecache_page)
3634                 outside_reserve = 1;
3635
3636         get_page(old_page);
3637
3638         /*
3639          * Drop page table lock as buddy allocator may be called. It will
3640          * be acquired again before returning to the caller, as expected.
3641          */
3642         spin_unlock(ptl);
3643         new_page = alloc_huge_page(vma, haddr, outside_reserve);
3644
3645         if (IS_ERR(new_page)) {
3646                 /*
3647                  * If a process owning a MAP_PRIVATE mapping fails to COW,
3648                  * it is due to references held by a child and an insufficient
3649                  * huge page pool. To guarantee the original mappers
3650                  * reliability, unmap the page from child processes. The child
3651                  * may get SIGKILLed if it later faults.
3652                  */
3653                 if (outside_reserve) {
3654                         put_page(old_page);
3655                         BUG_ON(huge_pte_none(pte));
3656                         unmap_ref_private(mm, vma, old_page, haddr);
3657                         BUG_ON(huge_pte_none(pte));
3658                         spin_lock(ptl);
3659                         ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
3660                         if (likely(ptep &&
3661                                    pte_same(huge_ptep_get(ptep), pte)))
3662                                 goto retry_avoidcopy;
3663                         /*
3664                          * race occurs while re-acquiring page table
3665                          * lock, and our job is done.
3666                          */
3667                         return 0;
3668                 }
3669
3670                 ret = vmf_error(PTR_ERR(new_page));
3671                 goto out_release_old;
3672         }
3673
3674         /*
3675          * When the original hugepage is shared one, it does not have
3676          * anon_vma prepared.
3677          */
3678         if (unlikely(anon_vma_prepare(vma))) {
3679                 ret = VM_FAULT_OOM;
3680                 goto out_release_all;
3681         }
3682
3683         copy_user_huge_page(new_page, old_page, address, vma,
3684                             pages_per_huge_page(h));
3685         __SetPageUptodate(new_page);
3686
3687         mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, haddr,
3688                                 haddr + huge_page_size(h));
3689         mmu_notifier_invalidate_range_start(&range);
3690
3691         /*
3692          * Retake the page table lock to check for racing updates
3693          * before the page tables are altered
3694          */
3695         spin_lock(ptl);
3696         ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
3697         if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
3698                 ClearPagePrivate(new_page);
3699
3700                 /* Break COW */
3701                 huge_ptep_clear_flush(vma, haddr, ptep);
3702                 mmu_notifier_invalidate_range(mm, range.start, range.end);
3703                 set_huge_pte_at(mm, haddr, ptep,
3704                                 make_huge_pte(vma, new_page, 1));
3705                 page_remove_rmap(old_page, true);
3706                 hugepage_add_new_anon_rmap(new_page, vma, haddr);
3707                 set_page_huge_active(new_page);
3708                 /* Make the old page be freed below */
3709                 new_page = old_page;
3710         }
3711         spin_unlock(ptl);
3712         mmu_notifier_invalidate_range_end(&range);
3713 out_release_all:
3714         restore_reserve_on_error(h, vma, haddr, new_page);
3715         put_page(new_page);
3716 out_release_old:
3717         put_page(old_page);
3718
3719         spin_lock(ptl); /* Caller expects lock to be held */
3720         return ret;
3721 }
3722
3723 /* Return the pagecache page at a given address within a VMA */
3724 static struct page *hugetlbfs_pagecache_page(struct hstate *h,
3725                         struct vm_area_struct *vma, unsigned long address)
3726 {
3727         struct address_space *mapping;
3728         pgoff_t idx;
3729
3730         mapping = vma->vm_file->f_mapping;
3731         idx = vma_hugecache_offset(h, vma, address);
3732
3733         return find_lock_page(mapping, idx);
3734 }
3735
3736 /*
3737  * Return whether there is a pagecache page to back given address within VMA.
3738  * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
3739  */
3740 static bool hugetlbfs_pagecache_present(struct hstate *h,
3741                         struct vm_area_struct *vma, unsigned long address)
3742 {
3743         struct address_space *mapping;
3744         pgoff_t idx;
3745         struct page *page;
3746
3747         mapping = vma->vm_file->f_mapping;
3748         idx = vma_hugecache_offset(h, vma, address);
3749
3750         page = find_get_page(mapping, idx);
3751         if (page)
3752                 put_page(page);
3753         return page != NULL;
3754 }
3755
3756 int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
3757                            pgoff_t idx)
3758 {
3759         struct inode *inode = mapping->host;
3760         struct hstate *h = hstate_inode(inode);
3761         int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
3762
3763         if (err)
3764                 return err;
3765         ClearPagePrivate(page);
3766
3767         /*
3768          * set page dirty so that it will not be removed from cache/file
3769          * by non-hugetlbfs specific code paths.
3770          */
3771         set_page_dirty(page);
3772
3773         spin_lock(&inode->i_lock);
3774         inode->i_blocks += blocks_per_huge_page(h);
3775         spin_unlock(&inode->i_lock);
3776         return 0;
3777 }
3778
3779 static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
3780                         struct vm_area_struct *vma,
3781                         struct address_space *mapping, pgoff_t idx,
3782                         unsigned long address, pte_t *ptep, unsigned int flags)
3783 {
3784         struct hstate *h = hstate_vma(vma);
3785         vm_fault_t ret = VM_FAULT_SIGBUS;
3786         int anon_rmap = 0;
3787         unsigned long size;
3788         struct page *page;
3789         pte_t new_pte;
3790         spinlock_t *ptl;
3791         unsigned long haddr = address & huge_page_mask(h);
3792         bool new_page = false;
3793
3794         /*
3795          * Currently, we are forced to kill the process in the event the
3796          * original mapper has unmapped pages from the child due to a failed
3797          * COW. Warn that such a situation has occurred as it may not be obvious
3798          */
3799         if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
3800                 pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
3801                            current->pid);
3802                 return ret;
3803         }
3804
3805         /*
3806          * Use page lock to guard against racing truncation
3807          * before we get page_table_lock.
3808          */
3809 retry:
3810         page = find_lock_page(mapping, idx);
3811         if (!page) {
3812                 size = i_size_read(mapping->host) >> huge_page_shift(h);
3813                 if (idx >= size)
3814                         goto out;
3815
3816                 /*
3817                  * Check for page in userfault range
3818                  */
3819                 if (userfaultfd_missing(vma)) {
3820                         u32 hash;
3821                         struct vm_fault vmf = {
3822                                 .vma = vma,
3823                                 .address = haddr,
3824                                 .flags = flags,
3825                                 /*
3826                                  * Hard to debug if it ends up being
3827                                  * used by a callee that assumes
3828                                  * something about the other
3829                                  * uninitialized fields... same as in
3830                                  * memory.c
3831                                  */
3832                         };
3833
3834                         /*
3835                          * hugetlb_fault_mutex must be dropped before
3836                          * handling userfault.  Reacquire after handling
3837                          * fault to make calling code simpler.
3838                          */
3839                         hash = hugetlb_fault_mutex_hash(h, mapping, idx, haddr);
3840                         mutex_unlock(&hugetlb_fault_mutex_table[hash]);
3841                         ret = handle_userfault(&vmf, VM_UFFD_MISSING);
3842                         mutex_lock(&hugetlb_fault_mutex_table[hash]);
3843                         goto out;
3844                 }
3845
3846                 page = alloc_huge_page(vma, haddr, 0);
3847                 if (IS_ERR(page)) {
3848                         ret = vmf_error(PTR_ERR(page));
3849                         goto out;
3850                 }
3851                 clear_huge_page(page, address, pages_per_huge_page(h));
3852                 __SetPageUptodate(page);
3853                 new_page = true;
3854
3855                 if (vma->vm_flags & VM_MAYSHARE) {
3856                         int err = huge_add_to_page_cache(page, mapping, idx);
3857                         if (err) {
3858                                 put_page(page);
3859                                 if (err == -EEXIST)
3860                                         goto retry;
3861                                 goto out;
3862                         }
3863                 } else {
3864                         lock_page(page);
3865                         if (unlikely(anon_vma_prepare(vma))) {
3866                                 ret = VM_FAULT_OOM;
3867                                 goto backout_unlocked;
3868                         }
3869                         anon_rmap = 1;
3870                 }
3871         } else {
3872                 /*
3873                  * If memory error occurs between mmap() and fault, some process
3874                  * don't have hwpoisoned swap entry for errored virtual address.
3875                  * So we need to block hugepage fault by PG_hwpoison bit check.
3876                  */
3877                 if (unlikely(PageHWPoison(page))) {
3878                         ret = VM_FAULT_HWPOISON |
3879                                 VM_FAULT_SET_HINDEX(hstate_index(h));
3880                         goto backout_unlocked;
3881                 }
3882         }
3883
3884         /*
3885          * If we are going to COW a private mapping later, we examine the
3886          * pending reservations for this page now. This will ensure that
3887          * any allocations necessary to record that reservation occur outside
3888          * the spinlock.
3889          */
3890         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3891                 if (vma_needs_reservation(h, vma, haddr) < 0) {
3892                         ret = VM_FAULT_OOM;
3893                         goto backout_unlocked;
3894                 }
3895                 /* Just decrements count, does not deallocate */
3896                 vma_end_reservation(h, vma, haddr);
3897         }
3898
3899         ptl = huge_pte_lock(h, mm, ptep);
3900         size = i_size_read(mapping->host) >> huge_page_shift(h);
3901         if (idx >= size)
3902                 goto backout;
3903
3904         ret = 0;
3905         if (!huge_pte_none(huge_ptep_get(ptep)))
3906                 goto backout;
3907
3908         if (anon_rmap) {
3909                 ClearPagePrivate(page);
3910                 hugepage_add_new_anon_rmap(page, vma, haddr);
3911         } else
3912                 page_dup_rmap(page, true);
3913         new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
3914                                 && (vma->vm_flags & VM_SHARED)));
3915         set_huge_pte_at(mm, haddr, ptep, new_pte);
3916
3917         hugetlb_count_add(pages_per_huge_page(h), mm);
3918         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3919                 /* Optimization, do the COW without a second fault */
3920                 ret = hugetlb_cow(mm, vma, address, ptep, page, ptl);
3921         }
3922
3923         spin_unlock(ptl);
3924
3925         /*
3926          * Only make newly allocated pages active.  Existing pages found
3927          * in the pagecache could be !page_huge_active() if they have been
3928          * isolated for migration.
3929          */
3930         if (new_page)
3931                 set_page_huge_active(page);
3932
3933         unlock_page(page);
3934 out:
3935         return ret;
3936
3937 backout:
3938         spin_unlock(ptl);
3939 backout_unlocked:
3940         unlock_page(page);
3941         restore_reserve_on_error(h, vma, haddr, page);
3942         put_page(page);
3943         goto out;
3944 }
3945
3946 #ifdef CONFIG_SMP
3947 u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
3948                             pgoff_t idx, unsigned long address)
3949 {
3950         unsigned long key[2];
3951         u32 hash;
3952
3953         key[0] = (unsigned long) mapping;
3954         key[1] = idx;
3955
3956         hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
3957
3958         return hash & (num_fault_mutexes - 1);
3959 }
3960 #else
3961 /*
3962  * For uniprocesor systems we always use a single mutex, so just
3963  * return 0 and avoid the hashing overhead.
3964  */
3965 u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
3966                             pgoff_t idx, unsigned long address)
3967 {
3968         return 0;
3969 }
3970 #endif
3971
3972 vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3973                         unsigned long address, unsigned int flags)
3974 {
3975         pte_t *ptep, entry;
3976         spinlock_t *ptl;
3977         vm_fault_t ret;
3978         u32 hash;
3979         pgoff_t idx;
3980         struct page *page = NULL;
3981         struct page *pagecache_page = NULL;
3982         struct hstate *h = hstate_vma(vma);
3983         struct address_space *mapping;
3984         int need_wait_lock = 0;
3985         unsigned long haddr = address & huge_page_mask(h);
3986
3987         ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
3988         if (ptep) {
3989                 entry = huge_ptep_get(ptep);
3990                 if (unlikely(is_hugetlb_entry_migration(entry))) {
3991                         migration_entry_wait_huge(vma, mm, ptep);
3992                         return 0;
3993                 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
3994                         return VM_FAULT_HWPOISON_LARGE |
3995                                 VM_FAULT_SET_HINDEX(hstate_index(h));
3996         } else {
3997                 ptep = huge_pte_alloc(mm, haddr, huge_page_size(h));
3998                 if (!ptep)
3999                         return VM_FAULT_OOM;
4000         }
4001
4002         mapping = vma->vm_file->f_mapping;
4003         idx = vma_hugecache_offset(h, vma, haddr);
4004
4005         /*
4006          * Serialize hugepage allocation and instantiation, so that we don't
4007          * get spurious allocation failures if two CPUs race to instantiate
4008          * the same page in the page cache.
4009          */
4010         hash = hugetlb_fault_mutex_hash(h, mapping, idx, haddr);
4011         mutex_lock(&hugetlb_fault_mutex_table[hash]);
4012
4013         entry = huge_ptep_get(ptep);
4014         if (huge_pte_none(entry)) {
4015                 ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
4016                 goto out_mutex;
4017         }
4018
4019         ret = 0;
4020
4021         /*
4022          * entry could be a migration/hwpoison entry at this point, so this
4023          * check prevents the kernel from going below assuming that we have
4024          * a active hugepage in pagecache. This goto expects the 2nd page fault,
4025          * and is_hugetlb_entry_(migration|hwpoisoned) check will properly
4026          * handle it.
4027          */
4028         if (!pte_present(entry))
4029                 goto out_mutex;
4030
4031         /*
4032          * If we are going to COW the mapping later, we examine the pending
4033          * reservations for this page now. This will ensure that any
4034          * allocations necessary to record that reservation occur outside the
4035          * spinlock. For private mappings, we also lookup the pagecache
4036          * page now as it is used to determine if a reservation has been
4037          * consumed.
4038          */
4039         if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
4040                 if (vma_needs_reservation(h, vma, haddr) < 0) {
4041                         ret = VM_FAULT_OOM;
4042                         goto out_mutex;
4043                 }
4044                 /* Just decrements count, does not deallocate */
4045                 vma_end_reservation(h, vma, haddr);
4046
4047                 if (!(vma->vm_flags & VM_MAYSHARE))
4048                         pagecache_page = hugetlbfs_pagecache_page(h,
4049                                                                 vma, haddr);
4050         }
4051
4052         ptl = huge_pte_lock(h, mm, ptep);
4053
4054         /* Check for a racing update before calling hugetlb_cow */
4055         if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
4056                 goto out_ptl;
4057
4058         /*
4059          * hugetlb_cow() requires page locks of pte_page(entry) and
4060          * pagecache_page, so here we need take the former one
4061          * when page != pagecache_page or !pagecache_page.
4062          */
4063         page = pte_page(entry);
4064         if (page != pagecache_page)
4065                 if (!trylock_page(page)) {
4066                         need_wait_lock = 1;
4067                         goto out_ptl;
4068                 }
4069
4070         get_page(page);
4071
4072         if (flags & FAULT_FLAG_WRITE) {
4073                 if (!huge_pte_write(entry)) {
4074                         ret = hugetlb_cow(mm, vma, address, ptep,
4075                                           pagecache_page, ptl);
4076                         goto out_put_page;
4077                 }
4078                 entry = huge_pte_mkdirty(entry);
4079         }
4080         entry = pte_mkyoung(entry);
4081         if (huge_ptep_set_access_flags(vma, haddr, ptep, entry,
4082                                                 flags & FAULT_FLAG_WRITE))
4083                 update_mmu_cache(vma, haddr, ptep);
4084 out_put_page:
4085         if (page != pagecache_page)
4086                 unlock_page(page);
4087         put_page(page);
4088 out_ptl:
4089         spin_unlock(ptl);
4090
4091         if (pagecache_page) {
4092                 unlock_page(pagecache_page);
4093                 put_page(pagecache_page);
4094         }
4095 out_mutex:
4096         mutex_unlock(&hugetlb_fault_mutex_table[hash]);
4097         /*
4098          * Generally it's safe to hold refcount during waiting page lock. But
4099          * here we just wait to defer the next page fault to avoid busy loop and
4100          * the page is not used after unlocked before returning from the current
4101          * page fault. So we are safe from accessing freed page, even if we wait
4102          * here without taking refcount.
4103          */
4104         if (need_wait_lock)
4105                 wait_on_page_locked(page);
4106         return ret;
4107 }
4108
4109 /*
4110  * Used by userfaultfd UFFDIO_COPY.  Based on mcopy_atomic_pte with
4111  * modifications for huge pages.
4112  */
4113 int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
4114                             pte_t *dst_pte,
4115                             struct vm_area_struct *dst_vma,
4116                             unsigned long dst_addr,
4117                             unsigned long src_addr,
4118                             struct page **pagep)
4119 {
4120         struct address_space *mapping;
4121         pgoff_t idx;
4122         unsigned long size;
4123         int vm_shared = dst_vma->vm_flags & VM_SHARED;
4124         struct hstate *h = hstate_vma(dst_vma);
4125         pte_t _dst_pte;
4126         spinlock_t *ptl;
4127         int ret;
4128         struct page *page;
4129
4130         if (!*pagep) {
4131                 ret = -ENOMEM;
4132                 page = alloc_huge_page(dst_vma, dst_addr, 0);
4133                 if (IS_ERR(page))
4134                         goto out;
4135
4136                 ret = copy_huge_page_from_user(page,
4137                                                 (const void __user *) src_addr,
4138                                                 pages_per_huge_page(h), false);
4139
4140                 /* fallback to copy_from_user outside mmap_sem */
4141                 if (unlikely(ret)) {
4142                         ret = -ENOENT;
4143                         *pagep = page;
4144                         /* don't free the page */
4145                         goto out;
4146                 }
4147         } else {
4148                 page = *pagep;
4149                 *pagep = NULL;
4150         }
4151
4152         /*
4153          * The memory barrier inside __SetPageUptodate makes sure that
4154          * preceding stores to the page contents become visible before
4155          * the set_pte_at() write.
4156          */
4157         __SetPageUptodate(page);
4158
4159         mapping = dst_vma->vm_file->f_mapping;
4160         idx = vma_hugecache_offset(h, dst_vma, dst_addr);
4161
4162         /*
4163          * If shared, add to page cache
4164          */
4165         if (vm_shared) {
4166                 size = i_size_read(mapping->host) >> huge_page_shift(h);
4167                 ret = -EFAULT;
4168                 if (idx >= size)
4169                         goto out_release_nounlock;
4170
4171                 /*
4172                  * Serialization between remove_inode_hugepages() and
4173                  * huge_add_to_page_cache() below happens through the
4174                  * hugetlb_fault_mutex_table that here must be hold by
4175                  * the caller.
4176                  */
4177                 ret = huge_add_to_page_cache(page, mapping, idx);
4178                 if (ret)
4179                         goto out_release_nounlock;
4180         }
4181
4182         ptl = huge_pte_lockptr(h, dst_mm, dst_pte);
4183         spin_lock(ptl);
4184
4185         /*
4186          * Recheck the i_size after holding PT lock to make sure not
4187          * to leave any page mapped (as page_mapped()) beyond the end
4188          * of the i_size (remove_inode_hugepages() is strict about
4189          * enforcing that). If we bail out here, we'll also leave a
4190          * page in the radix tree in the vm_shared case beyond the end
4191          * of the i_size, but remove_inode_hugepages() will take care
4192          * of it as soon as we drop the hugetlb_fault_mutex_table.
4193          */
4194         size = i_size_read(mapping->host) >> huge_page_shift(h);
4195         ret = -EFAULT;
4196         if (idx >= size)
4197                 goto out_release_unlock;
4198
4199         ret = -EEXIST;
4200         if (!huge_pte_none(huge_ptep_get(dst_pte)))
4201                 goto out_release_unlock;
4202
4203         if (vm_shared) {
4204                 page_dup_rmap(page, true);
4205         } else {
4206                 ClearPagePrivate(page);
4207                 hugepage_add_new_anon_rmap(page, dst_vma, dst_addr);
4208         }
4209
4210         _dst_pte = make_huge_pte(dst_vma, page, dst_vma->vm_flags & VM_WRITE);
4211         if (dst_vma->vm_flags & VM_WRITE)
4212                 _dst_pte = huge_pte_mkdirty(_dst_pte);
4213         _dst_pte = pte_mkyoung(_dst_pte);
4214
4215         set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
4216
4217         (void)huge_ptep_set_access_flags(dst_vma, dst_addr, dst_pte, _dst_pte,
4218                                         dst_vma->vm_flags & VM_WRITE);
4219         hugetlb_count_add(pages_per_huge_page(h), dst_mm);
4220
4221         /* No need to invalidate - it was non-present before */
4222         update_mmu_cache(dst_vma, dst_addr, dst_pte);
4223
4224         spin_unlock(ptl);
4225         set_page_huge_active(page);
4226         if (vm_shared)
4227                 unlock_page(page);
4228         ret = 0;
4229 out:
4230         return ret;
4231 out_release_unlock:
4232         spin_unlock(ptl);
4233         if (vm_shared)
4234                 unlock_page(page);
4235 out_release_nounlock:
4236         put_page(page);
4237         goto out;
4238 }
4239
4240 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
4241                          struct page **pages, struct vm_area_struct **vmas,
4242                          unsigned long *position, unsigned long *nr_pages,
4243                          long i, unsigned int flags, int *nonblocking)
4244 {
4245         unsigned long pfn_offset;
4246         unsigned long vaddr = *position;
4247         unsigned long remainder = *nr_pages;
4248         struct hstate *h = hstate_vma(vma);
4249         int err = -EFAULT;
4250
4251         while (vaddr < vma->vm_end && remainder) {
4252                 pte_t *pte;
4253                 spinlock_t *ptl = NULL;
4254                 int absent;
4255                 struct page *page;
4256
4257                 /*
4258                  * If we have a pending SIGKILL, don't keep faulting pages and
4259                  * potentially allocating memory.
4260                  */
4261                 if (fatal_signal_pending(current)) {
4262                         remainder = 0;
4263                         break;
4264                 }
4265
4266                 /*
4267                  * Some archs (sparc64, sh*) have multiple pte_ts to
4268                  * each hugepage.  We have to make sure we get the
4269                  * first, for the page indexing below to work.
4270                  *
4271                  * Note that page table lock is not held when pte is null.
4272                  */
4273                 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h),
4274                                       huge_page_size(h));
4275                 if (pte)
4276                         ptl = huge_pte_lock(h, mm, pte);
4277                 absent = !pte || huge_pte_none(huge_ptep_get(pte));
4278
4279                 /*
4280                  * When coredumping, it suits get_dump_page if we just return
4281                  * an error where there's an empty slot with no huge pagecache
4282                  * to back it.  This way, we avoid allocating a hugepage, and
4283                  * the sparse dumpfile avoids allocating disk blocks, but its
4284                  * huge holes still show up with zeroes where they need to be.
4285                  */
4286                 if (absent && (flags & FOLL_DUMP) &&
4287                     !hugetlbfs_pagecache_present(h, vma, vaddr)) {
4288                         if (pte)
4289                                 spin_unlock(ptl);
4290                         remainder = 0;
4291                         break;
4292                 }
4293
4294                 /*
4295                  * We need call hugetlb_fault for both hugepages under migration
4296                  * (in which case hugetlb_fault waits for the migration,) and
4297                  * hwpoisoned hugepages (in which case we need to prevent the
4298                  * caller from accessing to them.) In order to do this, we use
4299                  * here is_swap_pte instead of is_hugetlb_entry_migration and
4300                  * is_hugetlb_entry_hwpoisoned. This is because it simply covers
4301                  * both cases, and because we can't follow correct pages
4302                  * directly from any kind of swap entries.
4303                  */
4304                 if (absent || is_swap_pte(huge_ptep_get(pte)) ||
4305                     ((flags & FOLL_WRITE) &&
4306                       !huge_pte_write(huge_ptep_get(pte)))) {
4307                         vm_fault_t ret;
4308                         unsigned int fault_flags = 0;
4309
4310                         if (pte)
4311                                 spin_unlock(ptl);
4312                         if (flags & FOLL_WRITE)
4313                                 fault_flags |= FAULT_FLAG_WRITE;
4314                         if (nonblocking)
4315                                 fault_flags |= FAULT_FLAG_ALLOW_RETRY;
4316                         if (flags & FOLL_NOWAIT)
4317                                 fault_flags |= FAULT_FLAG_ALLOW_RETRY |
4318                                         FAULT_FLAG_RETRY_NOWAIT;
4319                         if (flags & FOLL_TRIED) {
4320                                 VM_WARN_ON_ONCE(fault_flags &
4321                                                 FAULT_FLAG_ALLOW_RETRY);
4322                                 fault_flags |= FAULT_FLAG_TRIED;
4323                         }
4324                         ret = hugetlb_fault(mm, vma, vaddr, fault_flags);
4325                         if (ret & VM_FAULT_ERROR) {
4326                                 err = vm_fault_to_errno(ret, flags);
4327                                 remainder = 0;
4328                                 break;
4329                         }
4330                         if (ret & VM_FAULT_RETRY) {
4331                                 if (nonblocking &&
4332                                     !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
4333                                         *nonblocking = 0;
4334                                 *nr_pages = 0;
4335                                 /*
4336                                  * VM_FAULT_RETRY must not return an
4337                                  * error, it will return zero
4338                                  * instead.
4339                                  *
4340                                  * No need to update "position" as the
4341                                  * caller will not check it after
4342                                  * *nr_pages is set to 0.
4343                                  */
4344                                 return i;
4345                         }
4346                         continue;
4347                 }
4348
4349                 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
4350                 page = pte_page(huge_ptep_get(pte));
4351
4352                 /*
4353                  * Instead of doing 'try_get_page()' below in the same_page
4354                  * loop, just check the count once here.
4355                  */
4356                 if (unlikely(page_count(page) <= 0)) {
4357                         if (pages) {
4358                                 spin_unlock(ptl);
4359                                 remainder = 0;
4360                                 err = -ENOMEM;
4361                                 break;
4362                         }
4363                 }
4364 same_page:
4365                 if (pages) {
4366                         pages[i] = mem_map_offset(page, pfn_offset);
4367                         get_page(pages[i]);
4368                 }
4369
4370                 if (vmas)
4371                         vmas[i] = vma;
4372
4373                 vaddr += PAGE_SIZE;
4374                 ++pfn_offset;
4375                 --remainder;
4376                 ++i;
4377                 if (vaddr < vma->vm_end && remainder &&
4378                                 pfn_offset < pages_per_huge_page(h)) {
4379                         /*
4380                          * We use pfn_offset to avoid touching the pageframes
4381                          * of this compound page.
4382                          */
4383                         goto same_page;
4384                 }
4385                 spin_unlock(ptl);
4386         }
4387         *nr_pages = remainder;
4388         /*
4389          * setting position is actually required only if remainder is
4390          * not zero but it's faster not to add a "if (remainder)"
4391          * branch.
4392          */
4393         *position = vaddr;
4394
4395         return i ? i : err;
4396 }
4397
4398 #ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
4399 /*
4400  * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
4401  * implement this.
4402  */
4403 #define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
4404 #endif
4405
4406 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
4407                 unsigned long address, unsigned long end, pgprot_t newprot)
4408 {
4409         struct mm_struct *mm = vma->vm_mm;
4410         unsigned long start = address;
4411         pte_t *ptep;
4412         pte_t pte;
4413         struct hstate *h = hstate_vma(vma);
4414         unsigned long pages = 0;
4415         bool shared_pmd = false;
4416         struct mmu_notifier_range range;
4417
4418         /*
4419          * In the case of shared PMDs, the area to flush could be beyond
4420          * start/end.  Set range.start/range.end to cover the maximum possible
4421          * range if PMD sharing is possible.
4422          */
4423         mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA,
4424                                 0, vma, mm, start, end);
4425         adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
4426
4427         BUG_ON(address >= end);
4428         flush_cache_range(vma, range.start, range.end);
4429
4430         mmu_notifier_invalidate_range_start(&range);
4431         i_mmap_lock_write(vma->vm_file->f_mapping);
4432         for (; address < end; address += huge_page_size(h)) {
4433                 spinlock_t *ptl;
4434                 ptep = huge_pte_offset(mm, address, huge_page_size(h));
4435                 if (!ptep)
4436                         continue;
4437                 ptl = huge_pte_lock(h, mm, ptep);
4438                 if (huge_pmd_unshare(mm, &address, ptep)) {
4439                         pages++;
4440                         spin_unlock(ptl);
4441                         shared_pmd = true;
4442                         continue;
4443                 }
4444                 pte = huge_ptep_get(ptep);
4445                 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
4446                         spin_unlock(ptl);
4447                         continue;
4448                 }
4449                 if (unlikely(is_hugetlb_entry_migration(pte))) {
4450                         swp_entry_t entry = pte_to_swp_entry(pte);
4451
4452                         if (is_write_migration_entry(entry)) {
4453                                 pte_t newpte;
4454
4455                                 make_migration_entry_read(&entry);
4456                                 newpte = swp_entry_to_pte(entry);
4457                                 set_huge_swap_pte_at(mm, address, ptep,
4458                                                      newpte, huge_page_size(h));
4459                                 pages++;
4460                         }
4461                         spin_unlock(ptl);
4462                         continue;
4463                 }
4464                 if (!huge_pte_none(pte)) {
4465                         pte_t old_pte;
4466
4467                         old_pte = huge_ptep_modify_prot_start(vma, address, ptep);
4468                         pte = pte_mkhuge(huge_pte_modify(old_pte, newprot));
4469                         pte = arch_make_huge_pte(pte, vma, NULL, 0);
4470                         huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte);
4471                         pages++;
4472                 }
4473                 spin_unlock(ptl);
4474         }
4475         /*
4476          * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
4477          * may have cleared our pud entry and done put_page on the page table:
4478          * once we release i_mmap_rwsem, another task can do the final put_page
4479          * and that page table be reused and filled with junk.  If we actually
4480          * did unshare a page of pmds, flush the range corresponding to the pud.
4481          */
4482         if (shared_pmd)
4483                 flush_hugetlb_tlb_range(vma, range.start, range.end);
4484         else
4485                 flush_hugetlb_tlb_range(vma, start, end);
4486         /*
4487          * No need to call mmu_notifier_invalidate_range() we are downgrading
4488          * page table protection not changing it to point to a new page.
4489          *
4490          * See Documentation/vm/mmu_notifier.rst
4491          */
4492         i_mmap_unlock_write(vma->vm_file->f_mapping);
4493         mmu_notifier_invalidate_range_end(&range);
4494
4495         return pages << h->order;
4496 }
4497
4498 int hugetlb_reserve_pages(struct inode *inode,
4499                                         long from, long to,
4500                                         struct vm_area_struct *vma,
4501                                         vm_flags_t vm_flags)
4502 {
4503         long ret, chg;
4504         struct hstate *h = hstate_inode(inode);
4505         struct hugepage_subpool *spool = subpool_inode(inode);
4506         struct resv_map *resv_map;
4507         long gbl_reserve;
4508
4509         /* This should never happen */
4510         if (from > to) {
4511                 VM_WARN(1, "%s called with a negative range\n", __func__);
4512                 return -EINVAL;
4513         }
4514
4515         /*
4516          * Only apply hugepage reservation if asked. At fault time, an
4517          * attempt will be made for VM_NORESERVE to allocate a page
4518          * without using reserves
4519          */
4520         if (vm_flags & VM_NORESERVE)
4521                 return 0;
4522
4523         /*
4524          * Shared mappings base their reservation on the number of pages that
4525          * are already allocated on behalf of the file. Private mappings need
4526          * to reserve the full area even if read-only as mprotect() may be
4527          * called to make the mapping read-write. Assume !vma is a shm mapping
4528          */
4529         if (!vma || vma->vm_flags & VM_MAYSHARE) {
4530                 /*
4531                  * resv_map can not be NULL as hugetlb_reserve_pages is only
4532                  * called for inodes for which resv_maps were created (see
4533                  * hugetlbfs_get_inode).
4534                  */
4535                 resv_map = inode_resv_map(inode);
4536
4537                 chg = region_chg(resv_map, from, to);
4538
4539         } else {
4540                 resv_map = resv_map_alloc();
4541                 if (!resv_map)
4542                         return -ENOMEM;
4543
4544                 chg = to - from;
4545
4546                 set_vma_resv_map(vma, resv_map);
4547                 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
4548         }
4549
4550         if (chg < 0) {
4551                 ret = chg;
4552                 goto out_err;
4553         }
4554
4555         /*
4556          * There must be enough pages in the subpool for the mapping. If
4557          * the subpool has a minimum size, there may be some global
4558          * reservations already in place (gbl_reserve).
4559          */
4560         gbl_reserve = hugepage_subpool_get_pages(spool, chg);
4561         if (gbl_reserve < 0) {
4562                 ret = -ENOSPC;
4563                 goto out_err;
4564         }
4565
4566         /*
4567          * Check enough hugepages are available for the reservation.
4568          * Hand the pages back to the subpool if there are not
4569          */
4570         ret = hugetlb_acct_memory(h, gbl_reserve);
4571         if (ret < 0) {
4572                 /* put back original number of pages, chg */
4573                 (void)hugepage_subpool_put_pages(spool, chg);
4574                 goto out_err;
4575         }
4576
4577         /*
4578          * Account for the reservations made. Shared mappings record regions
4579          * that have reservations as they are shared by multiple VMAs.
4580          * When the last VMA disappears, the region map says how much
4581          * the reservation was and the page cache tells how much of
4582          * the reservation was consumed. Private mappings are per-VMA and
4583          * only the consumed reservations are tracked. When the VMA
4584          * disappears, the original reservation is the VMA size and the
4585          * consumed reservations are stored in the map. Hence, nothing
4586          * else has to be done for private mappings here
4587          */
4588         if (!vma || vma->vm_flags & VM_MAYSHARE) {
4589                 long add = region_add(resv_map, from, to);
4590
4591                 if (unlikely(chg > add)) {
4592                         /*
4593                          * pages in this range were added to the reserve
4594                          * map between region_chg and region_add.  This
4595                          * indicates a race with alloc_huge_page.  Adjust
4596                          * the subpool and reserve counts modified above
4597                          * based on the difference.
4598                          */
4599                         long rsv_adjust;
4600
4601                         rsv_adjust = hugepage_subpool_put_pages(spool,
4602                                                                 chg - add);
4603                         hugetlb_acct_memory(h, -rsv_adjust);
4604                 }
4605         }
4606         return 0;
4607 out_err:
4608         if (!vma || vma->vm_flags & VM_MAYSHARE)
4609                 /* Don't call region_abort if region_chg failed */
4610                 if (chg >= 0)
4611                         region_abort(resv_map, from, to);
4612         if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
4613                 kref_put(&resv_map->refs, resv_map_release);
4614         return ret;
4615 }
4616
4617 long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
4618                                                                 long freed)
4619 {
4620         struct hstate *h = hstate_inode(inode);
4621         struct resv_map *resv_map = inode_resv_map(inode);
4622         long chg = 0;
4623         struct hugepage_subpool *spool = subpool_inode(inode);
4624         long gbl_reserve;
4625
4626         /*
4627          * Since this routine can be called in the evict inode path for all
4628          * hugetlbfs inodes, resv_map could be NULL.
4629          */
4630         if (resv_map) {
4631                 chg = region_del(resv_map, start, end);
4632                 /*
4633                  * region_del() can fail in the rare case where a region
4634                  * must be split and another region descriptor can not be
4635                  * allocated.  If end == LONG_MAX, it will not fail.
4636                  */
4637                 if (chg < 0)
4638                         return chg;
4639         }
4640
4641         spin_lock(&inode->i_lock);
4642         inode->i_blocks -= (blocks_per_huge_page(h) * freed);
4643         spin_unlock(&inode->i_lock);
4644
4645         /*
4646          * If the subpool has a minimum size, the number of global
4647          * reservations to be released may be adjusted.
4648          */
4649         gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
4650         hugetlb_acct_memory(h, -gbl_reserve);
4651
4652         return 0;
4653 }
4654
4655 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
4656 static unsigned long page_table_shareable(struct vm_area_struct *svma,
4657                                 struct vm_area_struct *vma,
4658                                 unsigned long addr, pgoff_t idx)
4659 {
4660         unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
4661                                 svma->vm_start;
4662         unsigned long sbase = saddr & PUD_MASK;
4663         unsigned long s_end = sbase + PUD_SIZE;
4664
4665         /* Allow segments to share if only one is marked locked */
4666         unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
4667         unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK;
4668
4669         /*
4670          * match the virtual addresses, permission and the alignment of the
4671          * page table page.
4672          */
4673         if (pmd_index(addr) != pmd_index(saddr) ||
4674             vm_flags != svm_flags ||
4675             sbase < svma->vm_start || svma->vm_end < s_end)
4676                 return 0;
4677
4678         return saddr;
4679 }
4680
4681 static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
4682 {
4683         unsigned long base = addr & PUD_MASK;
4684         unsigned long end = base + PUD_SIZE;
4685
4686         /*
4687          * check on proper vm_flags and page table alignment
4688          */
4689         if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end))
4690                 return true;
4691         return false;
4692 }
4693
4694 /*
4695  * Determine if start,end range within vma could be mapped by shared pmd.
4696  * If yes, adjust start and end to cover range associated with possible
4697  * shared pmd mappings.
4698  */
4699 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
4700                                 unsigned long *start, unsigned long *end)
4701 {
4702         unsigned long check_addr = *start;
4703
4704         if (!(vma->vm_flags & VM_MAYSHARE))
4705                 return;
4706
4707         for (check_addr = *start; check_addr < *end; check_addr += PUD_SIZE) {
4708                 unsigned long a_start = check_addr & PUD_MASK;
4709                 unsigned long a_end = a_start + PUD_SIZE;
4710
4711                 /*
4712                  * If sharing is possible, adjust start/end if necessary.
4713                  */
4714                 if (range_in_vma(vma, a_start, a_end)) {
4715                         if (a_start < *start)
4716                                 *start = a_start;
4717                         if (a_end > *end)
4718                                 *end = a_end;
4719                 }
4720         }
4721 }
4722
4723 /*
4724  * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
4725  * and returns the corresponding pte. While this is not necessary for the
4726  * !shared pmd case because we can allocate the pmd later as well, it makes the
4727  * code much cleaner. pmd allocation is essential for the shared case because
4728  * pud has to be populated inside the same i_mmap_rwsem section - otherwise
4729  * racing tasks could either miss the sharing (see huge_pte_offset) or select a
4730  * bad pmd for sharing.
4731  */
4732 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4733 {
4734         struct vm_area_struct *vma = find_vma(mm, addr);
4735         struct address_space *mapping = vma->vm_file->f_mapping;
4736         pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
4737                         vma->vm_pgoff;
4738         struct vm_area_struct *svma;
4739         unsigned long saddr;
4740         pte_t *spte = NULL;
4741         pte_t *pte;
4742         spinlock_t *ptl;
4743
4744         if (!vma_shareable(vma, addr))
4745                 return (pte_t *)pmd_alloc(mm, pud, addr);
4746
4747         i_mmap_lock_write(mapping);
4748         vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
4749                 if (svma == vma)
4750                         continue;
4751
4752                 saddr = page_table_shareable(svma, vma, addr, idx);
4753                 if (saddr) {
4754                         spte = huge_pte_offset(svma->vm_mm, saddr,
4755                                                vma_mmu_pagesize(svma));
4756                         if (spte) {
4757                                 get_page(virt_to_page(spte));
4758                                 break;
4759                         }
4760                 }
4761         }
4762
4763         if (!spte)
4764                 goto out;
4765
4766         ptl = huge_pte_lock(hstate_vma(vma), mm, spte);
4767         if (pud_none(*pud)) {
4768                 pud_populate(mm, pud,
4769                                 (pmd_t *)((unsigned long)spte & PAGE_MASK));
4770                 mm_inc_nr_pmds(mm);
4771         } else {
4772                 put_page(virt_to_page(spte));
4773         }
4774         spin_unlock(ptl);
4775 out:
4776         pte = (pte_t *)pmd_alloc(mm, pud, addr);
4777         i_mmap_unlock_write(mapping);
4778         return pte;
4779 }
4780
4781 /*
4782  * unmap huge page backed by shared pte.
4783  *
4784  * Hugetlb pte page is ref counted at the time of mapping.  If pte is shared
4785  * indicated by page_count > 1, unmap is achieved by clearing pud and
4786  * decrementing the ref count. If count == 1, the pte page is not shared.
4787  *
4788  * called with page table lock held.
4789  *
4790  * returns: 1 successfully unmapped a shared pte page
4791  *          0 the underlying pte page is not shared, or it is the last user
4792  */
4793 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4794 {
4795         pgd_t *pgd = pgd_offset(mm, *addr);
4796         p4d_t *p4d = p4d_offset(pgd, *addr);
4797         pud_t *pud = pud_offset(p4d, *addr);
4798
4799         BUG_ON(page_count(virt_to_page(ptep)) == 0);
4800         if (page_count(virt_to_page(ptep)) == 1)
4801                 return 0;
4802
4803         pud_clear(pud);
4804         put_page(virt_to_page(ptep));
4805         mm_dec_nr_pmds(mm);
4806         *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
4807         return 1;
4808 }
4809 #define want_pmd_share()        (1)
4810 #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
4811 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4812 {
4813         return NULL;
4814 }
4815
4816 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4817 {
4818         return 0;
4819 }
4820
4821 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
4822                                 unsigned long *start, unsigned long *end)
4823 {
4824 }
4825 #define want_pmd_share()        (0)
4826 #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
4827
4828 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
4829 pte_t *huge_pte_alloc(struct mm_struct *mm,
4830                         unsigned long addr, unsigned long sz)
4831 {
4832         pgd_t *pgd;
4833         p4d_t *p4d;
4834         pud_t *pud;
4835         pte_t *pte = NULL;
4836
4837         pgd = pgd_offset(mm, addr);
4838         p4d = p4d_alloc(mm, pgd, addr);
4839         if (!p4d)
4840                 return NULL;
4841         pud = pud_alloc(mm, p4d, addr);
4842         if (pud) {
4843                 if (sz == PUD_SIZE) {
4844                         pte = (pte_t *)pud;
4845                 } else {
4846                         BUG_ON(sz != PMD_SIZE);
4847                         if (want_pmd_share() && pud_none(*pud))
4848                                 pte = huge_pmd_share(mm, addr, pud);
4849                         else
4850                                 pte = (pte_t *)pmd_alloc(mm, pud, addr);
4851                 }
4852         }
4853         BUG_ON(pte && pte_present(*pte) && !pte_huge(*pte));
4854
4855         return pte;
4856 }
4857
4858 /*
4859  * huge_pte_offset() - Walk the page table to resolve the hugepage
4860  * entry at address @addr
4861  *
4862  * Return: Pointer to page table or swap entry (PUD or PMD) for
4863  * address @addr, or NULL if a p*d_none() entry is encountered and the
4864  * size @sz doesn't match the hugepage size at this level of the page
4865  * table.
4866  */
4867 pte_t *huge_pte_offset(struct mm_struct *mm,
4868                        unsigned long addr, unsigned long sz)
4869 {
4870         pgd_t *pgd;
4871         p4d_t *p4d;
4872         pud_t *pud;
4873         pmd_t *pmd;
4874
4875         pgd = pgd_offset(mm, addr);
4876         if (!pgd_present(*pgd))
4877                 return NULL;
4878         p4d = p4d_offset(pgd, addr);
4879         if (!p4d_present(*p4d))
4880                 return NULL;
4881
4882         pud = pud_offset(p4d, addr);
4883         if (sz != PUD_SIZE && pud_none(*pud))
4884                 return NULL;
4885         /* hugepage or swap? */
4886         if (pud_huge(*pud) || !pud_present(*pud))
4887                 return (pte_t *)pud;
4888
4889         pmd = pmd_offset(pud, addr);
4890         if (sz != PMD_SIZE && pmd_none(*pmd))
4891                 return NULL;
4892         /* hugepage or swap? */
4893         if (pmd_huge(*pmd) || !pmd_present(*pmd))
4894                 return (pte_t *)pmd;
4895
4896         return NULL;
4897 }
4898
4899 #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
4900
4901 /*
4902  * These functions are overwritable if your architecture needs its own
4903  * behavior.
4904  */
4905 struct page * __weak
4906 follow_huge_addr(struct mm_struct *mm, unsigned long address,
4907                               int write)
4908 {
4909         return ERR_PTR(-EINVAL);
4910 }
4911
4912 struct page * __weak
4913 follow_huge_pd(struct vm_area_struct *vma,
4914                unsigned long address, hugepd_t hpd, int flags, int pdshift)
4915 {
4916         WARN(1, "hugepd follow called with no support for hugepage directory format\n");
4917         return NULL;
4918 }
4919
4920 struct page * __weak
4921 follow_huge_pmd(struct mm_struct *mm, unsigned long address,
4922                 pmd_t *pmd, int flags)
4923 {
4924         struct page *page = NULL;
4925         spinlock_t *ptl;
4926         pte_t pte;
4927 retry:
4928         ptl = pmd_lockptr(mm, pmd);
4929         spin_lock(ptl);
4930         /*
4931          * make sure that the address range covered by this pmd is not
4932          * unmapped from other threads.
4933          */
4934         if (!pmd_huge(*pmd))
4935                 goto out;
4936         pte = huge_ptep_get((pte_t *)pmd);
4937         if (pte_present(pte)) {
4938                 page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
4939                 if (flags & FOLL_GET)
4940                         get_page(page);
4941         } else {
4942                 if (is_hugetlb_entry_migration(pte)) {
4943                         spin_unlock(ptl);
4944                         __migration_entry_wait(mm, (pte_t *)pmd, ptl);
4945                         goto retry;
4946                 }
4947                 /*
4948                  * hwpoisoned entry is treated as no_page_table in
4949                  * follow_page_mask().
4950                  */
4951         }
4952 out:
4953         spin_unlock(ptl);
4954         return page;
4955 }
4956
4957 struct page * __weak
4958 follow_huge_pud(struct mm_struct *mm, unsigned long address,
4959                 pud_t *pud, int flags)
4960 {
4961         if (flags & FOLL_GET)
4962                 return NULL;
4963
4964         return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
4965 }
4966
4967 struct page * __weak
4968 follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int flags)
4969 {
4970         if (flags & FOLL_GET)
4971                 return NULL;
4972
4973         return pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT);
4974 }
4975
4976 bool isolate_huge_page(struct page *page, struct list_head *list)
4977 {
4978         bool ret = true;
4979
4980         VM_BUG_ON_PAGE(!PageHead(page), page);
4981         spin_lock(&hugetlb_lock);
4982         if (!page_huge_active(page) || !get_page_unless_zero(page)) {
4983                 ret = false;
4984                 goto unlock;
4985         }
4986         clear_page_huge_active(page);
4987         list_move_tail(&page->lru, list);
4988 unlock:
4989         spin_unlock(&hugetlb_lock);
4990         return ret;
4991 }
4992
4993 void putback_active_hugepage(struct page *page)
4994 {
4995         VM_BUG_ON_PAGE(!PageHead(page), page);
4996         spin_lock(&hugetlb_lock);
4997         set_page_huge_active(page);
4998         list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
4999         spin_unlock(&hugetlb_lock);
5000         put_page(page);
5001 }
5002
5003 void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
5004 {
5005         struct hstate *h = page_hstate(oldpage);
5006
5007         hugetlb_cgroup_migrate(oldpage, newpage);
5008         set_page_owner_migrate_reason(newpage, reason);
5009
5010         /*
5011          * transfer temporary state of the new huge page. This is
5012          * reverse to other transitions because the newpage is going to
5013          * be final while the old one will be freed so it takes over
5014          * the temporary status.
5015          *
5016          * Also note that we have to transfer the per-node surplus state
5017          * here as well otherwise the global surplus count will not match
5018          * the per-node's.
5019          */
5020         if (PageHugeTemporary(newpage)) {
5021                 int old_nid = page_to_nid(oldpage);
5022                 int new_nid = page_to_nid(newpage);
5023
5024                 SetPageHugeTemporary(oldpage);
5025                 ClearPageHugeTemporary(newpage);
5026
5027                 spin_lock(&hugetlb_lock);
5028                 if (h->surplus_huge_pages_node[old_nid]) {
5029                         h->surplus_huge_pages_node[old_nid]--;
5030                         h->surplus_huge_pages_node[new_nid]++;
5031                 }
5032                 spin_unlock(&hugetlb_lock);
5033         }
5034 }