1 /*
2  * Generic hugetlb support.
3  * (C) Nadia Yvette Chambers, April 2004
4  */
5 #include <linux/list.h>
6 #include <linux/init.h>
7 #include <linux/module.h>
8 #include <linux/mm.h>
9 #include <linux/seq_file.h>
10 #include <linux/sysctl.h>
11 #include <linux/highmem.h>
12 #include <linux/mmu_notifier.h>
13 #include <linux/nodemask.h>
14 #include <linux/pagemap.h>
15 #include <linux/mempolicy.h>
16 #include <linux/compiler.h>
17 #include <linux/cpuset.h>
18 #include <linux/mutex.h>
19 #include <linux/bootmem.h>
20 #include <linux/sysfs.h>
21 #include <linux/slab.h>
22 #include <linux/rmap.h>
23 #include <linux/swap.h>
24 #include <linux/swapops.h>
25 #include <linux/page-isolation.h>
26 #include <linux/jhash.h>
27
28 #include <asm/page.h>
29 #include <asm/pgtable.h>
30 #include <asm/tlb.h>
31
32 #include <linux/io.h>
33 #include <linux/hugetlb.h>
34 #include <linux/hugetlb_cgroup.h>
35 #include <linux/node.h>
36 #include "internal.h"
37
38 int hugepages_treat_as_movable;
39
40 int hugetlb_max_hstate __read_mostly;
41 unsigned int default_hstate_idx;
42 struct hstate hstates[HUGE_MAX_HSTATE];
43 /*
44  * Minimum page order among possible hugepage sizes, set to a proper value
45  * at boot time.
46  */
47 static unsigned int minimum_order __read_mostly = UINT_MAX;
48
49 __initdata LIST_HEAD(huge_boot_pages);
50
51 /* for command line parsing */
52 static struct hstate * __initdata parsed_hstate;
53 static unsigned long __initdata default_hstate_max_huge_pages;
54 static unsigned long __initdata default_hstate_size;
55
56 /*
57  * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
58  * free_huge_pages, and surplus_huge_pages.
59  */
60 DEFINE_SPINLOCK(hugetlb_lock);
61
62 /*
63  * Serializes faults on the same logical page.  This is used to
64  * prevent spurious OOMs when the hugepage pool is fully utilized.
65  */
66 static int num_fault_mutexes;
67 static struct mutex *htlb_fault_mutex_table ____cacheline_aligned_in_smp;
68
69 /* Forward declaration */
70 static int hugetlb_acct_memory(struct hstate *h, long delta);
71
72 static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
73 {
74         bool free = (spool->count == 0) && (spool->used_hpages == 0);
75
76         spin_unlock(&spool->lock);
77
78         /* If no pages are used, and no other handles to the subpool
79          * remain, give up any reservations based on minimum size and
80          * free the subpool */
81         if (free) {
82                 if (spool->min_hpages != -1)
83                         hugetlb_acct_memory(spool->hstate,
84                                                 -spool->min_hpages);
85                 kfree(spool);
86         }
87 }
88
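/*
 * Allocate and initialise a subpool with the given maximum and minimum
 * sizes (in huge pages; -1 means "no limit").  A global reservation is
 * taken up front for the minimum size.  Returns NULL if the allocation
 * or the reservation fails.
 */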
89 struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
90                                                 long min_hpages)
91 {
92         struct hugepage_subpool *spool;
93
94         spool = kzalloc(sizeof(*spool), GFP_KERNEL);
95         if (!spool)
96                 return NULL;
97
98         spin_lock_init(&spool->lock);
99         spool->count = 1;
100         spool->max_hpages = max_hpages;
101         spool->hstate = h;
102         spool->min_hpages = min_hpages;
103
104         if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
105                 kfree(spool);
106                 return NULL;
107         }
108         spool->rsv_hpages = min_hpages;
109
110         return spool;
111 }
112
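/*
 * Drop a reference to the subpool.  When the last reference goes away
 * and no pages are in use, unlock_or_release_subpool() returns any
 * minimum-size reservation and frees the subpool.
 */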
113 void hugepage_put_subpool(struct hugepage_subpool *spool)
114 {
115         spin_lock(&spool->lock);
116         BUG_ON(!spool->count);
117         spool->count--;
118         unlock_or_release_subpool(spool);
119 }
120
121 /*
122  * Subpool accounting for allocating and reserving pages.
123  * Return -ENOMEM if there are not enough resources to satisfy
124  * the request.  Otherwise, return the number of pages by which the
125  * global pools must be adjusted (upward).  The returned value may
126  * only be different than the passed value (delta) in the case where
127  * a subpool minimum size must be maintained.
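 *
 * For example, with min_hpages = 10 and rsv_hpages = 3, a request of
 * delta = 5 consumes the 3 remaining subpool reserves and returns 2,
 * the number of pages the caller must still charge to the global pool.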
128  */
129 static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
130                                       long delta)
131 {
132         long ret = delta;
133
134         if (!spool)
135                 return ret;
136
137         spin_lock(&spool->lock);
138
139         if (spool->max_hpages != -1) {          /* maximum size accounting */
140                 if ((spool->used_hpages + delta) <= spool->max_hpages)
141                         spool->used_hpages += delta;
142                 else {
143                         ret = -ENOMEM;
144                         goto unlock_ret;
145                 }
146         }
147
148         if (spool->min_hpages != -1) {          /* minimum size accounting */
149                 if (delta > spool->rsv_hpages) {
150                         /*
151                          * Asking for more reserves than those already taken on
152                          * behalf of subpool.  Return difference.
153                          */
154                         ret = delta - spool->rsv_hpages;
155                         spool->rsv_hpages = 0;
156                 } else {
157                         ret = 0;        /* reserves already accounted for */
158                         spool->rsv_hpages -= delta;
159                 }
160         }
161
162 unlock_ret:
163         spin_unlock(&spool->lock);
164         return ret;
165 }
166
167 /*
168  * Subpool accounting for freeing and unreserving pages.
169  * Return the number of global page reservations that must be dropped.
170  * The return value may only be different than the passed value (delta)
171  * in the case where a subpool minimum size must be maintained.
172  */
173 static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
174                                        long delta)
175 {
176         long ret = delta;
177
178         if (!spool)
179                 return delta;
180
181         spin_lock(&spool->lock);
182
183         if (spool->max_hpages != -1)            /* maximum size accounting */
184                 spool->used_hpages -= delta;
185
186         if (spool->min_hpages != -1) {          /* minimum size accounting */
187                 if (spool->rsv_hpages + delta <= spool->min_hpages)
188                         ret = 0;
189                 else
190                         ret = spool->rsv_hpages + delta - spool->min_hpages;
191
192                 spool->rsv_hpages += delta;
193                 if (spool->rsv_hpages > spool->min_hpages)
194                         spool->rsv_hpages = spool->min_hpages;
195         }
196
197         /*
198          * If hugetlbfs_put_super couldn't free spool due to an outstanding
199          * quota reference, free it now.
200          */
201         unlock_or_release_subpool(spool);
202
203         return ret;
204 }
205
206 static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
207 {
208         return HUGETLBFS_SB(inode->i_sb)->spool;
209 }
210
211 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
212 {
213         return subpool_inode(file_inode(vma->vm_file));
214 }
215
216 /*
217  * Region tracking -- allows tracking of reservations and instantiated pages
218  *                    across the pages in a mapping.
219  *
220  * The region data structures are embedded into a resv_map and protected
221  * by a resv_map's lock.  The set of regions within the resv_map represent
222  * reservations for huge pages, or huge pages that have already been
223  * instantiated within the map.  The from and to elements are huge page
224  * indices into the associated mapping.  from indicates the starting index
225  * of the region.  to represents the first index past the end of the region.
226  *
227  * For example, a file region structure with from == 0 and to == 4 represents
228  * four huge pages in a mapping.  It is important to note that the to element
229  * represents the first element past the end of the region. This is used in
230  * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
231  *
232  * Interval notation of the form [from, to) will be used to indicate that
233  * the endpoint from is inclusive and to is exclusive.
234  */
235 struct file_region {
236         struct list_head link;
237         long from;
238         long to;
239 };
240
241 /*
242  * Add the huge page range represented by [f, t) to the reserve
243  * map.  Existing regions will be expanded to accommodate the
244  * specified range.  We know only existing regions need to be
245  * expanded, because region_add is only called after region_chg
246  * with the same range.  If a new file_region structure must
247  * be allocated, it is done in region_chg.
248  */
249 static long region_add(struct resv_map *resv, long f, long t)
250 {
251         struct list_head *head = &resv->regions;
252         struct file_region *rg, *nrg, *trg;
253
254         spin_lock(&resv->lock);
255         /* Locate the region we are either in or before. */
256         list_for_each_entry(rg, head, link)
257                 if (f <= rg->to)
258                         break;
259
260         /* Round our left edge to the current segment if it encloses us. */
261         if (f > rg->from)
262                 f = rg->from;
263
264         /* Check for and consume any regions we now overlap with. */
265         nrg = rg;
266         list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
267                 if (&rg->link == head)
268                         break;
269                 if (rg->from > t)
270                         break;
271
272                 /* If this area reaches higher then extend our area to
273                  * include it completely.  If this is not the first area
274                  * which we intend to reuse, free it. */
275                 if (rg->to > t)
276                         t = rg->to;
277                 if (rg != nrg) {
278                         list_del(&rg->link);
279                         kfree(rg);
280                 }
281         }
282         nrg->from = f;
283         nrg->to = t;
284         spin_unlock(&resv->lock);
285         return 0;
286 }
287
288 /*
289  * Examine the existing reserve map and determine how many
290  * huge pages in the specified range [f, t) are NOT currently
291  * represented.  This routine is called before a subsequent
292  * call to region_add that will actually modify the reserve
293  * map to add the specified range [f, t).  region_chg does
294  * not change the number of huge pages represented by the
295  * map.  However, if the existing regions in the map can not
296  * be expanded to represent the new range, a new file_region
297  * structure is added to the map as a placeholder.  This is
298  * so that the subsequent region_add call will have all the
299  * regions it needs and will not fail.
300  *
301  * Returns the number of huge pages that need to be added
302  * to the existing reservation map for the range [f, t).
303  * This number is greater than or equal to zero.  -ENOMEM is
304  * returned if a new file_region structure is needed and can
305  * not be allocated.
306  */
307 static long region_chg(struct resv_map *resv, long f, long t)
308 {
309         struct list_head *head = &resv->regions;
310         struct file_region *rg, *nrg = NULL;
311         long chg = 0;
312
313 retry:
314         spin_lock(&resv->lock);
315         /* Locate the region we are before or in. */
316         list_for_each_entry(rg, head, link)
317                 if (f <= rg->to)
318                         break;
319
320         /* If we are below the current region then a new region is required.
321          * Subtle: allocate a new region at the position but make it zero
322          * size such that we can guarantee to record the reservation. */
323         if (&rg->link == head || t < rg->from) {
324                 if (!nrg) {
325                         spin_unlock(&resv->lock);
326                         nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
327                         if (!nrg)
328                                 return -ENOMEM;
329
330                         nrg->from = f;
331                         nrg->to   = f;
332                         INIT_LIST_HEAD(&nrg->link);
333                         goto retry;
334                 }
335
336                 list_add(&nrg->link, rg->link.prev);
337                 chg = t - f;
338                 goto out_nrg;
339         }
340
341         /* Round our left edge to the current segment if it encloses us. */
342         if (f > rg->from)
343                 f = rg->from;
344         chg = t - f;
345
346         /* Check for and consume any regions we now overlap with. */
347         list_for_each_entry(rg, rg->link.prev, link) {
348                 if (&rg->link == head)
349                         break;
350                 if (rg->from > t)
351                         goto out;
352
353                 /* We overlap with this area, if it extends further than
354                  * us then we must extend ourselves.  Account for its
355                  * existing reservation. */
356                 if (rg->to > t) {
357                         chg += rg->to - t;
358                         t = rg->to;
359                 }
360                 chg -= rg->to - rg->from;
361         }
362
363 out:
364         spin_unlock(&resv->lock);
365         /*  We already know we raced and no longer need the new region */
366         kfree(nrg);
367         return chg;
368 out_nrg:
369         spin_unlock(&resv->lock);
370         return chg;
371 }
372
373 /*
374  * Truncate the reserve map at index 'end'.  Modify/truncate any
375  * region which contains end.  Delete any regions past end.
376  * Return the number of huge pages removed from the map.
377  */
378 static long region_truncate(struct resv_map *resv, long end)
379 {
380         struct list_head *head = &resv->regions;
381         struct file_region *rg, *trg;
382         long chg = 0;
383
384         spin_lock(&resv->lock);
385         /* Locate the region we are either in or before. */
386         list_for_each_entry(rg, head, link)
387                 if (end <= rg->to)
388                         break;
389         if (&rg->link == head)
390                 goto out;
391
392         /* If we are in the middle of a region then adjust it. */
393         if (end > rg->from) {
394                 chg = rg->to - end;
395                 rg->to = end;
396                 rg = list_entry(rg->link.next, typeof(*rg), link);
397         }
398
399         /* Drop any remaining regions. */
400         list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
401                 if (&rg->link == head)
402                         break;
403                 chg += rg->to - rg->from;
404                 list_del(&rg->link);
405                 kfree(rg);
406         }
407
408 out:
409         spin_unlock(&resv->lock);
410         return chg;
411 }
412
413 /*
414  * Count and return the number of huge pages in the reserve map
415  * that intersect with the range [f, t).
416  */
417 static long region_count(struct resv_map *resv, long f, long t)
418 {
419         struct list_head *head = &resv->regions;
420         struct file_region *rg;
421         long chg = 0;
422
423         spin_lock(&resv->lock);
424         /* Locate each segment we overlap with, and count that overlap. */
425         list_for_each_entry(rg, head, link) {
426                 long seg_from;
427                 long seg_to;
428
429                 if (rg->to <= f)
430                         continue;
431                 if (rg->from >= t)
432                         break;
433
434                 seg_from = max(rg->from, f);
435                 seg_to = min(rg->to, t);
436
437                 chg += seg_to - seg_from;
438         }
439         spin_unlock(&resv->lock);
440
441         return chg;
442 }
443
444 /*
445  * Convert the address within this vma to the page offset within
446  * the mapping, in pagecache page units; huge pages here.
447  */
448 static pgoff_t vma_hugecache_offset(struct hstate *h,
449                         struct vm_area_struct *vma, unsigned long address)
450 {
451         return ((address - vma->vm_start) >> huge_page_shift(h)) +
452                         (vma->vm_pgoff >> huge_page_order(h));
453 }
454
455 pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
456                                      unsigned long address)
457 {
458         return vma_hugecache_offset(hstate_vma(vma), vma, address);
459 }
460
461 /*
462  * Return the size of the pages allocated when backing a VMA. In the majority
463  * of cases this will be the same size as used by the page table entries.
464  */
465 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
466 {
467         struct hstate *hstate;
468
469         if (!is_vm_hugetlb_page(vma))
470                 return PAGE_SIZE;
471
472         hstate = hstate_vma(vma);
473
474         return 1UL << huge_page_shift(hstate);
475 }
476 EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
477
478 /*
479  * Return the page size being used by the MMU to back a VMA. In the majority
480  * of cases, the page size used by the kernel matches the MMU size. On
481  * architectures where it differs, an architecture-specific version of this
482  * function is required.
483  */
484 #ifndef vma_mmu_pagesize
485 unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
486 {
487         return vma_kernel_pagesize(vma);
488 }
489 #endif
490
491 /*
492  * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
493  * bits of the reservation map pointer, which are always clear due to
494  * alignment.
495  */
496 #define HPAGE_RESV_OWNER    (1UL << 0)
497 #define HPAGE_RESV_UNMAPPED (1UL << 1)
498 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
499
500 /*
501  * These helpers are used to track how many pages are reserved for
502  * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
503  * is guaranteed to have their future faults succeed.
504  *
505  * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
506  * the reserve counters are updated with the hugetlb_lock held. It is safe
507  * to reset the VMA at fork() time as it is not in use yet and there is no
508  * chance of the global counters getting corrupted as a result of the values.
509  *
510  * The private mapping reservation is represented in a subtly different
511  * manner to a shared mapping.  A shared mapping has a region map associated
512  * with the underlying file; this region map represents the backing file
513  * pages which have ever had a reservation assigned, and this persists even
514  * after the page is instantiated.  A private mapping has a region map
515  * associated with the original mmap which is attached to all VMAs which
516  * reference it; this region map represents those offsets which have consumed
517  * a reservation, i.e. where pages have been instantiated.
518  */
519 static unsigned long get_vma_private_data(struct vm_area_struct *vma)
520 {
521         return (unsigned long)vma->vm_private_data;
522 }
523
524 static void set_vma_private_data(struct vm_area_struct *vma,
525                                                         unsigned long value)
526 {
527         vma->vm_private_data = (void *)value;
528 }
529
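/*
 * Allocate a reservation map with a single reference and an empty
 * region list.  Returns NULL if the allocation fails.
 */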
530 struct resv_map *resv_map_alloc(void)
531 {
532         struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
533         if (!resv_map)
534                 return NULL;
535
536         kref_init(&resv_map->refs);
537         spin_lock_init(&resv_map->lock);
538         INIT_LIST_HEAD(&resv_map->regions);
539
540         return resv_map;
541 }
542
543 void resv_map_release(struct kref *ref)
544 {
545         struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
546
547         /* Clear out any active regions before we release the map. */
548         region_truncate(resv_map, 0);
549         kfree(resv_map);
550 }
551
552 static inline struct resv_map *inode_resv_map(struct inode *inode)
553 {
554         return inode->i_mapping->private_data;
555 }
556
557 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
558 {
559         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
560         if (vma->vm_flags & VM_MAYSHARE) {
561                 struct address_space *mapping = vma->vm_file->f_mapping;
562                 struct inode *inode = mapping->host;
563
564                 return inode_resv_map(inode);
565
566         } else {
567                 return (struct resv_map *)(get_vma_private_data(vma) &
568                                                         ~HPAGE_RESV_MASK);
569         }
570 }
571
572 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
573 {
574         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
575         VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
576
577         set_vma_private_data(vma, (get_vma_private_data(vma) &
578                                 HPAGE_RESV_MASK) | (unsigned long)map);
579 }
580
581 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
582 {
583         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
584         VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
585
586         set_vma_private_data(vma, get_vma_private_data(vma) | flags);
587 }
588
589 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
590 {
591         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
592
593         return (get_vma_private_data(vma) & flag) != 0;
594 }
595
596 /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
597 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
598 {
599         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
600         if (!(vma->vm_flags & VM_MAYSHARE))
601                 vma->vm_private_data = (void *)0;
602 }
603
604 /* Returns true if the VMA has associated reserve pages */
605 static int vma_has_reserves(struct vm_area_struct *vma, long chg)
606 {
607         if (vma->vm_flags & VM_NORESERVE) {
608                 /*
609          * This address is already reserved by another process (chg == 0),
610          * so we should decrement the reserved count. Without decrementing,
611          * the reserve count remains after releasing the inode, because this
612          * allocated page will go into the page cache and is regarded as
613          * coming from the reserved pool in the releasing step.  Currently, we
614          * don't have any other solution to deal with this situation
615          * properly, so add a work-around here.
616                  */
617                 if (vma->vm_flags & VM_MAYSHARE && chg == 0)
618                         return 1;
619                 else
620                         return 0;
621         }
622
623         /* Shared mappings always use reserves */
624         if (vma->vm_flags & VM_MAYSHARE)
625                 return 1;
626
627         /*
628          * Only the process that called mmap() has reserves for
629          * private mappings.
630          */
631         if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
632                 return 1;
633
634         return 0;
635 }
636
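/*
 * Return a huge page to its node's free list and update the free page
 * counters.  Caller must hold hugetlb_lock.
 */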
637 static void enqueue_huge_page(struct hstate *h, struct page *page)
638 {
639         int nid = page_to_nid(page);
640         list_move(&page->lru, &h->hugepage_freelists[nid]);
641         h->free_huge_pages++;
642         h->free_huge_pages_node[nid]++;
643 }
644
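/*
 * Take a non-isolated huge page off the given node's free list, move it
 * to the active list and give it a reference.  Returns NULL if the node
 * has no usable free huge page.  Caller must hold hugetlb_lock.
 */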
645 static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
646 {
647         struct page *page;
648
649         list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
650                 if (!is_migrate_isolate_page(page))
651                         break;
652         /*
653          * If a 'non-isolated free hugepage' is not found on the list,
654          * the allocation fails.
655          */
656         if (&h->hugepage_freelists[nid] == &page->lru)
657                 return NULL;
658         list_move(&page->lru, &h->hugepage_activelist);
659         set_page_refcounted(page);
660         h->free_huge_pages--;
661         h->free_huge_pages_node[nid]--;
662         return page;
663 }
664
665 /* Movability of hugepages depends on migration support. */
666 static inline gfp_t htlb_alloc_mask(struct hstate *h)
667 {
668         if (hugepages_treat_as_movable || hugepage_migration_supported(h))
669                 return GFP_HIGHUSER_MOVABLE;
670         else
671                 return GFP_HIGHUSER;
672 }
673
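/*
 * Dequeue a free huge page for a fault at @address in @vma, walking the
 * zonelist allowed by the VMA's memory policy and cpuset.  A reservation
 * is consumed unless @avoid_reserve is set or the VMA has no reserves
 * (@chg describes the reservation state).  Returns NULL if no suitable
 * page is available.
 */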
674 static struct page *dequeue_huge_page_vma(struct hstate *h,
675                                 struct vm_area_struct *vma,
676                                 unsigned long address, int avoid_reserve,
677                                 long chg)
678 {
679         struct page *page = NULL;
680         struct mempolicy *mpol;
681         nodemask_t *nodemask;
682         struct zonelist *zonelist;
683         struct zone *zone;
684         struct zoneref *z;
685         unsigned int cpuset_mems_cookie;
686
687         /*
688          * A child process with MAP_PRIVATE mappings created by its parent
689          * has no page reserves. This check ensures that reservations are
690          * not "stolen". The child may still get SIGKILLed.
691          */
692         if (!vma_has_reserves(vma, chg) &&
693                         h->free_huge_pages - h->resv_huge_pages == 0)
694                 goto err;
695
696         /* If reserves cannot be used, ensure enough pages are in the pool */
697         if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
698                 goto err;
699
700 retry_cpuset:
701         cpuset_mems_cookie = read_mems_allowed_begin();
702         zonelist = huge_zonelist(vma, address,
703                                         htlb_alloc_mask(h), &mpol, &nodemask);
704
705         for_each_zone_zonelist_nodemask(zone, z, zonelist,
706                                                 MAX_NR_ZONES - 1, nodemask) {
707                 if (cpuset_zone_allowed(zone, htlb_alloc_mask(h))) {
708                         page = dequeue_huge_page_node(h, zone_to_nid(zone));
709                         if (page) {
710                                 if (avoid_reserve)
711                                         break;
712                                 if (!vma_has_reserves(vma, chg))
713                                         break;
714
715                                 SetPagePrivate(page);
716                                 h->resv_huge_pages--;
717                                 break;
718                         }
719                 }
720         }
721
722         mpol_cond_put(mpol);
723         if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
724                 goto retry_cpuset;
725         return page;
726
727 err:
728         return NULL;
729 }
730
731 /*
732  * common helper functions for hstate_next_node_to_{alloc|free}.
733  * We may have allocated or freed a huge page based on a different
734  * nodes_allowed previously, so h->next_node_to_{alloc|free} might
735  * be outside of *nodes_allowed.  Ensure that we use an allowed
736  * node for alloc or free.
737  */
738 static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
739 {
740         nid = next_node(nid, *nodes_allowed);
741         if (nid == MAX_NUMNODES)
742                 nid = first_node(*nodes_allowed);
743         VM_BUG_ON(nid >= MAX_NUMNODES);
744
745         return nid;
746 }
747
748 static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
749 {
750         if (!node_isset(nid, *nodes_allowed))
751                 nid = next_node_allowed(nid, nodes_allowed);
752         return nid;
753 }
754
755 /*
756  * returns the previously saved node ["this node"] from which to
757  * allocate a persistent huge page for the pool and advances the
758  * next node from which to allocate, handling wrap at end of node
759  * mask.
760  */
761 static int hstate_next_node_to_alloc(struct hstate *h,
762                                         nodemask_t *nodes_allowed)
763 {
764         int nid;
765
766         VM_BUG_ON(!nodes_allowed);
767
768         nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
769         h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
770
771         return nid;
772 }
773
774 /*
775  * helper for free_pool_huge_page() - return the previously saved
776  * node ["this node"] from which to free a huge page.  Advance the
777  * next node id whether or not we find a free huge page to free so
778  * that the next attempt to free addresses the next node.
779  */
780 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
781 {
782         int nid;
783
784         VM_BUG_ON(!nodes_allowed);
785
786         nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
787         h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
788
789         return nid;
790 }
791
792 #define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)           \
793         for (nr_nodes = nodes_weight(*mask);                            \
794                 nr_nodes > 0 &&                                         \
795                 ((node = hstate_next_node_to_alloc(hs, mask)) || 1);    \
796                 nr_nodes--)
797
798 #define for_each_node_mask_to_free(hs, nr_nodes, node, mask)            \
799         for (nr_nodes = nodes_weight(*mask);                            \
800                 nr_nodes > 0 &&                                         \
801                 ((node = hstate_next_node_to_free(hs, mask)) || 1);     \
802                 nr_nodes--)
803
804 #if defined(CONFIG_CMA) && defined(CONFIG_X86_64)
805 static void destroy_compound_gigantic_page(struct page *page,
806                                         unsigned long order)
807 {
808         int i;
809         int nr_pages = 1 << order;
810         struct page *p = page + 1;
811
812         for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
813                 __ClearPageTail(p);
814                 set_page_refcounted(p);
815                 p->first_page = NULL;
816         }
817
818         set_compound_order(page, 0);
819         __ClearPageHead(page);
820 }
821
822 static void free_gigantic_page(struct page *page, unsigned order)
823 {
824         free_contig_range(page_to_pfn(page), 1 << order);
825 }
826
827 static int __alloc_gigantic_page(unsigned long start_pfn,
828                                 unsigned long nr_pages)
829 {
830         unsigned long end_pfn = start_pfn + nr_pages;
831         return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
832 }
833
834 static bool pfn_range_valid_gigantic(unsigned long start_pfn,
835                                 unsigned long nr_pages)
836 {
837         unsigned long i, end_pfn = start_pfn + nr_pages;
838         struct page *page;
839
840         for (i = start_pfn; i < end_pfn; i++) {
841                 if (!pfn_valid(i))
842                         return false;
843
844                 page = pfn_to_page(i);
845
846                 if (PageReserved(page))
847                         return false;
848
849                 if (page_count(page) > 0)
850                         return false;
851
852                 if (PageHuge(page))
853                         return false;
854         }
855
856         return true;
857 }
858
859 static bool zone_spans_last_pfn(const struct zone *zone,
860                         unsigned long start_pfn, unsigned long nr_pages)
861 {
862         unsigned long last_pfn = start_pfn + nr_pages - 1;
863         return zone_spans_pfn(zone, last_pfn);
864 }
865
866 static struct page *alloc_gigantic_page(int nid, unsigned order)
867 {
868         unsigned long nr_pages = 1 << order;
869         unsigned long ret, pfn, flags;
870         struct zone *z;
871
872         z = NODE_DATA(nid)->node_zones;
873         for (; z - NODE_DATA(nid)->node_zones < MAX_NR_ZONES; z++) {
874                 spin_lock_irqsave(&z->lock, flags);
875
876                 pfn = ALIGN(z->zone_start_pfn, nr_pages);
877                 while (zone_spans_last_pfn(z, pfn, nr_pages)) {
878                         if (pfn_range_valid_gigantic(pfn, nr_pages)) {
879                                 /*
880                                  * We release the zone lock here because
881                                  * alloc_contig_range() will also lock the zone
882                                  * at some point. If there's an allocation
883                                  * spinning on this lock, it may win the race
884                                  * and cause alloc_contig_range() to fail...
885                                  */
886                                 spin_unlock_irqrestore(&z->lock, flags);
887                                 ret = __alloc_gigantic_page(pfn, nr_pages);
888                                 if (!ret)
889                                         return pfn_to_page(pfn);
890                                 spin_lock_irqsave(&z->lock, flags);
891                         }
892                         pfn += nr_pages;
893                 }
894
895                 spin_unlock_irqrestore(&z->lock, flags);
896         }
897
898         return NULL;
899 }
900
901 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
902 static void prep_compound_gigantic_page(struct page *page, unsigned long order);
903
904 static struct page *alloc_fresh_gigantic_page_node(struct hstate *h, int nid)
905 {
906         struct page *page;
907
908         page = alloc_gigantic_page(nid, huge_page_order(h));
909         if (page) {
910                 prep_compound_gigantic_page(page, huge_page_order(h));
911                 prep_new_huge_page(h, page, nid);
912         }
913
914         return page;
915 }
916
917 static int alloc_fresh_gigantic_page(struct hstate *h,
918                                 nodemask_t *nodes_allowed)
919 {
920         struct page *page = NULL;
921         int nr_nodes, node;
922
923         for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
924                 page = alloc_fresh_gigantic_page_node(h, node);
925                 if (page)
926                         return 1;
927         }
928
929         return 0;
930 }
931
932 static inline bool gigantic_page_supported(void) { return true; }
933 #else
934 static inline bool gigantic_page_supported(void) { return false; }
935 static inline void free_gigantic_page(struct page *page, unsigned order) { }
936 static inline void destroy_compound_gigantic_page(struct page *page,
937                                                 unsigned long order) { }
938 static inline int alloc_fresh_gigantic_page(struct hstate *h,
939                                         nodemask_t *nodes_allowed) { return 0; }
940 #endif
941
942 static void update_and_free_page(struct hstate *h, struct page *page)
943 {
944         int i;
945
946         if (hstate_is_gigantic(h) && !gigantic_page_supported())
947                 return;
948
949         h->nr_huge_pages--;
950         h->nr_huge_pages_node[page_to_nid(page)]--;
951         for (i = 0; i < pages_per_huge_page(h); i++) {
952                 page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
953                                 1 << PG_referenced | 1 << PG_dirty |
954                                 1 << PG_active | 1 << PG_private |
955                                 1 << PG_writeback);
956         }
957         VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
958         set_compound_page_dtor(page, NULL);
959         set_page_refcounted(page);
960         if (hstate_is_gigantic(h)) {
961                 destroy_compound_gigantic_page(page, huge_page_order(h));
962                 free_gigantic_page(page, huge_page_order(h));
963         } else {
964                 arch_release_hugepage(page);
965                 __free_pages(page, huge_page_order(h));
966         }
967 }
968
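/*
 * Return the hstate for the given huge page size, or NULL if no pool of
 * that size has been set up.
 */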
969 struct hstate *size_to_hstate(unsigned long size)
970 {
971         struct hstate *h;
972
973         for_each_hstate(h) {
974                 if (huge_page_size(h) == size)
975                         return h;
976         }
977         return NULL;
978 }
979
980 /*
981  * Test to determine whether the hugepage is "active/in-use" (i.e. being linked
982  * to hstate->hugepage_activelist.)
983  *
984  * This function can be called for tail pages, but never returns true for them.
985  */
986 bool page_huge_active(struct page *page)
987 {
988         VM_BUG_ON_PAGE(!PageHuge(page), page);
989         return PageHead(page) && PagePrivate(&page[1]);
990 }
991
992 /* never called for tail page */
993 static void set_page_huge_active(struct page *page)
994 {
995         VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
996         SetPagePrivate(&page[1]);
997 }
998
999 static void clear_page_huge_active(struct page *page)
1000 {
1001         VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1002         ClearPagePrivate(&page[1]);
1003 }
1004
1005 void free_huge_page(struct page *page)
1006 {
1007         /*
1008          * Can't pass hstate in here because it is called from the
1009          * compound page destructor.
1010          */
1011         struct hstate *h = page_hstate(page);
1012         int nid = page_to_nid(page);
1013         struct hugepage_subpool *spool =
1014                 (struct hugepage_subpool *)page_private(page);
1015         bool restore_reserve;
1016
1017         set_page_private(page, 0);
1018         page->mapping = NULL;
1019         BUG_ON(page_count(page));
1020         BUG_ON(page_mapcount(page));
1021         restore_reserve = PagePrivate(page);
1022         ClearPagePrivate(page);
1023
1024         /*
1025          * A return code of zero implies that the subpool will be under its
1026          * minimum size if the reservation is not restored after the page is freed.
1027          * Therefore, force restore_reserve operation.
1028          */
1029         if (hugepage_subpool_put_pages(spool, 1) == 0)
1030                 restore_reserve = true;
1031
1032         spin_lock(&hugetlb_lock);
1033         clear_page_huge_active(page);
1034         hugetlb_cgroup_uncharge_page(hstate_index(h),
1035                                      pages_per_huge_page(h), page);
1036         if (restore_reserve)
1037                 h->resv_huge_pages++;
1038
1039         if (h->surplus_huge_pages_node[nid]) {
1040                 /* remove the page from active list */
1041                 list_del(&page->lru);
1042                 update_and_free_page(h, page);
1043                 h->surplus_huge_pages--;
1044                 h->surplus_huge_pages_node[nid]--;
1045         } else {
1046                 arch_clear_hugepage_flags(page);
1047                 enqueue_huge_page(h, page);
1048         }
1049         spin_unlock(&hugetlb_lock);
1050 }
1051
1052 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
1053 {
1054         INIT_LIST_HEAD(&page->lru);
1055         set_compound_page_dtor(page, free_huge_page);
1056         spin_lock(&hugetlb_lock);
1057         set_hugetlb_cgroup(page, NULL);
1058         h->nr_huge_pages++;
1059         h->nr_huge_pages_node[nid]++;
1060         spin_unlock(&hugetlb_lock);
1061         put_page(page); /* free it into the hugepage allocator */
1062 }
1063
1064 static void prep_compound_gigantic_page(struct page *page, unsigned long order)
1065 {
1066         int i;
1067         int nr_pages = 1 << order;
1068         struct page *p = page + 1;
1069
1070         /* we rely on prep_new_huge_page to set the destructor */
1071         set_compound_order(page, order);
1072         __SetPageHead(page);
1073         __ClearPageReserved(page);
1074         for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1075                 /*
1076                  * For gigantic hugepages allocated through bootmem at
1077                  * boot, it's safer to be consistent with the not-gigantic
1078                  * hugepages and clear the PG_reserved bit from all tail pages
1079                  * too.  Otherwise drivers using get_user_pages() to access tail
1080                  * pages may get the reference counting wrong if they see
1081                  * PG_reserved set on a tail page (despite the head page not
1082                  * having PG_reserved set).  Enforcing this consistency between
1083                  * head and tail pages allows drivers to optimize away a check
1084                  * on the head page when they need to know if put_page() is needed
1085                  * after get_user_pages().
1086                  */
1087                 __ClearPageReserved(p);
1088                 set_page_count(p, 0);
1089                 p->first_page = page;
1090                 /* Make sure p->first_page is always valid for PageTail() */
1091                 smp_wmb();
1092                 __SetPageTail(p);
1093         }
1094 }
1095
1096 /*
1097  * PageHuge() only returns true for hugetlbfs pages, but not for normal or
1098  * transparent huge pages.  See the PageTransHuge() documentation for more
1099  * details.
1100  */
1101 int PageHuge(struct page *page)
1102 {
1103         if (!PageCompound(page))
1104                 return 0;
1105
1106         page = compound_head(page);
1107         return get_compound_page_dtor(page) == free_huge_page;
1108 }
1109 EXPORT_SYMBOL_GPL(PageHuge);
1110
1111 /*
1112  * PageHeadHuge() only returns true for hugetlbfs head page, but not for
1113  * normal or transparent huge pages.
1114  */
1115 int PageHeadHuge(struct page *page_head)
1116 {
1117         if (!PageHead(page_head))
1118                 return 0;
1119
1120         return get_compound_page_dtor(page_head) == free_huge_page;
1121 }
1122
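/*
 * Return the index of @page within its mapping in units of base pages,
 * accounting for the possibility that @page is a tail page of a huge page.
 */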
1123 pgoff_t __basepage_index(struct page *page)
1124 {
1125         struct page *page_head = compound_head(page);
1126         pgoff_t index = page_index(page_head);
1127         unsigned long compound_idx;
1128
1129         if (!PageHuge(page_head))
1130                 return page_index(page);
1131
1132         if (compound_order(page_head) >= MAX_ORDER)
1133                 compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
1134         else
1135                 compound_idx = page - page_head;
1136
1137         return (index << compound_order(page_head)) + compound_idx;
1138 }
1139
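/*
 * Allocate a fresh huge page from the buddy allocator on the given node
 * and add it to the pool.  Returns NULL if the allocation or the
 * architecture-specific preparation fails.
 */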
1140 static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
1141 {
1142         struct page *page;
1143
1144         page = alloc_pages_exact_node(nid,
1145                 htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
1146                                                 __GFP_REPEAT|__GFP_NOWARN,
1147                 huge_page_order(h));
1148         if (page) {
1149                 if (arch_prepare_hugepage(page)) {
1150                         __free_pages(page, huge_page_order(h));
1151                         return NULL;
1152                 }
1153                 prep_new_huge_page(h, page, nid);
1154         }
1155
1156         return page;
1157 }
1158
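/*
 * Allocate one fresh huge page, interleaving across the allowed nodes.
 * Returns 1 on success and 0 if no node could satisfy the allocation.
 */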
1159 static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
1160 {
1161         struct page *page;
1162         int nr_nodes, node;
1163         int ret = 0;
1164
1165         for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1166                 page = alloc_fresh_huge_page_node(h, node);
1167                 if (page) {
1168                         ret = 1;
1169                         break;
1170                 }
1171         }
1172
1173         if (ret)
1174                 count_vm_event(HTLB_BUDDY_PGALLOC);
1175         else
1176                 count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1177
1178         return ret;
1179 }
1180
1181 /*
1182  * Free huge page from pool from next node to free.
1183  * Attempt to keep persistent huge pages more or less
1184  * balanced over allowed nodes.
1185  * Called with hugetlb_lock locked.
1186  */
1187 static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
1188                                                          bool acct_surplus)
1189 {
1190         int nr_nodes, node;
1191         int ret = 0;
1192
1193         for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1194                 /*
1195                  * If we're returning unused surplus pages, only examine
1196                  * nodes with surplus pages.
1197                  */
1198                 if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
1199                     !list_empty(&h->hugepage_freelists[node])) {
1200                         struct page *page =
1201                                 list_entry(h->hugepage_freelists[node].next,
1202                                           struct page, lru);
1203                         list_del(&page->lru);
1204                         h->free_huge_pages--;
1205                         h->free_huge_pages_node[node]--;
1206                         if (acct_surplus) {
1207                                 h->surplus_huge_pages--;
1208                                 h->surplus_huge_pages_node[node]--;
1209                         }
1210                         update_and_free_page(h, page);
1211                         ret = 1;
1212                         break;
1213                 }
1214         }
1215
1216         return ret;
1217 }
1218
1219 /*
1220  * Dissolve a given free hugepage into free buddy pages. This function does
1221  * nothing for in-use (including surplus) hugepages.
1222  */
1223 static void dissolve_free_huge_page(struct page *page)
1224 {
1225         spin_lock(&hugetlb_lock);
1226         if (PageHuge(page) && !page_count(page)) {
1227                 struct hstate *h = page_hstate(page);
1228                 int nid = page_to_nid(page);
1229                 list_del(&page->lru);
1230                 h->free_huge_pages--;
1231                 h->free_huge_pages_node[nid]--;
1232                 update_and_free_page(h, page);
1233         }
1234         spin_unlock(&hugetlb_lock);
1235 }
1236
1237 /*
1238  * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
1239  * make specified memory blocks removable from the system.
1240  * Note that start_pfn should be aligned with the (minimum) hugepage size.
1241  */
1242 void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
1243 {
1244         unsigned long pfn;
1245
1246         if (!hugepages_supported())
1247                 return;
1248
1249         VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << minimum_order));
1250         for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order)
1251                 dissolve_free_huge_page(pfn_to_page(pfn));
1252 }
1253
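/*
 * Allocate a surplus huge page directly from the buddy allocator,
 * subject to the overcommit limit.  nid == NUMA_NO_NODE means any node
 * may be used.  Gigantic pages are never allocated this way.
 */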
1254 static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
1255 {
1256         struct page *page;
1257         unsigned int r_nid;
1258
1259         if (hstate_is_gigantic(h))
1260                 return NULL;
1261
1262         /*
1263          * Assume we will successfully allocate the surplus page to
1264          * prevent racing processes from causing the surplus to exceed
1265          * overcommit
1266          *
1267          * This however introduces a different race, where a process B
1268          * tries to grow the static hugepage pool while alloc_pages() is
1269          * called by process A. B will only examine the per-node
1270          * counters in determining if surplus huge pages can be
1271          * converted to normal huge pages in adjust_pool_surplus(). A
1272          * won't be able to increment the per-node counter, until the
1273          * lock is dropped by B, but B doesn't drop hugetlb_lock until
1274          * no more huge pages can be converted from surplus to normal
1275          * state (and doesn't try to convert again). Thus, we have a
1276          * case where a surplus huge page exists, the pool is grown, and
1277          * the surplus huge page still exists after, even though it
1278          * should just have been converted to a normal huge page. This
1279          * does not leak memory, though, as the hugepage will be freed
1280          * once it is out of use. It also does not allow the counters to
1281          * go out of whack in adjust_pool_surplus() as we don't modify
1282          * the node values until we've gotten the hugepage and only the
1283          * per-node value is checked there.
1284          */
1285         spin_lock(&hugetlb_lock);
1286         if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
1287                 spin_unlock(&hugetlb_lock);
1288                 return NULL;
1289         } else {
1290                 h->nr_huge_pages++;
1291                 h->surplus_huge_pages++;
1292         }
1293         spin_unlock(&hugetlb_lock);
1294
1295         if (nid == NUMA_NO_NODE)
1296                 page = alloc_pages(htlb_alloc_mask(h)|__GFP_COMP|
1297                                    __GFP_REPEAT|__GFP_NOWARN,
1298                                    huge_page_order(h));
1299         else
1300                 page = alloc_pages_exact_node(nid,
1301                         htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
1302                         __GFP_REPEAT|__GFP_NOWARN, huge_page_order(h));
1303
1304         if (page && arch_prepare_hugepage(page)) {
1305                 __free_pages(page, huge_page_order(h));
1306                 page = NULL;
1307         }
1308
1309         spin_lock(&hugetlb_lock);
1310         if (page) {
1311                 INIT_LIST_HEAD(&page->lru);
1312                 r_nid = page_to_nid(page);
1313                 set_compound_page_dtor(page, free_huge_page);
1314                 set_hugetlb_cgroup(page, NULL);
1315                 /*
1316                  * We incremented the global counters already
1317                  */
1318                 h->nr_huge_pages_node[r_nid]++;
1319                 h->surplus_huge_pages_node[r_nid]++;
1320                 __count_vm_event(HTLB_BUDDY_PGALLOC);
1321         } else {
1322                 h->nr_huge_pages--;
1323                 h->surplus_huge_pages--;
1324                 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1325         }
1326         spin_unlock(&hugetlb_lock);
1327
1328         return page;
1329 }
1330
1331 /*
1332  * This allocation function is useful in the context where vma is irrelevant.
1333  * E.g. soft-offlining uses this function because it only cares about the
1334  * physical address of the error page.
1335  */
1336 struct page *alloc_huge_page_node(struct hstate *h, int nid)
1337 {
1338         struct page *page = NULL;
1339
1340         spin_lock(&hugetlb_lock);
1341         if (h->free_huge_pages - h->resv_huge_pages > 0)
1342                 page = dequeue_huge_page_node(h, nid);
1343         spin_unlock(&hugetlb_lock);
1344
1345         if (!page)
1346                 page = alloc_buddy_huge_page(h, nid);
1347
1348         return page;
1349 }
1350
1351 /*
1352  * Increase the hugetlb pool such that it can accommodate a reservation
1353  * of size 'delta'.
1354  */
1355 static int gather_surplus_pages(struct hstate *h, int delta)
1356 {
1357         struct list_head surplus_list;
1358         struct page *page, *tmp;
1359         int ret, i;
1360         int needed, allocated;
1361         bool alloc_ok = true;
1362
1363         needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
1364         if (needed <= 0) {
1365                 h->resv_huge_pages += delta;
1366                 return 0;
1367         }
1368
1369         allocated = 0;
1370         INIT_LIST_HEAD(&surplus_list);
1371
1372         ret = -ENOMEM;
1373 retry:
1374         spin_unlock(&hugetlb_lock);
1375         for (i = 0; i < needed; i++) {
1376                 page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
1377                 if (!page) {
1378                         alloc_ok = false;
1379                         break;
1380                 }
1381                 list_add(&page->lru, &surplus_list);
1382         }
1383         allocated += i;
1384
1385         /*
1386          * After retaking hugetlb_lock, we need to recalculate 'needed'
1387          * because either resv_huge_pages or free_huge_pages may have changed.
1388          */
1389         spin_lock(&hugetlb_lock);
1390         needed = (h->resv_huge_pages + delta) -
1391                         (h->free_huge_pages + allocated);
1392         if (needed > 0) {
1393                 if (alloc_ok)
1394                         goto retry;
1395                 /*
1396                  * We were not able to allocate enough pages to
1397                  * satisfy the entire reservation so we free what
1398                  * we've allocated so far.
1399                  */
1400                 goto free;
1401         }
1402         /*
1403          * The surplus_list now contains _at_least_ the number of extra pages
1404          * needed to accommodate the reservation.  Add the appropriate number
1405          * of pages to the hugetlb pool and free the extras back to the buddy
1406          * allocator.  Commit the entire reservation here to prevent another
1407          * process from stealing the pages as they are added to the pool but
1408          * before they are reserved.
1409          */
1410         needed += allocated;
1411         h->resv_huge_pages += delta;
1412         ret = 0;
1413
1414         /* Free the needed pages to the hugetlb pool */
1415         list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
1416                 if ((--needed) < 0)
1417                         break;
1418                 /*
1419                  * This page is now managed by the hugetlb allocator and has
1420                  * no users -- drop the buddy allocator's reference.
1421                  */
1422                 put_page_testzero(page);
1423                 VM_BUG_ON_PAGE(page_count(page), page);
1424                 enqueue_huge_page(h, page);
1425         }
1426 free:
1427         spin_unlock(&hugetlb_lock);
1428
1429         /* Free unnecessary surplus pages to the buddy allocator */
1430         list_for_each_entry_safe(page, tmp, &surplus_list, lru)
1431                 put_page(page);
1432         spin_lock(&hugetlb_lock);
1433
1434         return ret;
1435 }
1436
1437 /*
1438  * When releasing a hugetlb pool reservation, any surplus pages that were
1439  * allocated to satisfy the reservation must be explicitly freed if they were
1440  * never used.
1441  * Called with hugetlb_lock held.
1442  */
1443 static void return_unused_surplus_pages(struct hstate *h,
1444                                         unsigned long unused_resv_pages)
1445 {
1446         unsigned long nr_pages;
1447
1448         /* Uncommit the reservation */
1449         h->resv_huge_pages -= unused_resv_pages;
1450
1451         /* Cannot return gigantic pages currently */
1452         if (hstate_is_gigantic(h))
1453                 return;
1454
1455         nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
1456
1457         /*
1458          * We want to release as many surplus pages as possible, spread
1459          * evenly across all nodes with memory. Iterate across these nodes
1460          * until we can no longer free unreserved surplus pages. This occurs
1461          * when the nodes with surplus pages have no free pages.
1462          * free_pool_huge_page() will balance the freed pages across the
1463          * on-line nodes with memory and will handle the hstate accounting.
1464          */
1465         while (nr_pages--) {
1466                 if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
1467                         break;
1468                 cond_resched_lock(&hugetlb_lock);
1469         }
1470 }
1471
1472 /*
1473  * Determine if the huge page at addr within the vma has an associated
1474  * reservation.  Where it does not we will need to logically increase
1475  * reservation and actually increase subpool usage before an allocation
1476  * can occur.  Where any new reservation would be required the
1477  * reservation change is prepared, but not committed.  Once the page
1478  * has been allocated from the subpool and instantiated the change should
1479  * be committed via vma_commit_reservation.  No action is required on
1480  * failure.
1481  */
1482 static long vma_needs_reservation(struct hstate *h,
1483                         struct vm_area_struct *vma, unsigned long addr)
1484 {
1485         struct resv_map *resv;
1486         pgoff_t idx;
1487         long chg;
1488
1489         resv = vma_resv_map(vma);
1490         if (!resv)
1491                 return 1;
1492
1493         idx = vma_hugecache_offset(h, vma, addr);
1494         chg = region_chg(resv, idx, idx + 1);
1495
1496         if (vma->vm_flags & VM_MAYSHARE)
1497                 return chg;
1498         else
1499                 return chg < 0 ? chg : 0;
1500 }
1501 static void vma_commit_reservation(struct hstate *h,
1502                         struct vm_area_struct *vma, unsigned long addr)
1503 {
1504         struct resv_map *resv;
1505         pgoff_t idx;
1506
1507         resv = vma_resv_map(vma);
1508         if (!resv)
1509                 return;
1510
1511         idx = vma_hugecache_offset(h, vma, addr);
1512         region_add(resv, idx, idx + 1);
1513 }
1514
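/*
 * Allocate a huge page for the given VMA/address.  A minimal sketch of the
 * reservation handshake implemented below, using the two helpers above:
 *
 *	chg = vma_needs_reservation(h, vma, addr);	/- prepare only
 *	if (chg < 0)
 *		return ERR_PTR(-ENOMEM);
 *	... dequeue or allocate the huge page ...
 *	vma_commit_reservation(h, vma, addr);		/- commit on success
 *
 * Returns the page on success, ERR_PTR(-ENOMEM) if the reservation check
 * fails, or ERR_PTR(-ENOSPC) if the subpool, pool or cgroup limits are hit.
 */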
1515 static struct page *alloc_huge_page(struct vm_area_struct *vma,
1516                                     unsigned long addr, int avoid_reserve)
1517 {
1518         struct hugepage_subpool *spool = subpool_vma(vma);
1519         struct hstate *h = hstate_vma(vma);
1520         struct page *page;
1521         long chg;
1522         int ret, idx;
1523         struct hugetlb_cgroup *h_cg;
1524
1525         idx = hstate_index(h);
1526         /*
1527          * Processes that did not create the mapping will have no
1528          * reserves and will not have accounted against the subpool
1529          * limit. Check that the subpool limit can be met before
1530          * satisfying the allocation. MAP_NORESERVE mappings may also
1531          * need pages and subpool limit charged if no reserve
1532          * mapping overlaps.
1533          */
1534         chg = vma_needs_reservation(h, vma, addr);
1535         if (chg < 0)
1536                 return ERR_PTR(-ENOMEM);
1537         if (chg || avoid_reserve)
1538                 if (hugepage_subpool_get_pages(spool, 1) < 0)
1539                         return ERR_PTR(-ENOSPC);
1540
1541         ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
1542         if (ret)
1543                 goto out_subpool_put;
1544
1545         spin_lock(&hugetlb_lock);
1546         page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, chg);
1547         if (!page) {
1548                 spin_unlock(&hugetlb_lock);
1549                 page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
1550                 if (!page)
1551                         goto out_uncharge_cgroup;
1552
1553                 spin_lock(&hugetlb_lock);
1554                 list_move(&page->lru, &h->hugepage_activelist);
1555                 /* Fall through */
1556         }
1557         hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
1558         spin_unlock(&hugetlb_lock);
1559
1560         set_page_private(page, (unsigned long)spool);
1561
1562         vma_commit_reservation(h, vma, addr);
1563         return page;
1564
1565 out_uncharge_cgroup:
1566         hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
1567 out_subpool_put:
1568         if (chg || avoid_reserve)
1569                 hugepage_subpool_put_pages(spool, 1);
1570         return ERR_PTR(-ENOSPC);
1571 }
1572
1573 /*
1574  * A wrapper around alloc_huge_page() which simply returns the page if
1575  * allocation succeeds, otherwise NULL. This function is called from
1576  * new_vma_page(), where no ERR_PTR value is expected to be returned.
1577  */
1578 struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
1579                                 unsigned long addr, int avoid_reserve)
1580 {
1581         struct page *page = alloc_huge_page(vma, addr, avoid_reserve);
1582         if (IS_ERR(page))
1583                 page = NULL;
1584         return page;
1585 }
1586
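/*
 * Allocate a gigantic huge page from memblock at boot time.  Pages of
 * gigantic order cannot come from the buddy allocator, so they are carved
 * out here and parked on huge_boot_pages until gather_bootmem_prealloc()
 * hands them to the hstate once mem_map is up.  Returns 1 on success,
 * 0 if no node could satisfy the allocation.
 */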
1587 int __weak alloc_bootmem_huge_page(struct hstate *h)
1588 {
1589         struct huge_bootmem_page *m;
1590         int nr_nodes, node;
1591
1592         for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
1593                 void *addr;
1594
1595                 addr = memblock_virt_alloc_try_nid_nopanic(
1596                                 huge_page_size(h), huge_page_size(h),
1597                                 0, BOOTMEM_ALLOC_ACCESSIBLE, node);
1598                 if (addr) {
1599                         /*
1600                          * Use the beginning of the huge page to store the
1601                          * huge_bootmem_page struct (until gather_bootmem
1602                          * puts them into the mem_map).
1603                          */
1604                         m = addr;
1605                         goto found;
1606                 }
1607         }
1608         return 0;
1609
1610 found:
1611         BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
1612         /* Put them into a private list first because mem_map is not up yet */
1613         list_add(&m->list, &huge_boot_pages);
1614         m->hstate = h;
1615         return 1;
1616 }
1617
1618 static void __init prep_compound_huge_page(struct page *page, int order)
1619 {
1620         if (unlikely(order > (MAX_ORDER - 1)))
1621                 prep_compound_gigantic_page(page, order);
1622         else
1623                 prep_compound_page(page, order);
1624 }
1625
1626 /* Put bootmem huge pages into the standard lists after mem_map is up */
1627 static void __init gather_bootmem_prealloc(void)
1628 {
1629         struct huge_bootmem_page *m;
1630
1631         list_for_each_entry(m, &huge_boot_pages, list) {
1632                 struct hstate *h = m->hstate;
1633                 struct page *page;
1634
1635 #ifdef CONFIG_HIGHMEM
1636                 page = pfn_to_page(m->phys >> PAGE_SHIFT);
1637                 memblock_free_late(__pa(m),
1638                                    sizeof(struct huge_bootmem_page));
1639 #else
1640                 page = virt_to_page(m);
1641 #endif
1642                 WARN_ON(page_count(page) != 1);
1643                 prep_compound_huge_page(page, h->order);
1644                 WARN_ON(PageReserved(page));
1645                 prep_new_huge_page(h, page, page_to_nid(page));
1646                 /*
1647                  * If we had gigantic hugepages allocated at boot time, we need
1648                  * to restore the 'stolen' pages to totalram_pages in order to
1649                  * fix confusing memory reports from free(1) and other
1650                  * side effects, like CommitLimit going negative.
1651                  */
1652                 if (hstate_is_gigantic(h))
1653                         adjust_managed_page_count(page, 1 << h->order);
1654         }
1655 }
1656
1657 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
1658 {
1659         unsigned long i;
1660
1661         for (i = 0; i < h->max_huge_pages; ++i) {
1662                 if (hstate_is_gigantic(h)) {
1663                         if (!alloc_bootmem_huge_page(h))
1664                                 break;
1665                 } else if (!alloc_fresh_huge_page(h,
1666                                          &node_states[N_MEMORY]))
1667                         break;
1668         }
1669         h->max_huge_pages = i;
1670 }
1671
1672 static void __init hugetlb_init_hstates(void)
1673 {
1674         struct hstate *h;
1675
1676         for_each_hstate(h) {
1677                 if (minimum_order > huge_page_order(h))
1678                         minimum_order = huge_page_order(h);
1679
1680                 /* oversize hugepages were init'ed in early boot */
1681                 if (!hstate_is_gigantic(h))
1682                         hugetlb_hstate_alloc_pages(h);
1683         }
1684         VM_BUG_ON(minimum_order == UINT_MAX);
1685 }
1686
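/* Format a byte count for boot-time reporting, e.g. 2097152 -> "2 MB". */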
1687 static char * __init memfmt(char *buf, unsigned long n)
1688 {
1689         if (n >= (1UL << 30))
1690                 sprintf(buf, "%lu GB", n >> 30);
1691         else if (n >= (1UL << 20))
1692                 sprintf(buf, "%lu MB", n >> 20);
1693         else
1694                 sprintf(buf, "%lu KB", n >> 10);
1695         return buf;
1696 }
1697
1698 static void __init report_hugepages(void)
1699 {
1700         struct hstate *h;
1701
1702         for_each_hstate(h) {
1703                 char buf[32];
1704                 pr_info("HugeTLB registered %s page size, pre-allocated %lu pages\n",
1705                         memfmt(buf, huge_page_size(h)),
1706                         h->free_huge_pages);
1707         }
1708 }
1709
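/*
 * On CONFIG_HIGHMEM configurations, prefer to shrink the pool by freeing
 * lowmem huge pages first: walk the per-node free lists and release
 * non-highmem pages back to the buddy allocator until the pool is down to
 * @count pages.  Without HIGHMEM this is a no-op.
 */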
1710 #ifdef CONFIG_HIGHMEM
1711 static void try_to_free_low(struct hstate *h, unsigned long count,
1712                                                 nodemask_t *nodes_allowed)
1713 {
1714         int i;
1715
1716         if (hstate_is_gigantic(h))
1717                 return;
1718
1719         for_each_node_mask(i, *nodes_allowed) {
1720                 struct page *page, *next;
1721                 struct list_head *freel = &h->hugepage_freelists[i];
1722                 list_for_each_entry_safe(page, next, freel, lru) {
1723                         if (count >= h->nr_huge_pages)
1724                                 return;
1725                         if (PageHighMem(page))
1726                                 continue;
1727                         list_del(&page->lru);
1728                         update_and_free_page(h, page);
1729                         h->free_huge_pages--;
1730                         h->free_huge_pages_node[page_to_nid(page)]--;
1731                 }
1732         }
1733 }
1734 #else
1735 static inline void try_to_free_low(struct hstate *h, unsigned long count,
1736                                                 nodemask_t *nodes_allowed)
1737 {
1738 }
1739 #endif
1740
1741 /*
1742  * Increment or decrement surplus_huge_pages.  Keep node-specific counters
1743  * balanced by operating on them in a round-robin fashion.
1744  * Returns 1 if an adjustment was made.
1745  */
1746 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
1747                                 int delta)
1748 {
1749         int nr_nodes, node;
1750
1751         VM_BUG_ON(delta != -1 && delta != 1);
1752
1753         if (delta < 0) {
1754                 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1755                         if (h->surplus_huge_pages_node[node])
1756                                 goto found;
1757                 }
1758         } else {
1759                 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1760                         if (h->surplus_huge_pages_node[node] <
1761                                         h->nr_huge_pages_node[node])
1762                                 goto found;
1763                 }
1764         }
1765         return 0;
1766
1767 found:
1768         h->surplus_huge_pages += delta;
1769         h->surplus_huge_pages_node[node] += delta;
1770         return 1;
1771 }
1772
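/*
 * "Persistent" huge pages are those under the administrator-set pool size,
 * i.e. all huge pages minus the surplus ones.  set_max_huge_pages() grows
 * or shrinks that pool towards @count within @nodes_allowed and returns the
 * resulting persistent page count.
 */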
1773 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
1774 static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
1775                                                 nodemask_t *nodes_allowed)
1776 {
1777         unsigned long min_count, ret;
1778
1779         if (hstate_is_gigantic(h) && !gigantic_page_supported())
1780                 return h->max_huge_pages;
1781
1782         /*
1783          * Increase the pool size
1784          * First take pages out of surplus state.  Then make up the
1785          * remaining difference by allocating fresh huge pages.
1786          *
1787          * We might race with alloc_buddy_huge_page() here and be unable
1788          * to convert a surplus huge page to a normal huge page. That is
1789          * not critical, though, it just means the overall size of the
1790          * pool might be one hugepage larger than it needs to be, but
1791          * within all the constraints specified by the sysctls.
1792          */
1793         spin_lock(&hugetlb_lock);
1794         while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
1795                 if (!adjust_pool_surplus(h, nodes_allowed, -1))
1796                         break;
1797         }
1798
1799         while (count > persistent_huge_pages(h)) {
1800                 /*
1801                  * If this allocation races such that we no longer need the
1802                  * page, free_huge_page will handle it by freeing the page
1803                  * and reducing the surplus.
1804                  */
1805                 spin_unlock(&hugetlb_lock);
1806                 if (hstate_is_gigantic(h))
1807                         ret = alloc_fresh_gigantic_page(h, nodes_allowed);
1808                 else
1809                         ret = alloc_fresh_huge_page(h, nodes_allowed);
1810                 spin_lock(&hugetlb_lock);
1811                 if (!ret)
1812                         goto out;
1813
1814                 /* Bail for signals. Probably ctrl-c from user */
1815                 if (signal_pending(current))
1816                         goto out;
1817         }
1818
1819         /*
1820          * Decrease the pool size
1821          * First return free pages to the buddy allocator (being careful
1822          * to keep enough around to satisfy reservations).  Then place
1823          * pages into surplus state as needed so the pool will shrink
1824          * to the desired size as pages become free.
1825          *
1826          * By placing pages into the surplus state independent of the
1827          * overcommit value, we are allowing the surplus pool size to
1828          * exceed overcommit. There are few sane options here. Since
1829          * alloc_buddy_huge_page() is checking the global counter,
1830          * though, we'll note that we're not allowed to exceed surplus
1831          * and won't grow the pool anywhere else. Not until one of the
1832          * sysctls are changed, or the surplus pages go out of use.
1833          */
1834         min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
1835         min_count = max(count, min_count);
1836         try_to_free_low(h, min_count, nodes_allowed);
1837         while (min_count < persistent_huge_pages(h)) {
1838                 if (!free_pool_huge_page(h, nodes_allowed, 0))
1839                         break;
1840                 cond_resched_lock(&hugetlb_lock);
1841         }
1842         while (count < persistent_huge_pages(h)) {
1843                 if (!adjust_pool_surplus(h, nodes_allowed, 1))
1844                         break;
1845         }
1846 out:
1847         ret = persistent_huge_pages(h);
1848         spin_unlock(&hugetlb_lock);
1849         return ret;
1850 }
1851
1852 #define HSTATE_ATTR_RO(_name) \
1853         static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
1854
1855 #define HSTATE_ATTR(_name) \
1856         static struct kobj_attribute _name##_attr = \
1857                 __ATTR(_name, 0644, _name##_show, _name##_store)
1858
1859 static struct kobject *hugepages_kobj;
1860 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
1861
1862 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
1863
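/*
 * Map an hstate sysfs kobject back to its hstate.  For the global
 * /sys/kernel/mm/hugepages/ attributes *nidp is set to NUMA_NO_NODE;
 * per-node kobjects are resolved via kobj_to_node_hstate() instead.
 */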
1864 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
1865 {
1866         int i;
1867
1868         for (i = 0; i < HUGE_MAX_HSTATE; i++)
1869                 if (hstate_kobjs[i] == kobj) {
1870                         if (nidp)
1871                                 *nidp = NUMA_NO_NODE;
1872                         return &hstates[i];
1873                 }
1874
1875         return kobj_to_node_hstate(kobj, nidp);
1876 }
1877
1878 static ssize_t nr_hugepages_show_common(struct kobject *kobj,
1879                                         struct kobj_attribute *attr, char *buf)
1880 {
1881         struct hstate *h;
1882         unsigned long nr_huge_pages;
1883         int nid;
1884
1885         h = kobj_to_hstate(kobj, &nid);
1886         if (nid == NUMA_NO_NODE)
1887                 nr_huge_pages = h->nr_huge_pages;
1888         else
1889                 nr_huge_pages = h->nr_huge_pages_node[nid];
1890
1891         return sprintf(buf, "%lu\n", nr_huge_pages);
1892 }
1893
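/*
 * Common backend for resizing the persistent pool: called both from the
 * sysfs nr_hugepages{,_mempolicy} store methods and from the sysctl
 * handlers further down (hugetlb_sysctl_handler_common()).  @nid selects
 * a single node, or the whole system with NUMA_NO_NODE.
 */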
1894 static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
1895                                            struct hstate *h, int nid,
1896                                            unsigned long count, size_t len)
1897 {
1898         int err;
1899         NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
1900
1901         if (hstate_is_gigantic(h) && !gigantic_page_supported()) {
1902                 err = -EINVAL;
1903                 goto out;
1904         }
1905
1906         if (nid == NUMA_NO_NODE) {
1907                 /*
1908                  * global hstate attribute
1909                  */
1910                 if (!(obey_mempolicy &&
1911                                 init_nodemask_of_mempolicy(nodes_allowed))) {
1912                         NODEMASK_FREE(nodes_allowed);
1913                         nodes_allowed = &node_states[N_MEMORY];
1914                 }
1915         } else if (nodes_allowed) {
1916                 /*
1917                  * per node hstate attribute: adjust count to global,
1918                  * but restrict alloc/free to the specified node.
1919                  */
1920                 count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
1921                 init_nodemask_of_node(nodes_allowed, nid);
1922         } else
1923                 nodes_allowed = &node_states[N_MEMORY];
1924
1925         h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
1926
1927         if (nodes_allowed != &node_states[N_MEMORY])
1928                 NODEMASK_FREE(nodes_allowed);
1929
1930         return len;
1931 out:
1932         NODEMASK_FREE(nodes_allowed);
1933         return err;
1934 }
1935
1936 static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
1937                                          struct kobject *kobj, const char *buf,
1938                                          size_t len)
1939 {
1940         struct hstate *h;
1941         unsigned long count;
1942         int nid;
1943         int err;
1944
1945         err = kstrtoul(buf, 10, &count);
1946         if (err)
1947                 return err;
1948
1949         h = kobj_to_hstate(kobj, &nid);
1950         return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
1951 }
1952
1953 static ssize_t nr_hugepages_show(struct kobject *kobj,
1954                                        struct kobj_attribute *attr, char *buf)
1955 {
1956         return nr_hugepages_show_common(kobj, attr, buf);
1957 }
1958
1959 static ssize_t nr_hugepages_store(struct kobject *kobj,
1960                struct kobj_attribute *attr, const char *buf, size_t len)
1961 {
1962         return nr_hugepages_store_common(false, kobj, buf, len);
1963 }
1964 HSTATE_ATTR(nr_hugepages);
1965
1966 #ifdef CONFIG_NUMA
1967
1968 /*
1969  * hstate attribute for optionally mempolicy-based constraint on persistent
1970  * huge page alloc/free.
1971  */
1972 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
1973                                        struct kobj_attribute *attr, char *buf)
1974 {
1975         return nr_hugepages_show_common(kobj, attr, buf);
1976 }
1977
1978 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
1979                struct kobj_attribute *attr, const char *buf, size_t len)
1980 {
1981         return nr_hugepages_store_common(true, kobj, buf, len);
1982 }
1983 HSTATE_ATTR(nr_hugepages_mempolicy);
1984 #endif
1985
1986
1987 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
1988                                         struct kobj_attribute *attr, char *buf)
1989 {
1990         struct hstate *h = kobj_to_hstate(kobj, NULL);
1991         return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
1992 }
1993
1994 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
1995                 struct kobj_attribute *attr, const char *buf, size_t count)
1996 {
1997         int err;
1998         unsigned long input;
1999         struct hstate *h = kobj_to_hstate(kobj, NULL);
2000
2001         if (hstate_is_gigantic(h))
2002                 return -EINVAL;
2003
2004         err = kstrtoul(buf, 10, &input);
2005         if (err)
2006                 return err;
2007
2008         spin_lock(&hugetlb_lock);
2009         h->nr_overcommit_huge_pages = input;
2010         spin_unlock(&hugetlb_lock);
2011
2012         return count;
2013 }
2014 HSTATE_ATTR(nr_overcommit_hugepages);
2015
2016 static ssize_t free_hugepages_show(struct kobject *kobj,
2017                                         struct kobj_attribute *attr, char *buf)
2018 {
2019         struct hstate *h;
2020         unsigned long free_huge_pages;
2021         int nid;
2022
2023         h = kobj_to_hstate(kobj, &nid);
2024         if (nid == NUMA_NO_NODE)
2025                 free_huge_pages = h->free_huge_pages;
2026         else
2027                 free_huge_pages = h->free_huge_pages_node[nid];
2028
2029         return sprintf(buf, "%lu\n", free_huge_pages);
2030 }
2031 HSTATE_ATTR_RO(free_hugepages);
2032
2033 static ssize_t resv_hugepages_show(struct kobject *kobj,
2034                                         struct kobj_attribute *attr, char *buf)
2035 {
2036         struct hstate *h = kobj_to_hstate(kobj, NULL);
2037         return sprintf(buf, "%lu\n", h->resv_huge_pages);
2038 }
2039 HSTATE_ATTR_RO(resv_hugepages);
2040
2041 static ssize_t surplus_hugepages_show(struct kobject *kobj,
2042                                         struct kobj_attribute *attr, char *buf)
2043 {
2044         struct hstate *h;
2045         unsigned long surplus_huge_pages;
2046         int nid;
2047
2048         h = kobj_to_hstate(kobj, &nid);
2049         if (nid == NUMA_NO_NODE)
2050                 surplus_huge_pages = h->surplus_huge_pages;
2051         else
2052                 surplus_huge_pages = h->surplus_huge_pages_node[nid];
2053
2054         return sprintf(buf, "%lu\n", surplus_huge_pages);
2055 }
2056 HSTATE_ATTR_RO(surplus_hugepages);
2057
2058 static struct attribute *hstate_attrs[] = {
2059         &nr_hugepages_attr.attr,
2060         &nr_overcommit_hugepages_attr.attr,
2061         &free_hugepages_attr.attr,
2062         &resv_hugepages_attr.attr,
2063         &surplus_hugepages_attr.attr,
2064 #ifdef CONFIG_NUMA
2065         &nr_hugepages_mempolicy_attr.attr,
2066 #endif
2067         NULL,
2068 };
2069
2070 static struct attribute_group hstate_attr_group = {
2071         .attrs = hstate_attrs,
2072 };
2073
2074 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
2075                                     struct kobject **hstate_kobjs,
2076                                     struct attribute_group *hstate_attr_group)
2077 {
2078         int retval;
2079         int hi = hstate_index(h);
2080
2081         hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
2082         if (!hstate_kobjs[hi])
2083                 return -ENOMEM;
2084
2085         retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
2086         if (retval)
2087                 kobject_put(hstate_kobjs[hi]);
2088
2089         return retval;
2090 }
2091
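/*
 * Register the global hstate attributes.  As an illustration, for a 2 MB
 * hstate this creates:
 *
 *   /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 *   /sys/kernel/mm/hugepages/hugepages-2048kB/nr_overcommit_hugepages
 *   /sys/kernel/mm/hugepages/hugepages-2048kB/free_hugepages
 *   /sys/kernel/mm/hugepages/hugepages-2048kB/resv_hugepages
 *   /sys/kernel/mm/hugepages/hugepages-2048kB/surplus_hugepages
 *
 * (plus nr_hugepages_mempolicy on NUMA builds).  e.g. writing 64 to
 * nr_hugepages resizes the persistent pool through nr_hugepages_store()
 * and set_max_huge_pages().
 */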
2092 static void __init hugetlb_sysfs_init(void)
2093 {
2094         struct hstate *h;
2095         int err;
2096
2097         hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
2098         if (!hugepages_kobj)
2099                 return;
2100
2101         for_each_hstate(h) {
2102                 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
2103                                          hstate_kobjs, &hstate_attr_group);
2104                 if (err)
2105                         pr_err("Hugetlb: Unable to add hstate %s\n", h->name);
2106         }
2107 }
2108
2109 #ifdef CONFIG_NUMA
2110
2111 /*
2112  * node_hstate/s - associate per node hstate attributes, via their kobjects,
2113  * with node devices in node_devices[] using a parallel array.  The array
2114  * index of a node device or node_hstate equals the node id.
2115  * This is here to avoid any static dependency of the node device driver, in
2116  * the base kernel, on the hugetlb module.
2117  */
2118 struct node_hstate {
2119         struct kobject          *hugepages_kobj;
2120         struct kobject          *hstate_kobjs[HUGE_MAX_HSTATE];
2121 };
2122 struct node_hstate node_hstates[MAX_NUMNODES];
2123
2124 /*
2125  * A subset of global hstate attributes for node devices
2126  */
2127 static struct attribute *per_node_hstate_attrs[] = {
2128         &nr_hugepages_attr.attr,
2129         &free_hugepages_attr.attr,
2130         &surplus_hugepages_attr.attr,
2131         NULL,
2132 };
2133
2134 static struct attribute_group per_node_hstate_attr_group = {
2135         .attrs = per_node_hstate_attrs,
2136 };
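
/*
 * These surface under each node device, e.g. (illustrative path)
 * /sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages,
 * allowing the pool on a single node to be inspected and resized.
 */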
2137
2138 /*
2139  * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
2140  * Returns node id via non-NULL nidp.
2141  */
2142 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2143 {
2144         int nid;
2145
2146         for (nid = 0; nid < nr_node_ids; nid++) {
2147                 struct node_hstate *nhs = &node_hstates[nid];
2148                 int i;
2149                 for (i = 0; i < HUGE_MAX_HSTATE; i++)
2150                         if (nhs->hstate_kobjs[i] == kobj) {
2151                                 if (nidp)
2152                                         *nidp = nid;
2153                                 return &hstates[i];
2154                         }
2155         }
2156
2157         BUG();
2158         return NULL;
2159 }
2160
2161 /*
2162  * Unregister hstate attributes from a single node device.
2163  * No-op if no hstate attributes attached.
2164  */
2165 static void hugetlb_unregister_node(struct node *node)
2166 {
2167         struct hstate *h;
2168         struct node_hstate *nhs = &node_hstates[node->dev.id];
2169
2170         if (!nhs->hugepages_kobj)
2171                 return;         /* no hstate attributes */
2172
2173         for_each_hstate(h) {
2174                 int idx = hstate_index(h);
2175                 if (nhs->hstate_kobjs[idx]) {
2176                         kobject_put(nhs->hstate_kobjs[idx]);
2177                         nhs->hstate_kobjs[idx] = NULL;
2178                 }
2179         }
2180
2181         kobject_put(nhs->hugepages_kobj);
2182         nhs->hugepages_kobj = NULL;
2183 }
2184
2185 /*
2186  * hugetlb module exit:  unregister hstate attributes from node devices
2187  * that have them.
2188  */
2189 static void hugetlb_unregister_all_nodes(void)
2190 {
2191         int nid;
2192
2193         /*
2194          * disable node device registrations.
2195          */
2196         register_hugetlbfs_with_node(NULL, NULL);
2197
2198         /*
2199          * remove hstate attributes from any nodes that have them.
2200          */
2201         for (nid = 0; nid < nr_node_ids; nid++)
2202                 hugetlb_unregister_node(node_devices[nid]);
2203 }
2204
2205 /*
2206  * Register hstate attributes for a single node device.
2207  * No-op if attributes already registered.
2208  */
2209 static void hugetlb_register_node(struct node *node)
2210 {
2211         struct hstate *h;
2212         struct node_hstate *nhs = &node_hstates[node->dev.id];
2213         int err;
2214
2215         if (nhs->hugepages_kobj)
2216                 return;         /* already allocated */
2217
2218         nhs->hugepages_kobj = kobject_create_and_add("hugepages",
2219                                                         &node->dev.kobj);
2220         if (!nhs->hugepages_kobj)
2221                 return;
2222
2223         for_each_hstate(h) {
2224                 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
2225                                                 nhs->hstate_kobjs,
2226                                                 &per_node_hstate_attr_group);
2227                 if (err) {
2228                         pr_err("Hugetlb: Unable to add hstate %s for node %d\n",
2229                                 h->name, node->dev.id);
2230                         hugetlb_unregister_node(node);
2231                         break;
2232                 }
2233         }
2234 }
2235
2236 /*
2237  * hugetlb init time:  register hstate attributes for all registered node
2238  * devices of nodes that have memory.  All on-line nodes should have
2239  * registered their associated device by this time.
2240  */
2241 static void __init hugetlb_register_all_nodes(void)
2242 {
2243         int nid;
2244
2245         for_each_node_state(nid, N_MEMORY) {
2246                 struct node *node = node_devices[nid];
2247                 if (node->dev.id == nid)
2248                         hugetlb_register_node(node);
2249         }
2250
2251         /*
2252          * Let the node device driver know we're here so it can
2253          * [un]register hstate attributes on node hotplug.
2254          */
2255         register_hugetlbfs_with_node(hugetlb_register_node,
2256                                      hugetlb_unregister_node);
2257 }
2258 #else   /* !CONFIG_NUMA */
2259
2260 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2261 {
2262         BUG();
2263         if (nidp)
2264                 *nidp = -1;
2265         return NULL;
2266 }
2267
2268 static void hugetlb_unregister_all_nodes(void) { }
2269
2270 static void hugetlb_register_all_nodes(void) { }
2271
2272 #endif
2273
2274 static void __exit hugetlb_exit(void)
2275 {
2276         struct hstate *h;
2277
2278         hugetlb_unregister_all_nodes();
2279
2280         for_each_hstate(h) {
2281                 kobject_put(hstate_kobjs[hstate_index(h)]);
2282         }
2283
2284         kobject_put(hugepages_kobj);
2285         kfree(htlb_fault_mutex_table);
2286 }
2287 module_exit(hugetlb_exit);
2288
2289 static int __init hugetlb_init(void)
2290 {
2291         int i;
2292
2293         if (!hugepages_supported())
2294                 return 0;
2295
2296         if (!size_to_hstate(default_hstate_size)) {
2297                 default_hstate_size = HPAGE_SIZE;
2298                 if (!size_to_hstate(default_hstate_size))
2299                         hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
2300         }
2301         default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
2302         if (default_hstate_max_huge_pages)
2303                 default_hstate.max_huge_pages = default_hstate_max_huge_pages;
2304
2305         hugetlb_init_hstates();
2306         gather_bootmem_prealloc();
2307         report_hugepages();
2308
2309         hugetlb_sysfs_init();
2310         hugetlb_register_all_nodes();
2311         hugetlb_cgroup_file_init();
2312
2313 #ifdef CONFIG_SMP
2314         num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
2315 #else
2316         num_fault_mutexes = 1;
2317 #endif
2318         htlb_fault_mutex_table =
2319                 kmalloc(sizeof(struct mutex) * num_fault_mutexes, GFP_KERNEL);
2320         BUG_ON(!htlb_fault_mutex_table);
2321
2322         for (i = 0; i < num_fault_mutexes; i++)
2323                 mutex_init(&htlb_fault_mutex_table[i]);
2324         return 0;
2325 }
2326 module_init(hugetlb_init);
2327
2328 /* Should be called on processing a hugepagesz=... option */
2329 void __init hugetlb_add_hstate(unsigned order)
2330 {
2331         struct hstate *h;
2332         unsigned long i;
2333
2334         if (size_to_hstate(PAGE_SIZE << order)) {
2335                 pr_warning("hugepagesz= specified twice, ignoring\n");
2336                 return;
2337         }
2338         BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
2339         BUG_ON(order == 0);
2340         h = &hstates[hugetlb_max_hstate++];
2341         h->order = order;
2342         h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
2343         h->nr_huge_pages = 0;
2344         h->free_huge_pages = 0;
2345         for (i = 0; i < MAX_NUMNODES; ++i)
2346                 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
2347         INIT_LIST_HEAD(&h->hugepage_activelist);
2348         h->next_nid_to_alloc = first_node(node_states[N_MEMORY]);
2349         h->next_nid_to_free = first_node(node_states[N_MEMORY]);
2350         snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
2351                                         huge_page_size(h)/1024);
2352
2353         parsed_hstate = h;
2354 }
2355
2356 static int __init hugetlb_nrpages_setup(char *s)
2357 {
2358         unsigned long *mhp;
2359         static unsigned long *last_mhp;
2360
2361         /*
2362          * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet,
2363          * so this hugepages= parameter goes to the "default hstate".
2364          */
2365         if (!hugetlb_max_hstate)
2366                 mhp = &default_hstate_max_huge_pages;
2367         else
2368                 mhp = &parsed_hstate->max_huge_pages;
2369
2370         if (mhp == last_mhp) {
2371                 pr_warning("hugepages= specified twice without "
2372                            "interleaving hugepagesz=, ignoring\n");
2373                 return 1;
2374         }
2375
2376         if (sscanf(s, "%lu", mhp) <= 0)
2377                 *mhp = 0;
2378
2379         /*
2380          * Global state is always initialized later in hugetlb_init.
2381          * But we need to allocate >= MAX_ORDER hstates here early to still
2382          * use the bootmem allocator.
2383          */
2384         if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
2385                 hugetlb_hstate_alloc_pages(parsed_hstate);
2386
2387         last_mhp = mhp;
2388
2389         return 1;
2390 }
2391 __setup("hugepages=", hugetlb_nrpages_setup);
2392
2393 static int __init hugetlb_default_setup(char *s)
2394 {
2395         default_hstate_size = memparse(s, &s);
2396         return 1;
2397 }
2398 __setup("default_hugepagesz=", hugetlb_default_setup);
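
/*
 * Illustrative boot command line (sizes are arch-dependent examples):
 *
 *   default_hugepagesz=2M hugepagesz=2M hugepages=512 hugepagesz=1G hugepages=4
 *
 * Each hugepagesz= selects the hstate that a following hugepages= applies
 * to; a hugepages= with no preceding hugepagesz= configures the default
 * hstate, as handled in hugetlb_nrpages_setup() above.
 */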
2399
2400 static unsigned int cpuset_mems_nr(unsigned int *array)
2401 {
2402         int node;
2403         unsigned int nr = 0;
2404
2405         for_each_node_mask(node, cpuset_current_mems_allowed)
2406                 nr += array[node];
2407
2408         return nr;
2409 }
2410
2411 #ifdef CONFIG_SYSCTL
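/*
 * The handlers below back the hugetlb sysctls (vm.nr_hugepages,
 * vm.nr_hugepages_mempolicy and vm.nr_overcommit_hugepages); the ctl_table
 * entries themselves are registered in kernel/sysctl.c.
 */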
2412 static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
2413                          struct ctl_table *table, int write,
2414                          void __user *buffer, size_t *length, loff_t *ppos)
2415 {
2416         struct hstate *h = &default_hstate;
2417         unsigned long tmp = h->max_huge_pages;
2418         int ret;
2419
2420         if (!hugepages_supported())
2421                 return -ENOTSUPP;
2422
2423         table->data = &tmp;
2424         table->maxlen = sizeof(unsigned long);
2425         ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2426         if (ret)
2427                 goto out;
2428
2429         if (write)
2430                 ret = __nr_hugepages_store_common(obey_mempolicy, h,
2431                                                   NUMA_NO_NODE, tmp, *length);
2432 out:
2433         return ret;
2434 }
2435
2436 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
2437                           void __user *buffer, size_t *length, loff_t *ppos)
2438 {
2439
2440         return hugetlb_sysctl_handler_common(false, table, write,
2441                                                         buffer, length, ppos);
2442 }
2443
2444 #ifdef CONFIG_NUMA
2445 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
2446                           void __user *buffer, size_t *length, loff_t *ppos)
2447 {
2448         return hugetlb_sysctl_handler_common(true, table, write,
2449                                                         buffer, length, ppos);
2450 }
2451 #endif /* CONFIG_NUMA */
2452
2453 int hugetlb_overcommit_handler(struct ctl_table *table, int write,
2454                         void __user *buffer,
2455                         size_t *length, loff_t *ppos)
2456 {
2457         struct hstate *h = &default_hstate;
2458         unsigned long tmp;
2459         int ret;
2460
2461         if (!hugepages_supported())
2462                 return -ENOTSUPP;
2463
2464         tmp = h->nr_overcommit_huge_pages;
2465
2466         if (write && hstate_is_gigantic(h))
2467                 return -EINVAL;
2468
2469         table->data = &tmp;
2470         table->maxlen = sizeof(unsigned long);
2471         ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2472         if (ret)
2473                 goto out;
2474
2475         if (write) {
2476                 spin_lock(&hugetlb_lock);
2477                 h->nr_overcommit_huge_pages = tmp;
2478                 spin_unlock(&hugetlb_lock);
2479         }
2480 out:
2481         return ret;
2482 }
2483
2484 #endif /* CONFIG_SYSCTL */
2485
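/*
 * Reporting helpers: hugetlb_report_meminfo() supplies the HugePages_*
 * lines of /proc/meminfo and hugetlb_report_node_meminfo() the per-node
 * meminfo files; both report only the default hstate.
 * hugetlb_show_meminfo() logs a summary of every hstate on every memory
 * node.
 */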
2486 void hugetlb_report_meminfo(struct seq_file *m)
2487 {
2488         struct hstate *h = &default_hstate;
2489         if (!hugepages_supported())
2490                 return;
2491         seq_printf(m,
2492                         "HugePages_Total:   %5lu\n"
2493                         "HugePages_Free:    %5lu\n"
2494                         "HugePages_Rsvd:    %5lu\n"
2495                         "HugePages_Surp:    %5lu\n"
2496                         "Hugepagesize:   %8lu kB\n",
2497                         h->nr_huge_pages,
2498                         h->free_huge_pages,
2499                         h->resv_huge_pages,
2500                         h->surplus_huge_pages,
2501                         1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2502 }
2503
2504 int hugetlb_report_node_meminfo(int nid, char *buf)
2505 {
2506         struct hstate *h = &default_hstate;
2507         if (!hugepages_supported())
2508                 return 0;
2509         return sprintf(buf,
2510                 "Node %d HugePages_Total: %5u\n"
2511                 "Node %d HugePages_Free:  %5u\n"
2512                 "Node %d HugePages_Surp:  %5u\n",
2513                 nid, h->nr_huge_pages_node[nid],
2514                 nid, h->free_huge_pages_node[nid],
2515                 nid, h->surplus_huge_pages_node[nid]);
2516 }
2517
2518 void hugetlb_show_meminfo(void)
2519 {
2520         struct hstate *h;
2521         int nid;
2522
2523         if (!hugepages_supported())
2524                 return;
2525
2526         for_each_node_state(nid, N_MEMORY)
2527                 for_each_hstate(h)
2528                         pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
2529                                 nid,
2530                                 h->nr_huge_pages_node[nid],
2531                                 h->free_huge_pages_node[nid],
2532                                 h->surplus_huge_pages_node[nid],
2533                                 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2534 }
2535
2536 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
2537 unsigned long hugetlb_total_pages(void)
2538 {
2539         struct hstate *h;
2540         unsigned long nr_total_pages = 0;
2541
2542         for_each_hstate(h)
2543                 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
2544         return nr_total_pages;
2545 }
2546
2547 static int hugetlb_acct_memory(struct hstate *h, long delta)
2548 {
2549         int ret = -ENOMEM;
2550
2551         spin_lock(&hugetlb_lock);
2552         /*
2553          * When cpuset is configured, it breaks the strict hugetlb page
2554          * reservation as the accounting is done on a global variable. Such
2555          * reservation is completely rubbish in the presence of cpuset because
2556          * the reservation is not checked against page availability for the
2557          * current cpuset. The application can still potentially be OOM-killed
2558          * by the kernel for lack of free hugetlb pages in the cpuset that the
2559          * task is in. Attempting to enforce strict accounting with cpuset is
2560          * almost impossible (or too ugly) because cpuset is so fluid that
2561          * tasks or memory nodes can be dynamically moved between cpusets.
2562          *
2563          * The change of semantics for shared hugetlb mapping with cpuset is
2564          * undesirable. However, in order to preserve some of the semantics,
2565          * we fall back to check against current free page availability as
2566          * a best attempt and hopefully to minimize the impact of changing
2567          * semantics that cpuset has.
2568          */
2569         if (delta > 0) {
2570                 if (gather_surplus_pages(h, delta) < 0)
2571                         goto out;
2572
2573                 if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
2574                         return_unused_surplus_pages(h, delta);
2575                         goto out;
2576                 }
2577         }
2578
2579         ret = 0;
2580         if (delta < 0)
2581                 return_unused_surplus_pages(h, (unsigned long) -delta);
2582
2583 out:
2584         spin_unlock(&hugetlb_lock);
2585         return ret;
2586 }
2587
2588 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
2589 {
2590         struct resv_map *resv = vma_resv_map(vma);
2591
2592         /*
2593          * This new VMA should share its sibling's reservation map if present.
2594          * The VMA will only ever have a valid reservation map pointer where
2595          * it is being copied for another still existing VMA.  As that VMA
2596          * has a reference to the reservation map it cannot disappear until
2597          * after this open call completes.  It is therefore safe to take a
2598          * new reference here without additional locking.
2599          */
2600         if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
2601                 kref_get(&resv->refs);
2602 }
2603
2604 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
2605 {
2606         struct hstate *h = hstate_vma(vma);
2607         struct resv_map *resv = vma_resv_map(vma);
2608         struct hugepage_subpool *spool = subpool_vma(vma);
2609         unsigned long reserve, start, end;
2610         long gbl_reserve;
2611
2612         if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
2613                 return;
2614
2615         start = vma_hugecache_offset(h, vma, vma->vm_start);
2616         end = vma_hugecache_offset(h, vma, vma->vm_end);
2617
2618         reserve = (end - start) - region_count(resv, start, end);
2619
2620         kref_put(&resv->refs, resv_map_release);
2621
2622         if (reserve) {
2623                 /*
2624                  * Decrement reserve counts.  The global reserve count may be
2625                  * adjusted if the subpool has a minimum size.
2626                  */
2627                 gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
2628                 hugetlb_acct_memory(h, -gbl_reserve);
2629         }
2630 }
2631
2632 /*
2633  * We cannot handle pagefaults against hugetlb pages at all.  They cause
2634  * handle_mm_fault() to try to instantiate regular-sized pages in the
2635  * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
2636  * this far.
2637  */
2638 static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2639 {
2640         BUG();
2641         return 0;
2642 }
2643
2644 const struct vm_operations_struct hugetlb_vm_ops = {
2645         .fault = hugetlb_vm_op_fault,
2646         .open = hugetlb_vm_op_open,
2647         .close = hugetlb_vm_op_close,
2648 };
2649
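/*
 * Build the huge PTE for @page with protections taken from the VMA:
 * writable mappings get a dirty, writable entry, others a write-protected
 * one, before any arch-specific fixups in arch_make_huge_pte().
 */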
2650 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
2651                                 int writable)
2652 {
2653         pte_t entry;
2654
2655         if (writable) {
2656                 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
2657                                          vma->vm_page_prot)));
2658         } else {
2659                 entry = huge_pte_wrprotect(mk_huge_pte(page,
2660                                            vma->vm_page_prot));
2661         }
2662         entry = pte_mkyoung(entry);
2663         entry = pte_mkhuge(entry);
2664         entry = arch_make_huge_pte(entry, vma, page, writable);
2665
2666         return entry;
2667 }
2668
2669 static void set_huge_ptep_writable(struct vm_area_struct *vma,
2670                                    unsigned long address, pte_t *ptep)
2671 {
2672         pte_t entry;
2673
2674         entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
2675         if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
2676                 update_mmu_cache(vma, address, ptep);
2677 }
2678
2679 static int is_hugetlb_entry_migration(pte_t pte)
2680 {
2681         swp_entry_t swp;
2682
2683         if (huge_pte_none(pte) || pte_present(pte))
2684                 return 0;
2685         swp = pte_to_swp_entry(pte);
2686         if (non_swap_entry(swp) && is_migration_entry(swp))
2687                 return 1;
2688         else
2689                 return 0;
2690 }
2691
2692 static int is_hugetlb_entry_hwpoisoned(pte_t pte)
2693 {
2694         swp_entry_t swp;
2695
2696         if (huge_pte_none(pte) || pte_present(pte))
2697                 return 0;
2698         swp = pte_to_swp_entry(pte);
2699         if (non_swap_entry(swp) && is_hwpoison_entry(swp))
2700                 return 1;
2701         else
2702                 return 0;
2703 }
2704
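/*
 * Copy the huge page table entries of @vma from @src to @dst at fork time.
 * Private (COW) mappings are write-protected in both mms; shared page
 * tables and none entries are simply skipped.
 */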
2705 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
2706                             struct vm_area_struct *vma)
2707 {
2708         pte_t *src_pte, *dst_pte, entry;
2709         struct page *ptepage;
2710         unsigned long addr;
2711         int cow;
2712         struct hstate *h = hstate_vma(vma);
2713         unsigned long sz = huge_page_size(h);
2714         unsigned long mmun_start;       /* For mmu_notifiers */
2715         unsigned long mmun_end;         /* For mmu_notifiers */
2716         int ret = 0;
2717
2718         cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
2719
2720         mmun_start = vma->vm_start;
2721         mmun_end = vma->vm_end;
2722         if (cow)
2723                 mmu_notifier_invalidate_range_start(src, mmun_start, mmun_end);
2724
2725         for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
2726                 spinlock_t *src_ptl, *dst_ptl;
2727                 src_pte = huge_pte_offset(src, addr);
2728                 if (!src_pte)
2729                         continue;
2730                 dst_pte = huge_pte_alloc(dst, addr, sz);
2731                 if (!dst_pte) {
2732                         ret = -ENOMEM;
2733                         break;
2734                 }
2735
2736                 /* If the pagetables are shared, don't copy or take references */
2737                 if (dst_pte == src_pte)
2738                         continue;
2739
2740                 dst_ptl = huge_pte_lock(h, dst, dst_pte);
2741                 src_ptl = huge_pte_lockptr(h, src, src_pte);
2742                 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
2743                 entry = huge_ptep_get(src_pte);
2744                 if (huge_pte_none(entry)) { /* skip none entry */
2745                         ;
2746                 } else if (unlikely(is_hugetlb_entry_migration(entry) ||
2747                                     is_hugetlb_entry_hwpoisoned(entry))) {
2748                         swp_entry_t swp_entry = pte_to_swp_entry(entry);
2749
2750                         if (is_write_migration_entry(swp_entry) && cow) {
2751                                 /*
2752                                  * COW mappings require pages in both
2753                                  * parent and child to be set to read.
2754                                  */
2755                                 make_migration_entry_read(&swp_entry);
2756                                 entry = swp_entry_to_pte(swp_entry);
2757                                 set_huge_pte_at(src, addr, src_pte, entry);
2758                         }
2759                         set_huge_pte_at(dst, addr, dst_pte, entry);
2760                 } else {
2761                         if (cow) {
2762                                 huge_ptep_set_wrprotect(src, addr, src_pte);
2763                                 mmu_notifier_invalidate_range(src, mmun_start,
2764                                                                    mmun_end);
2765                         }
2766                         entry = huge_ptep_get(src_pte);
2767                         ptepage = pte_page(entry);
2768                         get_page(ptepage);
2769                         page_dup_rmap(ptepage);
2770                         set_huge_pte_at(dst, addr, dst_pte, entry);
2771                 }
2772                 spin_unlock(src_ptl);
2773                 spin_unlock(dst_ptl);
2774         }
2775
2776         if (cow)
2777                 mmu_notifier_invalidate_range_end(src, mmun_start, mmun_end);
2778
2779         return ret;
2780 }
2781
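/*
 * Unmap huge pages in [start, end) of @vma.  If @ref_page is non-NULL only
 * that single page is unmapped (the unmap_ref_private() case) and the walk
 * stops as soon as it has been dropped.
 */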
2782 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
2783                             unsigned long start, unsigned long end,
2784                             struct page *ref_page)
2785 {
2786         int force_flush = 0;
2787         struct mm_struct *mm = vma->vm_mm;
2788         unsigned long address;
2789         pte_t *ptep;
2790         pte_t pte;
2791         spinlock_t *ptl;
2792         struct page *page;
2793         struct hstate *h = hstate_vma(vma);
2794         unsigned long sz = huge_page_size(h);
2795         const unsigned long mmun_start = start; /* For mmu_notifiers */
2796         const unsigned long mmun_end   = end;   /* For mmu_notifiers */
2797
2798         WARN_ON(!is_vm_hugetlb_page(vma));
2799         BUG_ON(start & ~huge_page_mask(h));
2800         BUG_ON(end & ~huge_page_mask(h));
2801
2802         tlb_start_vma(tlb, vma);
2803         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
2804         address = start;
2805 again:
2806         for (; address < end; address += sz) {
2807                 ptep = huge_pte_offset(mm, address);
2808                 if (!ptep)
2809                         continue;
2810
2811                 ptl = huge_pte_lock(h, mm, ptep);
2812                 if (huge_pmd_unshare(mm, &address, ptep))
2813                         goto unlock;
2814
2815                 pte = huge_ptep_get(ptep);
2816                 if (huge_pte_none(pte))
2817                         goto unlock;
2818
2819                 /*
2820                  * Migrating hugepage or HWPoisoned hugepage is already
2821                  * unmapped and its refcount is dropped, so just clear pte here.
2822                  */
2823                 if (unlikely(!pte_present(pte))) {
2824                         huge_pte_clear(mm, address, ptep);
2825                         goto unlock;
2826                 }
2827
2828                 page = pte_page(pte);
2829                 /*
2830                  * If a reference page is supplied, it is because a specific
2831                  * page is being unmapped, not a range. Ensure the page we
2832                  * are about to unmap is the actual page of interest.
2833                  */
2834                 if (ref_page) {
2835                         if (page != ref_page)
2836                                 goto unlock;
2837
2838                         /*
2839                          * Mark the VMA as having unmapped its page so that
2840                          * future faults in this VMA will fail rather than
2841                          * looking like data was lost
2842                          */
2843                         set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
2844                 }
2845
2846                 pte = huge_ptep_get_and_clear(mm, address, ptep);
2847                 tlb_remove_tlb_entry(tlb, ptep, address);
2848                 if (huge_pte_dirty(pte))
2849                         set_page_dirty(page);
2850
2851                 page_remove_rmap(page);
2852                 force_flush = !__tlb_remove_page(tlb, page);
2853                 if (force_flush) {
2854                         address += sz;
2855                         spin_unlock(ptl);
2856                         break;
2857                 }
2858                 /* Bail out after unmapping reference page if supplied */
2859                 if (ref_page) {
2860                         spin_unlock(ptl);
2861                         break;
2862                 }
2863 unlock:
2864                 spin_unlock(ptl);
2865         }
2866         /*
2867          * mmu_gather ran out of room to batch pages, so we broke out of
2868          * the PTE lock to avoid doing the potentially expensive TLB
2869          * invalidate and page-free while holding it.
2870          */
2871         if (force_flush) {
2872                 force_flush = 0;
2873                 tlb_flush_mmu(tlb);
2874                 if (address < end && !ref_page)
2875                         goto again;
2876         }
2877         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
2878         tlb_end_vma(tlb, vma);
2879 }
2880
2881 void __unmap_hugepage_range_final(struct mmu_gather *tlb,
2882                           struct vm_area_struct *vma, unsigned long start,
2883                           unsigned long end, struct page *ref_page)
2884 {
2885         __unmap_hugepage_range(tlb, vma, start, end, ref_page);
2886
2887         /*
2888          * Clear this flag so that x86's huge_pmd_share page_table_shareable
2889          * test will fail on a vma being torn down, and not grab a page table
2890          * on its way out.  We're lucky that the flag has such an appropriate
2891          * name, and can in fact be safely cleared here. We could clear it
2892          * before the __unmap_hugepage_range above, but all that's necessary
2893          * is to clear it before releasing the i_mmap_rwsem. This works
2894          * because in the context this is called, the VMA is about to be
2895          * destroyed and the i_mmap_rwsem is held.
2896          */
2897         vma->vm_flags &= ~VM_MAYSHARE;
2898 }
2899
2900 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
2901                           unsigned long end, struct page *ref_page)
2902 {
2903         struct mm_struct *mm;
2904         struct mmu_gather tlb;
2905
2906         mm = vma->vm_mm;
2907
2908         tlb_gather_mmu(&tlb, mm, start, end);
2909         __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
2910         tlb_finish_mmu(&tlb, start, end);
2911 }
2912
2913 /*
2914  * This is called when the original mapper is failing to COW a MAP_PRIVATE
2915  * mapping it owns the reserve page for. The intention is to unmap the page
2916  * from other VMAs and let the children be SIGKILLed if they are faulting the
2917  * same region.
2918  */
2919 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
2920                               struct page *page, unsigned long address)
2921 {
2922         struct hstate *h = hstate_vma(vma);
2923         struct vm_area_struct *iter_vma;
2924         struct address_space *mapping;
2925         pgoff_t pgoff;
2926
2927         /*
2928          * vm_pgoff is in PAGE_SIZE units, hence the different calculation
2929          * from page cache lookup which is in HPAGE_SIZE units.
2930          */
2931         address = address & huge_page_mask(h);
2932         pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
2933                         vma->vm_pgoff;
2934         mapping = file_inode(vma->vm_file)->i_mapping;
2935
2936         /*
2937          * Take the mapping lock for the duration of the table walk. As
2938          * this mapping should be shared between all the VMAs,
2939          * __unmap_hugepage_range() is called while the lock is already held.
2940          */
2941         i_mmap_lock_write(mapping);
2942         vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
2943                 /* Do not unmap the current VMA */
2944                 if (iter_vma == vma)
2945                         continue;
2946
2947                 /*
2948                  * Unmap the page from other VMAs without their own reserves.
2949                  * They get marked to be SIGKILLed if they fault in these
2950                  * areas. This is because a future no-page fault on this VMA
2951                  * could insert a zeroed page instead of the data existing
2952          * from the time of fork. This would look like data corruption.
2953                  */
2954                 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
2955                         unmap_hugepage_range(iter_vma, address,
2956                                              address + huge_page_size(h), page);
2957         }
2958         i_mmap_unlock_write(mapping);
2959 }
2960
2961 /*
2962  * hugetlb_cow() should be called with the page lock of the original hugepage
2963  * held.  Called with hugetlb_instantiation_mutex held and pte_page locked so
2964  * we cannot race with other handlers or page migration.
2965  * Keep the pte_same checks anyway to make the transition from the mutex easier.
2966  */
2967 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
2968                         unsigned long address, pte_t *ptep, pte_t pte,
2969                         struct page *pagecache_page, spinlock_t *ptl)
2970 {
2971         struct hstate *h = hstate_vma(vma);
2972         struct page *old_page, *new_page;
2973         int ret = 0, outside_reserve = 0;
2974         unsigned long mmun_start;       /* For mmu_notifiers */
2975         unsigned long mmun_end;         /* For mmu_notifiers */
2976
2977         old_page = pte_page(pte);
2978
2979 retry_avoidcopy:
2980         /* If no-one else is actually using this page, avoid the copy
2981          * and just make the page writable */
2982         if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
2983                 page_move_anon_rmap(old_page, vma, address);
2984                 set_huge_ptep_writable(vma, address, ptep);
2985                 return 0;
2986         }
2987
2988         /*
2989          * If the process that created a MAP_PRIVATE mapping is about to
2990          * perform a COW due to a shared page count, attempt to satisfy
2991          * the allocation without using the existing reserves. The pagecache
2992          * page is used to determine if the reserve at this address was
2993          * consumed or not. If reserves were used, a partial faulted mapping
2994          * at the time of fork() could consume its reserves on COW instead
2995          * of the full address range.
2996          */
2997         if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
2998                         old_page != pagecache_page)
2999                 outside_reserve = 1;
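        /*
         * Note: outside_reserve means the reserve owner is COWing a page
         * other than the pagecache page, so alloc_huge_page() below is asked
         * to avoid the reserves; if that allocation fails, the
         * unmap_ref_private() fallback further down unmaps the page from the
         * child mappings so the owner can retry.
         */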
3000
3001         page_cache_get(old_page);
3002
3003         /*
3004          * Drop page table lock as buddy allocator may be called. It will
3005          * be acquired again before returning to the caller, as expected.
3006          */
3007         spin_unlock(ptl);
3008         new_page = alloc_huge_page(vma, address, outside_reserve);
3009
3010         if (IS_ERR(new_page)) {
3011                 /*
3012                  * If a process owning a MAP_PRIVATE mapping fails to COW,
3013                  * it is due to references held by a child and an insufficient
3014                  * huge page pool. To guarantee the original mapper's
3015                  * reliability, unmap the page from child processes. The child
3016                  * may get SIGKILLed if it later faults.
3017                  */
3018                 if (outside_reserve) {
3019                         page_cache_release(old_page);
3020                         BUG_ON(huge_pte_none(pte));
3021                         unmap_ref_private(mm, vma, old_page, address);
3022                         BUG_ON(huge_pte_none(pte));
3023                         spin_lock(ptl);
3024                         ptep = huge_pte_offset(mm, address & huge_page_mask(h));
3025                         if (likely(ptep &&
3026                                    pte_same(huge_ptep_get(ptep), pte)))
3027                                 goto retry_avoidcopy;
3028                         /*
3029                          * A race occurred while re-acquiring the page
3030                          * table lock, and our job is done.
3031                          */
3032                         return 0;
3033                 }
3034
3035                 ret = (PTR_ERR(new_page) == -ENOMEM) ?
3036                         VM_FAULT_OOM : VM_FAULT_SIGBUS;
3037                 goto out_release_old;
3038         }
3039
3040         /*
3041          * When the original hugepage is a shared one, it does not have
3042          * an anon_vma prepared.
3043          */
3044         if (unlikely(anon_vma_prepare(vma))) {
3045                 ret = VM_FAULT_OOM;
3046                 goto out_release_all;
3047         }
3048
3049         copy_user_huge_page(new_page, old_page, address, vma,
3050                             pages_per_huge_page(h));
3051         __SetPageUptodate(new_page);
3052         set_page_huge_active(new_page);
3053
3054         mmun_start = address & huge_page_mask(h);
3055         mmun_end = mmun_start + huge_page_size(h);
3056         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
3057
3058         /*
3059          * Retake the page table lock to check for racing updates
3060          * before the page tables are altered
3061          */
3062         spin_lock(ptl);
3063         ptep = huge_pte_offset(mm, address & huge_page_mask(h));
3064         if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
3065                 ClearPagePrivate(new_page);
3066
3067                 /* Break COW */
3068                 huge_ptep_clear_flush(vma, address, ptep);
3069                 mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
3070                 set_huge_pte_at(mm, address, ptep,
3071                                 make_huge_pte(vma, new_page, 1));
3072                 page_remove_rmap(old_page);
3073                 hugepage_add_new_anon_rmap(new_page, vma, address);
3074                 /* Make the old page be freed below */
3075                 new_page = old_page;
3076         }
3077         spin_unlock(ptl);
3078         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
3079 out_release_all:
3080         page_cache_release(new_page);
3081 out_release_old:
3082         page_cache_release(old_page);
3083
3084         spin_lock(ptl); /* Caller expects lock to be held */
3085         return ret;
3086 }
3087
3088 /* Return the pagecache page at a given address within a VMA */
3089 static struct page *hugetlbfs_pagecache_page(struct hstate *h,
3090                         struct vm_area_struct *vma, unsigned long address)
3091 {
3092         struct address_space *mapping;
3093         pgoff_t idx;
3094
3095         mapping = vma->vm_file->f_mapping;
3096         idx = vma_hugecache_offset(h, vma, address);
3097
3098         return find_lock_page(mapping, idx);
3099 }
3100
3101 /*
3102  * Return whether there is a pagecache page to back the given address within the VMA.
3103  * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
3104  */
3105 static bool hugetlbfs_pagecache_present(struct hstate *h,
3106                         struct vm_area_struct *vma, unsigned long address)
3107 {
3108         struct address_space *mapping;
3109         pgoff_t idx;
3110         struct page *page;
3111
3112         mapping = vma->vm_file->f_mapping;
3113         idx = vma_hugecache_offset(h, vma, address);
3114
3115         page = find_get_page(mapping, idx);
3116         if (page)
3117                 put_page(page);
3118         return page != NULL;
3119 }
3120
3121 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
3122                            struct address_space *mapping, pgoff_t idx,
3123                            unsigned long address, pte_t *ptep, unsigned int flags)
3124 {
3125         struct hstate *h = hstate_vma(vma);
3126         int ret = VM_FAULT_SIGBUS;
3127         int anon_rmap = 0;
3128         unsigned long size;
3129         struct page *page;
3130         pte_t new_pte;
3131         spinlock_t *ptl;
3132
3133         /*
3134          * Currently, we are forced to kill the process in the event the
3135          * original mapper has unmapped pages from the child due to a failed
3136          * COW. Warn that such a situation has occurred as it may not be obvious
3137          */
3138         if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
3139                 pr_warning("PID %d killed due to inadequate hugepage pool\n",
3140                            current->pid);
3141                 return ret;
3142         }
3143
3144         /*
3145          * Use page lock to guard against racing truncation
3146          * before we get page_table_lock.
3147          */
3148 retry:
3149         page = find_lock_page(mapping, idx);
3150         if (!page) {
3151                 size = i_size_read(mapping->host) >> huge_page_shift(h);
3152                 if (idx >= size)
3153                         goto out;
3154                 page = alloc_huge_page(vma, address, 0);
3155                 if (IS_ERR(page)) {
3156                         ret = PTR_ERR(page);
3157                         if (ret == -ENOMEM)
3158                                 ret = VM_FAULT_OOM;
3159                         else
3160                                 ret = VM_FAULT_SIGBUS;
3161                         goto out;
3162                 }
3163                 clear_huge_page(page, address, pages_per_huge_page(h));
3164                 __SetPageUptodate(page);
3165                 set_page_huge_active(page);
3166
3167                 if (vma->vm_flags & VM_MAYSHARE) {
3168                         int err;
3169                         struct inode *inode = mapping->host;
3170
3171                         err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
3172                         if (err) {
3173                                 put_page(page);
3174                                 if (err == -EEXIST)
3175                                         goto retry;
3176                                 goto out;
3177                         }
3178                         ClearPagePrivate(page);
3179
3180                         spin_lock(&inode->i_lock);
3181                         inode->i_blocks += blocks_per_huge_page(h);
3182                         spin_unlock(&inode->i_lock);
3183                 } else {
3184                         lock_page(page);
3185                         if (unlikely(anon_vma_prepare(vma))) {
3186                                 ret = VM_FAULT_OOM;
3187                                 goto backout_unlocked;
3188                         }
3189                         anon_rmap = 1;
3190                 }
3191         } else {
3192                 /*
3193                  * If a memory error occurs between mmap() and fault, some processes
3194                  * don't have a hwpoisoned swap entry for the errored virtual address.
3195                  * So we need to block the hugepage fault with a PG_hwpoison bit check.
3196                  */
3197                 if (unlikely(PageHWPoison(page))) {
3198                         ret = VM_FAULT_HWPOISON |
3199                                 VM_FAULT_SET_HINDEX(hstate_index(h));
3200                         goto backout_unlocked;
3201                 }
3202         }
3203
3204         /*
3205          * If we are going to COW a private mapping later, we examine the
3206          * pending reservations for this page now. This will ensure that
3207          * any allocations necessary to record that reservation occur outside
3208          * the spinlock.
3209          */
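        /*
         * Note: vma_needs_reservation() may need to allocate a region
         * descriptor for the reserve map; doing it here keeps that sleeping
         * allocation outside the page table spinlock taken below.
         */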
3210         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
3211                 if (vma_needs_reservation(h, vma, address) < 0) {
3212                         ret = VM_FAULT_OOM;
3213                         goto backout_unlocked;
3214                 }
3215
3216         ptl = huge_pte_lockptr(h, mm, ptep);
3217         spin_lock(ptl);
3218         size = i_size_read(mapping->host) >> huge_page_shift(h);
3219         if (idx >= size)
3220                 goto backout;
3221
3222         ret = 0;
3223         if (!huge_pte_none(huge_ptep_get(ptep)))
3224                 goto backout;
3225
3226         if (anon_rmap) {
3227                 ClearPagePrivate(page);
3228                 hugepage_add_new_anon_rmap(page, vma, address);
3229         } else
3230                 page_dup_rmap(page);
3231         new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
3232                                 && (vma->vm_flags & VM_SHARED)));
3233         set_huge_pte_at(mm, address, ptep, new_pte);
3234
3235         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3236                 /* Optimization, do the COW without a second fault */
3237                 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
3238         }
3239
3240         spin_unlock(ptl);
3241         unlock_page(page);
3242 out:
3243         return ret;
3244
3245 backout:
3246         spin_unlock(ptl);
3247 backout_unlocked:
3248         unlock_page(page);
3249         put_page(page);
3250         goto out;
3251 }
3252
3253 #ifdef CONFIG_SMP
3254 static u32 fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3255                             struct vm_area_struct *vma,
3256                             struct address_space *mapping,
3257                             pgoff_t idx, unsigned long address)
3258 {
3259         unsigned long key[2];
3260         u32 hash;
3261
3262         if (vma->vm_flags & VM_SHARED) {
3263                 key[0] = (unsigned long) mapping;
3264                 key[1] = idx;
3265         } else {
3266                 key[0] = (unsigned long) mm;
3267                 key[1] = address >> huge_page_shift(h);
3268         }
3269
3270         hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
3271
3272         return hash & (num_fault_mutexes - 1);
3273 }
3274 #else
3275 /*
3276  * For uniprocessor systems we always use a single mutex, so just
3277  * return 0 and avoid the hashing overhead.
3278  */
3279 static u32 fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3280                             struct vm_area_struct *vma,
3281                             struct address_space *mapping,
3282                             pgoff_t idx, unsigned long address)
3283 {
3284         return 0;
3285 }
3286 #endif
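/*
 * Note: the "hash & (num_fault_mutexes - 1)" reduction above assumes
 * num_fault_mutexes is a power of two, so the jhash2() value is folded
 * into a valid table index without a division.
 */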
3287
3288 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3289                         unsigned long address, unsigned int flags)
3290 {
3291         pte_t *ptep, entry;
3292         spinlock_t *ptl;
3293         int ret;
3294         u32 hash;
3295         pgoff_t idx;
3296         struct page *page = NULL;
3297         struct page *pagecache_page = NULL;
3298         struct hstate *h = hstate_vma(vma);
3299         struct address_space *mapping;
3300         int need_wait_lock = 0;
3301
3302         address &= huge_page_mask(h);
3303
3304         ptep = huge_pte_offset(mm, address);
3305         if (ptep) {
3306                 entry = huge_ptep_get(ptep);
3307                 if (unlikely(is_hugetlb_entry_migration(entry))) {
3308                         migration_entry_wait_huge(vma, mm, ptep);
3309                         return 0;
3310                 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
3311                         return VM_FAULT_HWPOISON_LARGE |
3312                                 VM_FAULT_SET_HINDEX(hstate_index(h));
3313         }
3314
3315         ptep = huge_pte_alloc(mm, address, huge_page_size(h));
3316         if (!ptep)
3317                 return VM_FAULT_OOM;
3318
3319         mapping = vma->vm_file->f_mapping;
3320         idx = vma_hugecache_offset(h, vma, address);
3321
3322         /*
3323          * Serialize hugepage allocation and instantiation, so that we don't
3324          * get spurious allocation failures if two CPUs race to instantiate
3325          * the same page in the page cache.
3326          */
3327         hash = fault_mutex_hash(h, mm, vma, mapping, idx, address);
3328         mutex_lock(&htlb_fault_mutex_table[hash]);
3329
3330         entry = huge_ptep_get(ptep);
3331         if (huge_pte_none(entry)) {
3332                 ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
3333                 goto out_mutex;
3334         }
3335
3336         ret = 0;
3337
3338         /*
3339          * entry could be a migration/hwpoison entry at this point, so this
3340          * check prevents the kernel from proceeding below on the assumption
3341          * that we have an active hugepage in the pagecache. This goto expects
3342          * the 2nd page fault, where the is_hugetlb_entry_(migration|hwpoisoned)
3343          * check will properly handle it.
3344          */
3345         if (!pte_present(entry))
3346                 goto out_mutex;
3347
3348         /*
3349          * If we are going to COW the mapping later, we examine the pending
3350          * reservations for this page now. This will ensure that any
3351          * allocations necessary to record that reservation occur outside the
3352          * spinlock. For private mappings, we also lookup the pagecache
3353          * page now as it is used to determine if a reservation has been
3354          * consumed.
3355          */
3356         if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
3357                 if (vma_needs_reservation(h, vma, address) < 0) {
3358                         ret = VM_FAULT_OOM;
3359                         goto out_mutex;
3360                 }
3361
3362                 if (!(vma->vm_flags & VM_MAYSHARE))
3363                         pagecache_page = hugetlbfs_pagecache_page(h,
3364                                                                 vma, address);
3365         }
3366
3367         ptl = huge_pte_lock(h, mm, ptep);
3368
3369         /* Check for a racing update before calling hugetlb_cow */
3370         if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
3371                 goto out_ptl;
3372
3373         /*
3374          * hugetlb_cow() requires page locks of pte_page(entry) and
3375          * pagecache_page, so here we need to take the former one
3376          * when page != pagecache_page or !pagecache_page.
3377          */
3378         page = pte_page(entry);
3379         if (page != pagecache_page)
3380                 if (!trylock_page(page)) {
3381                         need_wait_lock = 1;
3382                         goto out_ptl;
3383                 }
3384
3385         get_page(page);
3386
3387         if (flags & FAULT_FLAG_WRITE) {
3388                 if (!huge_pte_write(entry)) {
3389                         ret = hugetlb_cow(mm, vma, address, ptep, entry,
3390                                         pagecache_page, ptl);
3391                         goto out_put_page;
3392                 }
3393                 entry = huge_pte_mkdirty(entry);
3394         }
3395         entry = pte_mkyoung(entry);
3396         if (huge_ptep_set_access_flags(vma, address, ptep, entry,
3397                                                 flags & FAULT_FLAG_WRITE))
3398                 update_mmu_cache(vma, address, ptep);
3399 out_put_page:
3400         if (page != pagecache_page)
3401                 unlock_page(page);
3402         put_page(page);
3403 out_ptl:
3404         spin_unlock(ptl);
3405
3406         if (pagecache_page) {
3407                 unlock_page(pagecache_page);
3408                 put_page(pagecache_page);
3409         }
3410 out_mutex:
3411         mutex_unlock(&htlb_fault_mutex_table[hash]);
3412         /*
3413          * Generally it's safe to hold a refcount while waiting for the page lock.
3414          * But here we only wait to defer the next page fault and avoid a busy
3415          * loop; the page is not used after it is unlocked and before we return
3416          * from the current page fault. So we are safe from accessing a freed
3417          * page, even though we wait here without taking a refcount.
3418          */
3419         if (need_wait_lock)
3420                 wait_on_page_locked(page);
3421         return ret;
3422 }
3423
3424 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
3425                          struct page **pages, struct vm_area_struct **vmas,
3426                          unsigned long *position, unsigned long *nr_pages,
3427                          long i, unsigned int flags)
3428 {
3429         unsigned long pfn_offset;
3430         unsigned long vaddr = *position;
3431         unsigned long remainder = *nr_pages;
3432         struct hstate *h = hstate_vma(vma);
3433
3434         while (vaddr < vma->vm_end && remainder) {
3435                 pte_t *pte;
3436                 spinlock_t *ptl = NULL;
3437                 int absent;
3438                 struct page *page;
3439
3440                 /*
3441                  * If we have a pending SIGKILL, don't keep faulting pages and
3442                  * potentially allocating memory.
3443                  */
3444                 if (unlikely(fatal_signal_pending(current))) {
3445                         remainder = 0;
3446                         break;
3447                 }
3448
3449                 /*
3450                  * Some archs (sparc64, sh*) have multiple pte_ts per
3451                  * hugepage.  We have to make sure we get the first one,
3452                  * for the page indexing below to work.
3453                  *
3454                  * Note that page table lock is not held when pte is null.
3455                  */
3456                 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
3457                 if (pte)
3458                         ptl = huge_pte_lock(h, mm, pte);
3459                 absent = !pte || huge_pte_none(huge_ptep_get(pte));
3460
3461                 /*
3462                  * When coredumping, it suits get_dump_page if we just return
3463                  * an error where there's an empty slot with no huge pagecache
3464                  * to back it.  This way, we avoid allocating a hugepage, and
3465                  * the sparse dumpfile avoids allocating disk blocks, but its
3466                  * huge holes still show up with zeroes where they need to be.
3467                  */
3468                 if (absent && (flags & FOLL_DUMP) &&
3469                     !hugetlbfs_pagecache_present(h, vma, vaddr)) {
3470                         if (pte)
3471                                 spin_unlock(ptl);
3472                         remainder = 0;
3473                         break;
3474                 }
3475
3476                 /*
3477                  * We need to call hugetlb_fault for both hugepages under migration
3478                  * (in which case hugetlb_fault waits for the migration) and
3479                  * hwpoisoned hugepages (in which case we need to prevent the
3480                  * caller from accessing them). To do this, we use is_swap_pte
3481                  * here instead of is_hugetlb_entry_migration and
3482                  * is_hugetlb_entry_hwpoisoned, because it simply covers both
3483                  * cases, and because we can't follow correct pages directly
3484                  * from any kind of swap entry.
3485                  */
3486                 if (absent || is_swap_pte(huge_ptep_get(pte)) ||
3487                     ((flags & FOLL_WRITE) &&
3488                       !huge_pte_write(huge_ptep_get(pte)))) {
3489                         int ret;
3490
3491                         if (pte)
3492                                 spin_unlock(ptl);
3493                         ret = hugetlb_fault(mm, vma, vaddr,
3494                                 (flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
3495                         if (!(ret & VM_FAULT_ERROR))
3496                                 continue;
3497
3498                         remainder = 0;
3499                         break;
3500                 }
3501
3502                 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
3503                 page = pte_page(huge_ptep_get(pte));
3504 same_page:
3505                 if (pages) {
3506                         pages[i] = mem_map_offset(page, pfn_offset);
3507                         get_page_foll(pages[i]);
3508                 }
3509
3510                 if (vmas)
3511                         vmas[i] = vma;
3512
3513                 vaddr += PAGE_SIZE;
3514                 ++pfn_offset;
3515                 --remainder;
3516                 ++i;
3517                 if (vaddr < vma->vm_end && remainder &&
3518                                 pfn_offset < pages_per_huge_page(h)) {
3519                         /*
3520                          * We use pfn_offset to avoid touching the pageframes
3521                          * of this compound page.
3522                          */
3523                         goto same_page;
3524                 }
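                /*
                 * For example, with 4KB base pages and 2MB hugepages the
                 * same_page loop above can satisfy up to 512 consecutive
                 * requests from a single pte lookup while the page table
                 * lock is held.
                 */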
3525                 spin_unlock(ptl);
3526         }
3527         *nr_pages = remainder;
3528         *position = vaddr;
3529
3530         return i ? i : -EFAULT;
3531 }
3532
3533 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
3534                 unsigned long address, unsigned long end, pgprot_t newprot)
3535 {
3536         struct mm_struct *mm = vma->vm_mm;
3537         unsigned long start = address;
3538         pte_t *ptep;
3539         pte_t pte;
3540         struct hstate *h = hstate_vma(vma);
3541         unsigned long pages = 0;
3542
3543         BUG_ON(address >= end);
3544         flush_cache_range(vma, address, end);
3545
3546         mmu_notifier_invalidate_range_start(mm, start, end);
3547         i_mmap_lock_write(vma->vm_file->f_mapping);
3548         for (; address < end; address += huge_page_size(h)) {
3549                 spinlock_t *ptl;
3550                 ptep = huge_pte_offset(mm, address);
3551                 if (!ptep)
3552                         continue;
3553                 ptl = huge_pte_lock(h, mm, ptep);
3554                 if (huge_pmd_unshare(mm, &address, ptep)) {
3555                         pages++;
3556                         spin_unlock(ptl);
3557                         continue;
3558                 }
3559                 pte = huge_ptep_get(ptep);
3560                 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
3561                         spin_unlock(ptl);
3562                         continue;
3563                 }
3564                 if (unlikely(is_hugetlb_entry_migration(pte))) {
3565                         swp_entry_t entry = pte_to_swp_entry(pte);
3566
3567                         if (is_write_migration_entry(entry)) {
3568                                 pte_t newpte;
3569
3570                                 make_migration_entry_read(&entry);
3571                                 newpte = swp_entry_to_pte(entry);
3572                                 set_huge_pte_at(mm, address, ptep, newpte);
3573                                 pages++;
3574                         }
3575                         spin_unlock(ptl);
3576                         continue;
3577                 }
3578                 if (!huge_pte_none(pte)) {
3579                         pte = huge_ptep_get_and_clear(mm, address, ptep);
3580                         pte = pte_mkhuge(huge_pte_modify(pte, newprot));
3581                         pte = arch_make_huge_pte(pte, vma, NULL, 0);
3582                         set_huge_pte_at(mm, address, ptep, pte);
3583                         pages++;
3584                 }
3585                 spin_unlock(ptl);
3586         }
3587         /*
3588          * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
3589          * may have cleared our pud entry and done put_page on the page table:
3590          * once we release i_mmap_rwsem, another task can do the final put_page
3591          * and that page table be reused and filled with junk.
3592          */
3593         flush_tlb_range(vma, start, end);
3594         mmu_notifier_invalidate_range(mm, start, end);
3595         i_mmap_unlock_write(vma->vm_file->f_mapping);
3596         mmu_notifier_invalidate_range_end(mm, start, end);
3597
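        /*
         * Callers expect the count in base pages, hence the shift of the
         * hugepage count by the hstate order.
         */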
3598         return pages << h->order;
3599 }
3600
3601 int hugetlb_reserve_pages(struct inode *inode,
3602                                         long from, long to,
3603                                         struct vm_area_struct *vma,
3604                                         vm_flags_t vm_flags)
3605 {
3606         long ret, chg;
3607         struct hstate *h = hstate_inode(inode);
3608         struct hugepage_subpool *spool = subpool_inode(inode);
3609         struct resv_map *resv_map;
3610         long gbl_reserve;
3611
3612         /*
3613          * Only apply hugepage reservation if asked. At fault time, an
3614          * attempt will be made for VM_NORESERVE to allocate a page
3615          * without using reserves
3616          */
3617         if (vm_flags & VM_NORESERVE)
3618                 return 0;
3619
3620         /*
3621          * Shared mappings base their reservation on the number of pages that
3622          * are already allocated on behalf of the file. Private mappings need
3623          * to reserve the full area even if read-only as mprotect() may be
3624          * called to make the mapping read-write. Assume !vma is a shm mapping
3625          */
3626         if (!vma || vma->vm_flags & VM_MAYSHARE) {
3627                 resv_map = inode_resv_map(inode);
3628
3629                 chg = region_chg(resv_map, from, to);
3630
3631         } else {
3632                 resv_map = resv_map_alloc();
3633                 if (!resv_map)
3634                         return -ENOMEM;
3635
3636                 chg = to - from;
3637
3638                 set_vma_resv_map(vma, resv_map);
3639                 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
3640         }
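        /*
         * Note: for a private mapping chg is simply the number of hugepages
         * spanned (to - from); for a shared mapping region_chg() counts only
         * the pages not already reserved by earlier mappings of the same
         * file range.
         */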
3641
3642         if (chg < 0) {
3643                 ret = chg;
3644                 goto out_err;
3645         }
3646
3647         /*
3648          * There must be enough pages in the subpool for the mapping. If
3649          * the subpool has a minimum size, there may be some global
3650          * reservations already in place (gbl_reserve).
3651          */
3652         gbl_reserve = hugepage_subpool_get_pages(spool, chg);
3653         if (gbl_reserve < 0) {
3654                 ret = -ENOSPC;
3655                 goto out_err;
3656         }
3657
3658         /*
3659          * Check enough hugepages are available for the reservation.
3660          * Hand the pages back to the subpool if there are not
3661          */
3662         ret = hugetlb_acct_memory(h, gbl_reserve);
3663         if (ret < 0) {
3664                 /* put back original number of pages, chg */
3665                 (void)hugepage_subpool_put_pages(spool, chg);
3666                 goto out_err;
3667         }
3668
3669         /*
3670          * Account for the reservations made. Shared mappings record regions
3671          * that have reservations as they are shared by multiple VMAs.
3672          * When the last VMA disappears, the region map says how much
3673          * the reservation was and the page cache tells how much of
3674          * the reservation was consumed. Private mappings are per-VMA and
3675          * only the consumed reservations are tracked. When the VMA
3676          * disappears, the original reservation is the VMA size and the
3677          * consumed reservations are stored in the map. Hence, nothing
3678          * else has to be done for private mappings here
3679          */
3680         if (!vma || vma->vm_flags & VM_MAYSHARE)
3681                 region_add(resv_map, from, to);
3682         return 0;
3683 out_err:
3684         if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3685                 kref_put(&resv_map->refs, resv_map_release);
3686         return ret;
3687 }
3688
3689 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
3690 {
3691         struct hstate *h = hstate_inode(inode);
3692         struct resv_map *resv_map = inode_resv_map(inode);
3693         long chg = 0;
3694         struct hugepage_subpool *spool = subpool_inode(inode);
3695         long gbl_reserve;
3696
3697         if (resv_map)
3698                 chg = region_truncate(resv_map, offset);
3699         spin_lock(&inode->i_lock);
3700         inode->i_blocks -= (blocks_per_huge_page(h) * freed);
3701         spin_unlock(&inode->i_lock);
3702
3703         /*
3704          * If the subpool has a minimum size, the number of global
3705          * reservations to be released may be adjusted.
3706          */
3707         gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
3708         hugetlb_acct_memory(h, -gbl_reserve);
3709 }
3710
3711 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
3712 static unsigned long page_table_shareable(struct vm_area_struct *svma,
3713                                 struct vm_area_struct *vma,
3714                                 unsigned long addr, pgoff_t idx)
3715 {
3716         unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
3717                                 svma->vm_start;
3718         unsigned long sbase = saddr & PUD_MASK;
3719         unsigned long s_end = sbase + PUD_SIZE;
3720
3721         /* Allow segments to share if only one is marked locked */
3722         unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED;
3723         unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED;
3724
3725         /*
3726          * match the virtual addresses, permissions and the alignment of the
3727          * page table page.
3728          */
3729         if (pmd_index(addr) != pmd_index(saddr) ||
3730             vm_flags != svm_flags ||
3731             sbase < svma->vm_start || svma->vm_end < s_end)
3732                 return 0;
3733
3734         return saddr;
3735 }
3736
3737 static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)
3738 {
3739         unsigned long base = addr & PUD_MASK;
3740         unsigned long end = base + PUD_SIZE;
3741
3742         /*
3743          * check on proper vm_flags and page table alignment
3744          */
3745         if (vma->vm_flags & VM_MAYSHARE &&
3746             vma->vm_start <= base && end <= vma->vm_end)
3747                 return 1;
3748         return 0;
3749 }
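/*
 * For example, on x86_64 with 2MB hugepages PUD_SIZE is 1GB, so a pmd page
 * table can only be shared when the VMA fully covers the 1GB-aligned region
 * containing the faulting address.
 */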
3750
3751 /*
3752  * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
3753  * and returns the corresponding pte. While this is not necessary for the
3754  * !shared pmd case because we can allocate the pmd later as well, it makes the
3755  * code much cleaner. pmd allocation is essential for the shared case because
3756  * pud has to be populated inside the same i_mmap_rwsem section - otherwise
3757  * racing tasks could either miss the sharing (see huge_pte_offset) or select a
3758  * bad pmd for sharing.
3759  */
3760 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
3761 {
3762         struct vm_area_struct *vma = find_vma(mm, addr);
3763         struct address_space *mapping = vma->vm_file->f_mapping;
3764         pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
3765                         vma->vm_pgoff;
3766         struct vm_area_struct *svma;
3767         unsigned long saddr;
3768         pte_t *spte = NULL;
3769         pte_t *pte;
3770         spinlock_t *ptl;
3771
3772         if (!vma_shareable(vma, addr))
3773                 return (pte_t *)pmd_alloc(mm, pud, addr);
3774
3775         i_mmap_lock_write(mapping);
3776         vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
3777                 if (svma == vma)
3778                         continue;
3779
3780                 saddr = page_table_shareable(svma, vma, addr, idx);
3781                 if (saddr) {
3782                         spte = huge_pte_offset(svma->vm_mm, saddr);
3783                         if (spte) {
3784                                 mm_inc_nr_pmds(mm);
3785                                 get_page(virt_to_page(spte));
3786                                 break;
3787                         }
3788                 }
3789         }
3790
3791         if (!spte)
3792                 goto out;
3793
3794         ptl = huge_pte_lockptr(hstate_vma(vma), mm, spte);
3795         spin_lock(ptl);
3796         if (pud_none(*pud)) {
3797                 pud_populate(mm, pud,
3798                                 (pmd_t *)((unsigned long)spte & PAGE_MASK));
3799         } else {
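                /*
                 * Another task populated the pud first: drop the extra
                 * reference and the pmd accounting taken in the walk above.
                 */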
3800                 put_page(virt_to_page(spte));
3801                 mm_dec_nr_pmds(mm);
3802         }
3803         spin_unlock(ptl);
3804 out:
3805         pte = (pte_t *)pmd_alloc(mm, pud, addr);
3806         i_mmap_unlock_write(mapping);
3807         return pte;
3808 }
3809
3810 /*
3811  * unmap huge page backed by shared pte.
3812  *
3813  * Hugetlb pte page is ref counted at the time of mapping.  If pte is shared
3814  * indicated by page_count > 1, unmap is achieved by clearing pud and
3815  * decrementing the ref count. If count == 1, the pte page is not shared.
3816  *
3817  * called with page table lock held.
3818  *
3819  * returns: 1 successfully unmapped a shared pte page
3820  *          0 the underlying pte page is not shared, or it is the last user
3821  */
3822 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
3823 {
3824         pgd_t *pgd = pgd_offset(mm, *addr);
3825         pud_t *pud = pud_offset(pgd, *addr);
3826
3827         BUG_ON(page_count(virt_to_page(ptep)) == 0);
3828         if (page_count(virt_to_page(ptep)) == 1)
3829                 return 0;
3830
3831         pud_clear(pud);
3832         put_page(virt_to_page(ptep));
3833         mm_dec_nr_pmds(mm);
3834         *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
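        /*
         * Rounding *addr up to the next PUD boundary and backing off one
         * hugepage lets callers that advance by huge_page_size() in their
         * loop skip over the rest of the range whose page table was just
         * unhooked.
         */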
3835         return 1;
3836 }
3837 #define want_pmd_share()        (1)
3838 #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
3839 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
3840 {
3841         return NULL;
3842 }
3843
3844 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
3845 {
3846         return 0;
3847 }
3848 #define want_pmd_share()        (0)
3849 #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
3850
3851 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
3852 pte_t *huge_pte_alloc(struct mm_struct *mm,
3853                         unsigned long addr, unsigned long sz)
3854 {
3855         pgd_t *pgd;
3856         pud_t *pud;
3857         pte_t *pte = NULL;
3858
3859         pgd = pgd_offset(mm, addr);
3860         pud = pud_alloc(mm, pgd, addr);
3861         if (pud) {
3862                 if (sz == PUD_SIZE) {
3863                         pte = (pte_t *)pud;
3864                 } else {
3865                         BUG_ON(sz != PMD_SIZE);
3866                         if (want_pmd_share() && pud_none(*pud))
3867                                 pte = huge_pmd_share(mm, addr, pud);
3868                         else
3869                                 pte = (pte_t *)pmd_alloc(mm, pud, addr);
3870                 }
3871         }
3872         BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
3873
3874         return pte;
3875 }
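/*
 * Note: in the generic version above, a PUD_SIZE hugepage (1GB on x86_64)
 * is mapped directly by the pud entry, so the pud itself is returned as the
 * "pte"; PMD_SIZE hugepages go through pmd_alloc(), optionally sharing the
 * pmd page via huge_pmd_share().
 */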
3876
3877 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
3878 {
3879         pgd_t *pgd;
3880         pud_t *pud;
3881         pmd_t *pmd = NULL;
3882
3883         pgd = pgd_offset(mm, addr);
3884         if (pgd_present(*pgd)) {
3885                 pud = pud_offset(pgd, addr);
3886                 if (pud_present(*pud)) {
3887                         if (pud_huge(*pud))
3888                                 return (pte_t *)pud;
3889                         pmd = pmd_offset(pud, addr);
3890                 }
3891         }
3892         return (pte_t *) pmd;
3893 }
3894
3895 #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
3896
3897 /*
3898  * These functions are overridable if your architecture needs its own
3899  * behavior.
3900  */
3901 struct page * __weak
3902 follow_huge_addr(struct mm_struct *mm, unsigned long address,
3903                               int write)
3904 {
3905         return ERR_PTR(-EINVAL);
3906 }
3907
3908 struct page * __weak
3909 follow_huge_pmd(struct mm_struct *mm, unsigned long address,
3910                 pmd_t *pmd, int flags)
3911 {
3912         struct page *page = NULL;
3913         spinlock_t *ptl;
3914 retry:
3915         ptl = pmd_lockptr(mm, pmd);
3916         spin_lock(ptl);
3917         /*
3918          * make sure that the address range covered by this pmd is not
3919          * unmapped by other threads.
3920          */
3921         if (!pmd_huge(*pmd))
3922                 goto out;
3923         if (pmd_present(*pmd)) {
3924                 page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
3925                 if (flags & FOLL_GET)
3926                         get_page(page);
3927         } else {
3928                 if (is_hugetlb_entry_migration(huge_ptep_get((pte_t *)pmd))) {
3929                         spin_unlock(ptl);
3930                         __migration_entry_wait(mm, (pte_t *)pmd, ptl);
3931                         goto retry;
3932                 }
3933                 /*
3934                  * hwpoisoned entry is treated as no_page_table in
3935                  * follow_page_mask().
3936                  */
3937         }
3938 out:
3939         spin_unlock(ptl);
3940         return page;
3941 }
3942
3943 struct page * __weak
3944 follow_huge_pud(struct mm_struct *mm, unsigned long address,
3945                 pud_t *pud, int flags)
3946 {
3947         if (flags & FOLL_GET)
3948                 return NULL;
3949
3950         return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
3951 }
3952
3953 #ifdef CONFIG_MEMORY_FAILURE
3954
3955 /*
3956  * This function is called from memory failure code.
3957  * Assume the caller holds page lock of the head page.
3958  */
3959 int dequeue_hwpoisoned_huge_page(struct page *hpage)
3960 {
3961         struct hstate *h = page_hstate(hpage);
3962         int nid = page_to_nid(hpage);
3963         int ret = -EBUSY;
3964
3965         spin_lock(&hugetlb_lock);
3966         /*
3967          * Just checking !page_huge_active is not enough, because that could be
3968          * an isolated/hwpoisoned hugepage (which has a >0 refcount).
3969          */
3970         if (!page_huge_active(hpage) && !page_count(hpage)) {
3971                 /*
3972                  * Hwpoisoned hugepage isn't linked to activelist or freelist,
3973                  * but dangling hpage->lru can trigger list-debug warnings
3974                  * (this happens when we call unpoison_memory() on it),
3975                  * so let it point to itself with list_del_init().
3976                  */
3977                 list_del_init(&hpage->lru);
3978                 set_page_refcounted(hpage);
3979                 h->free_huge_pages--;
3980                 h->free_huge_pages_node[nid]--;
3981                 ret = 0;
3982         }
3983         spin_unlock(&hugetlb_lock);
3984         return ret;
3985 }
3986 #endif
3987
3988 bool isolate_huge_page(struct page *page, struct list_head *list)
3989 {
3990         bool ret = true;
3991
3992         VM_BUG_ON_PAGE(!PageHead(page), page);
3993         spin_lock(&hugetlb_lock);
3994         if (!page_huge_active(page) || !get_page_unless_zero(page)) {
3995                 ret = false;
3996                 goto unlock;
3997         }
3998         clear_page_huge_active(page);
3999         list_move_tail(&page->lru, list);
4000 unlock:
4001         spin_unlock(&hugetlb_lock);
4002         return ret;
4003 }
4004
4005 void putback_active_hugepage(struct page *page)
4006 {
4007         VM_BUG_ON_PAGE(!PageHead(page), page);
4008         spin_lock(&hugetlb_lock);
4009         set_page_huge_active(page);
4010         list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
4011         spin_unlock(&hugetlb_lock);
4012         put_page(page);
4013 }