/*
 * hugetlbpage-backed filesystem.  Based on ramfs.
 *
 * Nadia Yvette Chambers, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/sched/signal.h>		/* remove ASAP */
#include <linux/falloc.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/fs_parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>
#include <linux/uio.h>

#include <linux/uaccess.h>
#include <linux/sched/mm.h>

static const struct super_operations hugetlbfs_ops;
static const struct address_space_operations hugetlbfs_aops;
const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;
enum hugetlbfs_size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT };

struct hugetlbfs_fs_context {
	struct hstate		*hstate;
	unsigned long long	max_size_opt;
	unsigned long long	min_size_opt;
	long			max_hpages;
	long			min_hpages;
	long			nr_inodes;
	enum hugetlbfs_size_type max_val_type;
	enum hugetlbfs_size_type min_val_type;
	kuid_t			uid;
	kgid_t			gid;
	umode_t			mode;
};

int sysctl_hugetlb_shm_group;
enum hugetlb_param {
	Opt_gid,
	Opt_min_size,
	Opt_mode,
	Opt_nr_inodes,
	Opt_pagesize,
	Opt_size,
	Opt_uid,
};

static const struct fs_parameter_spec hugetlb_fs_parameters[] = {
	fsparam_u32   ("gid",		Opt_gid),
	fsparam_string("min_size",	Opt_min_size),
	fsparam_u32oct("mode",		Opt_mode),
	fsparam_string("nr_inodes",	Opt_nr_inodes),
	fsparam_string("pagesize",	Opt_pagesize),
	fsparam_string("size",		Opt_size),
	fsparam_u32   ("uid",		Opt_uid),
	{}
};
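/*
 * Illustrative user-space sketch (not part of this file): exercising the
 * parameters above via mount(2).  The mount point and the option values
 * are assumptions for the example.
 */
#if 0	/* example only */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* "pagesize" selects the hstate; "size" caps pool usage. */
	if (mount("none", "/mnt/huge", "hugetlbfs", 0,
		  "uid=1000,gid=1000,mode=01777,pagesize=2M,size=64M"))
		perror("mount");
	return 0;
}
#endif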
#ifdef CONFIG_NUMA
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
	vma->vm_policy = mpol_shared_policy_lookup(&HUGETLBFS_I(inode)->policy,
							index);
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
	mpol_cond_put(vma->vm_policy);
}
#else
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
}
#endif

static void huge_pagevec_release(struct pagevec *pvec)
{
	int i;

	for (i = 0; i < pagevec_count(pvec); ++i)
		put_page(pvec->pages[i]);

	pagevec_reinit(pvec);
}
/*
 * Mask used when checking the page offset value passed in via system
 * calls.  This value will be converted to a loff_t which is signed.
 * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the
 * value.  The extra bit (- 1 in the shift value) is to take the sign
 * bit into account.
 */
#define PGOFF_LOFFT_MAX \
	(((1UL << (PAGE_SHIFT + 1)) - 1) <<  (BITS_PER_LONG - (PAGE_SHIFT + 1)))
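/*
 * Worked example (illustrative): with PAGE_SHIFT == 12 and
 * BITS_PER_LONG == 64, PGOFF_LOFFT_MAX is 0x1fff << 51.  Any vm_pgoff
 * with one of those top 13 bits set would, after the << PAGE_SHIFT
 * conversion to a byte offset, reach bit 63 and flip the sign of the
 * resulting loff_t.
 */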
static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	loff_t len, vma_len;
	int ret;
	struct hstate *h = hstate_file(file);

	/*
	 * vma address alignment (but not the pgoff alignment) has
	 * already been checked by prepare_hugepage_range.  If you add
	 * any error returns here, do so after setting VM_HUGETLB, so
	 * is_vm_hugetlb_page tests below unmap_region go the right
	 * way when do_mmap unwinds (may be important on powerpc
	 * and ia64).
	 */
	vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
	vma->vm_ops = &hugetlb_vm_ops;

	ret = seal_check_future_write(info->seals, vma);
	if (ret)
		return ret;

	/*
	 * page based offset in vm_pgoff could be sufficiently large to
	 * overflow a loff_t when converted to byte offset.  This can
	 * only happen on architectures where sizeof(loff_t) ==
	 * sizeof(unsigned long).  So, only check in those instances.
	 */
	if (sizeof(unsigned long) == sizeof(loff_t)) {
		if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
			return -EINVAL;
	}

	/* must be huge page aligned */
	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
		return -EINVAL;

	vma_len = (loff_t)(vma->vm_end - vma->vm_start);
	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	/* check for overflow */
	if (len < vma_len)
		return -EINVAL;

	inode_lock(inode);
	file_accessed(file);

	ret = -ENOMEM;
	if (!hugetlb_reserve_pages(inode,
				vma->vm_pgoff >> huge_page_order(h),
				len >> huge_page_shift(h), vma,
				vma->vm_flags))
		goto out;

	ret = 0;
	if (vma->vm_flags & VM_WRITE && inode->i_size < len)
		i_size_write(inode, len);
out:
	inode_unlock(inode);

	return ret;
}
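/*
 * Illustrative user-space counterpart (not part of this file): mapping a
 * hugetlbfs file.  The path and the 2 MB length are assumptions for the
 * sketch; the length and offset must be multiples of the huge page size
 * or the alignment checks above fail the mmap() with -EINVAL.
 */
#if 0	/* example only */
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int example(void)
{
	size_t len = 2UL << 20;		/* one 2 MB huge page, assumed */
	int fd = open("/mnt/huge/buf", O_CREAT | O_RDWR, 0600);
	void *p;

	if (fd < 0)
		return -1;
	/* a writable shared mapping grows i_size to len (see above) */
	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	close(fd);
	return p == MAP_FAILED ? -1 : 0;
}
#endif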
/*
 * Called under mmap_write_lock(mm).
 */

static unsigned long
hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = current->mm->mmap_base;
	info.high_limit = arch_get_mmap_end(addr, len, flags);
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}
static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
	info.high_limit = arch_get_mmap_base(addr, current->mm->mmap_base);
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here.  This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (unlikely(offset_in_page(addr))) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = current->mm->mmap_base;
		info.high_limit = arch_get_mmap_end(addr, len, flags);
		addr = vm_unmapped_area(&info);
	}

	return addr;
}
unsigned long
generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
				  unsigned long len, unsigned long pgoff,
				  unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct hstate *h = hstate_file(file);
	const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (mmap_end - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	/*
	 * Use mm->get_unmapped_area value as a hint to use topdown routine.
	 * If architectures have special needs, they should define their own
	 * version of hugetlb_get_unmapped_area.
	 */
	if (mm->get_unmapped_area == arch_get_unmapped_area_topdown)
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
	return hugetlb_get_unmapped_area_bottomup(file, addr, len,
			pgoff, flags);
}
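/*
 * Illustrative note: with a 2 MB hstate, ~huge_page_mask(h) is 0x1fffff,
 * so a 3 MB len fails the first check above with -EINVAL, and a
 * MAP_FIXED request must supply a 2 MB aligned address to get past
 * prepare_hugepage_range().
 */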
#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags)
{
	return generic_hugetlb_get_unmapped_area(file, addr, len, pgoff, flags);
}
#endif
static size_t
hugetlbfs_read_actor(struct page *page, unsigned long offset,
			struct iov_iter *to, unsigned long size)
{
	size_t copied = 0;
	int i, chunksize;

	/* Find which 4k chunk and offset within that chunk */
	i = offset >> PAGE_SHIFT;
	offset = offset & ~PAGE_MASK;

	while (size) {
		size_t n;

		chunksize = PAGE_SIZE;
		if (offset)
			chunksize -= offset;
		if (chunksize > size)
			chunksize = size;
		n = copy_page_to_iter(&page[i], offset, chunksize, to);
		copied += n;
		if (n != chunksize)
			return copied;
		offset = 0;
		size -= chunksize;
		i++;
	}
	return copied;
}
/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data.  It's *very* similar to generic_file_buffered_read(), but we can't
 * use that since it has PAGE_SIZE assumptions.
 */
static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct hstate *h = hstate_file(file);
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long index = iocb->ki_pos >> huge_page_shift(h);
	unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
	unsigned long end_index;
	loff_t isize;
	ssize_t retval = 0;

	while (iov_iter_count(to)) {
		struct page *page;
		size_t nr, copied;

		/* nr is the maximum number of bytes to copy from this page */
		nr = huge_page_size(h);
		isize = i_size_read(inode);
		if (!isize)
			break;
		end_index = (isize - 1) >> huge_page_shift(h);
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
			if (nr <= offset)
				break;
		}
		nr = nr - offset;

		/* Find the page */
		page = find_lock_page(mapping, index);
		if (unlikely(page == NULL)) {
			/*
			 * We have a HOLE, zero out the user-buffer for the
			 * length of the hole or request.
			 */
			copied = iov_iter_zero(nr, to);
		} else {
			unlock_page(page);

			/*
			 * We have the page, copy it to user space buffer.
			 */
			copied = hugetlbfs_read_actor(page, offset, to, nr);
			put_page(page);
		}
		offset += copied;
		retval += copied;
		if (copied != nr && iov_iter_count(to)) {
			if (!retval)
				retval = -EFAULT;
			break;
		}
		index += offset >> huge_page_shift(h);
		offset &= ~huge_page_mask(h);
	}
	iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
	return retval;
}
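/*
 * Worked example (illustrative): with 2 MB huge pages, a read at
 * ki_pos == 5 MB starts at index == 5M >> 21 == 2 and offset == 1 MB
 * into that page; after copying, ki_pos is rebuilt from (index, offset)
 * exactly as above.
 */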
static int hugetlbfs_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len,
			struct page **pagep, void **fsdata)
{
	return -EINVAL;
}

static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	BUG();
	return -EINVAL;
}
static void remove_huge_page(struct page *page)
{
	ClearPageDirty(page);
	ClearPageUptodate(page);
	delete_from_page_cache(page);
}
static void
hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end,
		      zap_flags_t zap_flags)
{
	struct vm_area_struct *vma;

	/*
	 * end == 0 indicates that the entire range after start should be
	 * unmapped.  Note, end is exclusive, whereas the interval tree takes
	 * an inclusive "last".
	 */
	vma_interval_tree_foreach(vma, root, start, end ? end - 1 : ULONG_MAX) {
		unsigned long v_offset;
		unsigned long v_end;

		/*
		 * Can the expression below overflow on 32-bit arches?
		 * No, because the interval tree returns us only those vmas
		 * which overlap the truncated area starting at pgoff,
		 * and no vma on a 32-bit arch can span beyond 4GB.
		 */
		if (vma->vm_pgoff < start)
			v_offset = (start - vma->vm_pgoff) << PAGE_SHIFT;
		else
			v_offset = 0;

		if (!end)
			v_end = vma->vm_end;
		else {
			v_end = ((end - vma->vm_pgoff) << PAGE_SHIFT)
							+ vma->vm_start;
			if (v_end > vma->vm_end)
				v_end = vma->vm_end;
		}

		unmap_hugepage_range(vma, vma->vm_start + v_offset, v_end,
				     NULL, zap_flags);
	}
}
/*
 * remove_inode_hugepages handles two distinct cases: truncation and hole
 * punch.  There are subtle differences in operation for each case.
 *
 * truncation is indicated by end of range being LLONG_MAX
 *	In this case, we first scan the range and release found pages.
 *	After releasing pages, hugetlb_unreserve_pages cleans up region/reserve
 *	maps and global counts.  Page faults cannot race with truncation
 *	in this routine.  hugetlb_no_page() holds i_mmap_rwsem and prevents
 *	page faults in the truncated range by checking i_size.  i_size is
 *	modified while holding i_mmap_rwsem.
 * hole punch is indicated if end is not LLONG_MAX
 *	In the hole punch case we scan the range and release found pages.
 *	Only when releasing a page is the associated region/reserve map
 *	deleted.  The region/reserve maps for ranges without associated
 *	pages are not modified.  Page faults can race with hole punch.
 *	This is indicated if we find a mapped page.
 * Note: If the passed end of range value is beyond the end of file, but
 * not LLONG_MAX this routine still performs a hole punch operation.
 */
static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
				   loff_t lend)
{
	struct hstate *h = hstate_inode(inode);
	struct address_space *mapping = &inode->i_data;
	const pgoff_t start = lstart >> huge_page_shift(h);
	const pgoff_t end = lend >> huge_page_shift(h);
	struct pagevec pvec;
	pgoff_t next, index;
	int i, freed = 0;
	bool truncate_op = (lend == LLONG_MAX);

	pagevec_init(&pvec);
	next = start;
	while (next < end) {
		/*
		 * When no more pages are found, we are done.
		 */
		if (!pagevec_lookup_range(&pvec, mapping, &next, end - 1))
			break;

		for (i = 0; i < pagevec_count(&pvec); ++i) {
			struct page *page = pvec.pages[i];
			u32 hash = 0;

			index = page->index;
			if (!truncate_op) {
				/*
				 * Only need to hold the fault mutex in the
				 * hole punch case.  This prevents races with
				 * page faults.  Races are not possible in the
				 * case of truncation.
				 */
				hash = hugetlb_fault_mutex_hash(mapping, index);
				mutex_lock(&hugetlb_fault_mutex_table[hash]);
			}

			/*
			 * If page is mapped, it was faulted in after being
			 * unmapped in caller.  Unmap (again) now after taking
			 * the fault mutex.  The mutex will prevent faults
			 * until we finish removing the page.
			 *
			 * This race can only happen in the hole punch case.
			 * Getting here in a truncate operation is a bug.
			 */
			if (unlikely(page_mapped(page))) {
				BUG_ON(truncate_op);

				mutex_unlock(&hugetlb_fault_mutex_table[hash]);
				i_mmap_lock_write(mapping);
				mutex_lock(&hugetlb_fault_mutex_table[hash]);
				hugetlb_vmdelete_list(&mapping->i_mmap,
					index * pages_per_huge_page(h),
					(index + 1) * pages_per_huge_page(h),
					ZAP_FLAG_DROP_MARKER);
				i_mmap_unlock_write(mapping);
			}

			lock_page(page);
			/*
			 * We must free the huge page and remove from page
			 * cache (remove_huge_page) BEFORE removing the
			 * region/reserve map (hugetlb_unreserve_pages).  In
			 * rare out of memory conditions, removal of the
			 * region/reserve map could fail.  Correspondingly,
			 * the subpool and global reserve usage count can need
			 * to be adjusted.
			 */
			VM_BUG_ON(HPageRestoreReserve(page));
			remove_huge_page(page);
			freed++;
			if (!truncate_op) {
				if (unlikely(hugetlb_unreserve_pages(inode,
							index, index + 1, 1)))
					hugetlb_fix_reserve_counts(inode);
			}

			unlock_page(page);
			if (!truncate_op)
				mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		}
		huge_pagevec_release(&pvec);
		cond_resched();
	}

	if (truncate_op)
		(void)hugetlb_unreserve_pages(inode, start, LONG_MAX, freed);
}
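/*
 * Illustrative callers (both appear later in this file): truncation uses
 * remove_inode_hugepages(inode, newsize, LLONG_MAX), while hole punch
 * uses remove_inode_hugepages(inode, hole_start, hole_end) with bounds
 * already huge page aligned.
 */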
static void hugetlbfs_evict_inode(struct inode *inode)
{
	struct resv_map *resv_map;

	remove_inode_hugepages(inode, 0, LLONG_MAX);

	/*
	 * Get the resv_map from the address space embedded in the inode.
	 * This is the address space which points to any resv_map allocated
	 * at inode creation time.  If this is a device special inode,
	 * i_mapping may not point to the original address space.
	 */
	resv_map = (struct resv_map *)(&inode->i_data)->private_data;
	/* Only regular and link inodes have associated reserve maps */
	if (resv_map)
		resv_map_release(&resv_map->refs);
	clear_inode(inode);
}
static void hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
	pgoff_t pgoff;
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);

	BUG_ON(offset & ~huge_page_mask(h));
	pgoff = offset >> PAGE_SHIFT;

	i_mmap_lock_write(mapping);
	i_size_write(inode, offset);
	if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
		hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0,
				      ZAP_FLAG_DROP_MARKER);
	i_mmap_unlock_write(mapping);
	remove_inode_hugepages(inode, offset, LLONG_MAX);
}
static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	struct hstate *h = hstate_inode(inode);
	loff_t hpage_size = huge_page_size(h);
	loff_t hole_start, hole_end;

	/*
	 * For hole punch round up the beginning offset of the hole and
	 * round down the end.
	 */
	hole_start = round_up(offset, hpage_size);
	hole_end = round_down(offset + len, hpage_size);

	if (hole_end > hole_start) {
		struct address_space *mapping = inode->i_mapping;
		struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

		inode_lock(inode);

		/* protected by i_rwsem */
		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
			inode_unlock(inode);
			return -EPERM;
		}

		i_mmap_lock_write(mapping);
		if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
			hugetlb_vmdelete_list(&mapping->i_mmap,
						hole_start >> PAGE_SHIFT,
						hole_end >> PAGE_SHIFT, 0);
		i_mmap_unlock_write(mapping);
		remove_inode_hugepages(inode, hole_start, hole_end);
		inode_unlock(inode);
	}

	return 0;
}
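/*
 * Illustrative user-space sketch (not part of this file): punching a hole
 * in a hugetlbfs file.  The offsets are assumptions; note that the range
 * is rounded inward above, so a sub-huge-page range punches nothing.
 */
#if 0	/* example only */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>

int punch(int fd)
{
	/* free the second 2 MB huge page of the file, size unchanged */
	return fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			 2UL << 20, 2UL << 20);
}
#endif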
static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
				loff_t len)
{
	struct inode *inode = file_inode(file);
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);
	struct vm_area_struct pseudo_vma;
	struct mm_struct *mm = current->mm;
	loff_t hpage_size = huge_page_size(h);
	unsigned long hpage_shift = huge_page_shift(h);
	pgoff_t start, index, end;
	int error;
	u32 hash;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		return hugetlbfs_punch_hole(inode, offset, len);

	/*
	 * Default preallocate case.
	 * For this range, start is rounded down and end is rounded up
	 * as well as being converted to page offsets.
	 */
	start = offset >> hpage_shift;
	end = (offset + len + hpage_size - 1) >> hpage_shift;

	inode_lock(inode);

	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
	error = inode_newsize_ok(inode, offset + len);
	if (error)
		goto out;

	if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
		error = -EPERM;
		goto out;
	}

	/*
	 * Initialize a pseudo vma as this is required by the huge page
	 * allocation routines.  If NUMA is configured, use page index
	 * as input to create an allocation policy.
	 */
	vma_init(&pseudo_vma, mm);
	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pseudo_vma.vm_file = file;

	for (index = start; index < end; index++) {
		/*
		 * This is supposed to be the vaddr where the page is being
		 * faulted in, but we have no vaddr here.
		 */
		struct page *page;
		unsigned long addr;

		cond_resched();

		/*
		 * fallocate(2) manpage permits EINTR; we may have been
		 * interrupted because we are using up too much memory.
		 */
		if (signal_pending(current)) {
			error = -EINTR;
			break;
		}

		/* Set numa allocation policy based on index */
		hugetlb_set_vma_policy(&pseudo_vma, inode, index);

		/* addr is the offset within the file (zero based) */
		addr = index * hpage_size;

		/*
		 * The fault mutex taken here protects against the fault
		 * path and hole punch.  The inode_lock previously taken
		 * protects against truncation.
		 */
		hash = hugetlb_fault_mutex_hash(mapping, index);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		/* See if already present in mapping to avoid alloc/free */
		page = find_get_page(mapping, index);
		if (page) {
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			hugetlb_drop_vma_policy(&pseudo_vma);
			continue;
		}

		/*
		 * Allocate page without setting the avoid_reserve argument.
		 * There certainly are no reserves associated with the
		 * pseudo_vma.  However, there could be shared mappings with
		 * reserves for the file at the inode level.  If we fallocate
		 * pages in these areas, we need to consume the reserves
		 * to keep reservation accounting consistent.
		 */
		page = alloc_huge_page(&pseudo_vma, addr, 0);
		hugetlb_drop_vma_policy(&pseudo_vma);
		if (IS_ERR(page)) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			error = PTR_ERR(page);
			goto out;
		}
		clear_huge_page(page, addr, pages_per_huge_page(h));
		__SetPageUptodate(page);
		error = huge_add_to_page_cache(page, mapping, index);
		if (unlikely(error)) {
			restore_reserve_on_error(h, &pseudo_vma, addr, page);
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out;
		}

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);

		SetHPageMigratable(page);
		/*
		 * unlock_page because the page was locked by
		 * add_to_page_cache(); put_page to drop the reference
		 * from alloc_huge_page().
		 */
		unlock_page(page);
		put_page(page);
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
		i_size_write(inode, offset + len);
	inode->i_ctime = current_time(inode);
out:
	inode_unlock(inode);
	return error;
}
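/*
 * Illustrative user-space sketch (not part of this file): preallocating
 * huge pages ahead of first touch.  The 8 MB length is an assumption.
 */
#if 0	/* example only */
#define _GNU_SOURCE
#include <fcntl.h>

int prealloc(int fd)
{
	/* reserve and zero four 2 MB huge pages, extending i_size */
	return fallocate(fd, 0, 0, 8UL << 20);
}
#endif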
static int hugetlbfs_setattr(struct user_namespace *mnt_userns,
			     struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct hstate *h = hstate_inode(inode);
	int error;
	unsigned int ia_valid = attr->ia_valid;
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

	error = setattr_prepare(&init_user_ns, dentry, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		if (newsize & ~huge_page_mask(h))
			return -EINVAL;
		/* protected by i_rwsem */
		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
			return -EPERM;
		hugetlb_vmtruncate(inode, newsize);
	}

	setattr_copy(&init_user_ns, inode, attr);
	mark_inode_dirty(inode);
	return 0;
}
static struct inode *hugetlbfs_get_root(struct super_block *sb,
					struct hugetlbfs_fs_context *ctx)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode->i_mode = S_IFDIR | ctx->mode;
		inode->i_uid = ctx->uid;
		inode->i_gid = ctx->gid;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_op = &hugetlbfs_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
		/* directory inodes start off with i_nlink == 2 (for "." entry) */
		inc_nlink(inode);
		lockdep_annotate_inode_mutex_key(inode);
	}
	return inode;
}
/*
 * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never
 * be taken from reclaim -- unlike regular filesystems.  This needs an
 * annotation because huge_pmd_share() does an allocation under hugetlb's
 * i_mmap_rwsem.
 */
static struct lock_class_key hugetlbfs_i_mmap_rwsem_key;
static struct inode *hugetlbfs_get_inode(struct super_block *sb,
					struct inode *dir,
					umode_t mode, dev_t dev)
{
	struct inode *inode;
	struct resv_map *resv_map = NULL;

	/*
	 * Reserve maps are only needed for inodes that can have associated
	 * page allocations.
	 */
	if (S_ISREG(mode) || S_ISLNK(mode)) {
		resv_map = resv_map_alloc();
		if (!resv_map)
			return NULL;
	}

	inode = new_inode(sb);
	if (inode) {
		struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

		inode->i_ino = get_next_ino();
		inode_init_owner(&init_user_ns, inode, dir, mode);
		lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
				&hugetlbfs_i_mmap_rwsem_key);
		inode->i_mapping->a_ops = &hugetlbfs_aops;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_mapping->private_data = resv_map;
		info->seals = F_SEAL_SEAL;
		switch (mode & S_IFMT) {
		default:
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &hugetlbfs_inode_operations;
			inode->i_fop = &hugetlbfs_file_operations;
			break;
		case S_IFDIR:
			inode->i_op = &hugetlbfs_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;

			/* directory inodes start off with i_nlink == 2 (for "." entry) */
			inc_nlink(inode);
			break;
		case S_IFLNK:
			inode->i_op = &page_symlink_inode_operations;
			inode_nohighmem(inode);
			break;
		}
		lockdep_annotate_inode_mutex_key(inode);
	} else {
		if (resv_map)
			kref_put(&resv_map->refs, resv_map_release);
	}

	return inode;
}
/*
 * File creation.  Allocate an inode, and we're done.
 */
static int do_hugetlbfs_mknod(struct inode *dir,
			struct dentry *dentry,
			umode_t mode, dev_t dev,
			bool tmpfile)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
	if (inode) {
		dir->i_ctime = dir->i_mtime = current_time(dir);
		if (tmpfile) {
			d_tmpfile(dentry, inode);
		} else {
			d_instantiate(dentry, inode);
			dget(dentry);	/* Extra count - pin the dentry in core */
		}
		error = 0;
	}
	return error;
}

static int hugetlbfs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
			   struct dentry *dentry, umode_t mode, dev_t dev)
{
	return do_hugetlbfs_mknod(dir, dentry, mode, dev, false);
}

static int hugetlbfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
			   struct dentry *dentry, umode_t mode)
{
	int retval = hugetlbfs_mknod(&init_user_ns, dir, dentry,
				     mode | S_IFDIR, 0);
	if (!retval)
		inc_nlink(dir);
	return retval;
}

static int hugetlbfs_create(struct user_namespace *mnt_userns,
			    struct inode *dir, struct dentry *dentry,
			    umode_t mode, bool excl)
{
	return hugetlbfs_mknod(&init_user_ns, dir, dentry, mode | S_IFREG, 0);
}

static int hugetlbfs_tmpfile(struct user_namespace *mnt_userns,
			     struct inode *dir, struct dentry *dentry,
			     umode_t mode)
{
	return do_hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0, true);
}
static int hugetlbfs_symlink(struct user_namespace *mnt_userns,
			     struct inode *dir, struct dentry *dentry,
			     const char *symname)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
	if (inode) {
		int l = strlen(symname)+1;
		error = page_symlink(inode, symname, l);
		if (!error) {
			d_instantiate(dentry, inode);
			dget(dentry);
		} else
			iput(inode);
	}
	dir->i_ctime = dir->i_mtime = current_time(dir);

	return error;
}
static int hugetlbfs_migrate_page(struct address_space *mapping,
				struct page *newpage, struct page *page,
				enum migrate_mode mode)
{
	int rc;

	rc = migrate_huge_page_move_mapping(mapping, newpage, page);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	if (hugetlb_page_subpool(page)) {
		hugetlb_set_page_subpool(newpage, hugetlb_page_subpool(page));
		hugetlb_set_page_subpool(page, NULL);
	}

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}
static int hugetlbfs_error_remove_page(struct address_space *mapping,
				struct page *page)
{
	struct inode *inode = mapping->host;
	pgoff_t index = page->index;

	remove_huge_page(page);
	if (unlikely(hugetlb_unreserve_pages(inode, index, index + 1, 1)))
		hugetlb_fix_reserve_counts(inode);

	return 0;
}
/*
 * Display the mount options in /proc/mounts.
 */
static int hugetlbfs_show_options(struct seq_file *m, struct dentry *root)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(root->d_sb);
	struct hugepage_subpool *spool = sbinfo->spool;
	unsigned long hpage_size = huge_page_size(sbinfo->hstate);
	unsigned hpage_shift = huge_page_shift(sbinfo->hstate);
	char mod;

	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
		seq_printf(m, ",uid=%u",
			   from_kuid_munged(&init_user_ns, sbinfo->uid));
	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
		seq_printf(m, ",gid=%u",
			   from_kgid_munged(&init_user_ns, sbinfo->gid));
	if (sbinfo->mode != 0755)
		seq_printf(m, ",mode=%o", sbinfo->mode);
	if (sbinfo->max_inodes != -1)
		seq_printf(m, ",nr_inodes=%lu", sbinfo->max_inodes);

	hpage_size /= 1024;
	mod = 'K';
	if (hpage_size >= 1024) {
		mod = 'M';
		hpage_size /= 1024;
	}
	seq_printf(m, ",pagesize=%lu%c", hpage_size, mod);
	if (spool) {
		if (spool->max_hpages != -1)
			seq_printf(m, ",size=%llu",
				   (unsigned long long)spool->max_hpages << hpage_shift);
		if (spool->min_hpages != -1)
			seq_printf(m, ",min_size=%llu",
				   (unsigned long long)spool->min_hpages << hpage_shift);
	}
	return 0;
}
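/*
 * Example /proc/mounts line produced by the above (values illustrative;
 * uid/gid/mode only appear when they differ from the defaults, and the
 * size options are printed back in bytes):
 *
 *   none /mnt/huge hugetlbfs rw,relatime,pagesize=2M,size=67108864 0 0
 */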
static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
	struct hstate *h = hstate_inode(d_inode(dentry));

	buf->f_type = HUGETLBFS_MAGIC;
	buf->f_bsize = huge_page_size(h);
	if (sbinfo) {
		spin_lock(&sbinfo->stat_lock);
		/* If no limits set, just report 0 for max/free/used
		 * blocks, like simple_statfs() */
		if (sbinfo->spool) {
			long free_pages;

			spin_lock_irq(&sbinfo->spool->lock);
			buf->f_blocks = sbinfo->spool->max_hpages;
			free_pages = sbinfo->spool->max_hpages
				- sbinfo->spool->used_hpages;
			buf->f_bavail = buf->f_bfree = free_pages;
			spin_unlock_irq(&sbinfo->spool->lock);
			buf->f_files = sbinfo->max_inodes;
			buf->f_ffree = sbinfo->free_inodes;
		}
		spin_unlock(&sbinfo->stat_lock);
	}
	buf->f_namelen = NAME_MAX;
	return 0;
}
static void hugetlbfs_put_super(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);

	if (sbi) {
		sb->s_fs_info = NULL;

		if (sbi->spool)
			hugepage_put_subpool(sbi->spool);

		kfree(sbi);
	}
}
static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		if (unlikely(!sbinfo->free_inodes)) {
			spin_unlock(&sbinfo->stat_lock);
			return 0;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	return 1;
}

static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}
static struct kmem_cache *hugetlbfs_inode_cachep;

static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
	struct hugetlbfs_inode_info *p;

	if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
		return NULL;
	p = alloc_inode_sb(sb, hugetlbfs_inode_cachep, GFP_KERNEL);
	if (unlikely(!p)) {
		hugetlbfs_inc_free_inodes(sbinfo);
		return NULL;
	}

	/*
	 * Any time after allocation, hugetlbfs_destroy_inode can be called
	 * for the inode.  mpol_free_shared_policy is unconditionally called
	 * as part of hugetlbfs_destroy_inode.  So, initialize policy here
	 * in case of a quick call to destroy.
	 *
	 * Note that the policy is initialized even if we are creating a
	 * private inode.  This simplifies hugetlbfs_destroy_inode.
	 */
	mpol_shared_policy_init(&p->policy, NULL);

	return &p->vfs_inode;
}
static void hugetlbfs_free_inode(struct inode *inode)
{
	kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
}

static void hugetlbfs_destroy_inode(struct inode *inode)
{
	hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
	mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy);
}
static const struct address_space_operations hugetlbfs_aops = {
	.write_begin		= hugetlbfs_write_begin,
	.write_end		= hugetlbfs_write_end,
	.dirty_folio		= noop_dirty_folio,
	.migratepage		= hugetlbfs_migrate_page,
	.error_remove_page	= hugetlbfs_error_remove_page,
};
static void init_once(void *foo)
{
	struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;

	inode_init_once(&ei->vfs_inode);
}
const struct file_operations hugetlbfs_file_operations = {
	.read_iter		= hugetlbfs_read_iter,
	.mmap			= hugetlbfs_file_mmap,
	.fsync			= noop_fsync,
	.get_unmapped_area	= hugetlb_get_unmapped_area,
	.llseek			= default_llseek,
	.fallocate		= hugetlbfs_fallocate,
};
static const struct inode_operations hugetlbfs_dir_inode_operations = {
	.create		= hugetlbfs_create,
	.lookup		= simple_lookup,
	.link		= simple_link,
	.unlink		= simple_unlink,
	.symlink	= hugetlbfs_symlink,
	.mkdir		= hugetlbfs_mkdir,
	.rmdir		= simple_rmdir,
	.mknod		= hugetlbfs_mknod,
	.rename		= simple_rename,
	.setattr	= hugetlbfs_setattr,
	.tmpfile	= hugetlbfs_tmpfile,
};
static const struct inode_operations hugetlbfs_inode_operations = {
	.setattr	= hugetlbfs_setattr,
};
static const struct super_operations hugetlbfs_ops = {
	.alloc_inode	= hugetlbfs_alloc_inode,
	.free_inode	= hugetlbfs_free_inode,
	.destroy_inode	= hugetlbfs_destroy_inode,
	.evict_inode	= hugetlbfs_evict_inode,
	.statfs		= hugetlbfs_statfs,
	.put_super	= hugetlbfs_put_super,
	.show_options	= hugetlbfs_show_options,
};
/*
 * Convert size option passed from command line to number of huge pages
 * in the pool specified by hstate.  Size option could be in bytes
 * (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT).
 */
static long
hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt,
			 enum hugetlbfs_size_type val_type)
{
	if (val_type == NO_SIZE)
		return -1;

	if (val_type == SIZE_PERCENT) {
		size_opt <<= huge_page_shift(h);
		size_opt *= h->max_huge_pages;
		do_div(size_opt, 100);
	}

	size_opt >>= huge_page_shift(h);
	return size_opt;
}
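/*
 * Worked example (illustrative): "size=50%" with 2 MB huge pages and a
 * 1024 page pool gives (50 << 21) * 1024 / 100 >> 21 == 512 huge pages;
 * "size=64M" (SIZE_STD) gives 64M >> 21 == 32 huge pages.
 */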
/*
 * Parse one mount parameter.
 */
static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct hugetlbfs_fs_context *ctx = fc->fs_private;
	struct fs_parse_result result;
	char *rest;
	unsigned long ps;
	int opt;

	opt = fs_parse(fc, hugetlb_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_uid:
		ctx->uid = make_kuid(current_user_ns(), result.uint_32);
		if (!uid_valid(ctx->uid))
			goto bad_val;
		return 0;

	case Opt_gid:
		ctx->gid = make_kgid(current_user_ns(), result.uint_32);
		if (!gid_valid(ctx->gid))
			goto bad_val;
		return 0;

	case Opt_mode:
		ctx->mode = result.uint_32 & 01777U;
		return 0;

	case Opt_size:
		/* memparse() will accept a K/M/G without a digit */
		if (!isdigit(param->string[0]))
			goto bad_val;
		ctx->max_size_opt = memparse(param->string, &rest);
		ctx->max_val_type = SIZE_STD;
		if (*rest == '%')
			ctx->max_val_type = SIZE_PERCENT;
		return 0;

	case Opt_nr_inodes:
		/* memparse() will accept a K/M/G without a digit */
		if (!isdigit(param->string[0]))
			goto bad_val;
		ctx->nr_inodes = memparse(param->string, &rest);
		return 0;

	case Opt_pagesize:
		ps = memparse(param->string, &rest);
		ctx->hstate = size_to_hstate(ps);
		if (!ctx->hstate) {
			pr_err("Unsupported page size %lu MB\n", ps >> 20);
			return -EINVAL;
		}
		return 0;

	case Opt_min_size:
		/* memparse() will accept a K/M/G without a digit */
		if (!isdigit(param->string[0]))
			goto bad_val;
		ctx->min_size_opt = memparse(param->string, &rest);
		ctx->min_val_type = SIZE_STD;
		if (*rest == '%')
			ctx->min_val_type = SIZE_PERCENT;
		return 0;

	default:
		return -EINVAL;
	}

bad_val:
	return invalfc(fc, "Bad value '%s' for mount option '%s'\n",
		      param->string, param->key);
}
/*
 * Validate the parsed options.
 */
static int hugetlbfs_validate(struct fs_context *fc)
{
	struct hugetlbfs_fs_context *ctx = fc->fs_private;

	/*
	 * Use huge page pool size (in hstate) to convert the size
	 * options to number of huge pages.  If NO_SIZE, -1 is returned.
	 */
	ctx->max_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
						   ctx->max_size_opt,
						   ctx->max_val_type);
	ctx->min_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
						   ctx->min_size_opt,
						   ctx->min_val_type);

	/*
	 * If max_size was specified, then min_size must be smaller.
	 */
	if (ctx->max_val_type > NO_SIZE &&
	    ctx->min_hpages > ctx->max_hpages) {
		pr_err("Minimum size can not be greater than maximum size\n");
		return -EINVAL;
	}

	return 0;
}
static int
hugetlbfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct hugetlbfs_fs_context *ctx = fc->fs_private;
	struct hugetlbfs_sb_info *sbinfo;

	sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;
	sb->s_fs_info = sbinfo;
	spin_lock_init(&sbinfo->stat_lock);
	sbinfo->hstate = ctx->hstate;
	sbinfo->max_inodes = ctx->nr_inodes;
	sbinfo->free_inodes = ctx->nr_inodes;
	sbinfo->spool = NULL;
	sbinfo->uid = ctx->uid;
	sbinfo->gid = ctx->gid;
	sbinfo->mode = ctx->mode;

	/*
	 * Allocate and initialize subpool if maximum or minimum size is
	 * specified.  Any needed reservations (for minimum size) are taken
	 * when the subpool is created.
	 */
	if (ctx->max_hpages != -1 || ctx->min_hpages != -1) {
		sbinfo->spool = hugepage_new_subpool(ctx->hstate,
						     ctx->max_hpages,
						     ctx->min_hpages);
		if (!sbinfo->spool)
			goto out_free;
	}
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = huge_page_size(ctx->hstate);
	sb->s_blocksize_bits = huge_page_shift(ctx->hstate);
	sb->s_magic = HUGETLBFS_MAGIC;
	sb->s_op = &hugetlbfs_ops;
	sb->s_time_gran = 1;

	/*
	 * Due to the special and limited functionality of hugetlbfs, it does
	 * not work well as a stacking filesystem.
	 */
	sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH;
	sb->s_root = d_make_root(hugetlbfs_get_root(sb, ctx));
	if (!sb->s_root)
		goto out_free;
	return 0;
out_free:
	kfree(sbinfo->spool);
	kfree(sbinfo);
	return -ENOMEM;
}
static int hugetlbfs_get_tree(struct fs_context *fc)
{
	int err = hugetlbfs_validate(fc);
	if (err)
		return err;
	return get_tree_nodev(fc, hugetlbfs_fill_super);
}

static void hugetlbfs_fs_context_free(struct fs_context *fc)
{
	kfree(fc->fs_private);
}

static const struct fs_context_operations hugetlbfs_fs_context_ops = {
	.free		= hugetlbfs_fs_context_free,
	.parse_param	= hugetlbfs_parse_param,
	.get_tree	= hugetlbfs_get_tree,
};
static int hugetlbfs_init_fs_context(struct fs_context *fc)
{
	struct hugetlbfs_fs_context *ctx;

	ctx = kzalloc(sizeof(struct hugetlbfs_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->max_hpages	= -1;		/* No limit on size by default */
	ctx->nr_inodes	= -1;		/* No limit on number of inodes by default */
	ctx->uid	= current_fsuid();
	ctx->gid	= current_fsgid();
	ctx->mode	= 0755;
	ctx->hstate	= &default_hstate;
	ctx->min_hpages	= -1;		/* No default minimum size */
	ctx->max_val_type = NO_SIZE;
	ctx->min_val_type = NO_SIZE;
	fc->fs_private = ctx;
	fc->ops	= &hugetlbfs_fs_context_ops;
	return 0;
}
static struct file_system_type hugetlbfs_fs_type = {
	.name			= "hugetlbfs",
	.init_fs_context	= hugetlbfs_init_fs_context,
	.parameters		= hugetlb_fs_parameters,
	.kill_sb		= kill_litter_super,
};

static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
static int can_do_hugetlb_shm(void)
{
	kgid_t shm_group;

	shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
	return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
}

static int get_hstate_idx(int page_size_log)
{
	struct hstate *h = hstate_sizelog(page_size_log);

	if (!h)
		return -1;
	return hstate_index(h);
}
/*
 * Note that the size should be aligned to the proper hugepage size by the
 * caller; otherwise hugetlb_reserve_pages reserves one fewer hugepage than
 * intended.
 */
struct file *hugetlb_file_setup(const char *name, size_t size,
				vm_flags_t acctflag, int creat_flags,
				int page_size_log)
{
	struct inode *inode;
	struct vfsmount *mnt;
	int hstate_idx;
	struct file *file;

	hstate_idx = get_hstate_idx(page_size_log);
	if (hstate_idx < 0)
		return ERR_PTR(-ENODEV);

	mnt = hugetlbfs_vfsmount[hstate_idx];
	if (!mnt)
		return ERR_PTR(-ENOENT);

	if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
		struct ucounts *ucounts = current_ucounts();

		if (user_shm_lock(size, ucounts)) {
			pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is obsolete\n",
				current->comm, current->pid);
			user_shm_unlock(size, ucounts);
		}
		return ERR_PTR(-EPERM);
	}

	file = ERR_PTR(-ENOSPC);
	inode = hugetlbfs_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0);
	if (!inode)
		goto out;
	if (creat_flags == HUGETLB_SHMFS_INODE)
		inode->i_flags |= S_PRIVATE;

	inode->i_size = size;
	clear_nlink(inode);

	if (!hugetlb_reserve_pages(inode, 0,
			size >> huge_page_shift(hstate_inode(inode)), NULL,
			acctflag))
		file = ERR_PTR(-ENOMEM);
	else
		file = alloc_file_pseudo(inode, mnt, name, O_RDWR,
					&hugetlbfs_file_operations);
	if (!IS_ERR(file))
		return file;

	iput(inode);
out:
	return file;
}
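/*
 * Illustrative in-kernel caller sketch (not part of this file): this is
 * the path SysV shm takes for SHM_HUGETLB segments.  The name, size and
 * flags here are assumptions for the example; page_size_log == 0 selects
 * the default huge page size.
 */
#if 0	/* example only */
static struct file *example_shm_file(void)
{
	/* 16 MB segment backed by the default huge page size */
	return hugetlb_file_setup("SYSV_example", 16UL << 20, 0,
				  HUGETLB_SHMFS_INODE, 0);
}
#endif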
static struct vfsmount *__init mount_one_hugetlbfs(struct hstate *h)
{
	struct fs_context *fc;
	struct vfsmount *mnt;

	fc = fs_context_for_mount(&hugetlbfs_fs_type, SB_KERNMOUNT);
	if (IS_ERR(fc)) {
		mnt = ERR_CAST(fc);
	} else {
		struct hugetlbfs_fs_context *ctx = fc->fs_private;
		ctx->hstate = h;
		mnt = fc_mount(fc);
		put_fs_context(fc);
	}
	if (IS_ERR(mnt))
		pr_err("Cannot mount internal hugetlbfs for page size %luK",
		       huge_page_size(h) >> 10);
	return mnt;
}
static int __init init_hugetlbfs_fs(void)
{
	struct vfsmount *mnt;
	struct hstate *h;
	int error;
	int i;

	if (!hugepages_supported()) {
		pr_info("disabling because there are no supported hugepage sizes\n");
		return -ENOTSUPP;
	}

	error = -ENOMEM;
	hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
					sizeof(struct hugetlbfs_inode_info),
					0, SLAB_ACCOUNT, init_once);
	if (hugetlbfs_inode_cachep == NULL)
		goto out;

	error = register_filesystem(&hugetlbfs_fs_type);
	if (error)
		goto out_free;

	/* default hstate mount is required */
	mnt = mount_one_hugetlbfs(&default_hstate);
	if (IS_ERR(mnt)) {
		error = PTR_ERR(mnt);
		goto out_unreg;
	}
	hugetlbfs_vfsmount[default_hstate_idx] = mnt;

	/* other hstates are optional */
	i = 0;
	for_each_hstate(h) {
		if (i == default_hstate_idx) {
			i++;
			continue;
		}

		mnt = mount_one_hugetlbfs(h);
		if (IS_ERR(mnt))
			hugetlbfs_vfsmount[i] = NULL;
		else
			hugetlbfs_vfsmount[i] = mnt;
		i++;
	}

	return 0;

 out_unreg:
	(void)unregister_filesystem(&hugetlbfs_fs_type);
 out_free:
	kmem_cache_destroy(hugetlbfs_inode_cachep);
 out:
	return error;
}
fs_initcall(init_hugetlbfs_fs)