1 /*
2  * Resizable virtual memory filesystem for Linux.
3  *
4  * Copyright (C) 2000 Linus Torvalds.
5  *               2000 Transmeta Corp.
6  *               2000-2001 Christoph Rohland
7  *               2000-2001 SAP AG
8  *               2002 Red Hat Inc.
9  * Copyright (C) 2002-2011 Hugh Dickins.
10  * Copyright (C) 2011 Google Inc.
11  * Copyright (C) 2002-2005 VERITAS Software Corporation.
12  * Copyright (C) 2004 Andi Kleen, SuSE Labs
13  *
14  * Extended attribute support for tmpfs:
15  * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
16  * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
17  *
18  * tiny-shmem:
19  * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
20  *
21  * This file is released under the GPL.
22  */
23
24 #include <linux/fs.h>
25 #include <linux/init.h>
26 #include <linux/vfs.h>
27 #include <linux/mount.h>
28 #include <linux/ramfs.h>
29 #include <linux/pagemap.h>
30 #include <linux/file.h>
31 #include <linux/mm.h>
32 #include <linux/random.h>
33 #include <linux/sched/signal.h>
34 #include <linux/export.h>
35 #include <linux/swap.h>
36 #include <linux/uio.h>
37 #include <linux/khugepaged.h>
38 #include <linux/hugetlb.h>
39 #include <linux/frontswap.h>
40 #include <linux/fs_parser.h>
41
42 #include <asm/tlbflush.h> /* for arch/microblaze update_mmu_cache() */
43
44 static struct vfsmount *shm_mnt;
45
46 #ifdef CONFIG_SHMEM
47 /*
48  * This virtual memory filesystem is heavily based on the ramfs. It
49  * extends ramfs by the ability to use swap and honor resource limits
50  * which makes it a completely usable filesystem.
51  */
52
53 #include <linux/xattr.h>
54 #include <linux/exportfs.h>
55 #include <linux/posix_acl.h>
56 #include <linux/posix_acl_xattr.h>
57 #include <linux/mman.h>
58 #include <linux/string.h>
59 #include <linux/slab.h>
60 #include <linux/backing-dev.h>
61 #include <linux/shmem_fs.h>
62 #include <linux/writeback.h>
63 #include <linux/blkdev.h>
64 #include <linux/pagevec.h>
65 #include <linux/percpu_counter.h>
66 #include <linux/falloc.h>
67 #include <linux/splice.h>
68 #include <linux/security.h>
69 #include <linux/swapops.h>
70 #include <linux/mempolicy.h>
71 #include <linux/namei.h>
72 #include <linux/ctype.h>
73 #include <linux/migrate.h>
74 #include <linux/highmem.h>
75 #include <linux/seq_file.h>
76 #include <linux/magic.h>
77 #include <linux/syscalls.h>
78 #include <linux/fcntl.h>
79 #include <uapi/linux/memfd.h>
80 #include <linux/userfaultfd_k.h>
81 #include <linux/rmap.h>
82 #include <linux/uuid.h>
83
84 #include <linux/uaccess.h>
85
86 #include "internal.h"
87
88 #define BLOCKS_PER_PAGE  (PAGE_SIZE/512)
89 #define VM_ACCT(size)    (PAGE_ALIGN(size) >> PAGE_SHIFT)
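/*
 * Worked example (assuming the common 4K page size, PAGE_SHIFT == 12):
 * BLOCKS_PER_PAGE is 8, since inode->i_blocks counts 512-byte units, and
 * VM_ACCT(5000) is PAGE_ALIGN(5000) >> PAGE_SHIFT == 8192 >> 12 == 2 pages.
 */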
90
91 /* Pretend that each entry is of this size in directory's i_size */
92 #define BOGO_DIRENT_SIZE 20
93
94 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
95 #define SHORT_SYMLINK_LEN 128
96
97 /*
98  * shmem_fallocate communicates with shmem_fault or shmem_writepage via
99  * inode->i_private (with i_rwsem making sure that it has only one user at
100  * a time): we would prefer not to enlarge the shmem inode just for that.
101  */
102 struct shmem_falloc {
103         wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
104         pgoff_t start;          /* start of range currently being fallocated */
105         pgoff_t next;           /* the next page offset to be fallocated */
106         pgoff_t nr_falloced;    /* how many new pages have been fallocated */
107         pgoff_t nr_unswapped;   /* how often writepage refused to swap out */
108 };
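/*
 * Note: waitq is non-NULL only while a hole is being punched; during
 * preallocation it stays NULL, which is how shmem_fault() and
 * shmem_writepage() tell the two uses of inode->i_private apart.
 */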
109
110 struct shmem_options {
111         unsigned long long blocks;
112         unsigned long long inodes;
113         struct mempolicy *mpol;
114         kuid_t uid;
115         kgid_t gid;
116         umode_t mode;
117         bool full_inums;
118         int huge;
119         int seen;
120 #define SHMEM_SEEN_BLOCKS 1
121 #define SHMEM_SEEN_INODES 2
122 #define SHMEM_SEEN_HUGE 4
123 #define SHMEM_SEEN_INUMS 8
124 };
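/*
 * The SHMEM_SEEN_* bits record which options were given explicitly, so that
 * shmem_reconfigure() (remount) only overrides limits the user actually set.
 */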
125
126 #ifdef CONFIG_TMPFS
127 static unsigned long shmem_default_max_blocks(void)
128 {
129         return totalram_pages() / 2;
130 }
131
132 static unsigned long shmem_default_max_inodes(void)
133 {
134         unsigned long nr_pages = totalram_pages();
135
136         return min(nr_pages - totalhigh_pages(), nr_pages / 2);
137 }
138 #endif
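/*
 * For example, on a machine with 8GiB of RAM and 4K pages (and no highmem),
 * the defaults above work out to roughly 1M blocks (half of RAM) and 1M inodes.
 */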
139
140 static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
141 static int shmem_replace_page(struct page **pagep, gfp_t gfp,
142                                 struct shmem_inode_info *info, pgoff_t index);
143 static int shmem_swapin_page(struct inode *inode, pgoff_t index,
144                              struct page **pagep, enum sgp_type sgp,
145                              gfp_t gfp, struct vm_area_struct *vma,
146                              vm_fault_t *fault_type);
147 static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
148                 struct page **pagep, enum sgp_type sgp,
149                 gfp_t gfp, struct vm_area_struct *vma,
150                 struct vm_fault *vmf, vm_fault_t *fault_type);
151
152 int shmem_getpage(struct inode *inode, pgoff_t index,
153                 struct page **pagep, enum sgp_type sgp)
154 {
155         return shmem_getpage_gfp(inode, index, pagep, sgp,
156                 mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL);
157 }
158
159 static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
160 {
161         return sb->s_fs_info;
162 }
163
164 /*
165  * shmem_file_setup pre-accounts the whole fixed size of a VM object,
166  * for shared memory and for shared anonymous (/dev/zero) mappings
167  * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
168  * consistent with the pre-accounting of private mappings ...
169  */
170 static inline int shmem_acct_size(unsigned long flags, loff_t size)
171 {
172         return (flags & VM_NORESERVE) ?
173                 0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
174 }
175
176 static inline void shmem_unacct_size(unsigned long flags, loff_t size)
177 {
178         if (!(flags & VM_NORESERVE))
179                 vm_unacct_memory(VM_ACCT(size));
180 }
181
182 static inline int shmem_reacct_size(unsigned long flags,
183                 loff_t oldsize, loff_t newsize)
184 {
185         if (!(flags & VM_NORESERVE)) {
186                 if (VM_ACCT(newsize) > VM_ACCT(oldsize))
187                         return security_vm_enough_memory_mm(current->mm,
188                                         VM_ACCT(newsize) - VM_ACCT(oldsize));
189                 else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
190                         vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
191         }
192         return 0;
193 }
194
195 /*
196  * ... whereas tmpfs objects are accounted incrementally as
197  * pages are allocated, in order to allow large sparse files.
198  * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
199  * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
200  */
201 static inline int shmem_acct_block(unsigned long flags, long pages)
202 {
203         if (!(flags & VM_NORESERVE))
204                 return 0;
205
206         return security_vm_enough_memory_mm(current->mm,
207                         pages * VM_ACCT(PAGE_SIZE));
208 }
209
210 static inline void shmem_unacct_blocks(unsigned long flags, long pages)
211 {
212         if (flags & VM_NORESERVE)
213                 vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
214 }
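/*
 * In short: SysV SHM and shared anonymous objects are normally created
 * without VM_NORESERVE, so shmem_acct_size() charges the whole object up
 * front and shmem_acct_block() is a no-op; tmpfs files are created with
 * VM_NORESERVE, so they are charged page by page via shmem_acct_block()
 * instead, which is what allows large sparse files.
 */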
215
216 static inline bool shmem_inode_acct_block(struct inode *inode, long pages)
217 {
218         struct shmem_inode_info *info = SHMEM_I(inode);
219         struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
220
221         if (shmem_acct_block(info->flags, pages))
222                 return false;
223
224         if (sbinfo->max_blocks) {
225                 if (percpu_counter_compare(&sbinfo->used_blocks,
226                                            sbinfo->max_blocks - pages) > 0)
227                         goto unacct;
228                 percpu_counter_add(&sbinfo->used_blocks, pages);
229         }
230
231         return true;
232
233 unacct:
234         shmem_unacct_blocks(info->flags, pages);
235         return false;
236 }
237
238 static inline void shmem_inode_unacct_blocks(struct inode *inode, long pages)
239 {
240         struct shmem_inode_info *info = SHMEM_I(inode);
241         struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
242
243         if (sbinfo->max_blocks)
244                 percpu_counter_sub(&sbinfo->used_blocks, pages);
245         shmem_unacct_blocks(info->flags, pages);
246 }
247
248 static const struct super_operations shmem_ops;
249 const struct address_space_operations shmem_aops;
250 static const struct file_operations shmem_file_operations;
251 static const struct inode_operations shmem_inode_operations;
252 static const struct inode_operations shmem_dir_inode_operations;
253 static const struct inode_operations shmem_special_inode_operations;
254 static const struct vm_operations_struct shmem_vm_ops;
255 static struct file_system_type shmem_fs_type;
256
257 bool vma_is_shmem(struct vm_area_struct *vma)
258 {
259         return vma->vm_ops == &shmem_vm_ops;
260 }
261
262 static LIST_HEAD(shmem_swaplist);
263 static DEFINE_MUTEX(shmem_swaplist_mutex);
264
265 /*
266  * shmem_reserve_inode() performs bookkeeping to reserve a shmem inode, and
267  * produces a novel ino for the newly allocated inode.
268  *
269  * It may also be called when making a hard link to permit the space needed by
270  * each dentry. However, in that case, no new inode number is needed since that
271  * internally draws from another pool of inode numbers (currently global
272  * get_next_ino()). This case is indicated by passing NULL as inop.
273  */
274 #define SHMEM_INO_BATCH 1024
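/*
 * Roughly: each CPU refills its per-cpu counter with a batch of
 * SHMEM_INO_BATCH inode numbers from next_ino under stat_lock, then hands
 * them out locklessly until the counter hits the next batch boundary.
 */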
275 static int shmem_reserve_inode(struct super_block *sb, ino_t *inop)
276 {
277         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
278         ino_t ino;
279
280         if (!(sb->s_flags & SB_KERNMOUNT)) {
281                 spin_lock(&sbinfo->stat_lock);
282                 if (sbinfo->max_inodes) {
283                         if (!sbinfo->free_inodes) {
284                                 spin_unlock(&sbinfo->stat_lock);
285                                 return -ENOSPC;
286                         }
287                         sbinfo->free_inodes--;
288                 }
289                 if (inop) {
290                         ino = sbinfo->next_ino++;
291                         if (unlikely(is_zero_ino(ino)))
292                                 ino = sbinfo->next_ino++;
293                         if (unlikely(!sbinfo->full_inums &&
294                                      ino > UINT_MAX)) {
295                                 /*
296                                  * Emulate get_next_ino uint wraparound for
297                                  * compatibility
298                                  */
299                                 if (IS_ENABLED(CONFIG_64BIT))
300                                         pr_warn("%s: inode number overflow on device %d, consider using inode64 mount option\n",
301                                                 __func__, MINOR(sb->s_dev));
302                                 sbinfo->next_ino = 1;
303                                 ino = sbinfo->next_ino++;
304                         }
305                         *inop = ino;
306                 }
307                 spin_unlock(&sbinfo->stat_lock);
308         } else if (inop) {
309                 /*
310                  * __shmem_file_setup, one of our callers, is lock-free: it
311                  * doesn't hold stat_lock in shmem_reserve_inode since
312                  * max_inodes is always 0, and is called from potentially
313                  * unknown contexts. As such, use a per-cpu batched allocator
314                  * which doesn't require the per-sb stat_lock unless we are at
315                  * the batch boundary.
316                  *
317                  * We don't need to worry about inode{32,64} since SB_KERNMOUNT
318                  * shmem mounts are not exposed to userspace, so we don't need
319                  * to worry about things like glibc compatibility.
320                  */
321                 ino_t *next_ino;
322                 next_ino = per_cpu_ptr(sbinfo->ino_batch, get_cpu());
323                 ino = *next_ino;
324                 if (unlikely(ino % SHMEM_INO_BATCH == 0)) {
325                         spin_lock(&sbinfo->stat_lock);
326                         ino = sbinfo->next_ino;
327                         sbinfo->next_ino += SHMEM_INO_BATCH;
328                         spin_unlock(&sbinfo->stat_lock);
329                         if (unlikely(is_zero_ino(ino)))
330                                 ino++;
331                 }
332                 *inop = ino;
333                 *next_ino = ++ino;
334                 put_cpu();
335         }
336
337         return 0;
338 }
339
340 static void shmem_free_inode(struct super_block *sb)
341 {
342         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
343         if (sbinfo->max_inodes) {
344                 spin_lock(&sbinfo->stat_lock);
345                 sbinfo->free_inodes++;
346                 spin_unlock(&sbinfo->stat_lock);
347         }
348 }
349
350 /**
351  * shmem_recalc_inode - recalculate the block usage of an inode
352  * @inode: inode to recalc
353  *
354  * We have to calculate the free blocks since the mm can drop
355  * undirtied hole pages behind our back.
356  *
357  * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
358  * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
359  *
360  * It has to be called with the spinlock held.
361  */
362 static void shmem_recalc_inode(struct inode *inode)
363 {
364         struct shmem_inode_info *info = SHMEM_I(inode);
365         long freed;
366
367         freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
368         if (freed > 0) {
369                 info->alloced -= freed;
370                 inode->i_blocks -= freed * BLOCKS_PER_PAGE;
371                 shmem_inode_unacct_blocks(inode, freed);
372         }
373 }
374
375 bool shmem_charge(struct inode *inode, long pages)
376 {
377         struct shmem_inode_info *info = SHMEM_I(inode);
378         unsigned long flags;
379
380         if (!shmem_inode_acct_block(inode, pages))
381                 return false;
382
383         /* nrpages adjustment first, then shmem_recalc_inode() when balanced */
384         inode->i_mapping->nrpages += pages;
385
386         spin_lock_irqsave(&info->lock, flags);
387         info->alloced += pages;
388         inode->i_blocks += pages * BLOCKS_PER_PAGE;
389         shmem_recalc_inode(inode);
390         spin_unlock_irqrestore(&info->lock, flags);
391
392         return true;
393 }
394
395 void shmem_uncharge(struct inode *inode, long pages)
396 {
397         struct shmem_inode_info *info = SHMEM_I(inode);
398         unsigned long flags;
399
400         /* nrpages adjustment done by __delete_from_page_cache() or caller */
401
402         spin_lock_irqsave(&info->lock, flags);
403         info->alloced -= pages;
404         inode->i_blocks -= pages * BLOCKS_PER_PAGE;
405         shmem_recalc_inode(inode);
406         spin_unlock_irqrestore(&info->lock, flags);
407
408         shmem_inode_unacct_blocks(inode, pages);
409 }
410
411 /*
412  * Replace item expected in xarray by a new item, while holding xa_lock.
413  */
414 static int shmem_replace_entry(struct address_space *mapping,
415                         pgoff_t index, void *expected, void *replacement)
416 {
417         XA_STATE(xas, &mapping->i_pages, index);
418         void *item;
419
420         VM_BUG_ON(!expected);
421         VM_BUG_ON(!replacement);
422         item = xas_load(&xas);
423         if (item != expected)
424                 return -ENOENT;
425         xas_store(&xas, replacement);
426         return 0;
427 }
428
429 /*
430  * Sometimes, before we decide whether to proceed or to fail, we must check
431  * that an entry was not already brought back from swap by a racing thread.
432  *
433  * Checking page is not enough: by the time a SwapCache page is locked, it
434  * might be reused, and again be SwapCache, using the same swap as before.
435  */
436 static bool shmem_confirm_swap(struct address_space *mapping,
437                                pgoff_t index, swp_entry_t swap)
438 {
439         return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap);
440 }
441
442 /*
443  * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
444  *
445  * SHMEM_HUGE_NEVER:
446  *      disables huge pages for the mount;
447  * SHMEM_HUGE_ALWAYS:
448  *      enables huge pages for the mount;
449  * SHMEM_HUGE_WITHIN_SIZE:
450  *      only allocate huge pages if the page will be fully within i_size,
451  *      also respect fadvise()/madvise() hints;
452  * SHMEM_HUGE_ADVISE:
453  *      only allocate huge pages if requested with fadvise()/madvise();
454  */
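/*
 * Example usage (see Documentation/admin-guide/mm/transhuge.rst):
 *   mount -t tmpfs -o huge=within_size tmpfs /mnt/tmp
 *   mount -o remount,huge=never /mnt/tmp
 */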
455
456 #define SHMEM_HUGE_NEVER        0
457 #define SHMEM_HUGE_ALWAYS       1
458 #define SHMEM_HUGE_WITHIN_SIZE  2
459 #define SHMEM_HUGE_ADVISE       3
460
461 /*
462  * Special values.
463  * Can only be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
464  *
465  * SHMEM_HUGE_DENY:
466  *      disables huge on shm_mnt and all mounts, for emergency use;
467  * SHMEM_HUGE_FORCE:
468  *      enables huge on shm_mnt and all mounts, w/o needing option, for testing;
469  *
470  */
471 #define SHMEM_HUGE_DENY         (-1)
472 #define SHMEM_HUGE_FORCE        (-2)
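/*
 * For example, to disable huge pages on every tmpfs mount in an emergency:
 *   echo deny > /sys/kernel/mm/transparent_hugepage/shmem_enabled
 */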
473
474 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
475 /* ifdef here to avoid bloating shmem.o when not necessary */
476
477 static int shmem_huge __read_mostly;
478
479 #if defined(CONFIG_SYSFS)
480 static int shmem_parse_huge(const char *str)
481 {
482         if (!strcmp(str, "never"))
483                 return SHMEM_HUGE_NEVER;
484         if (!strcmp(str, "always"))
485                 return SHMEM_HUGE_ALWAYS;
486         if (!strcmp(str, "within_size"))
487                 return SHMEM_HUGE_WITHIN_SIZE;
488         if (!strcmp(str, "advise"))
489                 return SHMEM_HUGE_ADVISE;
490         if (!strcmp(str, "deny"))
491                 return SHMEM_HUGE_DENY;
492         if (!strcmp(str, "force"))
493                 return SHMEM_HUGE_FORCE;
494         return -EINVAL;
495 }
496 #endif
497
498 #if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
499 static const char *shmem_format_huge(int huge)
500 {
501         switch (huge) {
502         case SHMEM_HUGE_NEVER:
503                 return "never";
504         case SHMEM_HUGE_ALWAYS:
505                 return "always";
506         case SHMEM_HUGE_WITHIN_SIZE:
507                 return "within_size";
508         case SHMEM_HUGE_ADVISE:
509                 return "advise";
510         case SHMEM_HUGE_DENY:
511                 return "deny";
512         case SHMEM_HUGE_FORCE:
513                 return "force";
514         default:
515                 VM_BUG_ON(1);
516                 return "bad_val";
517         }
518 }
519 #endif
520
521 static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
522                 struct shrink_control *sc, unsigned long nr_to_split)
523 {
524         LIST_HEAD(list), *pos, *next;
525         LIST_HEAD(to_remove);
526         struct inode *inode;
527         struct shmem_inode_info *info;
528         struct page *page;
529         unsigned long batch = sc ? sc->nr_to_scan : 128;
530         int removed = 0, split = 0;
531
532         if (list_empty(&sbinfo->shrinklist))
533                 return SHRINK_STOP;
534
535         spin_lock(&sbinfo->shrinklist_lock);
536         list_for_each_safe(pos, next, &sbinfo->shrinklist) {
537                 info = list_entry(pos, struct shmem_inode_info, shrinklist);
538
539                 /* pin the inode */
540                 inode = igrab(&info->vfs_inode);
541
542                 /* inode is about to be evicted */
543                 if (!inode) {
544                         list_del_init(&info->shrinklist);
545                         removed++;
546                         goto next;
547                 }
548
549                 /* Check if there's anything to gain */
550                 if (round_up(inode->i_size, PAGE_SIZE) ==
551                                 round_up(inode->i_size, HPAGE_PMD_SIZE)) {
552                         list_move(&info->shrinklist, &to_remove);
553                         removed++;
554                         goto next;
555                 }
556
557                 list_move(&info->shrinklist, &list);
558 next:
559                 if (!--batch)
560                         break;
561         }
562         spin_unlock(&sbinfo->shrinklist_lock);
563
564         list_for_each_safe(pos, next, &to_remove) {
565                 info = list_entry(pos, struct shmem_inode_info, shrinklist);
566                 inode = &info->vfs_inode;
567                 list_del_init(&info->shrinklist);
568                 iput(inode);
569         }
570
571         list_for_each_safe(pos, next, &list) {
572                 int ret;
573
574                 info = list_entry(pos, struct shmem_inode_info, shrinklist);
575                 inode = &info->vfs_inode;
576
577                 if (nr_to_split && split >= nr_to_split)
578                         goto leave;
579
580                 page = find_get_page(inode->i_mapping,
581                                 (inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT);
582                 if (!page)
583                         goto drop;
584
585                 /* No huge page at the end of the file: nothing to split */
586                 if (!PageTransHuge(page)) {
587                         put_page(page);
588                         goto drop;
589                 }
590
591                 /*
592                  * Leave the inode on the list if we failed to lock
593                  * the page at this time.
594                  *
595                  * Waiting for the lock may lead to deadlock in the
596                  * reclaim path.
597                  */
598                 if (!trylock_page(page)) {
599                         put_page(page);
600                         goto leave;
601                 }
602
603                 ret = split_huge_page(page);
604                 unlock_page(page);
605                 put_page(page);
606
607                 /* If split failed leave the inode on the list */
608                 if (ret)
609                         goto leave;
610
611                 split++;
612 drop:
613                 list_del_init(&info->shrinklist);
614                 removed++;
615 leave:
616                 iput(inode);
617         }
618
619         spin_lock(&sbinfo->shrinklist_lock);
620         list_splice_tail(&list, &sbinfo->shrinklist);
621         sbinfo->shrinklist_len -= removed;
622         spin_unlock(&sbinfo->shrinklist_lock);
623
624         return split;
625 }
626
627 static long shmem_unused_huge_scan(struct super_block *sb,
628                 struct shrink_control *sc)
629 {
630         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
631
632         if (!READ_ONCE(sbinfo->shrinklist_len))
633                 return SHRINK_STOP;
634
635         return shmem_unused_huge_shrink(sbinfo, sc, 0);
636 }
637
638 static long shmem_unused_huge_count(struct super_block *sb,
639                 struct shrink_control *sc)
640 {
641         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
642         return READ_ONCE(sbinfo->shrinklist_len);
643 }
644 #else /* !CONFIG_TRANSPARENT_HUGEPAGE */
645
646 #define shmem_huge SHMEM_HUGE_DENY
647
648 static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
649                 struct shrink_control *sc, unsigned long nr_to_split)
650 {
651         return 0;
652 }
653 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
654
655 static inline bool is_huge_enabled(struct shmem_sb_info *sbinfo)
656 {
657         if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
658             (shmem_huge == SHMEM_HUGE_FORCE || sbinfo->huge) &&
659             shmem_huge != SHMEM_HUGE_DENY)
660                 return true;
661         return false;
662 }
663
664 /*
665  * Like add_to_page_cache_locked, but error if expected item has gone.
666  */
667 static int shmem_add_to_page_cache(struct page *page,
668                                    struct address_space *mapping,
669                                    pgoff_t index, void *expected, gfp_t gfp,
670                                    struct mm_struct *charge_mm)
671 {
672         XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page));
673         unsigned long i = 0;
674         unsigned long nr = compound_nr(page);
675         int error;
676
677         VM_BUG_ON_PAGE(PageTail(page), page);
678         VM_BUG_ON_PAGE(index != round_down(index, nr), page);
679         VM_BUG_ON_PAGE(!PageLocked(page), page);
680         VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
681         VM_BUG_ON(expected && PageTransHuge(page));
682
683         page_ref_add(page, nr);
684         page->mapping = mapping;
685         page->index = index;
686
687         if (!PageSwapCache(page)) {
688                 error = mem_cgroup_charge(page, charge_mm, gfp);
689                 if (error) {
690                         if (PageTransHuge(page)) {
691                                 count_vm_event(THP_FILE_FALLBACK);
692                                 count_vm_event(THP_FILE_FALLBACK_CHARGE);
693                         }
694                         goto error;
695                 }
696         }
697         cgroup_throttle_swaprate(page, gfp);
698
699         do {
700                 void *entry;
701                 xas_lock_irq(&xas);
702                 entry = xas_find_conflict(&xas);
703                 if (entry != expected)
704                         xas_set_err(&xas, -EEXIST);
705                 xas_create_range(&xas);
706                 if (xas_error(&xas))
707                         goto unlock;
708 next:
709                 xas_store(&xas, page);
710                 if (++i < nr) {
711                         xas_next(&xas);
712                         goto next;
713                 }
714                 if (PageTransHuge(page)) {
715                         count_vm_event(THP_FILE_ALLOC);
716                         __mod_lruvec_page_state(page, NR_SHMEM_THPS, nr);
717                 }
718                 mapping->nrpages += nr;
719                 __mod_lruvec_page_state(page, NR_FILE_PAGES, nr);
720                 __mod_lruvec_page_state(page, NR_SHMEM, nr);
721 unlock:
722                 xas_unlock_irq(&xas);
723         } while (xas_nomem(&xas, gfp));
724
725         if (xas_error(&xas)) {
726                 error = xas_error(&xas);
727                 goto error;
728         }
729
730         return 0;
731 error:
732         page->mapping = NULL;
733         page_ref_sub(page, nr);
734         return error;
735 }
736
737 /*
738  * Like delete_from_page_cache, but substitutes swap for page.
739  */
740 static void shmem_delete_from_page_cache(struct page *page, void *radswap)
741 {
742         struct address_space *mapping = page->mapping;
743         int error;
744
745         VM_BUG_ON_PAGE(PageCompound(page), page);
746
747         xa_lock_irq(&mapping->i_pages);
748         error = shmem_replace_entry(mapping, page->index, page, radswap);
749         page->mapping = NULL;
750         mapping->nrpages--;
751         __dec_lruvec_page_state(page, NR_FILE_PAGES);
752         __dec_lruvec_page_state(page, NR_SHMEM);
753         xa_unlock_irq(&mapping->i_pages);
754         put_page(page);
755         BUG_ON(error);
756 }
757
758 /*
759  * Remove swap entry from page cache, free the swap and its page cache.
760  */
761 static int shmem_free_swap(struct address_space *mapping,
762                            pgoff_t index, void *radswap)
763 {
764         void *old;
765
766         old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
767         if (old != radswap)
768                 return -ENOENT;
769         free_swap_and_cache(radix_to_swp_entry(radswap));
770         return 0;
771 }
772
773 /*
774  * Determine (in bytes) how many of the shmem object's pages mapped by the
775  * given offsets are swapped out.
776  *
777  * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
778  * as long as the inode doesn't go away and racy results are not a problem.
779  */
780 unsigned long shmem_partial_swap_usage(struct address_space *mapping,
781                                                 pgoff_t start, pgoff_t end)
782 {
783         XA_STATE(xas, &mapping->i_pages, start);
784         struct page *page;
785         unsigned long swapped = 0;
786
787         rcu_read_lock();
788         xas_for_each(&xas, page, end - 1) {
789                 if (xas_retry(&xas, page))
790                         continue;
791                 if (xa_is_value(page))
792                         swapped++;
793
794                 if (need_resched()) {
795                         xas_pause(&xas);
796                         cond_resched_rcu();
797                 }
798         }
799
800         rcu_read_unlock();
801
802         return swapped << PAGE_SHIFT;
803 }
804
805 /*
806  * Determine (in bytes) how many of the shmem object's pages mapped by the
807  * given vma are swapped out.
808  *
809  * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
810  * as long as the inode doesn't go away and racy results are not a problem.
811  */
812 unsigned long shmem_swap_usage(struct vm_area_struct *vma)
813 {
814         struct inode *inode = file_inode(vma->vm_file);
815         struct shmem_inode_info *info = SHMEM_I(inode);
816         struct address_space *mapping = inode->i_mapping;
817         unsigned long swapped;
818
819         /* Be careful as we don't hold info->lock */
820         swapped = READ_ONCE(info->swapped);
821
822         /*
823          * The easier cases are when the shmem object has nothing in swap, or
824          * the vma maps it whole. Then we can simply use the stats that we
825          * already track.
826          */
827         if (!swapped)
828                 return 0;
829
830         if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
831                 return swapped << PAGE_SHIFT;
832
833         /* Here comes the more involved part */
834         return shmem_partial_swap_usage(mapping,
835                         linear_page_index(vma, vma->vm_start),
836                         linear_page_index(vma, vma->vm_end));
837 }
838
839 /*
840  * SysV IPC SHM_UNLOCK restores Unevictable pages to their evictable lists.
841  */
842 void shmem_unlock_mapping(struct address_space *mapping)
843 {
844         struct pagevec pvec;
845         pgoff_t index = 0;
846
847         pagevec_init(&pvec);
848         /*
849          * Minor point, but we might as well stop if someone else SHM_LOCKs it.
850          */
851         while (!mapping_unevictable(mapping)) {
852                 if (!pagevec_lookup(&pvec, mapping, &index))
853                         break;
854                 check_move_unevictable_pages(&pvec);
855                 pagevec_release(&pvec);
856                 cond_resched();
857         }
858 }
859
860 /*
861  * Check whether a hole-punch or truncation needs to split a huge page,
862  * returning true if no split was required, or the split has been successful.
863  *
864  * Eviction (or truncation to 0 size) should never need to split a huge page;
865  * but in rare cases might do so, if shmem_undo_range() failed to trylock on
866  * head, and then succeeded to trylock on tail.
867  *
868  * A split can only succeed when there are no additional references on the
869  * huge page: so the split below relies upon find_get_entries() having stopped
870  * when it found a subpage of the huge page, without getting further references.
871  */
872 static bool shmem_punch_compound(struct page *page, pgoff_t start, pgoff_t end)
873 {
874         if (!PageTransCompound(page))
875                 return true;
876
877         /* Just proceed to delete a huge page wholly within the range punched */
878         if (PageHead(page) &&
879             page->index >= start && page->index + HPAGE_PMD_NR <= end)
880                 return true;
881
882         /* Try to split huge page, so we can truly punch the hole or truncate */
883         return split_huge_page(page) >= 0;
884 }
885
886 /*
887  * Remove range of pages and swap entries from page cache, and free them.
888  * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
889  */
890 static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
891                                                                  bool unfalloc)
892 {
893         struct address_space *mapping = inode->i_mapping;
894         struct shmem_inode_info *info = SHMEM_I(inode);
895         pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
896         pgoff_t end = (lend + 1) >> PAGE_SHIFT;
897         unsigned int partial_start = lstart & (PAGE_SIZE - 1);
898         unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1);
899         struct pagevec pvec;
900         pgoff_t indices[PAGEVEC_SIZE];
901         long nr_swaps_freed = 0;
902         pgoff_t index;
903         int i;
904
905         if (lend == -1)
906                 end = -1;       /* unsigned, so actually very big */
907
908         pagevec_init(&pvec);
909         index = start;
910         while (index < end && find_lock_entries(mapping, index, end - 1,
911                         &pvec, indices)) {
912                 for (i = 0; i < pagevec_count(&pvec); i++) {
913                         struct page *page = pvec.pages[i];
914
915                         index = indices[i];
916
917                         if (xa_is_value(page)) {
918                                 if (unfalloc)
919                                         continue;
920                                 nr_swaps_freed += !shmem_free_swap(mapping,
921                                                                 index, page);
922                                 continue;
923                         }
924                         index += thp_nr_pages(page) - 1;
925
926                         if (!unfalloc || !PageUptodate(page))
927                                 truncate_inode_page(mapping, page);
928                         unlock_page(page);
929                 }
930                 pagevec_remove_exceptionals(&pvec);
931                 pagevec_release(&pvec);
932                 cond_resched();
933                 index++;
934         }
935
936         if (partial_start) {
937                 struct page *page = NULL;
938                 shmem_getpage(inode, start - 1, &page, SGP_READ);
939                 if (page) {
940                         unsigned int top = PAGE_SIZE;
941                         if (start > end) {
942                                 top = partial_end;
943                                 partial_end = 0;
944                         }
945                         zero_user_segment(page, partial_start, top);
946                         set_page_dirty(page);
947                         unlock_page(page);
948                         put_page(page);
949                 }
950         }
951         if (partial_end) {
952                 struct page *page = NULL;
953                 shmem_getpage(inode, end, &page, SGP_READ);
954                 if (page) {
955                         zero_user_segment(page, 0, partial_end);
956                         set_page_dirty(page);
957                         unlock_page(page);
958                         put_page(page);
959                 }
960         }
961         if (start >= end)
962                 return;
963
964         index = start;
965         while (index < end) {
966                 cond_resched();
967
968                 if (!find_get_entries(mapping, index, end - 1, &pvec,
969                                 indices)) {
970                         /* If all gone or hole-punch or unfalloc, we're done */
971                         if (index == start || end != -1)
972                                 break;
973                         /* But if truncating, restart to make sure all gone */
974                         index = start;
975                         continue;
976                 }
977                 for (i = 0; i < pagevec_count(&pvec); i++) {
978                         struct page *page = pvec.pages[i];
979
980                         index = indices[i];
981                         if (xa_is_value(page)) {
982                                 if (unfalloc)
983                                         continue;
984                                 if (shmem_free_swap(mapping, index, page)) {
985                                         /* Swap was replaced by page: retry */
986                                         index--;
987                                         break;
988                                 }
989                                 nr_swaps_freed++;
990                                 continue;
991                         }
992
993                         lock_page(page);
994
995                         if (!unfalloc || !PageUptodate(page)) {
996                                 if (page_mapping(page) != mapping) {
997                                         /* Page was replaced by swap: retry */
998                                         unlock_page(page);
999                                         index--;
1000                                         break;
1001                                 }
1002                                 VM_BUG_ON_PAGE(PageWriteback(page), page);
1003                                 if (shmem_punch_compound(page, start, end))
1004                                         truncate_inode_page(mapping, page);
1005                                 else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
1006                                         /* Wipe the page and don't get stuck */
1007                                         clear_highpage(page);
1008                                         flush_dcache_page(page);
1009                                         set_page_dirty(page);
1010                                         if (index <
1011                                             round_up(start, HPAGE_PMD_NR))
1012                                                 start = index + 1;
1013                                 }
1014                         }
1015                         unlock_page(page);
1016                 }
1017                 pagevec_remove_exceptionals(&pvec);
1018                 pagevec_release(&pvec);
1019                 index++;
1020         }
1021
1022         spin_lock_irq(&info->lock);
1023         info->swapped -= nr_swaps_freed;
1024         shmem_recalc_inode(inode);
1025         spin_unlock_irq(&info->lock);
1026 }
1027
1028 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
1029 {
1030         shmem_undo_range(inode, lstart, lend, false);
1031         inode->i_ctime = inode->i_mtime = current_time(inode);
1032 }
1033 EXPORT_SYMBOL_GPL(shmem_truncate_range);
1034
1035 static int shmem_getattr(struct user_namespace *mnt_userns,
1036                          const struct path *path, struct kstat *stat,
1037                          u32 request_mask, unsigned int query_flags)
1038 {
1039         struct inode *inode = path->dentry->d_inode;
1040         struct shmem_inode_info *info = SHMEM_I(inode);
1041         struct shmem_sb_info *sb_info = SHMEM_SB(inode->i_sb);
1042
1043         if (info->alloced - info->swapped != inode->i_mapping->nrpages) {
1044                 spin_lock_irq(&info->lock);
1045                 shmem_recalc_inode(inode);
1046                 spin_unlock_irq(&info->lock);
1047         }
1048         generic_fillattr(&init_user_ns, inode, stat);
1049
1050         if (is_huge_enabled(sb_info))
1051                 stat->blksize = HPAGE_PMD_SIZE;
1052
1053         return 0;
1054 }
1055
1056 static int shmem_setattr(struct user_namespace *mnt_userns,
1057                          struct dentry *dentry, struct iattr *attr)
1058 {
1059         struct inode *inode = d_inode(dentry);
1060         struct shmem_inode_info *info = SHMEM_I(inode);
1061         struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1062         int error;
1063
1064         error = setattr_prepare(&init_user_ns, dentry, attr);
1065         if (error)
1066                 return error;
1067
1068         if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
1069                 loff_t oldsize = inode->i_size;
1070                 loff_t newsize = attr->ia_size;
1071
1072                 /* protected by i_rwsem */
1073                 if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
1074                     (newsize > oldsize && (info->seals & F_SEAL_GROW)))
1075                         return -EPERM;
1076
1077                 if (newsize != oldsize) {
1078                         error = shmem_reacct_size(SHMEM_I(inode)->flags,
1079                                         oldsize, newsize);
1080                         if (error)
1081                                 return error;
1082                         i_size_write(inode, newsize);
1083                         inode->i_ctime = inode->i_mtime = current_time(inode);
1084                 }
1085                 if (newsize <= oldsize) {
1086                         loff_t holebegin = round_up(newsize, PAGE_SIZE);
1087                         if (oldsize > holebegin)
1088                                 unmap_mapping_range(inode->i_mapping,
1089                                                         holebegin, 0, 1);
1090                         if (info->alloced)
1091                                 shmem_truncate_range(inode,
1092                                                         newsize, (loff_t)-1);
1093                         /* unmap again to remove racily COWed private pages */
1094                         if (oldsize > holebegin)
1095                                 unmap_mapping_range(inode->i_mapping,
1096                                                         holebegin, 0, 1);
1097
1098                         /*
1099                          * Part of the huge page can be beyond i_size: subject
1100                          * to shrink under memory pressure.
1101                          */
1102                         if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
1103                                 spin_lock(&sbinfo->shrinklist_lock);
1104                                 /*
1105                                  * _careful to defend against unlocked access to
1106                                  * ->shrinklist in shmem_unused_huge_shrink()
1107                                  */
1108                                 if (list_empty_careful(&info->shrinklist)) {
1109                                         list_add_tail(&info->shrinklist,
1110                                                         &sbinfo->shrinklist);
1111                                         sbinfo->shrinklist_len++;
1112                                 }
1113                                 spin_unlock(&sbinfo->shrinklist_lock);
1114                         }
1115                 }
1116         }
1117
1118         setattr_copy(&init_user_ns, inode, attr);
1119         if (attr->ia_valid & ATTR_MODE)
1120                 error = posix_acl_chmod(&init_user_ns, inode, inode->i_mode);
1121         return error;
1122 }
1123
1124 static void shmem_evict_inode(struct inode *inode)
1125 {
1126         struct shmem_inode_info *info = SHMEM_I(inode);
1127         struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1128
1129         if (shmem_mapping(inode->i_mapping)) {
1130                 shmem_unacct_size(info->flags, inode->i_size);
1131                 inode->i_size = 0;
1132                 shmem_truncate_range(inode, 0, (loff_t)-1);
1133                 if (!list_empty(&info->shrinklist)) {
1134                         spin_lock(&sbinfo->shrinklist_lock);
1135                         if (!list_empty(&info->shrinklist)) {
1136                                 list_del_init(&info->shrinklist);
1137                                 sbinfo->shrinklist_len--;
1138                         }
1139                         spin_unlock(&sbinfo->shrinklist_lock);
1140                 }
1141                 while (!list_empty(&info->swaplist)) {
1142                         /* Wait while shmem_unuse() is scanning this inode... */
1143                         wait_var_event(&info->stop_eviction,
1144                                        !atomic_read(&info->stop_eviction));
1145                         mutex_lock(&shmem_swaplist_mutex);
1146                         /* ...but beware of the race if we peeked too early */
1147                         if (!atomic_read(&info->stop_eviction))
1148                                 list_del_init(&info->swaplist);
1149                         mutex_unlock(&shmem_swaplist_mutex);
1150                 }
1151         }
1152
1153         simple_xattrs_free(&info->xattrs);
1154         WARN_ON(inode->i_blocks);
1155         shmem_free_inode(inode->i_sb);
1156         clear_inode(inode);
1157 }
1158
1159 extern struct swap_info_struct *swap_info[];
1160
1161 static int shmem_find_swap_entries(struct address_space *mapping,
1162                                    pgoff_t start, unsigned int nr_entries,
1163                                    struct page **entries, pgoff_t *indices,
1164                                    unsigned int type, bool frontswap)
1165 {
1166         XA_STATE(xas, &mapping->i_pages, start);
1167         struct page *page;
1168         swp_entry_t entry;
1169         unsigned int ret = 0;
1170
1171         if (!nr_entries)
1172                 return 0;
1173
1174         rcu_read_lock();
1175         xas_for_each(&xas, page, ULONG_MAX) {
1176                 if (xas_retry(&xas, page))
1177                         continue;
1178
1179                 if (!xa_is_value(page))
1180                         continue;
1181
1182                 entry = radix_to_swp_entry(page);
1183                 if (swp_type(entry) != type)
1184                         continue;
1185                 if (frontswap &&
1186                     !frontswap_test(swap_info[type], swp_offset(entry)))
1187                         continue;
1188
1189                 indices[ret] = xas.xa_index;
1190                 entries[ret] = page;
1191
1192                 if (need_resched()) {
1193                         xas_pause(&xas);
1194                         cond_resched_rcu();
1195                 }
1196                 if (++ret == nr_entries)
1197                         break;
1198         }
1199         rcu_read_unlock();
1200
1201         return ret;
1202 }
1203
1204 /*
1205  * Move the swapped pages for an inode to page cache. Returns the count
1206  * of pages swapped in, or the error in case of failure.
1207  */
1208 static int shmem_unuse_swap_entries(struct inode *inode, struct pagevec pvec,
1209                                     pgoff_t *indices)
1210 {
1211         int i = 0;
1212         int ret = 0;
1213         int error = 0;
1214         struct address_space *mapping = inode->i_mapping;
1215
1216         for (i = 0; i < pvec.nr; i++) {
1217                 struct page *page = pvec.pages[i];
1218
1219                 if (!xa_is_value(page))
1220                         continue;
1221                 error = shmem_swapin_page(inode, indices[i],
1222                                           &page, SGP_CACHE,
1223                                           mapping_gfp_mask(mapping),
1224                                           NULL, NULL);
1225                 if (error == 0) {
1226                         unlock_page(page);
1227                         put_page(page);
1228                         ret++;
1229                 }
1230                 if (error == -ENOMEM)
1231                         break;
1232                 error = 0;
1233         }
1234         return error ? error : ret;
1235 }
1236
1237 /*
1238  * If swap found in inode, free it and move page from swapcache to filecache.
1239  */
1240 static int shmem_unuse_inode(struct inode *inode, unsigned int type,
1241                              bool frontswap, unsigned long *fs_pages_to_unuse)
1242 {
1243         struct address_space *mapping = inode->i_mapping;
1244         pgoff_t start = 0;
1245         struct pagevec pvec;
1246         pgoff_t indices[PAGEVEC_SIZE];
1247         bool frontswap_partial = (frontswap && *fs_pages_to_unuse > 0);
1248         int ret = 0;
1249
1250         pagevec_init(&pvec);
1251         do {
1252                 unsigned int nr_entries = PAGEVEC_SIZE;
1253
1254                 if (frontswap_partial && *fs_pages_to_unuse < PAGEVEC_SIZE)
1255                         nr_entries = *fs_pages_to_unuse;
1256
1257                 pvec.nr = shmem_find_swap_entries(mapping, start, nr_entries,
1258                                                   pvec.pages, indices,
1259                                                   type, frontswap);
1260                 if (pvec.nr == 0) {
1261                         ret = 0;
1262                         break;
1263                 }
1264
1265                 ret = shmem_unuse_swap_entries(inode, pvec, indices);
1266                 if (ret < 0)
1267                         break;
1268
1269                 if (frontswap_partial) {
1270                         *fs_pages_to_unuse -= ret;
1271                         if (*fs_pages_to_unuse == 0) {
1272                                 ret = FRONTSWAP_PAGES_UNUSED;
1273                                 break;
1274                         }
1275                 }
1276
1277                 start = indices[pvec.nr - 1];
1278         } while (true);
1279
1280         return ret;
1281 }
1282
1283 /*
1284  * Read all the shared memory data that resides in the swap
1285  * device 'type' back into memory, so the swap device can be
1286  * unused.
1287  */
1288 int shmem_unuse(unsigned int type, bool frontswap,
1289                 unsigned long *fs_pages_to_unuse)
1290 {
1291         struct shmem_inode_info *info, *next;
1292         int error = 0;
1293
1294         if (list_empty(&shmem_swaplist))
1295                 return 0;
1296
1297         mutex_lock(&shmem_swaplist_mutex);
1298         list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) {
1299                 if (!info->swapped) {
1300                         list_del_init(&info->swaplist);
1301                         continue;
1302                 }
1303                 /*
1304                  * Drop the swaplist mutex while searching the inode for swap;
1305                  * but before doing so, make sure shmem_evict_inode() will not
1306                  * remove placeholder inode from swaplist, nor let it be freed
1307                  * (igrab() would protect from unlink, but not from unmount).
1308                  */
1309                 atomic_inc(&info->stop_eviction);
1310                 mutex_unlock(&shmem_swaplist_mutex);
1311
1312                 error = shmem_unuse_inode(&info->vfs_inode, type, frontswap,
1313                                           fs_pages_to_unuse);
1314                 cond_resched();
1315
1316                 mutex_lock(&shmem_swaplist_mutex);
1317                 next = list_next_entry(info, swaplist);
1318                 if (!info->swapped)
1319                         list_del_init(&info->swaplist);
1320                 if (atomic_dec_and_test(&info->stop_eviction))
1321                         wake_up_var(&info->stop_eviction);
1322                 if (error)
1323                         break;
1324         }
1325         mutex_unlock(&shmem_swaplist_mutex);
1326
1327         return error;
1328 }
1329
1330 /*
1331  * Move the page from the page cache to the swap cache.
1332  */
1333 static int shmem_writepage(struct page *page, struct writeback_control *wbc)
1334 {
1335         struct shmem_inode_info *info;
1336         struct address_space *mapping;
1337         struct inode *inode;
1338         swp_entry_t swap;
1339         pgoff_t index;
1340
1341         VM_BUG_ON_PAGE(PageCompound(page), page);
1342         BUG_ON(!PageLocked(page));
1343         mapping = page->mapping;
1344         index = page->index;
1345         inode = mapping->host;
1346         info = SHMEM_I(inode);
1347         if (info->flags & VM_LOCKED)
1348                 goto redirty;
1349         if (!total_swap_pages)
1350                 goto redirty;
1351
1352         /*
1353          * Our capabilities prevent regular writeback or sync from ever calling
1354          * shmem_writepage; but a stacking filesystem might use ->writepage of
1355          * its underlying filesystem, in which case tmpfs should write out to
1356          * swap only in response to memory pressure, and not for the writeback
1357          * threads or sync.
1358          */
1359         if (!wbc->for_reclaim) {
1360                 WARN_ON_ONCE(1);        /* Still happens? Tell us about it! */
1361                 goto redirty;
1362         }
1363
1364         /*
1365          * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
1366          * value into swapfile.c, the only way we can correctly account for a
1367          * fallocated page arriving here is now to initialize it and write it.
1368          *
1369          * That's okay for a page already fallocated earlier, but if we have
1370          * not yet completed the fallocation, then (a) we want to keep track
1371          * of this page in case we have to undo it, and (b) it may not be a
1372          * good idea to continue anyway, once we're pushing into swap.  So
1373          * reactivate the page, and let shmem_fallocate() quit when too many.
1374          */
1375         if (!PageUptodate(page)) {
1376                 if (inode->i_private) {
1377                         struct shmem_falloc *shmem_falloc;
1378                         spin_lock(&inode->i_lock);
1379                         shmem_falloc = inode->i_private;
1380                         if (shmem_falloc &&
1381                             !shmem_falloc->waitq &&
1382                             index >= shmem_falloc->start &&
1383                             index < shmem_falloc->next)
1384                                 shmem_falloc->nr_unswapped++;
1385                         else
1386                                 shmem_falloc = NULL;
1387                         spin_unlock(&inode->i_lock);
1388                         if (shmem_falloc)
1389                                 goto redirty;
1390                 }
1391                 clear_highpage(page);
1392                 flush_dcache_page(page);
1393                 SetPageUptodate(page);
1394         }
1395
1396         swap = get_swap_page(page);
1397         if (!swap.val)
1398                 goto redirty;
1399
1400         /*
1401          * Add inode to shmem_unuse()'s list of swapped-out inodes,
1402          * if it's not already there.  Do it now before the page is
1403          * moved to swap cache, when its pagelock no longer protects
1404          * the inode from eviction.  But don't unlock the mutex until
1405          * we've incremented swapped, because shmem_unuse_inode() will
1406          * prune a !swapped inode from the swaplist under this mutex.
1407          */
1408         mutex_lock(&shmem_swaplist_mutex);
1409         if (list_empty(&info->swaplist))
1410                 list_add(&info->swaplist, &shmem_swaplist);
1411
1412         if (add_to_swap_cache(page, swap,
1413                         __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN,
1414                         NULL) == 0) {
1415                 spin_lock_irq(&info->lock);
1416                 shmem_recalc_inode(inode);
1417                 info->swapped++;
1418                 spin_unlock_irq(&info->lock);
1419
1420                 swap_shmem_alloc(swap);
1421                 shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));
1422
1423                 mutex_unlock(&shmem_swaplist_mutex);
1424                 BUG_ON(page_mapped(page));
1425                 swap_writepage(page, wbc);
1426                 return 0;
1427         }
1428
1429         mutex_unlock(&shmem_swaplist_mutex);
1430         put_swap_page(page, swap);
1431 redirty:
1432         set_page_dirty(page);
1433         if (wbc->for_reclaim)
1434                 return AOP_WRITEPAGE_ACTIVATE;  /* Return with page locked */
1435         unlock_page(page);
1436         return 0;
1437 }
1438
1439 #if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS)
1440 static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1441 {
1442         char buffer[64];
1443
1444         if (!mpol || mpol->mode == MPOL_DEFAULT)
1445                 return;         /* show nothing */
1446
1447         mpol_to_str(buffer, sizeof(buffer), mpol);
1448
1449         seq_printf(seq, ",mpol=%s", buffer);
1450 }
1451
1452 static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
1453 {
1454         struct mempolicy *mpol = NULL;
1455         if (sbinfo->mpol) {
1456                 spin_lock(&sbinfo->stat_lock);  /* prevent replace/use races */
1457                 mpol = sbinfo->mpol;
1458                 mpol_get(mpol);
1459                 spin_unlock(&sbinfo->stat_lock);
1460         }
1461         return mpol;
1462 }
1463 #else /* !CONFIG_NUMA || !CONFIG_TMPFS */
1464 static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1465 {
1466 }
1467 static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
1468 {
1469         return NULL;
1470 }
1471 #endif /* CONFIG_NUMA && CONFIG_TMPFS */
1472 #ifndef CONFIG_NUMA
1473 #define vm_policy vm_private_data
1474 #endif
1475
1476 static void shmem_pseudo_vma_init(struct vm_area_struct *vma,
1477                 struct shmem_inode_info *info, pgoff_t index)
1478 {
1479         /* Create a pseudo vma that just contains the policy */
1480         vma_init(vma, NULL);
1481         /* Bias interleave by inode number to distribute better across nodes */
1482         vma->vm_pgoff = index + info->vfs_inode.i_ino;
1483         vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index);
1484 }
1485
1486 static void shmem_pseudo_vma_destroy(struct vm_area_struct *vma)
1487 {
1488         /* Drop reference taken by mpol_shared_policy_lookup() */
1489         mpol_cond_put(vma->vm_policy);
1490 }
1491
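     /*
      * Bring the page for this swap entry in from swap, with readahead of
      * the surrounding cluster; the pseudo vma supplies this inode's NUMA
      * mempolicy (and index bias) for the page allocations.
      */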
1492 static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
1493                         struct shmem_inode_info *info, pgoff_t index)
1494 {
1495         struct vm_area_struct pvma;
1496         struct page *page;
1497         struct vm_fault vmf = {
1498                 .vma = &pvma,
1499         };
1500
1501         shmem_pseudo_vma_init(&pvma, info, index);
1502         page = swap_cluster_readahead(swap, gfp, &vmf);
1503         shmem_pseudo_vma_destroy(&pvma);
1504
1505         return page;
1506 }
1507
1508 /*
1509  * Make sure huge_gfp is always more limited than limit_gfp.
1510  * Some of the flags set permissions, while others set limitations.
1511  */
1512 static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp)
1513 {
1514         gfp_t allowflags = __GFP_IO | __GFP_FS | __GFP_RECLAIM;
1515         gfp_t denyflags = __GFP_NOWARN | __GFP_NORETRY;
1516         gfp_t zoneflags = limit_gfp & GFP_ZONEMASK;
1517         gfp_t result = huge_gfp & ~(allowflags | GFP_ZONEMASK);
1518
1519         /* Allow allocations only from the originally specified zones. */
1520         result |= zoneflags;
1521
1522         /*
1523          * Minimize the result gfp by taking the union with the deny flags,
1524          * and the intersection of the allow flags.
1525          */
1526         result |= (limit_gfp & denyflags);
1527         result |= (huge_gfp & limit_gfp) & allowflags;
1528
1529         return result;
1530 }
1531
1532 static struct page *shmem_alloc_hugepage(gfp_t gfp,
1533                 struct shmem_inode_info *info, pgoff_t index)
1534 {
1535         struct vm_area_struct pvma;
1536         struct address_space *mapping = info->vfs_inode.i_mapping;
1537         pgoff_t hindex;
1538         struct page *page;
1539
1540         hindex = round_down(index, HPAGE_PMD_NR);
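             /* Give up if anything already occupies the PMD-aligned range */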
1541         if (xa_find(&mapping->i_pages, &hindex, hindex + HPAGE_PMD_NR - 1,
1542                                                                 XA_PRESENT))
1543                 return NULL;
1544
1545         shmem_pseudo_vma_init(&pvma, info, hindex);
1546         page = alloc_pages_vma(gfp, HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(),
1547                                true);
1548         shmem_pseudo_vma_destroy(&pvma);
1549         if (page)
1550                 prep_transhuge_page(page);
1551         else
1552                 count_vm_event(THP_FILE_FALLBACK);
1553         return page;
1554 }
1555
1556 static struct page *shmem_alloc_page(gfp_t gfp,
1557                         struct shmem_inode_info *info, pgoff_t index)
1558 {
1559         struct vm_area_struct pvma;
1560         struct page *page;
1561
1562         shmem_pseudo_vma_init(&pvma, info, index);
1563         page = alloc_page_vma(gfp, &pvma, 0);
1564         shmem_pseudo_vma_destroy(&pvma);
1565
1566         return page;
1567 }
1568
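     /*
      * Charge the allocation against the inode's (and superblock's) block
      * limits first, then allocate a huge or order-0 page.  Returns
      * ERR_PTR(-ENOSPC) if the accounting fails, ERR_PTR(-ENOMEM) if the
      * allocation itself does.
      */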
1569 static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
1570                 struct inode *inode,
1571                 pgoff_t index, bool huge)
1572 {
1573         struct shmem_inode_info *info = SHMEM_I(inode);
1574         struct page *page;
1575         int nr;
1576         int err = -ENOSPC;
1577
1578         if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
1579                 huge = false;
1580         nr = huge ? HPAGE_PMD_NR : 1;
1581
1582         if (!shmem_inode_acct_block(inode, nr))
1583                 goto failed;
1584
1585         if (huge)
1586                 page = shmem_alloc_hugepage(gfp, info, index);
1587         else
1588                 page = shmem_alloc_page(gfp, info, index);
1589         if (page) {
1590                 __SetPageLocked(page);
1591                 __SetPageSwapBacked(page);
1592                 return page;
1593         }
1594
1595         err = -ENOMEM;
1596         shmem_inode_unacct_blocks(inode, nr);
1597 failed:
1598         return ERR_PTR(err);
1599 }
1600
1601 /*
1602  * When a page is moved from swapcache to shmem filecache (either by the
1603  * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of
1604  * shmem_unuse_inode()), it may have been read in earlier from swap, in
1605  * ignorance of the mapping it belongs to.  If that mapping has special
1606  * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
1607  * we may need to copy to a suitable page before moving to filecache.
1608  *
1609  * In a future release, this may well be extended to respect cpuset and
1610  * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
1611  * but for now it is a simple matter of zone.
1612  */
1613 static bool shmem_should_replace_page(struct page *page, gfp_t gfp)
1614 {
1615         return page_zonenum(page) > gfp_zone(gfp);
1616 }
1617
1618 static int shmem_replace_page(struct page **pagep, gfp_t gfp,
1619                                 struct shmem_inode_info *info, pgoff_t index)
1620 {
1621         struct page *oldpage, *newpage;
1622         struct address_space *swap_mapping;
1623         swp_entry_t entry;
1624         pgoff_t swap_index;
1625         int error;
1626
1627         oldpage = *pagep;
1628         entry.val = page_private(oldpage);
1629         swap_index = swp_offset(entry);
1630         swap_mapping = page_mapping(oldpage);
1631
1632         /*
1633          * We have arrived here because our zones are constrained, so don't
1634          * limit chance of success by further cpuset and node constraints.
1635          */
1636         gfp &= ~GFP_CONSTRAINT_MASK;
1637         newpage = shmem_alloc_page(gfp, info, index);
1638         if (!newpage)
1639                 return -ENOMEM;
1640
1641         get_page(newpage);
1642         copy_highpage(newpage, oldpage);
1643         flush_dcache_page(newpage);
1644
1645         __SetPageLocked(newpage);
1646         __SetPageSwapBacked(newpage);
1647         SetPageUptodate(newpage);
1648         set_page_private(newpage, entry.val);
1649         SetPageSwapCache(newpage);
1650
1651         /*
1652          * Our caller will very soon move newpage out of swapcache, but it's
1653          * a nice clean interface for us to replace oldpage by newpage there.
1654          */
1655         xa_lock_irq(&swap_mapping->i_pages);
1656         error = shmem_replace_entry(swap_mapping, swap_index, oldpage, newpage);
1657         if (!error) {
1658                 mem_cgroup_migrate(oldpage, newpage);
1659                 __inc_lruvec_page_state(newpage, NR_FILE_PAGES);
1660                 __dec_lruvec_page_state(oldpage, NR_FILE_PAGES);
1661         }
1662         xa_unlock_irq(&swap_mapping->i_pages);
1663
1664         if (unlikely(error)) {
1665                 /*
1666                  * Is this possible?  I think not, now that our callers check
1667                  * both PageSwapCache and page_private after getting page lock;
1668                  * but be defensive.  Point oldpage at newpage so the clear and free below apply to it.
1669                  */
1670                 oldpage = newpage;
1671         } else {
1672                 lru_cache_add(newpage);
1673                 *pagep = newpage;
1674         }
1675
1676         ClearPageSwapCache(oldpage);
1677         set_page_private(oldpage, 0);
1678
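             /*
              * On success, two references to oldpage are dropped below: the
              * swap cache's (now transferred to newpage) and the caller's
              * (the caller continues with newpage via *pagep).
              */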
1679         unlock_page(oldpage);
1680         put_page(oldpage);
1681         put_page(oldpage);
1682         return error;
1683 }
1684
1685 /*
1686  * Swap in the page pointed to by *pagep.
1687  * Caller has to make sure that *pagep contains a valid swapped page.
1688  * Returns 0 with the page in *pagep on success.  On failure, returns the
1689  * error code and leaves *pagep NULL.
1690  */
1691 static int shmem_swapin_page(struct inode *inode, pgoff_t index,
1692                              struct page **pagep, enum sgp_type sgp,
1693                              gfp_t gfp, struct vm_area_struct *vma,
1694                              vm_fault_t *fault_type)
1695 {
1696         struct address_space *mapping = inode->i_mapping;
1697         struct shmem_inode_info *info = SHMEM_I(inode);
1698         struct mm_struct *charge_mm = vma ? vma->vm_mm : NULL;
1699         struct page *page;
1700         swp_entry_t swap;
1701         int error;
1702
1703         VM_BUG_ON(!*pagep || !xa_is_value(*pagep));
1704         swap = radix_to_swp_entry(*pagep);
1705         *pagep = NULL;
1706
1707         /* Look it up and read it in.. */
1708         page = lookup_swap_cache(swap, NULL, 0);
1709         if (!page) {
1710                 /* Or update major stats only when swapin succeeds?? */
1711                 if (fault_type) {
1712                         *fault_type |= VM_FAULT_MAJOR;
1713                         count_vm_event(PGMAJFAULT);
1714                         count_memcg_event_mm(charge_mm, PGMAJFAULT);
1715                 }
1716                 /* Here we actually start the io */
1717                 page = shmem_swapin(swap, gfp, info, index);
1718                 if (!page) {
1719                         error = -ENOMEM;
1720                         goto failed;
1721                 }
1722         }
1723
1724         /* We have to do this with page locked to prevent races */
1725         lock_page(page);
1726         if (!PageSwapCache(page) || page_private(page) != swap.val ||
1727             !shmem_confirm_swap(mapping, index, swap)) {
1728                 error = -EEXIST;
1729                 goto unlock;
1730         }
1731         if (!PageUptodate(page)) {
1732                 error = -EIO;
1733                 goto failed;
1734         }
1735         wait_on_page_writeback(page);
1736
1737         /*
1738          * Some architectures may have to restore extra metadata to the
1739          * physical page after reading from swap.
1740          */
1741         arch_swap_restore(swap, page);
1742
1743         if (shmem_should_replace_page(page, gfp)) {
1744                 error = shmem_replace_page(&page, gfp, info, index);
1745                 if (error)
1746                         goto failed;
1747         }
1748
1749         error = shmem_add_to_page_cache(page, mapping, index,
1750                                         swp_to_radix_entry(swap), gfp,
1751                                         charge_mm);
1752         if (error)
1753                 goto failed;
1754
1755         spin_lock_irq(&info->lock);
1756         info->swapped--;
1757         shmem_recalc_inode(inode);
1758         spin_unlock_irq(&info->lock);
1759
1760         if (sgp == SGP_WRITE)
1761                 mark_page_accessed(page);
1762
1763         delete_from_swap_cache(page);
1764         set_page_dirty(page);
1765         swap_free(swap);
1766
1767         *pagep = page;
1768         return 0;
1769 failed:
1770         if (!shmem_confirm_swap(mapping, index, swap))
1771                 error = -EEXIST;
1772 unlock:
1773         if (page) {
1774                 unlock_page(page);
1775                 put_page(page);
1776         }
1777
1778         return error;
1779 }
1780
1781 /*
1782  * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
1783  *
1784  * If we allocate a new one we do not mark it dirty. That's up to the
1785  * vm. If we swap it in we mark it dirty, since we also free the swap
1786  * entry: a page cannot live in both the swap and page cache.
1787  *
1788  * vma, vmf, and fault_type are only supplied by shmem_fault:
1789  * otherwise they are NULL.
1790  */
1791 static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
1792         struct page **pagep, enum sgp_type sgp, gfp_t gfp,
1793         struct vm_area_struct *vma, struct vm_fault *vmf,
1794                         vm_fault_t *fault_type)
1795 {
1796         struct address_space *mapping = inode->i_mapping;
1797         struct shmem_inode_info *info = SHMEM_I(inode);
1798         struct shmem_sb_info *sbinfo;
1799         struct mm_struct *charge_mm;
1800         struct page *page;
1801         enum sgp_type sgp_huge = sgp;
1802         pgoff_t hindex = index;
1803         gfp_t huge_gfp;
1804         int error;
1805         int once = 0;
1806         int alloced = 0;
1807
1808         if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
1809                 return -EFBIG;
1810         if (sgp == SGP_NOHUGE || sgp == SGP_HUGE)
1811                 sgp = SGP_CACHE;
1812 repeat:
1813         if (sgp <= SGP_CACHE &&
1814             ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1815                 return -EINVAL;
1816         }
1817
1818         sbinfo = SHMEM_SB(inode->i_sb);
1819         charge_mm = vma ? vma->vm_mm : NULL;
1820
1821         page = pagecache_get_page(mapping, index,
1822                                         FGP_ENTRY | FGP_HEAD | FGP_LOCK, 0);
1823
1824         if (page && vma && userfaultfd_minor(vma)) {
1825                 if (!xa_is_value(page)) {
1826                         unlock_page(page);
1827                         put_page(page);
1828                 }
1829                 *fault_type = handle_userfault(vmf, VM_UFFD_MINOR);
1830                 return 0;
1831         }
1832
1833         if (xa_is_value(page)) {
1834                 error = shmem_swapin_page(inode, index, &page,
1835                                           sgp, gfp, vma, fault_type);
1836                 if (error == -EEXIST)
1837                         goto repeat;
1838
1839                 *pagep = page;
1840                 return error;
1841         }
1842
1843         if (page)
1844                 hindex = page->index;
1845         if (page && sgp == SGP_WRITE)
1846                 mark_page_accessed(page);
1847
1848         /* fallocated page? */
1849         if (page && !PageUptodate(page)) {
1850                 if (sgp != SGP_READ)
1851                         goto clear;
1852                 unlock_page(page);
1853                 put_page(page);
1854                 page = NULL;
1855                 hindex = index;
1856         }
1857         if (page || sgp == SGP_READ)
1858                 goto out;
1859
1860         /*
1861          * Fast cache lookup did not find it:
1862          * bring it back from swap or allocate.
1863          */
1864
1865         if (vma && userfaultfd_missing(vma)) {
1866                 *fault_type = handle_userfault(vmf, VM_UFFD_MISSING);
1867                 return 0;
1868         }
1869
1870         /* shmem_symlink() */
1871         if (!shmem_mapping(mapping))
1872                 goto alloc_nohuge;
1873         if (shmem_huge == SHMEM_HUGE_DENY || sgp_huge == SGP_NOHUGE)
1874                 goto alloc_nohuge;
1875         if (shmem_huge == SHMEM_HUGE_FORCE)
1876                 goto alloc_huge;
1877         switch (sbinfo->huge) {
1878         case SHMEM_HUGE_NEVER:
1879                 goto alloc_nohuge;
1880         case SHMEM_HUGE_WITHIN_SIZE: {
1881                 loff_t i_size;
1882                 pgoff_t off;
1883
1884                 off = round_up(index, HPAGE_PMD_NR);
1885                 i_size = round_up(i_size_read(inode), PAGE_SIZE);
1886                 if (i_size >= HPAGE_PMD_SIZE &&
1887                     i_size >> PAGE_SHIFT >= off)
1888                         goto alloc_huge;
1889
1890                 fallthrough;
1891         }
1892         case SHMEM_HUGE_ADVISE:
1893                 if (sgp_huge == SGP_HUGE)
1894                         goto alloc_huge;
1895                 /* TODO: implement fadvise() hints */
1896                 goto alloc_nohuge;
1897         }
1898
1899 alloc_huge:
1900         huge_gfp = vma_thp_gfp_mask(vma);
1901         huge_gfp = limit_gfp_mask(huge_gfp, gfp);
1902         page = shmem_alloc_and_acct_page(huge_gfp, inode, index, true);
1903         if (IS_ERR(page)) {
1904 alloc_nohuge:
1905                 page = shmem_alloc_and_acct_page(gfp, inode,
1906                                                  index, false);
1907         }
1908         if (IS_ERR(page)) {
1909                 int retry = 5;
1910
1911                 error = PTR_ERR(page);
1912                 page = NULL;
1913                 if (error != -ENOSPC)
1914                         goto unlock;
1915                 /*
1916                  * Try to reclaim some space by splitting a huge page
1917                  * beyond i_size on the filesystem.
1918                  */
1919                 while (retry--) {
1920                         int ret;
1921
1922                         ret = shmem_unused_huge_shrink(sbinfo, NULL, 1);
1923                         if (ret == SHRINK_STOP)
1924                                 break;
1925                         if (ret)
1926                                 goto alloc_nohuge;
1927                 }
1928                 goto unlock;
1929         }
1930
1931         if (PageTransHuge(page))
1932                 hindex = round_down(index, HPAGE_PMD_NR);
1933         else
1934                 hindex = index;
1935
1936         if (sgp == SGP_WRITE)
1937                 __SetPageReferenced(page);
1938
1939         error = shmem_add_to_page_cache(page, mapping, hindex,
1940                                         NULL, gfp & GFP_RECLAIM_MASK,
1941                                         charge_mm);
1942         if (error)
1943                 goto unacct;
1944         lru_cache_add(page);
1945
1946         spin_lock_irq(&info->lock);
1947         info->alloced += compound_nr(page);
1948         inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page);
1949         shmem_recalc_inode(inode);
1950         spin_unlock_irq(&info->lock);
1951         alloced = true;
1952
1953         if (PageTransHuge(page) &&
1954             DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
1955                         hindex + HPAGE_PMD_NR - 1) {
1956                 /*
1957                  * Part of the huge page is beyond i_size: subject
1958                  * to shrink under memory pressure.
1959                  */
1960                 spin_lock(&sbinfo->shrinklist_lock);
1961                 /*
1962                  * _careful to defend against unlocked access to
1963                  * ->shrink_list in shmem_unused_huge_shrink()
1964                  */
1965                 if (list_empty_careful(&info->shrinklist)) {
1966                         list_add_tail(&info->shrinklist,
1967                                       &sbinfo->shrinklist);
1968                         sbinfo->shrinklist_len++;
1969                 }
1970                 spin_unlock(&sbinfo->shrinklist_lock);
1971         }
1972
1973         /*
1974          * Let SGP_FALLOC use the SGP_WRITE optimization on a new page.
1975          */
1976         if (sgp == SGP_FALLOC)
1977                 sgp = SGP_WRITE;
1978 clear:
1979         /*
1980          * Let SGP_WRITE caller clear ends if write does not fill page;
1981          * but SGP_FALLOC on a page fallocated earlier must initialize
1982          * it now, lest undo on failure cancel our earlier guarantee.
1983          */
1984         if (sgp != SGP_WRITE && !PageUptodate(page)) {
1985                 int i;
1986
1987                 for (i = 0; i < compound_nr(page); i++) {
1988                         clear_highpage(page + i);
1989                         flush_dcache_page(page + i);
1990                 }
1991                 SetPageUptodate(page);
1992         }
1993
1994         /* Perhaps the file has been truncated since we checked */
1995         if (sgp <= SGP_CACHE &&
1996             ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1997                 if (alloced) {
1998                         ClearPageDirty(page);
1999                         delete_from_page_cache(page);
2000                         spin_lock_irq(&info->lock);
2001                         shmem_recalc_inode(inode);
2002                         spin_unlock_irq(&info->lock);
2003                 }
2004                 error = -EINVAL;
2005                 goto unlock;
2006         }
2007 out:
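             /*
              * page is NULL (SGP_READ of a hole) or the head of a possibly
              * compound page whose index is hindex: hand back the subpage
              * that @index asked for.
              */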
2008         *pagep = page + index - hindex;
2009         return 0;
2010
2011         /*
2012          * Error recovery.
2013          */
2014 unacct:
2015         shmem_inode_unacct_blocks(inode, compound_nr(page));
2016
2017         if (PageTransHuge(page)) {
2018                 unlock_page(page);
2019                 put_page(page);
2020                 goto alloc_nohuge;
2021         }
2022 unlock:
2023         if (page) {
2024                 unlock_page(page);
2025                 put_page(page);
2026         }
2027         if (error == -ENOSPC && !once++) {
2028                 spin_lock_irq(&info->lock);
2029                 shmem_recalc_inode(inode);
2030                 spin_unlock_irq(&info->lock);
2031                 goto repeat;
2032         }
2033         if (error == -EEXIST)
2034                 goto repeat;
2035         return error;
2036 }
2037
2038 /*
2039  * This is like autoremove_wake_function, but it removes the wait queue
2040  * entry unconditionally - even if something else had already woken the
2041  * target.
2042  */
2043 static int synchronous_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
2044 {
2045         int ret = default_wake_function(wait, mode, sync, key);
2046         list_del_init(&wait->entry);
2047         return ret;
2048 }
2049
2050 static vm_fault_t shmem_fault(struct vm_fault *vmf)
2051 {
2052         struct vm_area_struct *vma = vmf->vma;
2053         struct inode *inode = file_inode(vma->vm_file);
2054         gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
2055         enum sgp_type sgp;
2056         int err;
2057         vm_fault_t ret = VM_FAULT_LOCKED;
2058
2059         /*
2060          * Trinity finds that probing a hole which tmpfs is punching can
2061          * prevent the hole-punch from ever completing: which in turn
2062          * locks writers out with its hold on i_rwsem.  So refrain from
2063          * faulting pages into the hole while it's being punched.  Although
2064          * shmem_undo_range() does remove the additions, it may be unable to
2065          * keep up, as each new page needs its own unmap_mapping_range() call,
2066          * and the i_mmap tree grows ever slower to scan if new vmas are added.
2067          *
2068          * It does not matter if we sometimes reach this check just before the
2069          * hole-punch begins, so that one fault then races with the punch:
2070          * we just need to make racing faults a rare case.
2071          *
2072          * The implementation below would be much simpler if we just used a
2073          * standard mutex or completion: but we cannot take i_rwsem in fault,
2074          * and bloating every shmem inode for this unlikely case would be sad.
2075          */
2076         if (unlikely(inode->i_private)) {
2077                 struct shmem_falloc *shmem_falloc;
2078
2079                 spin_lock(&inode->i_lock);
2080                 shmem_falloc = inode->i_private;
2081                 if (shmem_falloc &&
2082                     shmem_falloc->waitq &&
2083                     vmf->pgoff >= shmem_falloc->start &&
2084                     vmf->pgoff < shmem_falloc->next) {
2085                         struct file *fpin;
2086                         wait_queue_head_t *shmem_falloc_waitq;
2087                         DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function);
2088
2089                         ret = VM_FAULT_NOPAGE;
2090                         fpin = maybe_unlock_mmap_for_io(vmf, NULL);
2091                         if (fpin)
2092                                 ret = VM_FAULT_RETRY;
2093
2094                         shmem_falloc_waitq = shmem_falloc->waitq;
2095                         prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
2096                                         TASK_UNINTERRUPTIBLE);
2097                         spin_unlock(&inode->i_lock);
2098                         schedule();
2099
2100                         /*
2101                          * shmem_falloc_waitq points into the shmem_fallocate()
2102                          * stack of the hole-punching task: shmem_falloc_waitq
2103                          * is usually invalid by the time we reach here, but
2104                          * finish_wait() does not dereference it in that case;
2105                          * though i_lock needed lest racing with wake_up_all().
2106                          */
2107                         spin_lock(&inode->i_lock);
2108                         finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
2109                         spin_unlock(&inode->i_lock);
2110
2111                         if (fpin)
2112                                 fput(fpin);
2113                         return ret;
2114                 }
2115                 spin_unlock(&inode->i_lock);
2116         }
2117
2118         sgp = SGP_CACHE;
2119
2120         if ((vma->vm_flags & VM_NOHUGEPAGE) ||
2121             test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
2122                 sgp = SGP_NOHUGE;
2123         else if (vma->vm_flags & VM_HUGEPAGE)
2124                 sgp = SGP_HUGE;
2125
2126         err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, sgp,
2127                                   gfp, vma, vmf, &ret);
2128         if (err)
2129                 return vmf_error(err);
2130         return ret;
2131 }
2132
2133 unsigned long shmem_get_unmapped_area(struct file *file,
2134                                       unsigned long uaddr, unsigned long len,
2135                                       unsigned long pgoff, unsigned long flags)
2136 {
2137         unsigned long (*get_area)(struct file *,
2138                 unsigned long, unsigned long, unsigned long, unsigned long);
2139         unsigned long addr;
2140         unsigned long offset;
2141         unsigned long inflated_len;
2142         unsigned long inflated_addr;
2143         unsigned long inflated_offset;
2144
2145         if (len > TASK_SIZE)
2146                 return -ENOMEM;
2147
2148         get_area = current->mm->get_unmapped_area;
2149         addr = get_area(file, uaddr, len, pgoff, flags);
2150
2151         if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
2152                 return addr;
2153         if (IS_ERR_VALUE(addr))
2154                 return addr;
2155         if (addr & ~PAGE_MASK)
2156                 return addr;
2157         if (addr > TASK_SIZE - len)
2158                 return addr;
2159
2160         if (shmem_huge == SHMEM_HUGE_DENY)
2161                 return addr;
2162         if (len < HPAGE_PMD_SIZE)
2163                 return addr;
2164         if (flags & MAP_FIXED)
2165                 return addr;
2166         /*
2167          * Our priority is to support MAP_SHARED mapped hugely;
2168          * and support MAP_PRIVATE mapped hugely too, until it is COWed.
2169          * But if caller specified an address hint and we allocated area there
2170          * successfully, respect that as before.
2171          */
2172         if (uaddr == addr)
2173                 return addr;
2174
2175         if (shmem_huge != SHMEM_HUGE_FORCE) {
2176                 struct super_block *sb;
2177
2178                 if (file) {
2179                         VM_BUG_ON(file->f_op != &shmem_file_operations);
2180                         sb = file_inode(file)->i_sb;
2181                 } else {
2182                         /*
2183                          * Called directly from mm/mmap.c, or drivers/char/mem.c
2184                          * for "/dev/zero", to create a shared anonymous object.
2185                          */
2186                         if (IS_ERR(shm_mnt))
2187                                 return addr;
2188                         sb = shm_mnt->mnt_sb;
2189                 }
2190                 if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER)
2191                         return addr;
2192         }
2193
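             /*
              * Look for an area where addr % HPAGE_PMD_SIZE matches the file
              * offset % HPAGE_PMD_SIZE, so that the range can later be mapped
              * with huge pages.
              */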
2194         offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1);
2195         if (offset && offset + len < 2 * HPAGE_PMD_SIZE)
2196                 return addr;
2197         if ((addr & (HPAGE_PMD_SIZE-1)) == offset)
2198                 return addr;
2199
2200         inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE;
2201         if (inflated_len > TASK_SIZE)
2202                 return addr;
2203         if (inflated_len < len)
2204                 return addr;
2205
2206         inflated_addr = get_area(NULL, uaddr, inflated_len, 0, flags);
2207         if (IS_ERR_VALUE(inflated_addr))
2208                 return addr;
2209         if (inflated_addr & ~PAGE_MASK)
2210                 return addr;
2211
2212         inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1);
2213         inflated_addr += offset - inflated_offset;
2214         if (inflated_offset > offset)
2215                 inflated_addr += HPAGE_PMD_SIZE;
2216
2217         if (inflated_addr > TASK_SIZE - len)
2218                 return addr;
2219         return inflated_addr;
2220 }
2221
2222 #ifdef CONFIG_NUMA
2223 static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
2224 {
2225         struct inode *inode = file_inode(vma->vm_file);
2226         return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
2227 }
2228
2229 static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
2230                                           unsigned long addr)
2231 {
2232         struct inode *inode = file_inode(vma->vm_file);
2233         pgoff_t index;
2234
2235         index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
2236         return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
2237 }
2238 #endif
2239
2240 int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
2241 {
2242         struct inode *inode = file_inode(file);
2243         struct shmem_inode_info *info = SHMEM_I(inode);
2244         int retval = -ENOMEM;
2245
2246         /*
2247          * What serializes the accesses to info->flags?
2248          * ipc_lock_object() when called from shmctl_do_lock(),
2249          * no serialization needed when called from shm_destroy().
2250          */
2251         if (lock && !(info->flags & VM_LOCKED)) {
2252                 if (!user_shm_lock(inode->i_size, ucounts))
2253                         goto out_nomem;
2254                 info->flags |= VM_LOCKED;
2255                 mapping_set_unevictable(file->f_mapping);
2256         }
2257         if (!lock && (info->flags & VM_LOCKED) && ucounts) {
2258                 user_shm_unlock(inode->i_size, ucounts);
2259                 info->flags &= ~VM_LOCKED;
2260                 mapping_clear_unevictable(file->f_mapping);
2261         }
2262         retval = 0;
2263
2264 out_nomem:
2265         return retval;
2266 }
2267
2268 static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
2269 {
2270         struct shmem_inode_info *info = SHMEM_I(file_inode(file));
2271         int ret;
2272
2273         ret = seal_check_future_write(info->seals, vma);
2274         if (ret)
2275                 return ret;
2276
2277         /* arm64 - allow memory tagging on RAM-based files */
2278         vma->vm_flags |= VM_MTE_ALLOWED;
2279
2280         file_accessed(file);
2281         vma->vm_ops = &shmem_vm_ops;
2282         if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
2283                         ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
2284                         (vma->vm_end & HPAGE_PMD_MASK)) {
2285                 khugepaged_enter(vma, vma->vm_flags);
2286         }
2287         return 0;
2288 }
2289
2290 static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
2291                                      umode_t mode, dev_t dev, unsigned long flags)
2292 {
2293         struct inode *inode;
2294         struct shmem_inode_info *info;
2295         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2296         ino_t ino;
2297
2298         if (shmem_reserve_inode(sb, &ino))
2299                 return NULL;
2300
2301         inode = new_inode(sb);
2302         if (inode) {
2303                 inode->i_ino = ino;
2304                 inode_init_owner(&init_user_ns, inode, dir, mode);
2305                 inode->i_blocks = 0;
2306                 inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
2307                 inode->i_generation = prandom_u32();
2308                 info = SHMEM_I(inode);
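                     /* Zero the shmem-specific fields preceding the embedded struct inode */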
2309                 memset(info, 0, (char *)inode - (char *)info);
2310                 spin_lock_init(&info->lock);
2311                 atomic_set(&info->stop_eviction, 0);
2312                 info->seals = F_SEAL_SEAL;
2313                 info->flags = flags & VM_NORESERVE;
2314                 INIT_LIST_HEAD(&info->shrinklist);
2315                 INIT_LIST_HEAD(&info->swaplist);
2316                 simple_xattrs_init(&info->xattrs);
2317                 cache_no_acl(inode);
2318
2319                 switch (mode & S_IFMT) {
2320                 default:
2321                         inode->i_op = &shmem_special_inode_operations;
2322                         init_special_inode(inode, mode, dev);
2323                         break;
2324                 case S_IFREG:
2325                         inode->i_mapping->a_ops = &shmem_aops;
2326                         inode->i_op = &shmem_inode_operations;
2327                         inode->i_fop = &shmem_file_operations;
2328                         mpol_shared_policy_init(&info->policy,
2329                                                  shmem_get_sbmpol(sbinfo));
2330                         break;
2331                 case S_IFDIR:
2332                         inc_nlink(inode);
2333                         /* Some things misbehave if size == 0 on a directory */
2334                         inode->i_size = 2 * BOGO_DIRENT_SIZE;
2335                         inode->i_op = &shmem_dir_inode_operations;
2336                         inode->i_fop = &simple_dir_operations;
2337                         break;
2338                 case S_IFLNK:
2339                         /*
2340                          * Must not load anything in the rbtree,
2341                          * mpol_free_shared_policy will not be called.
2342                          */
2343                         mpol_shared_policy_init(&info->policy, NULL);
2344                         break;
2345                 }
2346
2347                 lockdep_annotate_inode_mutex_key(inode);
2348         } else
2349                 shmem_free_inode(sb);
2350         return inode;
2351 }
2352
2353 #ifdef CONFIG_USERFAULTFD
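 /*
  * Instantiate a page in a shmem-backed VMA on behalf of userfaultfd's
  * UFFDIO_COPY or UFFDIO_ZEROPAGE: allocate a page (or reuse *pagep from a
  * previous attempt), fill it, add it to the page cache and map it at
  * dst_addr.
  */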
2354 int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
2355                            pmd_t *dst_pmd,
2356                            struct vm_area_struct *dst_vma,
2357                            unsigned long dst_addr,
2358                            unsigned long src_addr,
2359                            bool zeropage,
2360                            struct page **pagep)
2361 {
2362         struct inode *inode = file_inode(dst_vma->vm_file);
2363         struct shmem_inode_info *info = SHMEM_I(inode);
2364         struct address_space *mapping = inode->i_mapping;
2365         gfp_t gfp = mapping_gfp_mask(mapping);
2366         pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
2367         void *page_kaddr;
2368         struct page *page;
2369         int ret;
2370         pgoff_t max_off;
2371
2372         if (!shmem_inode_acct_block(inode, 1)) {
2373                 /*
2374                  * We may have got a page, returned -ENOENT triggering a retry,
2375                  * and now we find ourselves with -ENOMEM. Release the page, to
2376                  * avoid a BUG_ON in our caller.
2377                  */
2378                 if (unlikely(*pagep)) {
2379                         put_page(*pagep);
2380                         *pagep = NULL;
2381                 }
2382                 return -ENOMEM;
2383         }
2384
2385         if (!*pagep) {
2386                 ret = -ENOMEM;
2387                 page = shmem_alloc_page(gfp, info, pgoff);
2388                 if (!page)
2389                         goto out_unacct_blocks;
2390
2391                 if (!zeropage) {        /* COPY */
2392                         page_kaddr = kmap_atomic(page);
2393                         ret = copy_from_user(page_kaddr,
2394                                              (const void __user *)src_addr,
2395                                              PAGE_SIZE);
2396                         kunmap_atomic(page_kaddr);
2397
2398                         /* fallback to copy_from_user outside mmap_lock */
2399                         if (unlikely(ret)) {
2400                                 *pagep = page;
2401                                 ret = -ENOENT;
2402                                 /* don't free the page */
2403                                 goto out_unacct_blocks;
2404                         }
2405                 } else {                /* ZEROPAGE */
2406                         clear_highpage(page);
2407                 }
2408         } else {
2409                 page = *pagep;
2410                 *pagep = NULL;
2411         }
2412
2413         VM_BUG_ON(PageLocked(page));
2414         VM_BUG_ON(PageSwapBacked(page));
2415         __SetPageLocked(page);
2416         __SetPageSwapBacked(page);
2417         __SetPageUptodate(page);
2418
2419         ret = -EFAULT;
2420         max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
2421         if (unlikely(pgoff >= max_off))
2422                 goto out_release;
2423
2424         ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL,
2425                                       gfp & GFP_RECLAIM_MASK, dst_mm);
2426         if (ret)
2427                 goto out_release;
2428
2429         ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
2430                                        page, true, false);
2431         if (ret)
2432                 goto out_delete_from_cache;
2433
2434         spin_lock_irq(&info->lock);
2435         info->alloced++;
2436         inode->i_blocks += BLOCKS_PER_PAGE;
2437         shmem_recalc_inode(inode);
2438         spin_unlock_irq(&info->lock);
2439
2440         SetPageDirty(page);
2441         unlock_page(page);
2442         return 0;
2443 out_delete_from_cache:
2444         delete_from_page_cache(page);
2445 out_release:
2446         unlock_page(page);
2447         put_page(page);
2448 out_unacct_blocks:
2449         shmem_inode_unacct_blocks(inode, 1);
2450         return ret;
2451 }
2452 #endif /* CONFIG_USERFAULTFD */
2453
2454 #ifdef CONFIG_TMPFS
2455 static const struct inode_operations shmem_symlink_inode_operations;
2456 static const struct inode_operations shmem_short_symlink_operations;
2457
2458 #ifdef CONFIG_TMPFS_XATTR
2459 static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
2460 #else
2461 #define shmem_initxattrs NULL
2462 #endif
2463
2464 static int
2465 shmem_write_begin(struct file *file, struct address_space *mapping,
2466                         loff_t pos, unsigned len, unsigned flags,
2467                         struct page **pagep, void **fsdata)
2468 {
2469         struct inode *inode = mapping->host;
2470         struct shmem_inode_info *info = SHMEM_I(inode);
2471         pgoff_t index = pos >> PAGE_SHIFT;
2472
2473         /* i_rwsem is held by caller */
2474         if (unlikely(info->seals & (F_SEAL_GROW |
2475                                    F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) {
2476                 if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))
2477                         return -EPERM;
2478                 if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
2479                         return -EPERM;
2480         }
2481
2482         return shmem_getpage(inode, index, pagep, SGP_WRITE);
2483 }
2484
2485 static int
2486 shmem_write_end(struct file *file, struct address_space *mapping,
2487                         loff_t pos, unsigned len, unsigned copied,
2488                         struct page *page, void *fsdata)
2489 {
2490         struct inode *inode = mapping->host;
2491
2492         if (pos + copied > inode->i_size)
2493                 i_size_write(inode, pos + copied);
2494
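             /*
              * A short write to a freshly allocated page leaves the rest of it
              * uninitialized: zero whatever the copy did not reach (including
              * the other subpages of a huge page) before marking it uptodate.
              */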
2495         if (!PageUptodate(page)) {
2496                 struct page *head = compound_head(page);
2497                 if (PageTransCompound(page)) {
2498                         int i;
2499
2500                         for (i = 0; i < HPAGE_PMD_NR; i++) {
2501                                 if (head + i == page)
2502                                         continue;
2503                                 clear_highpage(head + i);
2504                                 flush_dcache_page(head + i);
2505                         }
2506                 }
2507                 if (copied < PAGE_SIZE) {
2508                         unsigned from = pos & (PAGE_SIZE - 1);
2509                         zero_user_segments(page, 0, from,
2510                                         from + copied, PAGE_SIZE);
2511                 }
2512                 SetPageUptodate(head);
2513         }
2514         set_page_dirty(page);
2515         unlock_page(page);
2516         put_page(page);
2517
2518         return copied;
2519 }
2520
2521 static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
2522 {
2523         struct file *file = iocb->ki_filp;
2524         struct inode *inode = file_inode(file);
2525         struct address_space *mapping = inode->i_mapping;
2526         pgoff_t index;
2527         unsigned long offset;
2528         enum sgp_type sgp = SGP_READ;
2529         int error = 0;
2530         ssize_t retval = 0;
2531         loff_t *ppos = &iocb->ki_pos;
2532
2533         /*
2534          * Might this read be for a stacking filesystem?  Then when reading
2535          * holes of a sparse file, we actually need to allocate those pages,
2536          * and even mark them dirty, so it cannot exceed the max_blocks limit.
2537          */
2538         if (!iter_is_iovec(to))
2539                 sgp = SGP_CACHE;
2540
2541         index = *ppos >> PAGE_SHIFT;
2542         offset = *ppos & ~PAGE_MASK;
2543
2544         for (;;) {
2545                 struct page *page = NULL;
2546                 pgoff_t end_index;
2547                 unsigned long nr, ret;
2548                 loff_t i_size = i_size_read(inode);
2549
2550                 end_index = i_size >> PAGE_SHIFT;
2551                 if (index > end_index)
2552                         break;
2553                 if (index == end_index) {
2554                         nr = i_size & ~PAGE_MASK;
2555                         if (nr <= offset)
2556                                 break;
2557                 }
2558
2559                 error = shmem_getpage(inode, index, &page, sgp);
2560                 if (error) {
2561                         if (error == -EINVAL)
2562                                 error = 0;
2563                         break;
2564                 }
2565                 if (page) {
2566                         if (sgp == SGP_CACHE)
2567                                 set_page_dirty(page);
2568                         unlock_page(page);
2569                 }
2570
2571                 /*
2572                  * We must re-check i_size after getting the page, since reads
2573                  * (unlike writes) are called without i_rwsem protection against truncate
2574                  */
2575                 nr = PAGE_SIZE;
2576                 i_size = i_size_read(inode);
2577                 end_index = i_size >> PAGE_SHIFT;
2578                 if (index == end_index) {
2579                         nr = i_size & ~PAGE_MASK;
2580                         if (nr <= offset) {
2581                                 if (page)
2582                                         put_page(page);
2583                                 break;
2584                         }
2585                 }
2586                 nr -= offset;
2587
2588                 if (page) {
2589                         /*
2590                          * If users can be writing to this page using arbitrary
2591                          * virtual addresses, take care about potential aliasing
2592                          * before reading the page on the kernel side.
2593                          */
2594                         if (mapping_writably_mapped(mapping))
2595                                 flush_dcache_page(page);
2596                         /*
2597                          * Mark the page accessed if we read the beginning.
2598                          */
2599                         if (!offset)
2600                                 mark_page_accessed(page);
2601                 } else {
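                             /* Reading a hole: copy zeros from the shared zero page */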
2602                         page = ZERO_PAGE(0);
2603                         get_page(page);
2604                 }
2605
2606                 /*
2607                  * Ok, we have the page, and it's up-to-date, so
2608                  * now we can copy it to user space...
2609                  */
2610                 ret = copy_page_to_iter(page, offset, nr, to);
2611                 retval += ret;
2612                 offset += ret;
2613                 index += offset >> PAGE_SHIFT;
2614                 offset &= ~PAGE_MASK;
2615
2616                 put_page(page);
2617                 if (!iov_iter_count(to))
2618                         break;
2619                 if (ret < nr) {
2620                         error = -EFAULT;
2621                         break;
2622                 }
2623                 cond_resched();
2624         }
2625
2626         *ppos = ((loff_t) index << PAGE_SHIFT) + offset;
2627         file_accessed(file);
2628         return retval ? retval : error;
2629 }
2630
2631 static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
2632 {
2633         struct address_space *mapping = file->f_mapping;
2634         struct inode *inode = mapping->host;
2635
2636         if (whence != SEEK_DATA && whence != SEEK_HOLE)
2637                 return generic_file_llseek_size(file, offset, whence,
2638                                         MAX_LFS_FILESIZE, i_size_read(inode));
2639         if (offset < 0)
2640                 return -ENXIO;
2641
2642         inode_lock(inode);
2643         /* We're holding i_rwsem so we can access i_size directly */
2644         offset = mapping_seek_hole_data(mapping, offset, inode->i_size, whence);
2645         if (offset >= 0)
2646                 offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
2647         inode_unlock(inode);
2648         return offset;
2649 }
2650
2651 static long shmem_fallocate(struct file *file, int mode, loff_t offset,
2652                                                          loff_t len)
2653 {
2654         struct inode *inode = file_inode(file);
2655         struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
2656         struct shmem_inode_info *info = SHMEM_I(inode);
2657         struct shmem_falloc shmem_falloc;
2658         pgoff_t start, index, end;
2659         int error;
2660
2661         if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2662                 return -EOPNOTSUPP;
2663
2664         inode_lock(inode);
2665
2666         if (mode & FALLOC_FL_PUNCH_HOLE) {
2667                 struct address_space *mapping = file->f_mapping;
2668                 loff_t unmap_start = round_up(offset, PAGE_SIZE);
2669                 loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
2670                 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
2671
2672                 /* protected by i_rwsem */
2673                 if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
2674                         error = -EPERM;
2675                         goto out;
2676                 }
2677
2678                 shmem_falloc.waitq = &shmem_falloc_waitq;
2679                 shmem_falloc.start = (u64)unmap_start >> PAGE_SHIFT;
2680                 shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
2681                 spin_lock(&inode->i_lock);
2682                 inode->i_private = &shmem_falloc;
2683                 spin_unlock(&inode->i_lock);
2684
2685                 if ((u64)unmap_end > (u64)unmap_start)
2686                         unmap_mapping_range(mapping, unmap_start,
2687                                             1 + unmap_end - unmap_start, 0);
2688                 shmem_truncate_range(inode, offset, offset + len - 1);
2689                 /* No need to unmap again: hole-punching leaves COWed pages */
2690
2691                 spin_lock(&inode->i_lock);
2692                 inode->i_private = NULL;
2693                 wake_up_all(&shmem_falloc_waitq);
2694                 WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head));
2695                 spin_unlock(&inode->i_lock);
2696                 error = 0;
2697                 goto out;
2698         }
2699
2700         /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
2701         error = inode_newsize_ok(inode, offset + len);
2702         if (error)
2703                 goto out;
2704
2705         if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
2706                 error = -EPERM;
2707                 goto out;
2708         }
2709
2710         start = offset >> PAGE_SHIFT;
2711         end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
2712         /* Try to avoid a swapstorm if len is impossible to satisfy */
2713         if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
2714                 error = -ENOSPC;
2715                 goto out;
2716         }
2717
2718         shmem_falloc.waitq = NULL;
2719         shmem_falloc.start = start;
2720         shmem_falloc.next  = start;
2721         shmem_falloc.nr_falloced = 0;
2722         shmem_falloc.nr_unswapped = 0;
2723         spin_lock(&inode->i_lock);
2724         inode->i_private = &shmem_falloc;
2725         spin_unlock(&inode->i_lock);
2726
2727         for (index = start; index < end; index++) {
2728                 struct page *page;
2729
2730                 /*
2731                  * Good, the fallocate(2) manpage permits EINTR: we may have
2732                  * been interrupted because we are using up too much memory.
2733                  */
2734                 if (signal_pending(current))
2735                         error = -EINTR;
2736                 else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
2737                         error = -ENOMEM;
2738                 else
2739                         error = shmem_getpage(inode, index, &page, SGP_FALLOC);
2740                 if (error) {
2741                         /* Remove the !PageUptodate pages we added */
2742                         if (index > start) {
2743                                 shmem_undo_range(inode,
2744                                     (loff_t)start << PAGE_SHIFT,
2745                                     ((loff_t)index << PAGE_SHIFT) - 1, true);
2746                         }
2747                         goto undone;
2748                 }
2749
2750                 /*
2751                  * Inform shmem_writepage() how far we have reached.
2752                  * No need for lock or barrier: we have the page lock.
2753                  */
2754                 shmem_falloc.next++;
2755                 if (!PageUptodate(page))
2756                         shmem_falloc.nr_falloced++;
2757
2758                 /*
2759                  * If !PageUptodate, leave it that way so that freeable pages
2760                  * can be recognized if we need to rollback on error later.
2761                  * But set_page_dirty so that memory pressure will swap rather
2762                  * than free the pages we are allocating (and SGP_CACHE pages
2763                  * might still be clean: we now need to mark those dirty too).
2764                  */
2765                 set_page_dirty(page);
2766                 unlock_page(page);
2767                 put_page(page);
2768                 cond_resched();
2769         }
2770
2771         if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
2772                 i_size_write(inode, offset + len);
2773         inode->i_ctime = current_time(inode);
2774 undone:
2775         spin_lock(&inode->i_lock);
2776         inode->i_private = NULL;
2777         spin_unlock(&inode->i_lock);
2778 out:
2779         inode_unlock(inode);
2780         return error;
2781 }
2782
2783 static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
2784 {
2785         struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
2786
2787         buf->f_type = TMPFS_MAGIC;
2788         buf->f_bsize = PAGE_SIZE;
2789         buf->f_namelen = NAME_MAX;
2790         if (sbinfo->max_blocks) {
2791                 buf->f_blocks = sbinfo->max_blocks;
2792                 buf->f_bavail =
2793                 buf->f_bfree  = sbinfo->max_blocks -
2794                                 percpu_counter_sum(&sbinfo->used_blocks);
2795         }
2796         if (sbinfo->max_inodes) {
2797                 buf->f_files = sbinfo->max_inodes;
2798                 buf->f_ffree = sbinfo->free_inodes;
2799         }
2800         /* else leave those fields 0 like simple_statfs */
2801
2802         buf->f_fsid = uuid_to_fsid(dentry->d_sb->s_uuid.b);
2803
2804         return 0;
2805 }
2806
2807 /*
2808  * File creation. Allocate an inode, and we're done.
2809  */
2810 static int
2811 shmem_mknod(struct user_namespace *mnt_userns, struct inode *dir,
2812             struct dentry *dentry, umode_t mode, dev_t dev)
2813 {
2814         struct inode *inode;
2815         int error = -ENOSPC;
2816
2817         inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
2818         if (inode) {
2819                 error = simple_acl_create(dir, inode);
2820                 if (error)
2821                         goto out_iput;
2822                 error = security_inode_init_security(inode, dir,
2823                                                      &dentry->d_name,
2824                                                      shmem_initxattrs, NULL);
2825                 if (error && error != -EOPNOTSUPP)
2826                         goto out_iput;
2827
2828                 error = 0;
2829                 dir->i_size += BOGO_DIRENT_SIZE;
2830                 dir->i_ctime = dir->i_mtime = current_time(dir);
2831                 d_instantiate(dentry, inode);
2832                 dget(dentry); /* Extra count - pin the dentry in core */
2833         }
2834         return error;
2835 out_iput:
2836         iput(inode);
2837         return error;
2838 }
2839
2840 static int
2841 shmem_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
2842               struct dentry *dentry, umode_t mode)
2843 {
2844         struct inode *inode;
2845         int error = -ENOSPC;
2846
2847         inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE);
2848         if (inode) {
2849                 error = security_inode_init_security(inode, dir,
2850                                                      NULL,
2851                                                      shmem_initxattrs, NULL);
2852                 if (error && error != -EOPNOTSUPP)
2853                         goto out_iput;
2854                 error = simple_acl_create(dir, inode);
2855                 if (error)
2856                         goto out_iput;
2857                 d_tmpfile(dentry, inode);
2858         }
2859         return error;
2860 out_iput:
2861         iput(inode);
2862         return error;
2863 }
2864
2865 static int shmem_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
2866                        struct dentry *dentry, umode_t mode)
2867 {
2868         int error;
2869
2870         error = shmem_mknod(&init_user_ns, dir, dentry, mode | S_IFDIR, 0);
2871         if (error)
2872                 return error;
2873         inc_nlink(dir);
2874         return 0;
2875 }
2876
2877 static int shmem_create(struct user_namespace *mnt_userns, struct inode *dir,
2878                         struct dentry *dentry, umode_t mode, bool excl)
2879 {
2880         return shmem_mknod(&init_user_ns, dir, dentry, mode | S_IFREG, 0);
2881 }
2882
2883 /*
2884  * Link a file.
2885  */
2886 static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
2887 {
2888         struct inode *inode = d_inode(old_dentry);
2889         int ret = 0;
2890
2891         /*
2892          * No ordinary (disk based) filesystem counts links as inodes;
2893          * but each new link needs a new dentry, pinning lowmem, and
2894          * tmpfs dentries cannot be pruned until they are unlinked.
2895          * But if an O_TMPFILE file is linked into the tmpfs, the
2896          * first link must skip that, to get the accounting right.
2897          */
2898         if (inode->i_nlink) {
2899                 ret = shmem_reserve_inode(inode->i_sb, NULL);
2900                 if (ret)
2901                         goto out;
2902         }
2903
2904         dir->i_size += BOGO_DIRENT_SIZE;
2905         inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
2906         inc_nlink(inode);
2907         ihold(inode);   /* New dentry reference */
2908         dget(dentry);           /* Extra pinning count for the created dentry */
2909         d_instantiate(dentry, inode);
2910 out:
2911         return ret;
2912 }
2913
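/*
 * Unlink: drop the extra inode reservation when removing one of several
 * hard links; the dentry pin taken at create/link time is dropped below.
 */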
2914 static int shmem_unlink(struct inode *dir, struct dentry *dentry)
2915 {
2916         struct inode *inode = d_inode(dentry);
2917
2918         if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
2919                 shmem_free_inode(inode->i_sb);
2920
2921         dir->i_size -= BOGO_DIRENT_SIZE;
2922         inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
2923         drop_nlink(inode);
2924         dput(dentry);   /* Undo the count from "create" - this does all the work */
2925         return 0;
2926 }
2927
2928 static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
2929 {
2930         if (!simple_empty(dentry))
2931                 return -ENOTEMPTY;
2932
2933         drop_nlink(d_inode(dentry));
2934         drop_nlink(dir);
2935         return shmem_unlink(dir, dentry);
2936 }
2937
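/*
 * RENAME_EXCHANGE: the VFS swaps the dentries; all that is left for tmpfs
 * is to fix up the parents' link counts when a directory and a
 * non-directory change places, and to update the timestamps.
 */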
2938 static int shmem_exchange(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
2939 {
2940         bool old_is_dir = d_is_dir(old_dentry);
2941         bool new_is_dir = d_is_dir(new_dentry);
2942
2943         if (old_dir != new_dir && old_is_dir != new_is_dir) {
2944                 if (old_is_dir) {
2945                         drop_nlink(old_dir);
2946                         inc_nlink(new_dir);
2947                 } else {
2948                         drop_nlink(new_dir);
2949                         inc_nlink(old_dir);
2950                 }
2951         }
2952         old_dir->i_ctime = old_dir->i_mtime =
2953         new_dir->i_ctime = new_dir->i_mtime =
2954         d_inode(old_dentry)->i_ctime =
2955         d_inode(new_dentry)->i_ctime = current_time(old_dir);
2956
2957         return 0;
2958 }
2959
2960 static int shmem_whiteout(struct user_namespace *mnt_userns,
2961                           struct inode *old_dir, struct dentry *old_dentry)
2962 {
2963         struct dentry *whiteout;
2964         int error;
2965
2966         whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name);
2967         if (!whiteout)
2968                 return -ENOMEM;
2969
2970         error = shmem_mknod(&init_user_ns, old_dir, whiteout,
2971                             S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
2972         dput(whiteout);
2973         if (error)
2974                 return error;
2975
2976         /*
2977          * Cheat and hash the whiteout while the old dentry is still in
2978          * place, instead of playing games with FS_RENAME_DOES_D_MOVE.
2979          *
2980          * d_lookup() will consistently find one of them at this point,
2981          * not sure which one, but that isn't even important.
2982          */
2983         d_rehash(whiteout);
2984         return 0;
2985 }
2986
2987 /*
2988  * The VFS layer already does all the dentry stuff for rename;
2989  * we just have to decrement the usage count for the target if
2990  * it exists so that the VFS layer correctly frees it when it
2991  * gets overwritten.
2992  */
2993 static int shmem_rename2(struct user_namespace *mnt_userns,
2994                          struct inode *old_dir, struct dentry *old_dentry,
2995                          struct inode *new_dir, struct dentry *new_dentry,
2996                          unsigned int flags)
2997 {
2998         struct inode *inode = d_inode(old_dentry);
2999         int they_are_dirs = S_ISDIR(inode->i_mode);
3000
3001         if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
3002                 return -EINVAL;
3003
3004         if (flags & RENAME_EXCHANGE)
3005                 return shmem_exchange(old_dir, old_dentry, new_dir, new_dentry);
3006
3007         if (!simple_empty(new_dentry))
3008                 return -ENOTEMPTY;
3009
3010         if (flags & RENAME_WHITEOUT) {
3011                 int error;
3012
3013                 error = shmem_whiteout(&init_user_ns, old_dir, old_dentry);
3014                 if (error)
3015                         return error;
3016         }
3017
3018         if (d_really_is_positive(new_dentry)) {
3019                 (void) shmem_unlink(new_dir, new_dentry);
3020                 if (they_are_dirs) {
3021                         drop_nlink(d_inode(new_dentry));
3022                         drop_nlink(old_dir);
3023                 }
3024         } else if (they_are_dirs) {
3025                 drop_nlink(old_dir);
3026                 inc_nlink(new_dir);
3027         }
3028
3029         old_dir->i_size -= BOGO_DIRENT_SIZE;
3030         new_dir->i_size += BOGO_DIRENT_SIZE;
3031         old_dir->i_ctime = old_dir->i_mtime =
3032         new_dir->i_ctime = new_dir->i_mtime =
3033         inode->i_ctime = current_time(old_dir);
3034         return 0;
3035 }
3036
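/*
 * Symlink creation: targets up to SHORT_SYMLINK_LEN are duplicated into
 * inode->i_link and served by simple_get_link(); longer targets are
 * written into page 0 of the mapping.
 */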
3037 static int shmem_symlink(struct user_namespace *mnt_userns, struct inode *dir,
3038                          struct dentry *dentry, const char *symname)
3039 {
3040         int error;
3041         int len;
3042         struct inode *inode;
3043         struct page *page;
3044
3045         len = strlen(symname) + 1;
3046         if (len > PAGE_SIZE)
3047                 return -ENAMETOOLONG;
3048
3049         inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK | 0777, 0,
3050                                 VM_NORESERVE);
3051         if (!inode)
3052                 return -ENOSPC;
3053
3054         error = security_inode_init_security(inode, dir, &dentry->d_name,
3055                                              shmem_initxattrs, NULL);
3056         if (error && error != -EOPNOTSUPP) {
3057                 iput(inode);
3058                 return error;
3059         }
3060
3061         inode->i_size = len-1;
3062         if (len <= SHORT_SYMLINK_LEN) {
3063                 inode->i_link = kmemdup(symname, len, GFP_KERNEL);
3064                 if (!inode->i_link) {
3065                         iput(inode);
3066                         return -ENOMEM;
3067                 }
3068                 inode->i_op = &shmem_short_symlink_operations;
3069         } else {
3070                 inode_nohighmem(inode);
3071                 error = shmem_getpage(inode, 0, &page, SGP_WRITE);
3072                 if (error) {
3073                         iput(inode);
3074                         return error;
3075                 }
3076                 inode->i_mapping->a_ops = &shmem_aops;
3077                 inode->i_op = &shmem_symlink_inode_operations;
3078                 memcpy(page_address(page), symname, len);
3079                 SetPageUptodate(page);
3080                 set_page_dirty(page);
3081                 unlock_page(page);
3082                 put_page(page);
3083         }
3084         dir->i_size += BOGO_DIRENT_SIZE;
3085         dir->i_ctime = dir->i_mtime = current_time(dir);
3086         d_instantiate(dentry, inode);
3087         dget(dentry);
3088         return 0;
3089 }
3090
3091 static void shmem_put_link(void *arg)
3092 {
3093         mark_page_accessed(arg);
3094         put_page(arg);
3095 }
3096
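/*
 * Follow a symlink.  In RCU walk (dentry == NULL) we may only use a page
 * that is already present and uptodate in the page cache; otherwise
 * return -ECHILD so the VFS retries in ref-walk mode, where
 * shmem_getpage() may allocate or swap the page back in.
 */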
3097 static const char *shmem_get_link(struct dentry *dentry,
3098                                   struct inode *inode,
3099                                   struct delayed_call *done)
3100 {
3101         struct page *page = NULL;
3102         int error;
3103         if (!dentry) {
3104                 page = find_get_page(inode->i_mapping, 0);
3105                 if (!page)
3106                         return ERR_PTR(-ECHILD);
3107                 if (!PageUptodate(page)) {
3108                         put_page(page);
3109                         return ERR_PTR(-ECHILD);
3110                 }
3111         } else {
3112                 error = shmem_getpage(inode, 0, &page, SGP_READ);
3113                 if (error)
3114                         return ERR_PTR(error);
3115                 unlock_page(page);
3116         }
3117         set_delayed_call(done, shmem_put_link, page);
3118         return page_address(page);
3119 }
3120
3121 #ifdef CONFIG_TMPFS_XATTR
3122 /*
3123  * Superblocks without xattr inode operations may get some security.* xattr
3124  * support from the LSM "for free". As soon as we have any other xattrs
3125  * like ACLs, we also need to implement the security.* handlers at
3126  * filesystem level, though.
3127  */
3128
3129 /*
3130  * Callback for security_inode_init_security() for acquiring xattrs.
3131  */
3132 static int shmem_initxattrs(struct inode *inode,
3133                             const struct xattr *xattr_array,
3134                             void *fs_info)
3135 {
3136         struct shmem_inode_info *info = SHMEM_I(inode);
3137         const struct xattr *xattr;
3138         struct simple_xattr *new_xattr;
3139         size_t len;
3140
3141         for (xattr = xattr_array; xattr->name != NULL; xattr++) {
3142                 new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
3143                 if (!new_xattr)
3144                         return -ENOMEM;
3145
3146                 len = strlen(xattr->name) + 1;
3147                 new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
3148                                           GFP_KERNEL);
3149                 if (!new_xattr->name) {
3150                         kvfree(new_xattr);
3151                         return -ENOMEM;
3152                 }
3153
3154                 memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
3155                        XATTR_SECURITY_PREFIX_LEN);
3156                 memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
3157                        xattr->name, len);
3158
3159                 simple_xattr_list_add(&info->xattrs, new_xattr);
3160         }
3161
3162         return 0;
3163 }
3164
3165 static int shmem_xattr_handler_get(const struct xattr_handler *handler,
3166                                    struct dentry *unused, struct inode *inode,
3167                                    const char *name, void *buffer, size_t size)
3168 {
3169         struct shmem_inode_info *info = SHMEM_I(inode);
3170
3171         name = xattr_full_name(handler, name);
3172         return simple_xattr_get(&info->xattrs, name, buffer, size);
3173 }
3174
3175 static int shmem_xattr_handler_set(const struct xattr_handler *handler,
3176                                    struct user_namespace *mnt_userns,
3177                                    struct dentry *unused, struct inode *inode,
3178                                    const char *name, const void *value,
3179                                    size_t size, int flags)
3180 {
3181         struct shmem_inode_info *info = SHMEM_I(inode);
3182
3183         name = xattr_full_name(handler, name);
3184         return simple_xattr_set(&info->xattrs, name, value, size, flags, NULL);
3185 }
3186
3187 static const struct xattr_handler shmem_security_xattr_handler = {
3188         .prefix = XATTR_SECURITY_PREFIX,
3189         .get = shmem_xattr_handler_get,
3190         .set = shmem_xattr_handler_set,
3191 };
3192
3193 static const struct xattr_handler shmem_trusted_xattr_handler = {
3194         .prefix = XATTR_TRUSTED_PREFIX,
3195         .get = shmem_xattr_handler_get,
3196         .set = shmem_xattr_handler_set,
3197 };
3198
3199 static const struct xattr_handler *shmem_xattr_handlers[] = {
3200 #ifdef CONFIG_TMPFS_POSIX_ACL
3201         &posix_acl_access_xattr_handler,
3202         &posix_acl_default_xattr_handler,
3203 #endif
3204         &shmem_security_xattr_handler,
3205         &shmem_trusted_xattr_handler,
3206         NULL
3207 };
3208
3209 static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
3210 {
3211         struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
3212         return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size);
3213 }
3214 #endif /* CONFIG_TMPFS_XATTR */
3215
3216 static const struct inode_operations shmem_short_symlink_operations = {
3217         .get_link       = simple_get_link,
3218 #ifdef CONFIG_TMPFS_XATTR
3219         .listxattr      = shmem_listxattr,
3220 #endif
3221 };
3222
3223 static const struct inode_operations shmem_symlink_inode_operations = {
3224         .get_link       = shmem_get_link,
3225 #ifdef CONFIG_TMPFS_XATTR
3226         .listxattr      = shmem_listxattr,
3227 #endif
3228 };
3229
3230 static struct dentry *shmem_get_parent(struct dentry *child)
3231 {
3232         return ERR_PTR(-ESTALE);
3233 }
3234
3235 static int shmem_match(struct inode *ino, void *vfh)
3236 {
3237         __u32 *fh = vfh;
3238         __u64 inum = fh[2];
3239         inum = (inum << 32) | fh[1];
3240         return ino->i_ino == inum && fh[0] == ino->i_generation;
3241 }
3242
3243 /* Find any alias of inode, but prefer a hashed alias */
3244 static struct dentry *shmem_find_alias(struct inode *inode)
3245 {
3246         struct dentry *alias = d_find_alias(inode);
3247
3248         return alias ?: d_find_any_alias(inode);
3249 }
3250
3251
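/*
 * Decode a file handle produced by shmem_encode_fh(): fh[0] is the inode
 * generation, fh[1]/fh[2] are the low/high halves of the inode number.
 * The inode was hashed at i_ino + i_generation, and shmem_match() checks
 * the full triple.
 */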
3252 static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
3253                 struct fid *fid, int fh_len, int fh_type)
3254 {
3255         struct inode *inode;
3256         struct dentry *dentry = NULL;
3257         u64 inum;
3258
3259         if (fh_len < 3)
3260                 return NULL;
3261
3262         inum = fid->raw[2];
3263         inum = (inum << 32) | fid->raw[1];
3264
3265         inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
3266                         shmem_match, fid->raw);
3267         if (inode) {
3268                 dentry = shmem_find_alias(inode);
3269                 iput(inode);
3270         }
3271
3272         return dentry;
3273 }
3274
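/*
 * Encode a 3-word file handle: generation plus the 64-bit inode number.
 * tmpfs inodes are not normally hashed, so hash lazily on first export.
 */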
3275 static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
3276                                 struct inode *parent)
3277 {
3278         if (*len < 3) {
3279                 *len = 3;
3280                 return FILEID_INVALID;
3281         }
3282
3283         if (inode_unhashed(inode)) {
3284                 /* Unfortunately insert_inode_hash is not idempotent,
3285                  * so as we hash inodes here rather than at creation
3286                  * time, we need a lock to ensure we only try
3287                  * to do it once
3288                  */
3289                 static DEFINE_SPINLOCK(lock);
3290                 spin_lock(&lock);
3291                 if (inode_unhashed(inode))
3292                         __insert_inode_hash(inode,
3293                                             inode->i_ino + inode->i_generation);
3294                 spin_unlock(&lock);
3295         }
3296
3297         fh[0] = inode->i_generation;
3298         fh[1] = inode->i_ino;
3299         fh[2] = ((__u64)inode->i_ino) >> 32;
3300
3301         *len = 3;
3302         return 1;
3303 }
3304
3305 static const struct export_operations shmem_export_ops = {
3306         .get_parent     = shmem_get_parent,
3307         .encode_fh      = shmem_encode_fh,
3308         .fh_to_dentry   = shmem_fh_to_dentry,
3309 };
3310
3311 enum shmem_param {
3312         Opt_gid,
3313         Opt_huge,
3314         Opt_mode,
3315         Opt_mpol,
3316         Opt_nr_blocks,
3317         Opt_nr_inodes,
3318         Opt_size,
3319         Opt_uid,
3320         Opt_inode32,
3321         Opt_inode64,
3322 };
3323
3324 static const struct constant_table shmem_param_enums_huge[] = {
3325         {"never",       SHMEM_HUGE_NEVER },
3326         {"always",      SHMEM_HUGE_ALWAYS },
3327         {"within_size", SHMEM_HUGE_WITHIN_SIZE },
3328         {"advise",      SHMEM_HUGE_ADVISE },
3329         {}
3330 };
3331
3332 const struct fs_parameter_spec shmem_fs_parameters[] = {
3333         fsparam_u32   ("gid",           Opt_gid),
3334         fsparam_enum  ("huge",          Opt_huge,  shmem_param_enums_huge),
3335         fsparam_u32oct("mode",          Opt_mode),
3336         fsparam_string("mpol",          Opt_mpol),
3337         fsparam_string("nr_blocks",     Opt_nr_blocks),
3338         fsparam_string("nr_inodes",     Opt_nr_inodes),
3339         fsparam_string("size",          Opt_size),
3340         fsparam_u32   ("uid",           Opt_uid),
3341         fsparam_flag  ("inode32",       Opt_inode32),
3342         fsparam_flag  ("inode64",       Opt_inode64),
3343         {}
3344 };
3345
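/*
 * Parse one mount or remount parameter into the shmem_options context.
 * "size" accepts memparse() suffixes and a trailing '%' of total RAM;
 * it is converted to a block (page) count in ctx->blocks.
 */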
3346 static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
3347 {
3348         struct shmem_options *ctx = fc->fs_private;
3349         struct fs_parse_result result;
3350         unsigned long long size;
3351         char *rest;
3352         int opt;
3353
3354         opt = fs_parse(fc, shmem_fs_parameters, param, &result);
3355         if (opt < 0)
3356                 return opt;
3357
3358         switch (opt) {
3359         case Opt_size:
3360                 size = memparse(param->string, &rest);
3361                 if (*rest == '%') {
3362                         size <<= PAGE_SHIFT;
3363                         size *= totalram_pages();
3364                         do_div(size, 100);
3365                         rest++;
3366                 }
3367                 if (*rest)
3368                         goto bad_value;
3369                 ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE);
3370                 ctx->seen |= SHMEM_SEEN_BLOCKS;
3371                 break;
3372         case Opt_nr_blocks:
3373                 ctx->blocks = memparse(param->string, &rest);
3374                 if (*rest)
3375                         goto bad_value;
3376                 ctx->seen |= SHMEM_SEEN_BLOCKS;
3377                 break;
3378         case Opt_nr_inodes:
3379                 ctx->inodes = memparse(param->string, &rest);
3380                 if (*rest)
3381                         goto bad_value;
3382                 ctx->seen |= SHMEM_SEEN_INODES;
3383                 break;
3384         case Opt_mode:
3385                 ctx->mode = result.uint_32 & 07777;
3386                 break;
3387         case Opt_uid:
3388                 ctx->uid = make_kuid(current_user_ns(), result.uint_32);
3389                 if (!uid_valid(ctx->uid))
3390                         goto bad_value;
3391                 break;
3392         case Opt_gid:
3393                 ctx->gid = make_kgid(current_user_ns(), result.uint_32);
3394                 if (!gid_valid(ctx->gid))
3395                         goto bad_value;
3396                 break;
3397         case Opt_huge:
3398                 ctx->huge = result.uint_32;
3399                 if (ctx->huge != SHMEM_HUGE_NEVER &&
3400                     !(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
3401                       has_transparent_hugepage()))
3402                         goto unsupported_parameter;
3403                 ctx->seen |= SHMEM_SEEN_HUGE;
3404                 break;
3405         case Opt_mpol:
3406                 if (IS_ENABLED(CONFIG_NUMA)) {
3407                         mpol_put(ctx->mpol);
3408                         ctx->mpol = NULL;
3409                         if (mpol_parse_str(param->string, &ctx->mpol))
3410                                 goto bad_value;
3411                         break;
3412                 }
3413                 goto unsupported_parameter;
3414         case Opt_inode32:
3415                 ctx->full_inums = false;
3416                 ctx->seen |= SHMEM_SEEN_INUMS;
3417                 break;
3418         case Opt_inode64:
3419                 if (sizeof(ino_t) < 8) {
3420                         return invalfc(fc,
3421                                        "Cannot use inode64 with <64bit inums in kernel\n");
3422                 }
3423                 ctx->full_inums = true;
3424                 ctx->seen |= SHMEM_SEEN_INUMS;
3425                 break;
3426         }
3427         return 0;
3428
3429 unsupported_parameter:
3430         return invalfc(fc, "Unsupported parameter '%s'", param->key);
3431 bad_value:
3432         return invalfc(fc, "Bad value for '%s'", param->key);
3433 }
3434
3435 static int shmem_parse_options(struct fs_context *fc, void *data)
3436 {
3437         char *options = data;
3438
3439         if (options) {
3440                 int err = security_sb_eat_lsm_opts(options, &fc->security);
3441                 if (err)
3442                         return err;
3443         }
3444
3445         while (options != NULL) {
3446                 char *this_char = options;
3447                 for (;;) {
3448                         /*
3449                          * NUL-terminate this option: unfortunately,
3450                          * mount options form a comma-separated list,
3451                          * but mpol's nodelist may also contain commas.
3452                          */
3453                         options = strchr(options, ',');
3454                         if (options == NULL)
3455                                 break;
3456                         options++;
3457                         if (!isdigit(*options)) {
3458                                 options[-1] = '\0';
3459                                 break;
3460                         }
3461                 }
3462                 if (*this_char) {
3463                         char *value = strchr(this_char, '=');
3464                         size_t len = 0;
3465                         int err;
3466
3467                         if (value) {
3468                                 *value++ = '\0';
3469                                 len = strlen(value);
3470                         }
3471                         err = vfs_parse_fs_string(fc, this_char, value, len);
3472                         if (err < 0)
3473                                 return err;
3474                 }
3475         }
3476         return 0;
3477 }
3478
3479 /*
3480  * Reconfigure a shmem filesystem.
3481  *
3482  * Note that we disallow change from limited->unlimited blocks/inodes while any
3483  * are in use; but we must separately disallow unlimited->limited, because in
3484  * that case we have no record of how much is already in use.
3485  */
3486 static int shmem_reconfigure(struct fs_context *fc)
3487 {
3488         struct shmem_options *ctx = fc->fs_private;
3489         struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb);
3490         unsigned long inodes;
3491         const char *err;
3492
3493         spin_lock(&sbinfo->stat_lock);
3494         inodes = sbinfo->max_inodes - sbinfo->free_inodes;
3495         if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) {
3496                 if (!sbinfo->max_blocks) {
3497                         err = "Cannot retroactively limit size";
3498                         goto out;
3499                 }
3500                 if (percpu_counter_compare(&sbinfo->used_blocks,
3501                                            ctx->blocks) > 0) {
3502                         err = "Too small a size for current use";
3503                         goto out;
3504                 }
3505         }
3506         if ((ctx->seen & SHMEM_SEEN_INODES) && ctx->inodes) {
3507                 if (!sbinfo->max_inodes) {
3508                         err = "Cannot retroactively limit inodes";
3509                         goto out;
3510                 }
3511                 if (ctx->inodes < inodes) {
3512                         err = "Too few inodes for current use";
3513                         goto out;
3514                 }
3515         }
3516
3517         if ((ctx->seen & SHMEM_SEEN_INUMS) && !ctx->full_inums &&
3518             sbinfo->next_ino > UINT_MAX) {
3519                 err = "Current inum too high to switch to 32-bit inums";
3520                 goto out;
3521         }
3522
3523         if (ctx->seen & SHMEM_SEEN_HUGE)
3524                 sbinfo->huge = ctx->huge;
3525         if (ctx->seen & SHMEM_SEEN_INUMS)
3526                 sbinfo->full_inums = ctx->full_inums;
3527         if (ctx->seen & SHMEM_SEEN_BLOCKS)
3528                 sbinfo->max_blocks  = ctx->blocks;
3529         if (ctx->seen & SHMEM_SEEN_INODES) {
3530                 sbinfo->max_inodes  = ctx->inodes;
3531                 sbinfo->free_inodes = ctx->inodes - inodes;
3532         }
3533
3534         /*
3535          * Preserve previous mempolicy unless mpol remount option was specified.
3536          */
3537         if (ctx->mpol) {
3538                 mpol_put(sbinfo->mpol);
3539                 sbinfo->mpol = ctx->mpol;       /* transfers initial ref */
3540                 ctx->mpol = NULL;
3541         }
3542         spin_unlock(&sbinfo->stat_lock);
3543         return 0;
3544 out:
3545         spin_unlock(&sbinfo->stat_lock);
3546         return invalfc(fc, "%s", err);
3547 }
3548
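/*
 * Emit mount options for /proc/mounts, showing only values that differ
 * from the defaults (see the inode{64,32} special case below).
 */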
3549 static int shmem_show_options(struct seq_file *seq, struct dentry *root)
3550 {
3551         struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);
3552
3553         if (sbinfo->max_blocks != shmem_default_max_blocks())
3554                 seq_printf(seq, ",size=%luk",
3555                         sbinfo->max_blocks << (PAGE_SHIFT - 10));
3556         if (sbinfo->max_inodes != shmem_default_max_inodes())
3557                 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
3558         if (sbinfo->mode != (0777 | S_ISVTX))
3559                 seq_printf(seq, ",mode=%03ho", sbinfo->mode);
3560         if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
3561                 seq_printf(seq, ",uid=%u",
3562                                 from_kuid_munged(&init_user_ns, sbinfo->uid));
3563         if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
3564                 seq_printf(seq, ",gid=%u",
3565                                 from_kgid_munged(&init_user_ns, sbinfo->gid));
3566
3567         /*
3568          * Showing inode{64,32} might be useful even if it's the system default,
3569          * since then people don't have to resort to checking both here and
3570          * /proc/config.gz to confirm 64-bit inums were successfully applied
3571          * (which may not even exist if IKCONFIG_PROC isn't enabled).
3572          *
3573          * We hide it when inode64 isn't the default and we are using 32-bit
3574          * inodes, since that probably just means the feature isn't even under
3575          * consideration.
3576          *
3577          * As such:
3578          *
3579          *                     +-----------------+-----------------+
3580          *                     | TMPFS_INODE64=y | TMPFS_INODE64=n |
3581          *  +------------------+-----------------+-----------------+
3582          *  | full_inums=true  | show            | show            |
3583          *  | full_inums=false | show            | hide            |
3584          *  +------------------+-----------------+-----------------+
3585          *
3586          */
3587         if (IS_ENABLED(CONFIG_TMPFS_INODE64) || sbinfo->full_inums)
3588                 seq_printf(seq, ",inode%d", (sbinfo->full_inums ? 64 : 32));
3589 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3590         /* Rightly or wrongly, show huge mount option unmasked by shmem_huge */
3591         if (sbinfo->huge)
3592                 seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge));
3593 #endif
3594         shmem_show_mpol(seq, sbinfo->mpol);
3595         return 0;
3596 }
3597
3598 #endif /* CONFIG_TMPFS */
3599
3600 static void shmem_put_super(struct super_block *sb)
3601 {
3602         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
3603
3604         free_percpu(sbinfo->ino_batch);
3605         percpu_counter_destroy(&sbinfo->used_blocks);
3606         mpol_put(sbinfo->mpol);
3607         kfree(sbinfo);
3608         sb->s_fs_info = NULL;
3609 }
3610
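/*
 * Fill in the superblock for a new tmpfs mount: apply the parsed (or
 * default) limits, set up the block/inode accounting, and create the
 * root inode.  Kernel-internal mounts (SB_KERNMOUNT) are left unlimited
 * and get a per-cpu inode number batch.
 */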
3611 static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
3612 {
3613         struct shmem_options *ctx = fc->fs_private;
3614         struct inode *inode;
3615         struct shmem_sb_info *sbinfo;
3616         int err = -ENOMEM;
3617
3618         /* Round up to L1_CACHE_BYTES to resist false sharing */
3619         sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
3620                                 L1_CACHE_BYTES), GFP_KERNEL);
3621         if (!sbinfo)
3622                 return -ENOMEM;
3623
3624         sb->s_fs_info = sbinfo;
3625
3626 #ifdef CONFIG_TMPFS
3627         /*
3628          * By default we only allow half of the physical RAM per
3629          * tmpfs instance, limiting inodes to one per page of lowmem;
3630          * but the internal instance is left unlimited.
3631          */
3632         if (!(sb->s_flags & SB_KERNMOUNT)) {
3633                 if (!(ctx->seen & SHMEM_SEEN_BLOCKS))
3634                         ctx->blocks = shmem_default_max_blocks();
3635                 if (!(ctx->seen & SHMEM_SEEN_INODES))
3636                         ctx->inodes = shmem_default_max_inodes();
3637                 if (!(ctx->seen & SHMEM_SEEN_INUMS))
3638                         ctx->full_inums = IS_ENABLED(CONFIG_TMPFS_INODE64);
3639         } else {
3640                 sb->s_flags |= SB_NOUSER;
3641         }
3642         sb->s_export_op = &shmem_export_ops;
3643         sb->s_flags |= SB_NOSEC;
3644 #else
3645         sb->s_flags |= SB_NOUSER;
3646 #endif
3647         sbinfo->max_blocks = ctx->blocks;
3648         sbinfo->free_inodes = sbinfo->max_inodes = ctx->inodes;
3649         if (sb->s_flags & SB_KERNMOUNT) {
3650                 sbinfo->ino_batch = alloc_percpu(ino_t);
3651                 if (!sbinfo->ino_batch)
3652                         goto failed;
3653         }
3654         sbinfo->uid = ctx->uid;
3655         sbinfo->gid = ctx->gid;
3656         sbinfo->full_inums = ctx->full_inums;
3657         sbinfo->mode = ctx->mode;
3658         sbinfo->huge = ctx->huge;
3659         sbinfo->mpol = ctx->mpol;
3660         ctx->mpol = NULL;
3661
3662         spin_lock_init(&sbinfo->stat_lock);
3663         if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
3664                 goto failed;
3665         spin_lock_init(&sbinfo->shrinklist_lock);
3666         INIT_LIST_HEAD(&sbinfo->shrinklist);
3667
3668         sb->s_maxbytes = MAX_LFS_FILESIZE;
3669         sb->s_blocksize = PAGE_SIZE;
3670         sb->s_blocksize_bits = PAGE_SHIFT;
3671         sb->s_magic = TMPFS_MAGIC;
3672         sb->s_op = &shmem_ops;
3673         sb->s_time_gran = 1;
3674 #ifdef CONFIG_TMPFS_XATTR
3675         sb->s_xattr = shmem_xattr_handlers;
3676 #endif
3677 #ifdef CONFIG_TMPFS_POSIX_ACL
3678         sb->s_flags |= SB_POSIXACL;
3679 #endif
3680         uuid_gen(&sb->s_uuid);
3681
3682         inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
3683         if (!inode)
3684                 goto failed;
3685         inode->i_uid = sbinfo->uid;
3686         inode->i_gid = sbinfo->gid;
3687         sb->s_root = d_make_root(inode);
3688         if (!sb->s_root)
3689                 goto failed;
3690         return 0;
3691
3692 failed:
3693         shmem_put_super(sb);
3694         return err;
3695 }
3696
3697 static int shmem_get_tree(struct fs_context *fc)
3698 {
3699         return get_tree_nodev(fc, shmem_fill_super);
3700 }
3701
3702 static void shmem_free_fc(struct fs_context *fc)
3703 {
3704         struct shmem_options *ctx = fc->fs_private;
3705
3706         if (ctx) {
3707                 mpol_put(ctx->mpol);
3708                 kfree(ctx);
3709         }
3710 }
3711
3712 static const struct fs_context_operations shmem_fs_context_ops = {
3713         .free                   = shmem_free_fc,
3714         .get_tree               = shmem_get_tree,
3715 #ifdef CONFIG_TMPFS
3716         .parse_monolithic       = shmem_parse_options,
3717         .parse_param            = shmem_parse_one,
3718         .reconfigure            = shmem_reconfigure,
3719 #endif
3720 };
3721
3722 static struct kmem_cache *shmem_inode_cachep;
3723
3724 static struct inode *shmem_alloc_inode(struct super_block *sb)
3725 {
3726         struct shmem_inode_info *info;
3727         info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
3728         if (!info)
3729                 return NULL;
3730         return &info->vfs_inode;
3731 }
3732
3733 static void shmem_free_in_core_inode(struct inode *inode)
3734 {
3735         if (S_ISLNK(inode->i_mode))
3736                 kfree(inode->i_link);
3737         kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
3738 }
3739
3740 static void shmem_destroy_inode(struct inode *inode)
3741 {
3742         if (S_ISREG(inode->i_mode))
3743                 mpol_free_shared_policy(&SHMEM_I(inode)->policy);
3744 }
3745
3746 static void shmem_init_inode(void *foo)
3747 {
3748         struct shmem_inode_info *info = foo;
3749         inode_init_once(&info->vfs_inode);
3750 }
3751
3752 static void shmem_init_inodecache(void)
3753 {
3754         shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
3755                                 sizeof(struct shmem_inode_info),
3756                                 0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode);
3757 }
3758
3759 static void shmem_destroy_inodecache(void)
3760 {
3761         kmem_cache_destroy(shmem_inode_cachep);
3762 }
3763
3764 const struct address_space_operations shmem_aops = {
3765         .writepage      = shmem_writepage,
3766         .set_page_dirty = __set_page_dirty_no_writeback,
3767 #ifdef CONFIG_TMPFS
3768         .write_begin    = shmem_write_begin,
3769         .write_end      = shmem_write_end,
3770 #endif
3771 #ifdef CONFIG_MIGRATION
3772         .migratepage    = migrate_page,
3773 #endif
3774         .error_remove_page = generic_error_remove_page,
3775 };
3776 EXPORT_SYMBOL(shmem_aops);
3777
3778 static const struct file_operations shmem_file_operations = {
3779         .mmap           = shmem_mmap,
3780         .get_unmapped_area = shmem_get_unmapped_area,
3781 #ifdef CONFIG_TMPFS
3782         .llseek         = shmem_file_llseek,
3783         .read_iter      = shmem_file_read_iter,
3784         .write_iter     = generic_file_write_iter,
3785         .fsync          = noop_fsync,
3786         .splice_read    = generic_file_splice_read,
3787         .splice_write   = iter_file_splice_write,
3788         .fallocate      = shmem_fallocate,
3789 #endif
3790 };
3791
3792 static const struct inode_operations shmem_inode_operations = {
3793         .getattr        = shmem_getattr,
3794         .setattr        = shmem_setattr,
3795 #ifdef CONFIG_TMPFS_XATTR
3796         .listxattr      = shmem_listxattr,
3797         .set_acl        = simple_set_acl,
3798 #endif
3799 };
3800
3801 static const struct inode_operations shmem_dir_inode_operations = {
3802 #ifdef CONFIG_TMPFS
3803         .create         = shmem_create,
3804         .lookup         = simple_lookup,
3805         .link           = shmem_link,
3806         .unlink         = shmem_unlink,
3807         .symlink        = shmem_symlink,
3808         .mkdir          = shmem_mkdir,
3809         .rmdir          = shmem_rmdir,
3810         .mknod          = shmem_mknod,
3811         .rename         = shmem_rename2,
3812         .tmpfile        = shmem_tmpfile,
3813 #endif
3814 #ifdef CONFIG_TMPFS_XATTR
3815         .listxattr      = shmem_listxattr,
3816 #endif
3817 #ifdef CONFIG_TMPFS_POSIX_ACL
3818         .setattr        = shmem_setattr,
3819         .set_acl        = simple_set_acl,
3820 #endif
3821 };
3822
3823 static const struct inode_operations shmem_special_inode_operations = {
3824 #ifdef CONFIG_TMPFS_XATTR
3825         .listxattr      = shmem_listxattr,
3826 #endif
3827 #ifdef CONFIG_TMPFS_POSIX_ACL
3828         .setattr        = shmem_setattr,
3829         .set_acl        = simple_set_acl,
3830 #endif
3831 };
3832
3833 static const struct super_operations shmem_ops = {
3834         .alloc_inode    = shmem_alloc_inode,
3835         .free_inode     = shmem_free_in_core_inode,
3836         .destroy_inode  = shmem_destroy_inode,
3837 #ifdef CONFIG_TMPFS
3838         .statfs         = shmem_statfs,
3839         .show_options   = shmem_show_options,
3840 #endif
3841         .evict_inode    = shmem_evict_inode,
3842         .drop_inode     = generic_delete_inode,
3843         .put_super      = shmem_put_super,
3844 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3845         .nr_cached_objects      = shmem_unused_huge_count,
3846         .free_cached_objects    = shmem_unused_huge_scan,
3847 #endif
3848 };
3849
3850 static const struct vm_operations_struct shmem_vm_ops = {
3851         .fault          = shmem_fault,
3852         .map_pages      = filemap_map_pages,
3853 #ifdef CONFIG_NUMA
3854         .set_policy     = shmem_set_policy,
3855         .get_policy     = shmem_get_policy,
3856 #endif
3857 };
3858
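/*
 * Set up the default mount options (world-writable, sticky, owned by the
 * caller) before the parameters are parsed.
 */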
3859 int shmem_init_fs_context(struct fs_context *fc)
3860 {
3861         struct shmem_options *ctx;
3862
3863         ctx = kzalloc(sizeof(struct shmem_options), GFP_KERNEL);
3864         if (!ctx)
3865                 return -ENOMEM;
3866
3867         ctx->mode = 0777 | S_ISVTX;
3868         ctx->uid = current_fsuid();
3869         ctx->gid = current_fsgid();
3870
3871         fc->fs_private = ctx;
3872         fc->ops = &shmem_fs_context_ops;
3873         return 0;
3874 }
3875
3876 static struct file_system_type shmem_fs_type = {
3877         .owner          = THIS_MODULE,
3878         .name           = "tmpfs",
3879         .init_fs_context = shmem_init_fs_context,
3880 #ifdef CONFIG_TMPFS
3881         .parameters     = shmem_fs_parameters,
3882 #endif
3883         .kill_sb        = kill_litter_super,
3884         .fs_flags       = FS_USERNS_MOUNT | FS_THP_SUPPORT,
3885 };
3886
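/*
 * Register the tmpfs filesystem and create the internal mount (shm_mnt)
 * that backs shmem_file_setup() and friends.
 */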
3887 int __init shmem_init(void)
3888 {
3889         int error;
3890
3891         shmem_init_inodecache();
3892
3893         error = register_filesystem(&shmem_fs_type);
3894         if (error) {
3895                 pr_err("Could not register tmpfs\n");
3896                 goto out2;
3897         }
3898
3899         shm_mnt = kern_mount(&shmem_fs_type);
3900         if (IS_ERR(shm_mnt)) {
3901                 error = PTR_ERR(shm_mnt);
3902                 pr_err("Could not kern_mount tmpfs\n");
3903                 goto out1;
3904         }
3905
3906 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3907         if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
3908                 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
3909         else
3910                 shmem_huge = 0; /* just in case it was patched */
3911 #endif
3912         return 0;
3913
3914 out1:
3915         unregister_filesystem(&shmem_fs_type);
3916 out2:
3917         shmem_destroy_inodecache();
3918         shm_mnt = ERR_PTR(error);
3919         return error;
3920 }
3921
3922 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
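/*
 * "shmem_enabled" sysfs knob: show all huge page policies with the
 * current one bracketed, and accept any of them on write.
 */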
3923 static ssize_t shmem_enabled_show(struct kobject *kobj,
3924                                   struct kobj_attribute *attr, char *buf)
3925 {
3926         static const int values[] = {
3927                 SHMEM_HUGE_ALWAYS,
3928                 SHMEM_HUGE_WITHIN_SIZE,
3929                 SHMEM_HUGE_ADVISE,
3930                 SHMEM_HUGE_NEVER,
3931                 SHMEM_HUGE_DENY,
3932                 SHMEM_HUGE_FORCE,
3933         };
3934         int len = 0;
3935         int i;
3936
3937         for (i = 0; i < ARRAY_SIZE(values); i++) {
3938                 len += sysfs_emit_at(buf, len,
3939                                      shmem_huge == values[i] ? "%s[%s]" : "%s%s",
3940                                      i ? " " : "",
3941                                      shmem_format_huge(values[i]));
3942         }
3943
3944         len += sysfs_emit_at(buf, len, "\n");
3945
3946         return len;
3947 }
3948
3949 static ssize_t shmem_enabled_store(struct kobject *kobj,
3950                 struct kobj_attribute *attr, const char *buf, size_t count)
3951 {
3952         char tmp[16];
3953         int huge;
3954
3955         if (count + 1 > sizeof(tmp))
3956                 return -EINVAL;
3957         memcpy(tmp, buf, count);
3958         tmp[count] = '\0';
3959         if (count && tmp[count - 1] == '\n')
3960                 tmp[count - 1] = '\0';
3961
3962         huge = shmem_parse_huge(tmp);
3963         if (huge == -EINVAL)
3964                 return -EINVAL;
3965         if (!has_transparent_hugepage() &&
3966                         huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY)
3967                 return -EINVAL;
3968
3969         shmem_huge = huge;
3970         if (shmem_huge > SHMEM_HUGE_DENY)
3971                 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
3972         return count;
3973 }
3974
3975 struct kobj_attribute shmem_enabled_attr =
3976         __ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store);
3977 #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */
3978
3979 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
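/*
 * Decide whether faults on this shmem/tmpfs vma may use huge pages,
 * combining the global shmem_huge override with the mount's huge= policy.
 */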
3980 bool shmem_huge_enabled(struct vm_area_struct *vma)
3981 {
3982         struct inode *inode = file_inode(vma->vm_file);
3983         struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
3984         loff_t i_size;
3985         pgoff_t off;
3986
3987         if (!transhuge_vma_enabled(vma, vma->vm_flags))
3988                 return false;
3989         if (shmem_huge == SHMEM_HUGE_FORCE)
3990                 return true;
3991         if (shmem_huge == SHMEM_HUGE_DENY)
3992                 return false;
3993         switch (sbinfo->huge) {
3994                 case SHMEM_HUGE_NEVER:
3995                         return false;
3996                 case SHMEM_HUGE_ALWAYS:
3997                         return true;
3998                 case SHMEM_HUGE_WITHIN_SIZE:
3999                         off = round_up(vma->vm_pgoff, HPAGE_PMD_NR);
4000                         i_size = round_up(i_size_read(inode), PAGE_SIZE);
4001                         if (i_size >= HPAGE_PMD_SIZE &&
4002                                         i_size >> PAGE_SHIFT >= off)
4003                                 return true;
4004                         fallthrough;
4005                 case SHMEM_HUGE_ADVISE:
4006                         /* TODO: implement fadvise() hints */
4007                         return (vma->vm_flags & VM_HUGEPAGE);
4008                 default:
4009                         VM_BUG_ON(1);
4010                         return false;
4011         }
4012 }
4013 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
4014
4015 #else /* !CONFIG_SHMEM */
4016
4017 /*
4018  * tiny-shmem: simple shmemfs and tmpfs using ramfs code
4019  *
4020  * This is intended for small systems where the benefits of the full
4021  * shmem code (swap-backed and resource-limited) are outweighed by
4022  * their complexity. On systems without swap this code should be
4023  * effectively equivalent, but much lighter weight.
4024  */
4025
4026 static struct file_system_type shmem_fs_type = {
4027         .name           = "tmpfs",
4028         .init_fs_context = ramfs_init_fs_context,
4029         .parameters     = ramfs_fs_parameters,
4030         .kill_sb        = kill_litter_super,
4031         .fs_flags       = FS_USERNS_MOUNT,
4032 };
4033
4034 int __init shmem_init(void)
4035 {
4036         BUG_ON(register_filesystem(&shmem_fs_type) != 0);
4037
4038         shm_mnt = kern_mount(&shmem_fs_type);
4039         BUG_ON(IS_ERR(shm_mnt));
4040
4041         return 0;
4042 }
4043
4044 int shmem_unuse(unsigned int type, bool frontswap,
4045                 unsigned long *fs_pages_to_unuse)
4046 {
4047         return 0;
4048 }
4049
4050 int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
4051 {
4052         return 0;
4053 }
4054
4055 void shmem_unlock_mapping(struct address_space *mapping)
4056 {
4057 }
4058
4059 #ifdef CONFIG_MMU
4060 unsigned long shmem_get_unmapped_area(struct file *file,
4061                                       unsigned long addr, unsigned long len,
4062                                       unsigned long pgoff, unsigned long flags)
4063 {
4064         return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
4065 }
4066 #endif
4067
4068 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
4069 {
4070         truncate_inode_pages_range(inode->i_mapping, lstart, lend);
4071 }
4072 EXPORT_SYMBOL_GPL(shmem_truncate_range);
4073
4074 #define shmem_vm_ops                            generic_file_vm_ops
4075 #define shmem_file_operations                   ramfs_file_operations
4076 #define shmem_get_inode(sb, dir, mode, dev, flags)      ramfs_get_inode(sb, dir, mode, dev)
4077 #define shmem_acct_size(flags, size)            0
4078 #define shmem_unacct_size(flags, size)          do {} while (0)
4079
4080 #endif /* CONFIG_SHMEM */
4081
4082 /* common code */
4083
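/*
 * Create an unlinked regular file of the given size on @mnt.  @i_flags
 * lets kernel-internal callers pass S_PRIVATE to skip LSM checks on the
 * inode (see shmem_kernel_file_setup()).
 */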
4084 static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, loff_t size,
4085                                        unsigned long flags, unsigned int i_flags)
4086 {
4087         struct inode *inode;
4088         struct file *res;
4089
4090         if (IS_ERR(mnt))
4091                 return ERR_CAST(mnt);
4092
4093         if (size < 0 || size > MAX_LFS_FILESIZE)
4094                 return ERR_PTR(-EINVAL);
4095
4096         if (shmem_acct_size(flags, size))
4097                 return ERR_PTR(-ENOMEM);
4098
4099         inode = shmem_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0,
4100                                 flags);
4101         if (unlikely(!inode)) {
4102                 shmem_unacct_size(flags, size);
4103                 return ERR_PTR(-ENOSPC);
4104         }
4105         inode->i_flags |= i_flags;
4106         inode->i_size = size;
4107         clear_nlink(inode);     /* It is unlinked */
4108         res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
4109         if (!IS_ERR(res))
4110                 res = alloc_file_pseudo(inode, mnt, name, O_RDWR,
4111                                 &shmem_file_operations);
4112         if (IS_ERR(res))
4113                 iput(inode);
4114         return res;
4115 }
4116
4117 /**
4118  * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
4119  *      kernel internal.  There will be NO LSM permission checks against the
4120  *      underlying inode.  So users of this interface must do LSM checks at a
4121  *      higher layer.  The users are the big_key and shm implementations.  LSM
4122  *      checks are provided at the key or shm level rather than the inode.
4123  * @name: name for dentry (to be seen in /proc/<pid>/maps)
4124  * @size: size to be set for the file
4125  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4126  */
4127 struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
4128 {
4129         return __shmem_file_setup(shm_mnt, name, size, flags, S_PRIVATE);
4130 }
4131
4132 /**
4133  * shmem_file_setup - get an unlinked file living in tmpfs
4134  * @name: name for dentry (to be seen in /proc/<pid>/maps)
4135  * @size: size to be set for the file
4136  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4137  */
4138 struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
4139 {
4140         return __shmem_file_setup(shm_mnt, name, size, flags, 0);
4141 }
4142 EXPORT_SYMBOL_GPL(shmem_file_setup);
4143
4144 /**
4145  * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs
4146  * @mnt: the tmpfs mount where the file will be created
4147  * @name: name for dentry (to be seen in /proc/<pid>/maps)
4148  * @size: size to be set for the file
4149  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4150  */
4151 struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, const char *name,
4152                                        loff_t size, unsigned long flags)
4153 {
4154         return __shmem_file_setup(mnt, name, size, flags, 0);
4155 }
4156 EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt);
4157
4158 /**
4159  * shmem_zero_setup - setup a shared anonymous mapping
4160  * @vma: the vma to be mmapped is prepared by do_mmap
4161  */
4162 int shmem_zero_setup(struct vm_area_struct *vma)
4163 {
4164         struct file *file;
4165         loff_t size = vma->vm_end - vma->vm_start;
4166
4167         /*
4168          * Cloning a new file under mmap_lock leads to a lock ordering conflict
4169          * between XFS directory reading and selinux: since this file is only
4170          * accessible to the user through its mapping, use S_PRIVATE flag to
4171          * bypass file security, in the same way as shmem_kernel_file_setup().
4172          */
4173         file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags);
4174         if (IS_ERR(file))
4175                 return PTR_ERR(file);
4176
4177         if (vma->vm_file)
4178                 fput(vma->vm_file);
4179         vma->vm_file = file;
4180         vma->vm_ops = &shmem_vm_ops;
4181
4182         if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
4183                         ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
4184                         (vma->vm_end & HPAGE_PMD_MASK)) {
4185                 khugepaged_enter(vma, vma->vm_flags);
4186         }
4187
4188         return 0;
4189 }
4190
4191 /**
4192  * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
4193  * @mapping:    the page's address_space
4194  * @index:      the page index
4195  * @gfp:        the page allocator flags to use if allocating
4196  *
4197  * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
4198  * with any new page allocations done using the specified allocation flags.
4199  * But read_cache_page_gfp() uses the ->readpage() method: which does not
4200  * suit tmpfs, since it may have pages in swapcache, and needs to find those
4201  * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
4202  *
4203  * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
4204  * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
4205  */
4206 struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
4207                                          pgoff_t index, gfp_t gfp)
4208 {
4209 #ifdef CONFIG_SHMEM
4210         struct inode *inode = mapping->host;
4211         struct page *page;
4212         int error;
4213
4214         BUG_ON(!shmem_mapping(mapping));
4215         error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE,
4216                                   gfp, NULL, NULL, NULL);
4217         if (error)
4218                 page = ERR_PTR(error);
4219         else
4220                 unlock_page(page);
4221         return page;
4222 #else
4223         /*
4224          * The tiny !SHMEM case uses ramfs without swap
4225          */
4226         return read_cache_page_gfp(mapping, index, gfp);
4227 #endif
4228 }
4229 EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);