mm/shmem.c
1 /*
2  * Resizable virtual memory filesystem for Linux.
3  *
4  * Copyright (C) 2000 Linus Torvalds.
5  *               2000 Transmeta Corp.
6  *               2000-2001 Christoph Rohland
7  *               2000-2001 SAP AG
8  *               2002 Red Hat Inc.
9  * Copyright (C) 2002-2011 Hugh Dickins.
10  * Copyright (C) 2011 Google Inc.
11  * Copyright (C) 2002-2005 VERITAS Software Corporation.
12  * Copyright (C) 2004 Andi Kleen, SuSE Labs
13  *
14  * Extended attribute support for tmpfs:
15  * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
16  * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
17  *
18  * tiny-shmem:
19  * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
20  *
21  * This file is released under the GPL.
22  */
23
24 #include <linux/fs.h>
25 #include <linux/init.h>
26 #include <linux/vfs.h>
27 #include <linux/mount.h>
28 #include <linux/ramfs.h>
29 #include <linux/pagemap.h>
30 #include <linux/file.h>
31 #include <linux/mm.h>
32 #include <linux/random.h>
33 #include <linux/sched/signal.h>
34 #include <linux/export.h>
35 #include <linux/swap.h>
36 #include <linux/uio.h>
37 #include <linux/khugepaged.h>
38 #include <linux/hugetlb.h>
39 #include <linux/frontswap.h>
40 #include <linux/fs_parser.h>
41
42 #include <asm/tlbflush.h> /* for arch/microblaze update_mmu_cache() */
43
44 static struct vfsmount *shm_mnt;
45
46 #ifdef CONFIG_SHMEM
47 /*
48  * This virtual memory filesystem is heavily based on the ramfs. It
49  * extends ramfs by the ability to use swap and honor resource limits
50  * which makes it a completely usable filesystem.
51  */
52
53 #include <linux/xattr.h>
54 #include <linux/exportfs.h>
55 #include <linux/posix_acl.h>
56 #include <linux/posix_acl_xattr.h>
57 #include <linux/mman.h>
58 #include <linux/string.h>
59 #include <linux/slab.h>
60 #include <linux/backing-dev.h>
61 #include <linux/shmem_fs.h>
62 #include <linux/writeback.h>
63 #include <linux/blkdev.h>
64 #include <linux/pagevec.h>
65 #include <linux/percpu_counter.h>
66 #include <linux/falloc.h>
67 #include <linux/splice.h>
68 #include <linux/security.h>
69 #include <linux/swapops.h>
70 #include <linux/mempolicy.h>
71 #include <linux/namei.h>
72 #include <linux/ctype.h>
73 #include <linux/migrate.h>
74 #include <linux/highmem.h>
75 #include <linux/seq_file.h>
76 #include <linux/magic.h>
77 #include <linux/syscalls.h>
78 #include <linux/fcntl.h>
79 #include <uapi/linux/memfd.h>
80 #include <linux/userfaultfd_k.h>
81 #include <linux/rmap.h>
82 #include <linux/uuid.h>
83
84 #include <linux/uaccess.h>
85
86 #include "internal.h"
87
88 #define BLOCKS_PER_PAGE  (PAGE_SIZE/512)
89 #define VM_ACCT(size)    (PAGE_ALIGN(size) >> PAGE_SHIFT)
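/*
 * Worked example (assuming 4KiB pages): BLOCKS_PER_PAGE is 8, since
 * i_blocks counts 512-byte units, and VM_ACCT(5000) is 2, since memory
 * accounting is done in whole pages after rounding the size up.
 */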
90
91 /* Pretend that each entry is of this size in directory's i_size */
92 #define BOGO_DIRENT_SIZE 20
93
94 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
95 #define SHORT_SYMLINK_LEN 128
96
97 /*
98  * shmem_fallocate communicates with shmem_fault or shmem_writepage via
99  * inode->i_private (with i_mutex making sure that it has only one user at
100  * a time): we would prefer not to enlarge the shmem inode just for that.
101  */
102 struct shmem_falloc {
103         wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
104         pgoff_t start;          /* start of range currently being fallocated */
105         pgoff_t next;           /* the next page offset to be fallocated */
106         pgoff_t nr_falloced;    /* how many new pages have been fallocated */
107         pgoff_t nr_unswapped;   /* how often writepage refused to swap out */
108 };
109
110 struct shmem_options {
111         unsigned long long blocks;
112         unsigned long long inodes;
113         struct mempolicy *mpol;
114         kuid_t uid;
115         kgid_t gid;
116         umode_t mode;
117         bool full_inums;
118         int huge;
119         int seen;
120 #define SHMEM_SEEN_BLOCKS 1
121 #define SHMEM_SEEN_INODES 2
122 #define SHMEM_SEEN_HUGE 4
123 #define SHMEM_SEEN_INUMS 8
124 };
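/*
 * The SHMEM_SEEN_* bits record which of the corresponding mount options
 * actually appeared during parsing, so that remount can distinguish
 * "option not given" from "option given with its default value".
 */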
125
126 #ifdef CONFIG_TMPFS
127 static unsigned long shmem_default_max_blocks(void)
128 {
129         return totalram_pages() / 2;
130 }
131
132 static unsigned long shmem_default_max_inodes(void)
133 {
134         unsigned long nr_pages = totalram_pages();
135
136         return min(nr_pages - totalhigh_pages(), nr_pages / 2);
137 }
138 #endif
139
140 static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
141 static int shmem_replace_page(struct page **pagep, gfp_t gfp,
142                                 struct shmem_inode_info *info, pgoff_t index);
143 static int shmem_swapin_page(struct inode *inode, pgoff_t index,
144                              struct page **pagep, enum sgp_type sgp,
145                              gfp_t gfp, struct vm_area_struct *vma,
146                              vm_fault_t *fault_type);
147 static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
148                 struct page **pagep, enum sgp_type sgp,
149                 gfp_t gfp, struct vm_area_struct *vma,
150                 struct vm_fault *vmf, vm_fault_t *fault_type);
151
152 int shmem_getpage(struct inode *inode, pgoff_t index,
153                 struct page **pagep, enum sgp_type sgp)
154 {
155         return shmem_getpage_gfp(inode, index, pagep, sgp,
156                 mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL);
157 }
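/*
 * Minimal usage sketch (hypothetical caller, locking and error paths
 * trimmed): the page comes back locked with an extra reference held.
 *
 *	struct page *page;
 *	int err = shmem_getpage(inode, index, &page, SGP_CACHE);
 *	if (!err && page) {
 *		... read or modify the page ...
 *		unlock_page(page);
 *		put_page(page);
 *	}
 *
 * SGP_CACHE allocates a zeroed page when none exists, whereas SGP_READ
 * only returns a page that is already present.
 */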
158
159 static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
160 {
161         return sb->s_fs_info;
162 }
163
164 /*
165  * shmem_file_setup pre-accounts the whole fixed size of a VM object,
166  * for shared memory and for shared anonymous (/dev/zero) mappings
167  * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
168  * consistent with the pre-accounting of private mappings ...
169  */
170 static inline int shmem_acct_size(unsigned long flags, loff_t size)
171 {
172         return (flags & VM_NORESERVE) ?
173                 0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
174 }
175
176 static inline void shmem_unacct_size(unsigned long flags, loff_t size)
177 {
178         if (!(flags & VM_NORESERVE))
179                 vm_unacct_memory(VM_ACCT(size));
180 }
181
182 static inline int shmem_reacct_size(unsigned long flags,
183                 loff_t oldsize, loff_t newsize)
184 {
185         if (!(flags & VM_NORESERVE)) {
186                 if (VM_ACCT(newsize) > VM_ACCT(oldsize))
187                         return security_vm_enough_memory_mm(current->mm,
188                                         VM_ACCT(newsize) - VM_ACCT(oldsize));
189                 else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
190                         vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
191         }
192         return 0;
193 }
194
195 /*
196  * ... whereas tmpfs objects are accounted incrementally as
197  * pages are allocated, in order to allow large sparse files.
198  * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
199  * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
200  */
201 static inline int shmem_acct_block(unsigned long flags, long pages)
202 {
203         if (!(flags & VM_NORESERVE))
204                 return 0;
205
206         return security_vm_enough_memory_mm(current->mm,
207                         pages * VM_ACCT(PAGE_SIZE));
208 }
209
210 static inline void shmem_unacct_blocks(unsigned long flags, long pages)
211 {
212         if (flags & VM_NORESERVE)
213                 vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
214 }
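/*
 * Note the inverted VM_NORESERVE test relative to shmem_acct_size() above:
 * VM_NORESERVE objects skipped the up-front charge, so their memory is
 * accounted here page by page instead; !VM_NORESERVE objects were already
 * charged in full when they were set up.
 */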
215
216 static inline bool shmem_inode_acct_block(struct inode *inode, long pages)
217 {
218         struct shmem_inode_info *info = SHMEM_I(inode);
219         struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
220
221         if (shmem_acct_block(info->flags, pages))
222                 return false;
223
224         if (sbinfo->max_blocks) {
225                 if (percpu_counter_compare(&sbinfo->used_blocks,
226                                            sbinfo->max_blocks - pages) > 0)
227                         goto unacct;
228                 percpu_counter_add(&sbinfo->used_blocks, pages);
229         }
230
231         return true;
232
233 unacct:
234         shmem_unacct_blocks(info->flags, pages);
235         return false;
236 }
237
238 static inline void shmem_inode_unacct_blocks(struct inode *inode, long pages)
239 {
240         struct shmem_inode_info *info = SHMEM_I(inode);
241         struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
242
243         if (sbinfo->max_blocks)
244                 percpu_counter_sub(&sbinfo->used_blocks, pages);
245         shmem_unacct_blocks(info->flags, pages);
246 }
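/*
 * Typical pairing in an allocation path (illustrative sketch only):
 *
 *	if (!shmem_inode_acct_block(inode, nr))
 *		return -ENOSPC;
 *	page = ...allocate nr pages...;
 *	if (!page) {
 *		shmem_inode_unacct_blocks(inode, nr);
 *		return -ENOMEM;
 *	}
 */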
247
248 static const struct super_operations shmem_ops;
249 const struct address_space_operations shmem_aops;
250 static const struct file_operations shmem_file_operations;
251 static const struct inode_operations shmem_inode_operations;
252 static const struct inode_operations shmem_dir_inode_operations;
253 static const struct inode_operations shmem_special_inode_operations;
254 static const struct vm_operations_struct shmem_vm_ops;
255 static struct file_system_type shmem_fs_type;
256
257 bool vma_is_shmem(struct vm_area_struct *vma)
258 {
259         return vma->vm_ops == &shmem_vm_ops;
260 }
261
262 static LIST_HEAD(shmem_swaplist);
263 static DEFINE_MUTEX(shmem_swaplist_mutex);
264
265 /*
266  * shmem_reserve_inode() performs bookkeeping to reserve a shmem inode, and
267  * produces a novel ino for the newly allocated inode.
268  *
269  * It may also be called when making a hard link to permit the space needed by
270  * each dentry. However, in that case, no new inode number is needed since that
271  * internally draws from another pool of inode numbers (currently global
272  * get_next_ino()). This case is indicated by passing NULL as inop.
273  */
274 #define SHMEM_INO_BATCH 1024
275 static int shmem_reserve_inode(struct super_block *sb, ino_t *inop)
276 {
277         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
278         ino_t ino;
279
280         if (!(sb->s_flags & SB_KERNMOUNT)) {
281                 spin_lock(&sbinfo->stat_lock);
282                 if (sbinfo->max_inodes) {
283                         if (!sbinfo->free_inodes) {
284                                 spin_unlock(&sbinfo->stat_lock);
285                                 return -ENOSPC;
286                         }
287                         sbinfo->free_inodes--;
288                 }
289                 if (inop) {
290                         ino = sbinfo->next_ino++;
291                         if (unlikely(is_zero_ino(ino)))
292                                 ino = sbinfo->next_ino++;
293                         if (unlikely(!sbinfo->full_inums &&
294                                      ino > UINT_MAX)) {
295                                 /*
296                                  * Emulate get_next_ino uint wraparound for
297                                  * compatibility
298                                  */
299                                 if (IS_ENABLED(CONFIG_64BIT))
300                                         pr_warn("%s: inode number overflow on device %d, consider using inode64 mount option\n",
301                                                 __func__, MINOR(sb->s_dev));
302                                 sbinfo->next_ino = 1;
303                                 ino = sbinfo->next_ino++;
304                         }
305                         *inop = ino;
306                 }
307                 spin_unlock(&sbinfo->stat_lock);
308         } else if (inop) {
309                 /*
310                  * __shmem_file_setup, one of our callers, is lock-free: it
311                  * doesn't hold stat_lock in shmem_reserve_inode since
312                  * max_inodes is always 0, and is called from potentially
313                  * unknown contexts. As such, use a per-cpu batched allocator
314                  * which doesn't require the per-sb stat_lock unless we are at
315                  * the batch boundary.
316                  *
317                  * We don't need to worry about inode{32,64} since SB_KERNMOUNT
318                  * shmem mounts are not exposed to userspace, so we don't need
319                  * to worry about things like glibc compatibility.
320                  */
321                 ino_t *next_ino;
322                 next_ino = per_cpu_ptr(sbinfo->ino_batch, get_cpu());
323                 ino = *next_ino;
324                 if (unlikely(ino % SHMEM_INO_BATCH == 0)) {
325                         spin_lock(&sbinfo->stat_lock);
326                         ino = sbinfo->next_ino;
327                         sbinfo->next_ino += SHMEM_INO_BATCH;
328                         spin_unlock(&sbinfo->stat_lock);
329                         if (unlikely(is_zero_ino(ino)))
330                                 ino++;
331                 }
332                 *inop = ino;
333                 *next_ino = ++ino;
334                 put_cpu();
335         }
336
337         return 0;
338 }
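/*
 * Example of the SB_KERNMOUNT path above (behaviour sketch, not a further
 * guarantee): with SHMEM_INO_BATCH of 1024, a CPU whose per-cpu cursor
 * reaches a multiple of 1024 refills from the shared next_ino, claiming
 * say 2048..3071, and then hands those out without touching stat_lock
 * again until the batch is exhausted.
 */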
339
340 static void shmem_free_inode(struct super_block *sb)
341 {
342         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
343         if (sbinfo->max_inodes) {
344                 spin_lock(&sbinfo->stat_lock);
345                 sbinfo->free_inodes++;
346                 spin_unlock(&sbinfo->stat_lock);
347         }
348 }
349
350 /**
351  * shmem_recalc_inode - recalculate the block usage of an inode
352  * @inode: inode to recalc
353  *
354  * We have to calculate the free blocks since the mm can drop
355  * undirtied hole pages behind our back.
356  *
357  * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
358  * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
359  *
360  * It has to be called with the spinlock held.
361  */
362 static void shmem_recalc_inode(struct inode *inode)
363 {
364         struct shmem_inode_info *info = SHMEM_I(inode);
365         long freed;
366
367         freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
368         if (freed > 0) {
369                 info->alloced -= freed;
370                 inode->i_blocks -= freed * BLOCKS_PER_PAGE;
371                 shmem_inode_unacct_blocks(inode, freed);
372         }
373 }
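/*
 * For example: with info->alloced at 10, info->swapped at 2 and nrpages
 * at 6, two pages were reclaimed behind our back, so both alloced and the
 * block accounting are trimmed by 2.
 */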
374
375 bool shmem_charge(struct inode *inode, long pages)
376 {
377         struct shmem_inode_info *info = SHMEM_I(inode);
378         unsigned long flags;
379
380         if (!shmem_inode_acct_block(inode, pages))
381                 return false;
382
383         /* nrpages adjustment first, then shmem_recalc_inode() when balanced */
384         inode->i_mapping->nrpages += pages;
385
386         spin_lock_irqsave(&info->lock, flags);
387         info->alloced += pages;
388         inode->i_blocks += pages * BLOCKS_PER_PAGE;
389         shmem_recalc_inode(inode);
390         spin_unlock_irqrestore(&info->lock, flags);
391
392         return true;
393 }
394
395 void shmem_uncharge(struct inode *inode, long pages)
396 {
397         struct shmem_inode_info *info = SHMEM_I(inode);
398         unsigned long flags;
399
400         /* nrpages adjustment done by __delete_from_page_cache() or caller */
401
402         spin_lock_irqsave(&info->lock, flags);
403         info->alloced -= pages;
404         inode->i_blocks -= pages * BLOCKS_PER_PAGE;
405         shmem_recalc_inode(inode);
406         spin_unlock_irqrestore(&info->lock, flags);
407
408         shmem_inode_unacct_blocks(inode, pages);
409 }
410
411 /*
412  * Replace item expected in xarray by a new item, while holding xa_lock.
413  */
414 static int shmem_replace_entry(struct address_space *mapping,
415                         pgoff_t index, void *expected, void *replacement)
416 {
417         XA_STATE(xas, &mapping->i_pages, index);
418         void *item;
419
420         VM_BUG_ON(!expected);
421         VM_BUG_ON(!replacement);
422         item = xas_load(&xas);
423         if (item != expected)
424                 return -ENOENT;
425         xas_store(&xas, replacement);
426         return 0;
427 }
428
429 /*
430  * Sometimes, before we decide whether to proceed or to fail, we must check
431  * that an entry was not already brought back from swap by a racing thread.
432  *
433  * Checking page is not enough: by the time a SwapCache page is locked, it
434  * might be reused, and again be SwapCache, using the same swap as before.
435  */
436 static bool shmem_confirm_swap(struct address_space *mapping,
437                                pgoff_t index, swp_entry_t swap)
438 {
439         return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap);
440 }
441
442 /*
443  * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
444  *
445  * SHMEM_HUGE_NEVER:
446  *      disables huge pages for the mount;
447  * SHMEM_HUGE_ALWAYS:
448  *      enables huge pages for the mount;
449  * SHMEM_HUGE_WITHIN_SIZE:
450  *      only allocate huge pages if the page will be fully within i_size,
451  *      also respect fadvise()/madvise() hints;
452  * SHMEM_HUGE_ADVISE:
453  *      only allocate huge pages if requested with fadvise()/madvise();
454  */
455
456 #define SHMEM_HUGE_NEVER        0
457 #define SHMEM_HUGE_ALWAYS       1
458 #define SHMEM_HUGE_WITHIN_SIZE  2
459 #define SHMEM_HUGE_ADVISE       3
460
461 /*
462  * Special values.
463  * Only can be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
464  *
465  * SHMEM_HUGE_DENY:
466  *      disables huge on shm_mnt and all mounts, for emergency use;
467  * SHMEM_HUGE_FORCE:
468  *      enables huge on shm_mnt and all mounts, w/o needing option, for testing;
469  *
470  */
471 #define SHMEM_HUGE_DENY         (-1)
472 #define SHMEM_HUGE_FORCE        (-2)
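/*
 * The per-mount values correspond to the huge= mount option, e.g.
 * "mount -t tmpfs -o huge=within_size tmpfs /mnt"; deny and force are
 * reachable only through the sysfs knob named above.
 */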
473
474 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
475 /* ifdef here to avoid bloating shmem.o when not necessary */
476
477 static int shmem_huge __read_mostly;
478
479 #if defined(CONFIG_SYSFS)
480 static int shmem_parse_huge(const char *str)
481 {
482         if (!strcmp(str, "never"))
483                 return SHMEM_HUGE_NEVER;
484         if (!strcmp(str, "always"))
485                 return SHMEM_HUGE_ALWAYS;
486         if (!strcmp(str, "within_size"))
487                 return SHMEM_HUGE_WITHIN_SIZE;
488         if (!strcmp(str, "advise"))
489                 return SHMEM_HUGE_ADVISE;
490         if (!strcmp(str, "deny"))
491                 return SHMEM_HUGE_DENY;
492         if (!strcmp(str, "force"))
493                 return SHMEM_HUGE_FORCE;
494         return -EINVAL;
495 }
496 #endif
497
498 #if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
499 static const char *shmem_format_huge(int huge)
500 {
501         switch (huge) {
502         case SHMEM_HUGE_NEVER:
503                 return "never";
504         case SHMEM_HUGE_ALWAYS:
505                 return "always";
506         case SHMEM_HUGE_WITHIN_SIZE:
507                 return "within_size";
508         case SHMEM_HUGE_ADVISE:
509                 return "advise";
510         case SHMEM_HUGE_DENY:
511                 return "deny";
512         case SHMEM_HUGE_FORCE:
513                 return "force";
514         default:
515                 VM_BUG_ON(1);
516                 return "bad_val";
517         }
518 }
519 #endif
520
521 static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
522                 struct shrink_control *sc, unsigned long nr_to_split)
523 {
524         LIST_HEAD(list), *pos, *next;
525         LIST_HEAD(to_remove);
526         struct inode *inode;
527         struct shmem_inode_info *info;
528         struct page *page;
529         unsigned long batch = sc ? sc->nr_to_scan : 128;
530         int removed = 0, split = 0;
531
532         if (list_empty(&sbinfo->shrinklist))
533                 return SHRINK_STOP;
534
535         spin_lock(&sbinfo->shrinklist_lock);
536         list_for_each_safe(pos, next, &sbinfo->shrinklist) {
537                 info = list_entry(pos, struct shmem_inode_info, shrinklist);
538
539                 /* pin the inode */
540                 inode = igrab(&info->vfs_inode);
541
542                 /* inode is about to be evicted */
543                 if (!inode) {
544                         list_del_init(&info->shrinklist);
545                         removed++;
546                         goto next;
547                 }
548
549                 /* Check if there's anything to gain */
550                 if (round_up(inode->i_size, PAGE_SIZE) ==
551                                 round_up(inode->i_size, HPAGE_PMD_SIZE)) {
552                         list_move(&info->shrinklist, &to_remove);
553                         removed++;
554                         goto next;
555                 }
556
557                 list_move(&info->shrinklist, &list);
558 next:
559                 if (!--batch)
560                         break;
561         }
562         spin_unlock(&sbinfo->shrinklist_lock);
563
564         list_for_each_safe(pos, next, &to_remove) {
565                 info = list_entry(pos, struct shmem_inode_info, shrinklist);
566                 inode = &info->vfs_inode;
567                 list_del_init(&info->shrinklist);
568                 iput(inode);
569         }
570
571         list_for_each_safe(pos, next, &list) {
572                 int ret;
573
574                 info = list_entry(pos, struct shmem_inode_info, shrinklist);
575                 inode = &info->vfs_inode;
576
577                 if (nr_to_split && split >= nr_to_split)
578                         goto leave;
579
580                 page = find_get_page(inode->i_mapping,
581                                 (inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT);
582                 if (!page)
583                         goto drop;
584
585                 /* No huge page at the end of the file: nothing to split */
586                 if (!PageTransHuge(page)) {
587                         put_page(page);
588                         goto drop;
589                 }
590
591                 /*
592                  * Leave the inode on the list if we failed to lock
593                  * the page at this time.
594                  *
595                  * Waiting for the lock may lead to deadlock in the
596                  * reclaim path.
597                  */
598                 if (!trylock_page(page)) {
599                         put_page(page);
600                         goto leave;
601                 }
602
603                 ret = split_huge_page(page);
604                 unlock_page(page);
605                 put_page(page);
606
607                 /* If split failed leave the inode on the list */
608                 if (ret)
609                         goto leave;
610
611                 split++;
612 drop:
613                 list_del_init(&info->shrinklist);
614                 removed++;
615 leave:
616                 iput(inode);
617         }
618
619         spin_lock(&sbinfo->shrinklist_lock);
620         list_splice_tail(&list, &sbinfo->shrinklist);
621         sbinfo->shrinklist_len -= removed;
622         spin_unlock(&sbinfo->shrinklist_lock);
623
624         return split;
625 }
626
627 static long shmem_unused_huge_scan(struct super_block *sb,
628                 struct shrink_control *sc)
629 {
630         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
631
632         if (!READ_ONCE(sbinfo->shrinklist_len))
633                 return SHRINK_STOP;
634
635         return shmem_unused_huge_shrink(sbinfo, sc, 0);
636 }
637
638 static long shmem_unused_huge_count(struct super_block *sb,
639                 struct shrink_control *sc)
640 {
641         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
642         return READ_ONCE(sbinfo->shrinklist_len);
643 }
644 #else /* !CONFIG_TRANSPARENT_HUGEPAGE */
645
646 #define shmem_huge SHMEM_HUGE_DENY
647
648 static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
649                 struct shrink_control *sc, unsigned long nr_to_split)
650 {
651         return 0;
652 }
653 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
654
655 static inline bool is_huge_enabled(struct shmem_sb_info *sbinfo)
656 {
657         if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
658             (shmem_huge == SHMEM_HUGE_FORCE || sbinfo->huge) &&
659             shmem_huge != SHMEM_HUGE_DENY)
660                 return true;
661         return false;
662 }
663
664 /*
665  * Like add_to_page_cache_locked, but error if expected item has gone.
666  */
667 static int shmem_add_to_page_cache(struct page *page,
668                                    struct address_space *mapping,
669                                    pgoff_t index, void *expected, gfp_t gfp,
670                                    struct mm_struct *charge_mm)
671 {
672         XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page));
673         unsigned long i = 0;
674         unsigned long nr = compound_nr(page);
675         int error;
676
677         VM_BUG_ON_PAGE(PageTail(page), page);
678         VM_BUG_ON_PAGE(index != round_down(index, nr), page);
679         VM_BUG_ON_PAGE(!PageLocked(page), page);
680         VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
681         VM_BUG_ON(expected && PageTransHuge(page));
682
683         page_ref_add(page, nr);
684         page->mapping = mapping;
685         page->index = index;
686
687         if (!PageSwapCache(page)) {
688                 error = mem_cgroup_charge(page, charge_mm, gfp);
689                 if (error) {
690                         if (PageTransHuge(page)) {
691                                 count_vm_event(THP_FILE_FALLBACK);
692                                 count_vm_event(THP_FILE_FALLBACK_CHARGE);
693                         }
694                         goto error;
695                 }
696         }
697         cgroup_throttle_swaprate(page, gfp);
698
699         do {
700                 void *entry;
701                 xas_lock_irq(&xas);
702                 entry = xas_find_conflict(&xas);
703                 if (entry != expected)
704                         xas_set_err(&xas, -EEXIST);
705                 xas_create_range(&xas);
706                 if (xas_error(&xas))
707                         goto unlock;
708 next:
709                 xas_store(&xas, page);
710                 if (++i < nr) {
711                         xas_next(&xas);
712                         goto next;
713                 }
714                 if (PageTransHuge(page)) {
715                         count_vm_event(THP_FILE_ALLOC);
716                         __mod_lruvec_page_state(page, NR_SHMEM_THPS, nr);
717                 }
718                 mapping->nrpages += nr;
719                 __mod_lruvec_page_state(page, NR_FILE_PAGES, nr);
720                 __mod_lruvec_page_state(page, NR_SHMEM, nr);
721 unlock:
722                 xas_unlock_irq(&xas);
723         } while (xas_nomem(&xas, gfp));
724
725         if (xas_error(&xas)) {
726                 error = xas_error(&xas);
727                 goto error;
728         }
729
730         return 0;
731 error:
732         page->mapping = NULL;
733         page_ref_sub(page, nr);
734         return error;
735 }
736
737 /*
738  * Like delete_from_page_cache, but substitutes swap for page.
739  */
740 static void shmem_delete_from_page_cache(struct page *page, void *radswap)
741 {
742         struct address_space *mapping = page->mapping;
743         int error;
744
745         VM_BUG_ON_PAGE(PageCompound(page), page);
746
747         xa_lock_irq(&mapping->i_pages);
748         error = shmem_replace_entry(mapping, page->index, page, radswap);
749         page->mapping = NULL;
750         mapping->nrpages--;
751         __dec_lruvec_page_state(page, NR_FILE_PAGES);
752         __dec_lruvec_page_state(page, NR_SHMEM);
753         xa_unlock_irq(&mapping->i_pages);
754         put_page(page);
755         BUG_ON(error);
756 }
757
758 /*
759  * Remove swap entry from page cache, free the swap and its page cache.
760  */
761 static int shmem_free_swap(struct address_space *mapping,
762                            pgoff_t index, void *radswap)
763 {
764         void *old;
765
766         old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
767         if (old != radswap)
768                 return -ENOENT;
769         free_swap_and_cache(radix_to_swp_entry(radswap));
770         return 0;
771 }
772
773 /*
774  * Determine (in bytes) how many of the shmem object's pages mapped by the
775  * given offsets are swapped out.
776  *
777  * This is safe to call without i_mutex or the i_pages lock thanks to RCU,
778  * as long as the inode doesn't go away and racy results are not a problem.
779  */
780 unsigned long shmem_partial_swap_usage(struct address_space *mapping,
781                                                 pgoff_t start, pgoff_t end)
782 {
783         XA_STATE(xas, &mapping->i_pages, start);
784         struct page *page;
785         unsigned long swapped = 0;
786
787         rcu_read_lock();
788         xas_for_each(&xas, page, end - 1) {
789                 if (xas_retry(&xas, page))
790                         continue;
791                 if (xa_is_value(page))
792                         swapped++;
793
794                 if (need_resched()) {
795                         xas_pause(&xas);
796                         cond_resched_rcu();
797                 }
798         }
799
800         rcu_read_unlock();
801
802         return swapped << PAGE_SHIFT;
803 }
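/*
 * E.g. three swapped-out entries in [start, end) with 4KiB pages yield
 * 12288: callers expect bytes, hence the shift by PAGE_SHIFT above.
 */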
804
805 /*
806  * Determine (in bytes) how many of the shmem object's pages mapped by the
807  * given vma are swapped out.
808  *
809  * This is safe to call without i_mutex or the i_pages lock thanks to RCU,
810  * as long as the inode doesn't go away and racy results are not a problem.
811  */
812 unsigned long shmem_swap_usage(struct vm_area_struct *vma)
813 {
814         struct inode *inode = file_inode(vma->vm_file);
815         struct shmem_inode_info *info = SHMEM_I(inode);
816         struct address_space *mapping = inode->i_mapping;
817         unsigned long swapped;
818
819         /* Be careful as we don't hold info->lock */
820         swapped = READ_ONCE(info->swapped);
821
822         /*
823          * The easier cases are when the shmem object has nothing in swap, or
824          * the vma maps it whole. Then we can simply use the stats that we
825          * already track.
826          */
827         if (!swapped)
828                 return 0;
829
830         if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
831                 return swapped << PAGE_SHIFT;
832
833         /* Here comes the more involved part */
834         return shmem_partial_swap_usage(mapping,
835                         linear_page_index(vma, vma->vm_start),
836                         linear_page_index(vma, vma->vm_end));
837 }
838
839 /*
841  * SysV IPC SHM_UNLOCK restores Unevictable pages to their evictable lists.
841  */
842 void shmem_unlock_mapping(struct address_space *mapping)
843 {
844         struct pagevec pvec;
845         pgoff_t index = 0;
846
847         pagevec_init(&pvec);
848         /*
849          * Minor point, but we might as well stop if someone else SHM_LOCKs it.
850          */
851         while (!mapping_unevictable(mapping)) {
852                 if (!pagevec_lookup(&pvec, mapping, &index))
853                         break;
854                 check_move_unevictable_pages(&pvec);
855                 pagevec_release(&pvec);
856                 cond_resched();
857         }
858 }
859
860 /*
861  * Check whether a hole-punch or truncation needs to split a huge page,
862  * returning true if no split was required, or the split has been successful.
863  *
864  * Eviction (or truncation to 0 size) should never need to split a huge page;
865  * but in rare cases might do so, if shmem_undo_range() failed to trylock on
866  * head, and then succeeded to trylock on tail.
867  *
868  * A split can only succeed when there are no additional references on the
869  * huge page: so the split below relies upon find_get_entries() having stopped
870  * when it found a subpage of the huge page, without getting further references.
871  */
872 static bool shmem_punch_compound(struct page *page, pgoff_t start, pgoff_t end)
873 {
874         if (!PageTransCompound(page))
875                 return true;
876
877         /* Just proceed to delete a huge page wholly within the range punched */
878         if (PageHead(page) &&
879             page->index >= start && page->index + HPAGE_PMD_NR <= end)
880                 return true;
881
882         /* Try to split huge page, so we can truly punch the hole or truncate */
883         return split_huge_page(page) >= 0;
884 }
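/*
 * E.g. (assuming 4KiB base pages and 2MiB huge pages) punching the range
 * covering indices 512..1023 out of a file with a huge page at index 512:
 * the head lies wholly within the range, so the page is deleted as a unit;
 * an unaligned punch instead forces a split so that only the covered
 * subpages are removed.
 */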
885
886 /*
887  * Remove range of pages and swap entries from page cache, and free them.
888  * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
889  */
890 static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
891                                                                  bool unfalloc)
892 {
893         struct address_space *mapping = inode->i_mapping;
894         struct shmem_inode_info *info = SHMEM_I(inode);
895         pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
896         pgoff_t end = (lend + 1) >> PAGE_SHIFT;
897         unsigned int partial_start = lstart & (PAGE_SIZE - 1);
898         unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1);
899         struct pagevec pvec;
900         pgoff_t indices[PAGEVEC_SIZE];
901         long nr_swaps_freed = 0;
902         pgoff_t index;
903         int i;
904
905         if (lend == -1)
906                 end = -1;       /* unsigned, so actually very big */
907
908         pagevec_init(&pvec);
909         index = start;
910         while (index < end && find_lock_entries(mapping, index, end - 1,
911                         &pvec, indices)) {
912                 for (i = 0; i < pagevec_count(&pvec); i++) {
913                         struct page *page = pvec.pages[i];
914
915                         index = indices[i];
916
917                         if (xa_is_value(page)) {
918                                 if (unfalloc)
919                                         continue;
920                                 nr_swaps_freed += !shmem_free_swap(mapping,
921                                                                 index, page);
922                                 continue;
923                         }
924                         index += thp_nr_pages(page) - 1;
925
926                         if (!unfalloc || !PageUptodate(page))
927                                 truncate_inode_page(mapping, page);
928                         unlock_page(page);
929                 }
930                 pagevec_remove_exceptionals(&pvec);
931                 pagevec_release(&pvec);
932                 cond_resched();
933                 index++;
934         }
935
936         if (partial_start) {
937                 struct page *page = NULL;
938                 shmem_getpage(inode, start - 1, &page, SGP_READ);
939                 if (page) {
940                         unsigned int top = PAGE_SIZE;
941                         if (start > end) {
942                                 top = partial_end;
943                                 partial_end = 0;
944                         }
945                         zero_user_segment(page, partial_start, top);
946                         set_page_dirty(page);
947                         unlock_page(page);
948                         put_page(page);
949                 }
950         }
951         if (partial_end) {
952                 struct page *page = NULL;
953                 shmem_getpage(inode, end, &page, SGP_READ);
954                 if (page) {
955                         zero_user_segment(page, 0, partial_end);
956                         set_page_dirty(page);
957                         unlock_page(page);
958                         put_page(page);
959                 }
960         }
961         if (start >= end)
962                 return;
963
964         index = start;
965         while (index < end) {
966                 cond_resched();
967
968                 if (!find_get_entries(mapping, index, end - 1, &pvec,
969                                 indices)) {
970                         /* If all gone or hole-punch or unfalloc, we're done */
971                         if (index == start || end != -1)
972                                 break;
973                         /* But if truncating, restart to make sure all gone */
974                         index = start;
975                         continue;
976                 }
977                 for (i = 0; i < pagevec_count(&pvec); i++) {
978                         struct page *page = pvec.pages[i];
979
980                         index = indices[i];
981                         if (xa_is_value(page)) {
982                                 if (unfalloc)
983                                         continue;
984                                 if (shmem_free_swap(mapping, index, page)) {
985                                         /* Swap was replaced by page: retry */
986                                         index--;
987                                         break;
988                                 }
989                                 nr_swaps_freed++;
990                                 continue;
991                         }
992
993                         lock_page(page);
994
995                         if (!unfalloc || !PageUptodate(page)) {
996                                 if (page_mapping(page) != mapping) {
997                                         /* Page was replaced by swap: retry */
998                                         unlock_page(page);
999                                         index--;
1000                                         break;
1001                                 }
1002                                 VM_BUG_ON_PAGE(PageWriteback(page), page);
1003                                 if (shmem_punch_compound(page, start, end))
1004                                         truncate_inode_page(mapping, page);
1005                                 else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
1006                                         /* Wipe the page and don't get stuck */
1007                                         clear_highpage(page);
1008                                         flush_dcache_page(page);
1009                                         set_page_dirty(page);
1010                                         if (index <
1011                                             round_up(start, HPAGE_PMD_NR))
1012                                                 start = index + 1;
1013                                 }
1014                         }
1015                         unlock_page(page);
1016                 }
1017                 pagevec_remove_exceptionals(&pvec);
1018                 pagevec_release(&pvec);
1019                 index++;
1020         }
1021
1022         spin_lock_irq(&info->lock);
1023         info->swapped -= nr_swaps_freed;
1024         shmem_recalc_inode(inode);
1025         spin_unlock_irq(&info->lock);
1026 }
1027
1028 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
1029 {
1030         shmem_undo_range(inode, lstart, lend, false);
1031         inode->i_ctime = inode->i_mtime = current_time(inode);
1032 }
1033 EXPORT_SYMBOL_GPL(shmem_truncate_range);
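/*
 * Hole-punching uses this helper too: fallocate() with FALLOC_FL_PUNCH_HOLE
 * ends up doing roughly the following, plus locking and unmapping of the
 * affected range (sketch only):
 *
 *	shmem_truncate_range(inode, offset, offset + len - 1);
 */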
1034
1035 static int shmem_getattr(struct user_namespace *mnt_userns,
1036                          const struct path *path, struct kstat *stat,
1037                          u32 request_mask, unsigned int query_flags)
1038 {
1039         struct inode *inode = path->dentry->d_inode;
1040         struct shmem_inode_info *info = SHMEM_I(inode);
1041         struct shmem_sb_info *sb_info = SHMEM_SB(inode->i_sb);
1042
1043         if (info->alloced - info->swapped != inode->i_mapping->nrpages) {
1044                 spin_lock_irq(&info->lock);
1045                 shmem_recalc_inode(inode);
1046                 spin_unlock_irq(&info->lock);
1047         }
1048         generic_fillattr(&init_user_ns, inode, stat);
1049
1050         if (is_huge_enabled(sb_info))
1051                 stat->blksize = HPAGE_PMD_SIZE;
1052
1053         return 0;
1054 }
1055
1056 static int shmem_setattr(struct user_namespace *mnt_userns,
1057                          struct dentry *dentry, struct iattr *attr)
1058 {
1059         struct inode *inode = d_inode(dentry);
1060         struct shmem_inode_info *info = SHMEM_I(inode);
1061         struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1062         int error;
1063
1064         error = setattr_prepare(&init_user_ns, dentry, attr);
1065         if (error)
1066                 return error;
1067
1068         if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
1069                 loff_t oldsize = inode->i_size;
1070                 loff_t newsize = attr->ia_size;
1071
1072                 /* protected by i_mutex */
1073                 if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
1074                     (newsize > oldsize && (info->seals & F_SEAL_GROW)))
1075                         return -EPERM;
1076
1077                 if (newsize != oldsize) {
1078                         error = shmem_reacct_size(SHMEM_I(inode)->flags,
1079                                         oldsize, newsize);
1080                         if (error)
1081                                 return error;
1082                         i_size_write(inode, newsize);
1083                         inode->i_ctime = inode->i_mtime = current_time(inode);
1084                 }
1085                 if (newsize <= oldsize) {
1086                         loff_t holebegin = round_up(newsize, PAGE_SIZE);
1087                         if (oldsize > holebegin)
1088                                 unmap_mapping_range(inode->i_mapping,
1089                                                         holebegin, 0, 1);
1090                         if (info->alloced)
1091                                 shmem_truncate_range(inode,
1092                                                         newsize, (loff_t)-1);
1093                         /* unmap again to remove racily COWed private pages */
1094                         if (oldsize > holebegin)
1095                                 unmap_mapping_range(inode->i_mapping,
1096                                                         holebegin, 0, 1);
1097
1098                         /*
1099                          * Part of the huge page can be beyond i_size: subject
1100                          * to shrink under memory pressure.
1101                          */
1102                         if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
1103                                 spin_lock(&sbinfo->shrinklist_lock);
1104                                 /*
1105                                  * _careful to defend against unlocked access to
1106                                  * ->shrink_list in shmem_unused_huge_shrink()
1107                                  */
1108                                 if (list_empty_careful(&info->shrinklist)) {
1109                                         list_add_tail(&info->shrinklist,
1110                                                         &sbinfo->shrinklist);
1111                                         sbinfo->shrinklist_len++;
1112                                 }
1113                                 spin_unlock(&sbinfo->shrinklist_lock);
1114                         }
1115                 }
1116         }
1117
1118         setattr_copy(&init_user_ns, inode, attr);
1119         if (attr->ia_valid & ATTR_MODE)
1120                 error = posix_acl_chmod(&init_user_ns, inode, inode->i_mode);
1121         return error;
1122 }
1123
1124 static void shmem_evict_inode(struct inode *inode)
1125 {
1126         struct shmem_inode_info *info = SHMEM_I(inode);
1127         struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1128
1129         if (shmem_mapping(inode->i_mapping)) {
1130                 shmem_unacct_size(info->flags, inode->i_size);
1131                 inode->i_size = 0;
1132                 shmem_truncate_range(inode, 0, (loff_t)-1);
1133                 if (!list_empty(&info->shrinklist)) {
1134                         spin_lock(&sbinfo->shrinklist_lock);
1135                         if (!list_empty(&info->shrinklist)) {
1136                                 list_del_init(&info->shrinklist);
1137                                 sbinfo->shrinklist_len--;
1138                         }
1139                         spin_unlock(&sbinfo->shrinklist_lock);
1140                 }
1141                 while (!list_empty(&info->swaplist)) {
1142                         /* Wait while shmem_unuse() is scanning this inode... */
1143                         wait_var_event(&info->stop_eviction,
1144                                        !atomic_read(&info->stop_eviction));
1145                         mutex_lock(&shmem_swaplist_mutex);
1146                         /* ...but beware of the race if we peeked too early */
1147                         if (!atomic_read(&info->stop_eviction))
1148                                 list_del_init(&info->swaplist);
1149                         mutex_unlock(&shmem_swaplist_mutex);
1150                 }
1151         }
1152
1153         simple_xattrs_free(&info->xattrs);
1154         WARN_ON(inode->i_blocks);
1155         shmem_free_inode(inode->i_sb);
1156         clear_inode(inode);
1157 }
1158
1159 extern struct swap_info_struct *swap_info[];
1160
1161 static int shmem_find_swap_entries(struct address_space *mapping,
1162                                    pgoff_t start, unsigned int nr_entries,
1163                                    struct page **entries, pgoff_t *indices,
1164                                    unsigned int type, bool frontswap)
1165 {
1166         XA_STATE(xas, &mapping->i_pages, start);
1167         struct page *page;
1168         swp_entry_t entry;
1169         unsigned int ret = 0;
1170
1171         if (!nr_entries)
1172                 return 0;
1173
1174         rcu_read_lock();
1175         xas_for_each(&xas, page, ULONG_MAX) {
1176                 if (xas_retry(&xas, page))
1177                         continue;
1178
1179                 if (!xa_is_value(page))
1180                         continue;
1181
1182                 entry = radix_to_swp_entry(page);
1183                 if (swp_type(entry) != type)
1184                         continue;
1185                 if (frontswap &&
1186                     !frontswap_test(swap_info[type], swp_offset(entry)))
1187                         continue;
1188
1189                 indices[ret] = xas.xa_index;
1190                 entries[ret] = page;
1191
1192                 if (need_resched()) {
1193                         xas_pause(&xas);
1194                         cond_resched_rcu();
1195                 }
1196                 if (++ret == nr_entries)
1197                         break;
1198         }
1199         rcu_read_unlock();
1200
1201         return ret;
1202 }
1203
1204 /*
1205  * Move the swapped pages for an inode to page cache. Returns the count
1206  * of pages swapped in, or the error in case of failure.
1207  */
1208 static int shmem_unuse_swap_entries(struct inode *inode, struct pagevec pvec,
1209                                     pgoff_t *indices)
1210 {
1211         int i = 0;
1212         int ret = 0;
1213         int error = 0;
1214         struct address_space *mapping = inode->i_mapping;
1215
1216         for (i = 0; i < pvec.nr; i++) {
1217                 struct page *page = pvec.pages[i];
1218
1219                 if (!xa_is_value(page))
1220                         continue;
1221                 error = shmem_swapin_page(inode, indices[i],
1222                                           &page, SGP_CACHE,
1223                                           mapping_gfp_mask(mapping),
1224                                           NULL, NULL);
1225                 if (error == 0) {
1226                         unlock_page(page);
1227                         put_page(page);
1228                         ret++;
1229                 }
1230                 if (error == -ENOMEM)
1231                         break;
1232                 error = 0;
1233         }
1234         return error ? error : ret;
1235 }
1236
1237 /*
1238  * If swap found in inode, free it and move page from swapcache to filecache.
1239  */
1240 static int shmem_unuse_inode(struct inode *inode, unsigned int type,
1241                              bool frontswap, unsigned long *fs_pages_to_unuse)
1242 {
1243         struct address_space *mapping = inode->i_mapping;
1244         pgoff_t start = 0;
1245         struct pagevec pvec;
1246         pgoff_t indices[PAGEVEC_SIZE];
1247         bool frontswap_partial = (frontswap && *fs_pages_to_unuse > 0);
1248         int ret = 0;
1249
1250         pagevec_init(&pvec);
1251         do {
1252                 unsigned int nr_entries = PAGEVEC_SIZE;
1253
1254                 if (frontswap_partial && *fs_pages_to_unuse < PAGEVEC_SIZE)
1255                         nr_entries = *fs_pages_to_unuse;
1256
1257                 pvec.nr = shmem_find_swap_entries(mapping, start, nr_entries,
1258                                                   pvec.pages, indices,
1259                                                   type, frontswap);
1260                 if (pvec.nr == 0) {
1261                         ret = 0;
1262                         break;
1263                 }
1264
1265                 ret = shmem_unuse_swap_entries(inode, pvec, indices);
1266                 if (ret < 0)
1267                         break;
1268
1269                 if (frontswap_partial) {
1270                         *fs_pages_to_unuse -= ret;
1271                         if (*fs_pages_to_unuse == 0) {
1272                                 ret = FRONTSWAP_PAGES_UNUSED;
1273                                 break;
1274                         }
1275                 }
1276
1277                 start = indices[pvec.nr - 1];
1278         } while (true);
1279
1280         return ret;
1281 }
1282
1283 /*
1284  * Read all the shared memory data that resides in the swap
1285  * device 'type' back into memory, so the swap device can be
1286  * unused.
1287  */
1288 int shmem_unuse(unsigned int type, bool frontswap,
1289                 unsigned long *fs_pages_to_unuse)
1290 {
1291         struct shmem_inode_info *info, *next;
1292         int error = 0;
1293
1294         if (list_empty(&shmem_swaplist))
1295                 return 0;
1296
1297         mutex_lock(&shmem_swaplist_mutex);
1298         list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) {
1299                 if (!info->swapped) {
1300                         list_del_init(&info->swaplist);
1301                         continue;
1302                 }
1303                 /*
1304                  * Drop the swaplist mutex while searching the inode for swap;
1305                  * but before doing so, make sure shmem_evict_inode() will not
1306                  * remove placeholder inode from swaplist, nor let it be freed
1307                  * (igrab() would protect from unlink, but not from unmount).
1308                  */
1309                 atomic_inc(&info->stop_eviction);
1310                 mutex_unlock(&shmem_swaplist_mutex);
1311
1312                 error = shmem_unuse_inode(&info->vfs_inode, type, frontswap,
1313                                           fs_pages_to_unuse);
1314                 cond_resched();
1315
1316                 mutex_lock(&shmem_swaplist_mutex);
1317                 next = list_next_entry(info, swaplist);
1318                 if (!info->swapped)
1319                         list_del_init(&info->swaplist);
1320                 if (atomic_dec_and_test(&info->stop_eviction))
1321                         wake_up_var(&info->stop_eviction);
1322                 if (error)
1323                         break;
1324         }
1325         mutex_unlock(&shmem_swaplist_mutex);
1326
1327         return error;
1328 }
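/*
 * shmem_unuse() is driven by swapoff: try_to_unuse() calls it to pull
 * tmpfs pages off the swap device being disabled before it goes on to
 * scan process address spaces (behavioural note, see mm/swapfile.c).
 */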
1329
1330 /*
1331  * Move the page from the page cache to the swap cache.
1332  */
1333 static int shmem_writepage(struct page *page, struct writeback_control *wbc)
1334 {
1335         struct shmem_inode_info *info;
1336         struct address_space *mapping;
1337         struct inode *inode;
1338         swp_entry_t swap;
1339         pgoff_t index;
1340
1341         VM_BUG_ON_PAGE(PageCompound(page), page);
1342         BUG_ON(!PageLocked(page));
1343         mapping = page->mapping;
1344         index = page->index;
1345         inode = mapping->host;
1346         info = SHMEM_I(inode);
1347         if (info->flags & VM_LOCKED)
1348                 goto redirty;
1349         if (!total_swap_pages)
1350                 goto redirty;
1351
1352         /*
1353          * Our capabilities prevent regular writeback or sync from ever calling
1354          * shmem_writepage; but a stacking filesystem might use ->writepage of
1355          * its underlying filesystem, in which case tmpfs should write out to
1356          * swap only in response to memory pressure, and not for the writeback
1357          * threads or sync.
1358          */
1359         if (!wbc->for_reclaim) {
1360                 WARN_ON_ONCE(1);        /* Still happens? Tell us about it! */
1361                 goto redirty;
1362         }
1363
1364         /*
1365          * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
1366          * value into swapfile.c, the only way we can correctly account for a
1367          * fallocated page arriving here is now to initialize it and write it.
1368          *
1369          * That's okay for a page already fallocated earlier, but if we have
1370          * not yet completed the fallocation, then (a) we want to keep track
1371          * of this page in case we have to undo it, and (b) it may not be a
1372          * good idea to continue anyway, once we're pushing into swap.  So
1373          * reactivate the page, and let shmem_fallocate() quit when too many.
1374          */
1375         if (!PageUptodate(page)) {
1376                 if (inode->i_private) {
1377                         struct shmem_falloc *shmem_falloc;
1378                         spin_lock(&inode->i_lock);
1379                         shmem_falloc = inode->i_private;
1380                         if (shmem_falloc &&
1381                             !shmem_falloc->waitq &&
1382                             index >= shmem_falloc->start &&
1383                             index < shmem_falloc->next)
1384                                 shmem_falloc->nr_unswapped++;
1385                         else
1386                                 shmem_falloc = NULL;
1387                         spin_unlock(&inode->i_lock);
1388                         if (shmem_falloc)
1389                                 goto redirty;
1390                 }
1391                 clear_highpage(page);
1392                 flush_dcache_page(page);
1393                 SetPageUptodate(page);
1394         }
1395
1396         swap = get_swap_page(page);
1397         if (!swap.val)
1398                 goto redirty;
1399
1400         /*
1401          * Add inode to shmem_unuse()'s list of swapped-out inodes,
1402          * if it's not already there.  Do it now before the page is
1403          * moved to swap cache, when its pagelock no longer protects
1404          * the inode from eviction.  But don't unlock the mutex until
1405          * we've incremented swapped, because shmem_unuse_inode() will
1406          * prune a !swapped inode from the swaplist under this mutex.
1407          */
1408         mutex_lock(&shmem_swaplist_mutex);
1409         if (list_empty(&info->swaplist))
1410                 list_add(&info->swaplist, &shmem_swaplist);
1411
1412         if (add_to_swap_cache(page, swap,
1413                         __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN,
1414                         NULL) == 0) {
1415                 spin_lock_irq(&info->lock);
1416                 shmem_recalc_inode(inode);
1417                 info->swapped++;
1418                 spin_unlock_irq(&info->lock);
1419
1420                 swap_shmem_alloc(swap);
1421                 shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));
1422
1423                 mutex_unlock(&shmem_swaplist_mutex);
1424                 BUG_ON(page_mapped(page));
1425                 swap_writepage(page, wbc);
1426                 return 0;
1427         }
1428
1429         mutex_unlock(&shmem_swaplist_mutex);
1430         put_swap_page(page, swap);
1431 redirty:
1432         set_page_dirty(page);
1433         if (wbc->for_reclaim)
1434                 return AOP_WRITEPAGE_ACTIVATE;  /* Return with page locked */
1435         unlock_page(page);
1436         return 0;
1437 }
1438
1439 #if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS)
1440 static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1441 {
1442         char buffer[64];
1443
1444         if (!mpol || mpol->mode == MPOL_DEFAULT)
1445                 return;         /* show nothing */
1446
1447         mpol_to_str(buffer, sizeof(buffer), mpol);
1448
1449         seq_printf(seq, ",mpol=%s", buffer);
1450 }
1451
1452 static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
1453 {
1454         struct mempolicy *mpol = NULL;
1455         if (sbinfo->mpol) {
1456                 spin_lock(&sbinfo->stat_lock);  /* prevent replace/use races */
1457                 mpol = sbinfo->mpol;
1458                 mpol_get(mpol);
1459                 spin_unlock(&sbinfo->stat_lock);
1460         }
1461         return mpol;
1462 }
1463 #else /* !CONFIG_NUMA || !CONFIG_TMPFS */
1464 static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1465 {
1466 }
1467 static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
1468 {
1469         return NULL;
1470 }
1471 #endif /* CONFIG_NUMA && CONFIG_TMPFS */
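     /*
     * Without CONFIG_NUMA, vm_area_struct has no vm_policy member: alias it
     * to vm_private_data so the pseudo-vma helpers below work either way.
     */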
1472 #ifndef CONFIG_NUMA
1473 #define vm_policy vm_private_data
1474 #endif
1475
1476 static void shmem_pseudo_vma_init(struct vm_area_struct *vma,
1477                 struct shmem_inode_info *info, pgoff_t index)
1478 {
1479         /* Create a pseudo vma that just contains the policy */
1480         vma_init(vma, NULL);
1481         /* Bias interleave by inode number to distribute better across nodes */
1482         vma->vm_pgoff = index + info->vfs_inode.i_ino;
1483         vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index);
1484 }
1485
1486 static void shmem_pseudo_vma_destroy(struct vm_area_struct *vma)
1487 {
1488         /* Drop reference taken by mpol_shared_policy_lookup() */
1489         mpol_cond_put(vma->vm_policy);
1490 }
1491
1492 static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
1493                         struct shmem_inode_info *info, pgoff_t index)
1494 {
1495         struct vm_area_struct pvma;
1496         struct page *page;
1497         struct vm_fault vmf = {
1498                 .vma = &pvma,
1499         };
1500
1501         shmem_pseudo_vma_init(&pvma, info, index);
1502         page = swap_cluster_readahead(swap, gfp, &vmf);
1503         shmem_pseudo_vma_destroy(&pvma);
1504
1505         return page;
1506 }
1507
1508 /*
1509  * Make sure huge_gfp is always more limited than limit_gfp.
1510  * Some of the flags set permissions, while others set limitations.
1511  */
1512 static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp)
1513 {
1514         gfp_t allowflags = __GFP_IO | __GFP_FS | __GFP_RECLAIM;
1515         gfp_t denyflags = __GFP_NOWARN | __GFP_NORETRY;
1516         gfp_t zoneflags = limit_gfp & GFP_ZONEMASK;
1517         gfp_t result = huge_gfp & ~(allowflags | GFP_ZONEMASK);
1518
1519         /* Allow allocations only from the originally specified zones. */
1520         result |= zoneflags;
1521
1522         /*
1523          * Minimize the result gfp by taking the union with the deny flags,
1524          * and the intersection of the allow flags.
1525          */
1526         result |= (limit_gfp & denyflags);
1527         result |= (huge_gfp & limit_gfp) & allowflags;
1528
1529         return result;
1530 }
1531
1532 static struct page *shmem_alloc_hugepage(gfp_t gfp,
1533                 struct shmem_inode_info *info, pgoff_t index)
1534 {
1535         struct vm_area_struct pvma;
1536         struct address_space *mapping = info->vfs_inode.i_mapping;
1537         pgoff_t hindex;
1538         struct page *page;
1539
1540         hindex = round_down(index, HPAGE_PMD_NR);
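             /* Give up if any entry already occupies the huge-aligned range. */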
1541         if (xa_find(&mapping->i_pages, &hindex, hindex + HPAGE_PMD_NR - 1,
1542                                                                 XA_PRESENT))
1543                 return NULL;
1544
1545         shmem_pseudo_vma_init(&pvma, info, hindex);
1546         page = alloc_pages_vma(gfp, HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(),
1547                                true);
1548         shmem_pseudo_vma_destroy(&pvma);
1549         if (page)
1550                 prep_transhuge_page(page);
1551         else
1552                 count_vm_event(THP_FILE_FALLBACK);
1553         return page;
1554 }
1555
1556 static struct page *shmem_alloc_page(gfp_t gfp,
1557                         struct shmem_inode_info *info, pgoff_t index)
1558 {
1559         struct vm_area_struct pvma;
1560         struct page *page;
1561
1562         shmem_pseudo_vma_init(&pvma, info, index);
1563         page = alloc_page_vma(gfp, &pvma, 0);
1564         shmem_pseudo_vma_destroy(&pvma);
1565
1566         return page;
1567 }
1568
1569 static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
1570                 struct inode *inode,
1571                 pgoff_t index, bool huge)
1572 {
1573         struct shmem_inode_info *info = SHMEM_I(inode);
1574         struct page *page;
1575         int nr;
1576         int err = -ENOSPC;
1577
1578         if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
1579                 huge = false;
1580         nr = huge ? HPAGE_PMD_NR : 1;
1581
1582         if (!shmem_inode_acct_block(inode, nr))
1583                 goto failed;
1584
1585         if (huge)
1586                 page = shmem_alloc_hugepage(gfp, info, index);
1587         else
1588                 page = shmem_alloc_page(gfp, info, index);
1589         if (page) {
1590                 __SetPageLocked(page);
1591                 __SetPageSwapBacked(page);
1592                 return page;
1593         }
1594
1595         err = -ENOMEM;
1596         shmem_inode_unacct_blocks(inode, nr);
1597 failed:
1598         return ERR_PTR(err);
1599 }
1600
1601 /*
1602  * When a page is moved from swapcache to shmem filecache (either by the
1603  * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of
1604  * shmem_unuse_inode()), it may have been read in earlier from swap, in
1605  * ignorance of the mapping it belongs to.  If that mapping has special
1606  * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
1607  * we may need to copy to a suitable page before moving to filecache.
1608  *
1609  * In a future release, this may well be extended to respect cpuset and
1610  * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
1611  * but for now it is a simple matter of zone.
1612  */
1613 static bool shmem_should_replace_page(struct page *page, gfp_t gfp)
1614 {
1615         return page_zonenum(page) > gfp_zone(gfp);
1616 }
1617
1618 static int shmem_replace_page(struct page **pagep, gfp_t gfp,
1619                                 struct shmem_inode_info *info, pgoff_t index)
1620 {
1621         struct page *oldpage, *newpage;
1622         struct address_space *swap_mapping;
1623         swp_entry_t entry;
1624         pgoff_t swap_index;
1625         int error;
1626
1627         oldpage = *pagep;
1628         entry.val = page_private(oldpage);
1629         swap_index = swp_offset(entry);
1630         swap_mapping = page_mapping(oldpage);
1631
1632         /*
1633          * We have arrived here because our zones are constrained, so don't
1634          * limit chance of success by further cpuset and node constraints.
1635          */
1636         gfp &= ~GFP_CONSTRAINT_MASK;
1637         newpage = shmem_alloc_page(gfp, info, index);
1638         if (!newpage)
1639                 return -ENOMEM;
1640
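             /*
             * Hold two references on newpage: one stands in for the swapcache
             * reference taken over from oldpage, the other is returned to the
             * caller in *pagep; oldpage gives up both of its own below.
             */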
1641         get_page(newpage);
1642         copy_highpage(newpage, oldpage);
1643         flush_dcache_page(newpage);
1644
1645         __SetPageLocked(newpage);
1646         __SetPageSwapBacked(newpage);
1647         SetPageUptodate(newpage);
1648         set_page_private(newpage, entry.val);
1649         SetPageSwapCache(newpage);
1650
1651         /*
1652          * Our caller will very soon move newpage out of swapcache, but it's
1653          * a nice clean interface for us to replace oldpage by newpage there.
1654          */
1655         xa_lock_irq(&swap_mapping->i_pages);
1656         error = shmem_replace_entry(swap_mapping, swap_index, oldpage, newpage);
1657         if (!error) {
1658                 mem_cgroup_migrate(oldpage, newpage);
1659                 __inc_lruvec_page_state(newpage, NR_FILE_PAGES);
1660                 __dec_lruvec_page_state(oldpage, NR_FILE_PAGES);
1661         }
1662         xa_unlock_irq(&swap_mapping->i_pages);
1663
1664         if (unlikely(error)) {
1665                 /*
1666                  * Is this possible?  I think not, now that our callers check
1667                  * both PageSwapCache and page_private after getting page lock;
1668                  * but be defensive.  Reverse old to newpage for clear and free.
1669                  */
1670                 oldpage = newpage;
1671         } else {
1672                 lru_cache_add(newpage);
1673                 *pagep = newpage;
1674         }
1675
1676         ClearPageSwapCache(oldpage);
1677         set_page_private(oldpage, 0);
1678
1679         unlock_page(oldpage);
1680         put_page(oldpage);
1681         put_page(oldpage);
1682         return error;
1683 }
1684
1685 /*
1686  * Swap in the page pointed to by *pagep.
1687  * Caller has to make sure that *pagep contains a valid swap entry.
1688  * Returns 0 and the page in *pagep on success. On failure, returns a
1689  * negative error code and leaves *pagep NULL.
1690  */
1691 static int shmem_swapin_page(struct inode *inode, pgoff_t index,
1692                              struct page **pagep, enum sgp_type sgp,
1693                              gfp_t gfp, struct vm_area_struct *vma,
1694                              vm_fault_t *fault_type)
1695 {
1696         struct address_space *mapping = inode->i_mapping;
1697         struct shmem_inode_info *info = SHMEM_I(inode);
1698         struct mm_struct *charge_mm = vma ? vma->vm_mm : NULL;
1699         struct swap_info_struct *si;
1700         struct page *page = NULL;
1701         swp_entry_t swap;
1702         int error;
1703
1704         VM_BUG_ON(!*pagep || !xa_is_value(*pagep));
1705         swap = radix_to_swp_entry(*pagep);
1706         *pagep = NULL;
1707
1708         /* Prevent swapoff from happening to us. */
1709         si = get_swap_device(swap);
1710         if (!si) {
1711                 error = -EINVAL;
1712                 goto failed;
1713         }
1714         /* Look it up and read it in... */
1715         page = lookup_swap_cache(swap, NULL, 0);
1716         if (!page) {
1717                 /* Or update major stats only when swapin succeeds?? */
1718                 if (fault_type) {
1719                         *fault_type |= VM_FAULT_MAJOR;
1720                         count_vm_event(PGMAJFAULT);
1721                         count_memcg_event_mm(charge_mm, PGMAJFAULT);
1722                 }
1723                 /* Here we actually start the io */
1724                 page = shmem_swapin(swap, gfp, info, index);
1725                 if (!page) {
1726                         error = -ENOMEM;
1727                         goto failed;
1728                 }
1729         }
1730
1731         /* We have to do this with page locked to prevent races */
1732         lock_page(page);
1733         if (!PageSwapCache(page) || page_private(page) != swap.val ||
1734             !shmem_confirm_swap(mapping, index, swap)) {
1735                 error = -EEXIST;
1736                 goto unlock;
1737         }
1738         if (!PageUptodate(page)) {
1739                 error = -EIO;
1740                 goto failed;
1741         }
1742         wait_on_page_writeback(page);
1743
1744         /*
1745          * Some architectures may have to restore extra metadata to the
1746          * physical page after reading from swap.
1747          */
1748         arch_swap_restore(swap, page);
1749
1750         if (shmem_should_replace_page(page, gfp)) {
1751                 error = shmem_replace_page(&page, gfp, info, index);
1752                 if (error)
1753                         goto failed;
1754         }
1755
1756         error = shmem_add_to_page_cache(page, mapping, index,
1757                                         swp_to_radix_entry(swap), gfp,
1758                                         charge_mm);
1759         if (error)
1760                 goto failed;
1761
1762         spin_lock_irq(&info->lock);
1763         info->swapped--;
1764         shmem_recalc_inode(inode);
1765         spin_unlock_irq(&info->lock);
1766
1767         if (sgp == SGP_WRITE)
1768                 mark_page_accessed(page);
1769
1770         delete_from_swap_cache(page);
1771         set_page_dirty(page);
1772         swap_free(swap);
1773
1774         *pagep = page;
1775         if (si)
1776                 put_swap_device(si);
1777         return 0;
1778 failed:
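             /*
             * If the mapping no longer holds this swap entry, someone raced
             * with us: return -EEXIST so the caller retries its lookup.
             */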
1779         if (!shmem_confirm_swap(mapping, index, swap))
1780                 error = -EEXIST;
1781 unlock:
1782         if (page) {
1783                 unlock_page(page);
1784                 put_page(page);
1785         }
1786
1787         if (si)
1788                 put_swap_device(si);
1789
1790         return error;
1791 }
1792
1793 /*
1794  * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
1795  *
1796  * If we allocate a new one we do not mark it dirty. That's up to the
1797  * vm. If we swap it in we mark it dirty, since we also free the swap
1798  * entry: a page cannot live in both the swap and page cache at once.
1799  *
1800  * vmf and fault_type are only supplied by shmem_fault:
1801  * otherwise they are NULL.
1802  */
1803 static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
1804         struct page **pagep, enum sgp_type sgp, gfp_t gfp,
1805         struct vm_area_struct *vma, struct vm_fault *vmf,
1806                         vm_fault_t *fault_type)
1807 {
1808         struct address_space *mapping = inode->i_mapping;
1809         struct shmem_inode_info *info = SHMEM_I(inode);
1810         struct shmem_sb_info *sbinfo;
1811         struct mm_struct *charge_mm;
1812         struct page *page;
1813         enum sgp_type sgp_huge = sgp;
1814         pgoff_t hindex = index;
1815         gfp_t huge_gfp;
1816         int error;
1817         int once = 0;
1818         int alloced = 0;
1819
1820         if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
1821                 return -EFBIG;
1822         if (sgp == SGP_NOHUGE || sgp == SGP_HUGE)
1823                 sgp = SGP_CACHE;
1824 repeat:
1825         if (sgp <= SGP_CACHE &&
1826             ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1827                 return -EINVAL;
1828         }
1829
1830         sbinfo = SHMEM_SB(inode->i_sb);
1831         charge_mm = vma ? vma->vm_mm : NULL;
1832
1833         page = pagecache_get_page(mapping, index,
1834                                         FGP_ENTRY | FGP_HEAD | FGP_LOCK, 0);
1835         if (xa_is_value(page)) {
1836                 error = shmem_swapin_page(inode, index, &page,
1837                                           sgp, gfp, vma, fault_type);
1838                 if (error == -EEXIST)
1839                         goto repeat;
1840
1841                 *pagep = page;
1842                 return error;
1843         }
1844
1845         if (page)
1846                 hindex = page->index;
1847         if (page && sgp == SGP_WRITE)
1848                 mark_page_accessed(page);
1849
1850         /* fallocated page? */
1851         if (page && !PageUptodate(page)) {
1852                 if (sgp != SGP_READ)
1853                         goto clear;
1854                 unlock_page(page);
1855                 put_page(page);
1856                 page = NULL;
1857                 hindex = index;
1858         }
1859         if (page || sgp == SGP_READ)
1860                 goto out;
1861
1862         /*
1863          * Fast cache lookup did not find it:
1864          * bring it back from swap or allocate.
1865          */
1866
1867         if (vma && userfaultfd_missing(vma)) {
1868                 *fault_type = handle_userfault(vmf, VM_UFFD_MISSING);
1869                 return 0;
1870         }
1871
1872         /* shmem_symlink() */
1873         if (!shmem_mapping(mapping))
1874                 goto alloc_nohuge;
1875         if (shmem_huge == SHMEM_HUGE_DENY || sgp_huge == SGP_NOHUGE)
1876                 goto alloc_nohuge;
1877         if (shmem_huge == SHMEM_HUGE_FORCE)
1878                 goto alloc_huge;
1879         switch (sbinfo->huge) {
1880         case SHMEM_HUGE_NEVER:
1881                 goto alloc_nohuge;
1882         case SHMEM_HUGE_WITHIN_SIZE: {
1883                 loff_t i_size;
1884                 pgoff_t off;
1885
1886                 off = round_up(index, HPAGE_PMD_NR);
1887                 i_size = round_up(i_size_read(inode), PAGE_SIZE);
1888                 if (i_size >= HPAGE_PMD_SIZE &&
1889                     i_size >> PAGE_SHIFT >= off)
1890                         goto alloc_huge;
1891
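                     /*
                     * i_size does not yet cover a whole huge page at this
                     * index: fall through and honour an explicit madvise
                     * hint, if any.
                     */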
1892                 fallthrough;
1893         }
1894         case SHMEM_HUGE_ADVISE:
1895                 if (sgp_huge == SGP_HUGE)
1896                         goto alloc_huge;
1897                 /* TODO: implement fadvise() hints */
1898                 goto alloc_nohuge;
1899         }
1900
1901 alloc_huge:
1902         huge_gfp = vma_thp_gfp_mask(vma);
1903         huge_gfp = limit_gfp_mask(huge_gfp, gfp);
1904         page = shmem_alloc_and_acct_page(huge_gfp, inode, index, true);
1905         if (IS_ERR(page)) {
1906 alloc_nohuge:
1907                 page = shmem_alloc_and_acct_page(gfp, inode,
1908                                                  index, false);
1909         }
1910         if (IS_ERR(page)) {
1911                 int retry = 5;
1912
1913                 error = PTR_ERR(page);
1914                 page = NULL;
1915                 if (error != -ENOSPC)
1916                         goto unlock;
1917                 /*
1918                  * Try to reclaim some space by splitting a huge page
1919                  * beyond i_size on the filesystem.
1920                  */
1921                 while (retry--) {
1922                         int ret;
1923
1924                         ret = shmem_unused_huge_shrink(sbinfo, NULL, 1);
1925                         if (ret == SHRINK_STOP)
1926                                 break;
1927                         if (ret)
1928                                 goto alloc_nohuge;
1929                 }
1930                 goto unlock;
1931         }
1932
1933         if (PageTransHuge(page))
1934                 hindex = round_down(index, HPAGE_PMD_NR);
1935         else
1936                 hindex = index;
1937
1938         if (sgp == SGP_WRITE)
1939                 __SetPageReferenced(page);
1940
1941         error = shmem_add_to_page_cache(page, mapping, hindex,
1942                                         NULL, gfp & GFP_RECLAIM_MASK,
1943                                         charge_mm);
1944         if (error)
1945                 goto unacct;
1946         lru_cache_add(page);
1947
1948         spin_lock_irq(&info->lock);
1949         info->alloced += compound_nr(page);
1950         inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page);
1951         shmem_recalc_inode(inode);
1952         spin_unlock_irq(&info->lock);
1953         alloced = true;
1954
1955         if (PageTransHuge(page) &&
1956             DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
1957                         hindex + HPAGE_PMD_NR - 1) {
1958                 /*
1959                  * Part of the huge page is beyond i_size: subject
1960                  * to shrink under memory pressure.
1961                  */
1962                 spin_lock(&sbinfo->shrinklist_lock);
1963                 /*
1964                  * Be careful to defend against unlocked access to
1965                  * ->shrinklist in shmem_unused_huge_shrink()
1966                  */
1967                 if (list_empty_careful(&info->shrinklist)) {
1968                         list_add_tail(&info->shrinklist,
1969                                       &sbinfo->shrinklist);
1970                         sbinfo->shrinklist_len++;
1971                 }
1972                 spin_unlock(&sbinfo->shrinklist_lock);
1973         }
1974
1975         /*
1976          * Let SGP_FALLOC use the SGP_WRITE optimization on a new page.
1977          */
1978         if (sgp == SGP_FALLOC)
1979                 sgp = SGP_WRITE;
1980 clear:
1981         /*
1982          * Let SGP_WRITE caller clear ends if write does not fill page;
1983          * but SGP_FALLOC on a page fallocated earlier must initialize
1984          * it now, lest undo on failure cancel our earlier guarantee.
1985          */
1986         if (sgp != SGP_WRITE && !PageUptodate(page)) {
1987                 int i;
1988
1989                 for (i = 0; i < compound_nr(page); i++) {
1990                         clear_highpage(page + i);
1991                         flush_dcache_page(page + i);
1992                 }
1993                 SetPageUptodate(page);
1994         }
1995
1996         /* Perhaps the file has been truncated since we checked */
1997         if (sgp <= SGP_CACHE &&
1998             ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1999                 if (alloced) {
2000                         ClearPageDirty(page);
2001                         delete_from_page_cache(page);
2002                         spin_lock_irq(&info->lock);
2003                         shmem_recalc_inode(inode);
2004                         spin_unlock_irq(&info->lock);
2005                 }
2006                 error = -EINVAL;
2007                 goto unlock;
2008         }
2009 out:
2010         *pagep = page + index - hindex;
2011         return 0;
2012
2013         /*
2014          * Error recovery.
2015          */
2016 unacct:
2017         shmem_inode_unacct_blocks(inode, compound_nr(page));
2018
2019         if (PageTransHuge(page)) {
2020                 unlock_page(page);
2021                 put_page(page);
2022                 goto alloc_nohuge;
2023         }
2024 unlock:
2025         if (page) {
2026                 unlock_page(page);
2027                 put_page(page);
2028         }
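             /*
             * On the first -ENOSPC, let shmem_recalc_inode() release the
             * block accounting of pages already freed under us, then retry.
             */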
2029         if (error == -ENOSPC && !once++) {
2030                 spin_lock_irq(&info->lock);
2031                 shmem_recalc_inode(inode);
2032                 spin_unlock_irq(&info->lock);
2033                 goto repeat;
2034         }
2035         if (error == -EEXIST)
2036                 goto repeat;
2037         return error;
2038 }
2039
2040 /*
2041  * This is like autoremove_wake_function, but it removes the wait queue
2042  * entry unconditionally - even if something else had already woken the
2043  * target.
2044  */
2045 static int synchronous_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
2046 {
2047         int ret = default_wake_function(wait, mode, sync, key);
2048         list_del_init(&wait->entry);
2049         return ret;
2050 }
2051
2052 static vm_fault_t shmem_fault(struct vm_fault *vmf)
2053 {
2054         struct vm_area_struct *vma = vmf->vma;
2055         struct inode *inode = file_inode(vma->vm_file);
2056         gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
2057         enum sgp_type sgp;
2058         int err;
2059         vm_fault_t ret = VM_FAULT_LOCKED;
2060
2061         /*
2062          * Trinity finds that probing a hole which tmpfs is punching can
2063          * prevent the hole-punch from ever completing: which in turn
2064          * locks writers out with its hold on i_mutex.  So refrain from
2065          * faulting pages into the hole while it's being punched.  Although
2066          * shmem_undo_range() does remove the additions, it may be unable to
2067          * keep up, as each new page needs its own unmap_mapping_range() call,
2068          * and the i_mmap tree grows ever slower to scan if new vmas are added.
2069          *
2070          * It does not matter if we sometimes reach this check just before the
2071          * hole-punch begins, so that one fault then races with the punch:
2072          * we just need to make racing faults a rare case.
2073          *
2074          * The implementation below would be much simpler if we just used a
2075          * standard mutex or completion: but we cannot take i_mutex in fault,
2076          * and bloating every shmem inode for this unlikely case would be sad.
2077          */
2078         if (unlikely(inode->i_private)) {
2079                 struct shmem_falloc *shmem_falloc;
2080
2081                 spin_lock(&inode->i_lock);
2082                 shmem_falloc = inode->i_private;
2083                 if (shmem_falloc &&
2084                     shmem_falloc->waitq &&
2085                     vmf->pgoff >= shmem_falloc->start &&
2086                     vmf->pgoff < shmem_falloc->next) {
2087                         struct file *fpin;
2088                         wait_queue_head_t *shmem_falloc_waitq;
2089                         DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function);
2090
2091                         ret = VM_FAULT_NOPAGE;
2092                         fpin = maybe_unlock_mmap_for_io(vmf, NULL);
2093                         if (fpin)
2094                                 ret = VM_FAULT_RETRY;
2095
2096                         shmem_falloc_waitq = shmem_falloc->waitq;
2097                         prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
2098                                         TASK_UNINTERRUPTIBLE);
2099                         spin_unlock(&inode->i_lock);
2100                         schedule();
2101
2102                         /*
2103                          * shmem_falloc_waitq points into the shmem_fallocate()
2104                          * stack of the hole-punching task: shmem_falloc_waitq
2105                          * is usually invalid by the time we reach here, but
2106                          * finish_wait() does not dereference it in that case;
2107                          * though i_lock needed lest racing with wake_up_all().
2108                          */
2109                         spin_lock(&inode->i_lock);
2110                         finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
2111                         spin_unlock(&inode->i_lock);
2112
2113                         if (fpin)
2114                                 fput(fpin);
2115                         return ret;
2116                 }
2117                 spin_unlock(&inode->i_lock);
2118         }
2119
2120         sgp = SGP_CACHE;
2121
2122         if ((vma->vm_flags & VM_NOHUGEPAGE) ||
2123             test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
2124                 sgp = SGP_NOHUGE;
2125         else if (vma->vm_flags & VM_HUGEPAGE)
2126                 sgp = SGP_HUGE;
2127
2128         err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, sgp,
2129                                   gfp, vma, vmf, &ret);
2130         if (err)
2131                 return vmf_error(err);
2132         return ret;
2133 }
2134
2135 unsigned long shmem_get_unmapped_area(struct file *file,
2136                                       unsigned long uaddr, unsigned long len,
2137                                       unsigned long pgoff, unsigned long flags)
2138 {
2139         unsigned long (*get_area)(struct file *,
2140                 unsigned long, unsigned long, unsigned long, unsigned long);
2141         unsigned long addr;
2142         unsigned long offset;
2143         unsigned long inflated_len;
2144         unsigned long inflated_addr;
2145         unsigned long inflated_offset;
2146
2147         if (len > TASK_SIZE)
2148                 return -ENOMEM;
2149
2150         get_area = current->mm->get_unmapped_area;
2151         addr = get_area(file, uaddr, len, pgoff, flags);
2152
2153         if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
2154                 return addr;
2155         if (IS_ERR_VALUE(addr))
2156                 return addr;
2157         if (addr & ~PAGE_MASK)
2158                 return addr;
2159         if (addr > TASK_SIZE - len)
2160                 return addr;
2161
2162         if (shmem_huge == SHMEM_HUGE_DENY)
2163                 return addr;
2164         if (len < HPAGE_PMD_SIZE)
2165                 return addr;
2166         if (flags & MAP_FIXED)
2167                 return addr;
2168         /*
2169          * Our priority is to support MAP_SHARED mapped hugely;
2170          * and support MAP_PRIVATE mapped hugely too, until it is COWed.
2171          * But if caller specified an address hint and we allocated area there
2172          * successfully, respect that as before.
2173          */
2174         if (uaddr == addr)
2175                 return addr;
2176
2177         if (shmem_huge != SHMEM_HUGE_FORCE) {
2178                 struct super_block *sb;
2179
2180                 if (file) {
2181                         VM_BUG_ON(file->f_op != &shmem_file_operations);
2182                         sb = file_inode(file)->i_sb;
2183                 } else {
2184                         /*
2185                          * Called directly from mm/mmap.c, or drivers/char/mem.c
2186                          * for "/dev/zero", to create a shared anonymous object.
2187                          */
2188                         if (IS_ERR(shm_mnt))
2189                                 return addr;
2190                         sb = shm_mnt->mnt_sb;
2191                 }
2192                 if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER)
2193                         return addr;
2194         }
2195
2196         offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1);
2197         if (offset && offset + len < 2 * HPAGE_PMD_SIZE)
2198                 return addr;
2199         if ((addr & (HPAGE_PMD_SIZE-1)) == offset)
2200                 return addr;
2201
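             /*
             * Overshoot the request by almost one huge page, so that below we
             * can slide the start forward to an address whose offset within a
             * huge page matches the file offset: huge-aligned file offsets
             * then map at huge-aligned virtual addresses.
             */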
2202         inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE;
2203         if (inflated_len > TASK_SIZE)
2204                 return addr;
2205         if (inflated_len < len)
2206                 return addr;
2207
2208         inflated_addr = get_area(NULL, uaddr, inflated_len, 0, flags);
2209         if (IS_ERR_VALUE(inflated_addr))
2210                 return addr;
2211         if (inflated_addr & ~PAGE_MASK)
2212                 return addr;
2213
2214         inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1);
2215         inflated_addr += offset - inflated_offset;
2216         if (inflated_offset > offset)
2217                 inflated_addr += HPAGE_PMD_SIZE;
2218
2219         if (inflated_addr > TASK_SIZE - len)
2220                 return addr;
2221         return inflated_addr;
2222 }
2223
2224 #ifdef CONFIG_NUMA
2225 static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
2226 {
2227         struct inode *inode = file_inode(vma->vm_file);
2228         return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
2229 }
2230
2231 static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
2232                                           unsigned long addr)
2233 {
2234         struct inode *inode = file_inode(vma->vm_file);
2235         pgoff_t index;
2236
2237         index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
2238         return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
2239 }
2240 #endif
2241
2242 int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
2243 {
2244         struct inode *inode = file_inode(file);
2245         struct shmem_inode_info *info = SHMEM_I(inode);
2246         int retval = -ENOMEM;
2247
2248         /*
2249          * What serializes the accesses to info->flags?
2250          * ipc_lock_object() when called from shmctl_do_lock(),
2251          * no serialization needed when called from shm_destroy().
2252          */
2253         if (lock && !(info->flags & VM_LOCKED)) {
2254                 if (!user_shm_lock(inode->i_size, ucounts))
2255                         goto out_nomem;
2256                 info->flags |= VM_LOCKED;
2257                 mapping_set_unevictable(file->f_mapping);
2258         }
2259         if (!lock && (info->flags & VM_LOCKED) && ucounts) {
2260                 user_shm_unlock(inode->i_size, ucounts);
2261                 info->flags &= ~VM_LOCKED;
2262                 mapping_clear_unevictable(file->f_mapping);
2263         }
2264         retval = 0;
2265
2266 out_nomem:
2267         return retval;
2268 }
2269
2270 static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
2271 {
2272         struct shmem_inode_info *info = SHMEM_I(file_inode(file));
2273         int ret;
2274
2275         ret = seal_check_future_write(info->seals, vma);
2276         if (ret)
2277                 return ret;
2278
2279         /* arm64 - allow memory tagging on RAM-based files */
2280         vma->vm_flags |= VM_MTE_ALLOWED;
2281
2282         file_accessed(file);
2283         vma->vm_ops = &shmem_vm_ops;
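             /*
             * Register with khugepaged if the mapping is large and aligned
             * enough to contain at least one PMD-sized huge page.
             */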
2284         if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
2285                         ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
2286                         (vma->vm_end & HPAGE_PMD_MASK)) {
2287                 khugepaged_enter(vma, vma->vm_flags);
2288         }
2289         return 0;
2290 }
2291
2292 static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
2293                                      umode_t mode, dev_t dev, unsigned long flags)
2294 {
2295         struct inode *inode;
2296         struct shmem_inode_info *info;
2297         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2298         ino_t ino;
2299
2300         if (shmem_reserve_inode(sb, &ino))
2301                 return NULL;
2302
2303         inode = new_inode(sb);
2304         if (inode) {
2305                 inode->i_ino = ino;
2306                 inode_init_owner(&init_user_ns, inode, dir, mode);
2307                 inode->i_blocks = 0;
2308                 inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
2309                 inode->i_generation = prandom_u32();
2310                 info = SHMEM_I(inode);
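                     /*
                     * Zero only the shmem-specific fields, which precede the
                     * embedded vfs_inode already initialized by new_inode().
                     */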
2311                 memset(info, 0, (char *)inode - (char *)info);
2312                 spin_lock_init(&info->lock);
2313                 atomic_set(&info->stop_eviction, 0);
2314                 info->seals = F_SEAL_SEAL;
2315                 info->flags = flags & VM_NORESERVE;
2316                 INIT_LIST_HEAD(&info->shrinklist);
2317                 INIT_LIST_HEAD(&info->swaplist);
2318                 simple_xattrs_init(&info->xattrs);
2319                 cache_no_acl(inode);
2320
2321                 switch (mode & S_IFMT) {
2322                 default:
2323                         inode->i_op = &shmem_special_inode_operations;
2324                         init_special_inode(inode, mode, dev);
2325                         break;
2326                 case S_IFREG:
2327                         inode->i_mapping->a_ops = &shmem_aops;
2328                         inode->i_op = &shmem_inode_operations;
2329                         inode->i_fop = &shmem_file_operations;
2330                         mpol_shared_policy_init(&info->policy,
2331                                                  shmem_get_sbmpol(sbinfo));
2332                         break;
2333                 case S_IFDIR:
2334                         inc_nlink(inode);
2335                         /* Some things misbehave if size == 0 on a directory */
2336                         inode->i_size = 2 * BOGO_DIRENT_SIZE;
2337                         inode->i_op = &shmem_dir_inode_operations;
2338                         inode->i_fop = &simple_dir_operations;
2339                         break;
2340                 case S_IFLNK:
2341                         /*
2342                          * Must not load anything in the rbtree,
2343                          * mpol_free_shared_policy will not be called.
2344                          */
2345                         mpol_shared_policy_init(&info->policy, NULL);
2346                         break;
2347                 }
2348
2349                 lockdep_annotate_inode_mutex_key(inode);
2350         } else
2351                 shmem_free_inode(sb);
2352         return inode;
2353 }
2354
2355 static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
2356                                   pmd_t *dst_pmd,
2357                                   struct vm_area_struct *dst_vma,
2358                                   unsigned long dst_addr,
2359                                   unsigned long src_addr,
2360                                   bool zeropage,
2361                                   struct page **pagep)
2362 {
2363         struct inode *inode = file_inode(dst_vma->vm_file);
2364         struct shmem_inode_info *info = SHMEM_I(inode);
2365         struct address_space *mapping = inode->i_mapping;
2366         gfp_t gfp = mapping_gfp_mask(mapping);
2367         pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
2368         spinlock_t *ptl;
2369         void *page_kaddr;
2370         struct page *page;
2371         pte_t _dst_pte, *dst_pte;
2372         int ret;
2373         pgoff_t offset, max_off;
2374
2375         ret = -ENOMEM;
2376         if (!shmem_inode_acct_block(inode, 1)) {
2377                 /*
2378                  * We may have got a page, returned -ENOENT triggering a retry,
2379                  * and now we find ourselves with -ENOMEM. Release the page, to
2380                  * avoid a BUG_ON in our caller.
2381                  */
2382                 if (unlikely(*pagep)) {
2383                         put_page(*pagep);
2384                         *pagep = NULL;
2385                 }
2386                 goto out;
2387         }
2388
2389         if (!*pagep) {
2390                 page = shmem_alloc_page(gfp, info, pgoff);
2391                 if (!page)
2392                         goto out_unacct_blocks;
2393
2394                 if (!zeropage) {        /* mcopy_atomic */
2395                         page_kaddr = kmap_atomic(page);
2396                         ret = copy_from_user(page_kaddr,
2397                                              (const void __user *)src_addr,
2398                                              PAGE_SIZE);
2399                         kunmap_atomic(page_kaddr);
2400
2401                         /* fallback to copy_from_user outside mmap_lock */
2402                         if (unlikely(ret)) {
2403                                 *pagep = page;
2404                                 shmem_inode_unacct_blocks(inode, 1);
2405                                 /* don't free the page */
2406                                 return -ENOENT;
2407                         }
2408                 } else {                /* mfill_zeropage_atomic */
2409                         clear_highpage(page);
2410                 }
2411         } else {
2412                 page = *pagep;
2413                 *pagep = NULL;
2414         }
2415
2416         VM_BUG_ON(PageLocked(page) || PageSwapBacked(page));
2417         __SetPageLocked(page);
2418         __SetPageSwapBacked(page);
2419         __SetPageUptodate(page);
2420
2421         ret = -EFAULT;
2422         offset = linear_page_index(dst_vma, dst_addr);
2423         max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
2424         if (unlikely(offset >= max_off))
2425                 goto out_release;
2426
2427         ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL,
2428                                       gfp & GFP_RECLAIM_MASK, dst_mm);
2429         if (ret)
2430                 goto out_release;
2431
2432         _dst_pte = mk_pte(page, dst_vma->vm_page_prot);
2433         if (dst_vma->vm_flags & VM_WRITE)
2434                 _dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte));
2435         else {
2436                 /*
2437                  * We don't set the pte dirty if the vma has no
2438                  * VM_WRITE permission, so mark the page dirty or it
2439                  * could be freed from under us. We could do it
2440                  * unconditionally before unlock_page(), but doing it
2441                  * only if VM_WRITE is not set is faster.
2442                  */
2443                 set_page_dirty(page);
2444         }
2445
2446         dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
2447
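             /* Recheck i_size under the PTE lock: truncate may have raced with us. */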
2448         ret = -EFAULT;
2449         max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
2450         if (unlikely(offset >= max_off))
2451                 goto out_release_unlock;
2452
2453         ret = -EEXIST;
2454         if (!pte_none(*dst_pte))
2455                 goto out_release_unlock;
2456
2457         lru_cache_add(page);
2458
2459         spin_lock_irq(&info->lock);
2460         info->alloced++;
2461         inode->i_blocks += BLOCKS_PER_PAGE;
2462         shmem_recalc_inode(inode);
2463         spin_unlock_irq(&info->lock);
2464
2465         inc_mm_counter(dst_mm, mm_counter_file(page));
2466         page_add_file_rmap(page, false);
2467         set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
2468
2469         /* No need to invalidate - it was non-present before */
2470         update_mmu_cache(dst_vma, dst_addr, dst_pte);
2471         pte_unmap_unlock(dst_pte, ptl);
2472         unlock_page(page);
2473         ret = 0;
2474 out:
2475         return ret;
2476 out_release_unlock:
2477         pte_unmap_unlock(dst_pte, ptl);
2478         ClearPageDirty(page);
2479         delete_from_page_cache(page);
2480 out_release:
2481         unlock_page(page);
2482         put_page(page);
2483 out_unacct_blocks:
2484         shmem_inode_unacct_blocks(inode, 1);
2485         goto out;
2486 }
2487
2488 int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm,
2489                            pmd_t *dst_pmd,
2490                            struct vm_area_struct *dst_vma,
2491                            unsigned long dst_addr,
2492                            unsigned long src_addr,
2493                            struct page **pagep)
2494 {
2495         return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
2496                                       dst_addr, src_addr, false, pagep);
2497 }
2498
2499 int shmem_mfill_zeropage_pte(struct mm_struct *dst_mm,
2500                              pmd_t *dst_pmd,
2501                              struct vm_area_struct *dst_vma,
2502                              unsigned long dst_addr)
2503 {
2504         struct page *page = NULL;
2505
2506         return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
2507                                       dst_addr, 0, true, &page);
2508 }
2509
2510 #ifdef CONFIG_TMPFS
2511 static const struct inode_operations shmem_symlink_inode_operations;
2512 static const struct inode_operations shmem_short_symlink_operations;
2513
2514 #ifdef CONFIG_TMPFS_XATTR
2515 static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
2516 #else
2517 #define shmem_initxattrs NULL
2518 #endif
2519
2520 static int
2521 shmem_write_begin(struct file *file, struct address_space *mapping,
2522                         loff_t pos, unsigned len, unsigned flags,
2523                         struct page **pagep, void **fsdata)
2524 {
2525         struct inode *inode = mapping->host;
2526         struct shmem_inode_info *info = SHMEM_I(inode);
2527         pgoff_t index = pos >> PAGE_SHIFT;
2528
2529         /* i_mutex is held by caller */
2530         if (unlikely(info->seals & (F_SEAL_GROW |
2531                                    F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) {
2532                 if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))
2533                         return -EPERM;
2534                 if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
2535                         return -EPERM;
2536         }
2537
2538         return shmem_getpage(inode, index, pagep, SGP_WRITE);
2539 }
2540
2541 static int
2542 shmem_write_end(struct file *file, struct address_space *mapping,
2543                         loff_t pos, unsigned len, unsigned copied,
2544                         struct page *page, void *fsdata)
2545 {
2546         struct inode *inode = mapping->host;
2547
2548         if (pos + copied > inode->i_size)
2549                 i_size_write(inode, pos + copied);
2550
2551         if (!PageUptodate(page)) {
2552                 struct page *head = compound_head(page);
2553                 if (PageTransCompound(page)) {
2554                         int i;
2555
2556                         for (i = 0; i < HPAGE_PMD_NR; i++) {
2557                                 if (head + i == page)
2558                                         continue;
2559                                 clear_highpage(head + i);
2560                                 flush_dcache_page(head + i);
2561                         }
2562                 }
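                     /* Zero whatever part of the page the copy did not fill
                     * before marking it uptodate. */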
2563                 if (copied < PAGE_SIZE) {
2564                         unsigned from = pos & (PAGE_SIZE - 1);
2565                         zero_user_segments(page, 0, from,
2566                                         from + copied, PAGE_SIZE);
2567                 }
2568                 SetPageUptodate(head);
2569         }
2570         set_page_dirty(page);
2571         unlock_page(page);
2572         put_page(page);
2573
2574         return copied;
2575 }
2576
2577 static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
2578 {
2579         struct file *file = iocb->ki_filp;
2580         struct inode *inode = file_inode(file);
2581         struct address_space *mapping = inode->i_mapping;
2582         pgoff_t index;
2583         unsigned long offset;
2584         enum sgp_type sgp = SGP_READ;
2585         int error = 0;
2586         ssize_t retval = 0;
2587         loff_t *ppos = &iocb->ki_pos;
2588
2589         /*
2590          * Might this read be for a stacking filesystem?  Then when reading
2591          * holes of a sparse file, we actually need to allocate those pages,
2592          * and even mark them dirty, so it cannot exceed the max_blocks limit.
2593          */
2594         if (!iter_is_iovec(to))
2595                 sgp = SGP_CACHE;
2596
2597         index = *ppos >> PAGE_SHIFT;
2598         offset = *ppos & ~PAGE_MASK;
2599
2600         for (;;) {
2601                 struct page *page = NULL;
2602                 pgoff_t end_index;
2603                 unsigned long nr, ret;
2604                 loff_t i_size = i_size_read(inode);
2605
2606                 end_index = i_size >> PAGE_SHIFT;
2607                 if (index > end_index)
2608                         break;
2609                 if (index == end_index) {
2610                         nr = i_size & ~PAGE_MASK;
2611                         if (nr <= offset)
2612                                 break;
2613                 }
2614
2615                 error = shmem_getpage(inode, index, &page, sgp);
2616                 if (error) {
2617                         if (error == -EINVAL)
2618                                 error = 0;
2619                         break;
2620                 }
2621                 if (page) {
2622                         if (sgp == SGP_CACHE)
2623                                 set_page_dirty(page);
2624                         unlock_page(page);
2625                 }
2626
2627                 /*
2628                  * We must evaluate after, since reads (unlike writes)
2629                  * are called without i_mutex protection against truncate
2630                  */
2631                 nr = PAGE_SIZE;
2632                 i_size = i_size_read(inode);
2633                 end_index = i_size >> PAGE_SHIFT;
2634                 if (index == end_index) {
2635                         nr = i_size & ~PAGE_MASK;
2636                         if (nr <= offset) {
2637                                 if (page)
2638                                         put_page(page);
2639                                 break;
2640                         }
2641                 }
2642                 nr -= offset;
2643
2644                 if (page) {
2645                         /*
2646                          * If users can be writing to this page using arbitrary
2647                          * virtual addresses, take care about potential aliasing
2648                          * before reading the page on the kernel side.
2649                          */
2650                         if (mapping_writably_mapped(mapping))
2651                                 flush_dcache_page(page);
2652                         /*
2653                          * Mark the page accessed if we read the beginning.
2654                          */
2655                         if (!offset)
2656                                 mark_page_accessed(page);
2657                 } else {
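                             /* A hole read back no page: copy zeros from the shared zero page. */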
2658                         page = ZERO_PAGE(0);
2659                         get_page(page);
2660                 }
2661
2662                 /*
2663                  * Ok, we have the page, and it's up-to-date, so
2664                  * now we can copy it to user space...
2665                  */
2666                 ret = copy_page_to_iter(page, offset, nr, to);
2667                 retval += ret;
2668                 offset += ret;
2669                 index += offset >> PAGE_SHIFT;
2670                 offset &= ~PAGE_MASK;
2671
2672                 put_page(page);
2673                 if (!iov_iter_count(to))
2674                         break;
2675                 if (ret < nr) {
2676                         error = -EFAULT;
2677                         break;
2678                 }
2679                 cond_resched();
2680         }
2681
2682         *ppos = ((loff_t) index << PAGE_SHIFT) + offset;
2683         file_accessed(file);
2684         return retval ? retval : error;
2685 }
2686
2687 static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
2688 {
2689         struct address_space *mapping = file->f_mapping;
2690         struct inode *inode = mapping->host;
2691
2692         if (whence != SEEK_DATA && whence != SEEK_HOLE)
2693                 return generic_file_llseek_size(file, offset, whence,
2694                                         MAX_LFS_FILESIZE, i_size_read(inode));
2695         if (offset < 0)
2696                 return -ENXIO;
2697
2698         inode_lock(inode);
2699         /* We're holding i_mutex so we can access i_size directly */
2700         offset = mapping_seek_hole_data(mapping, offset, inode->i_size, whence);
2701         if (offset >= 0)
2702                 offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
2703         inode_unlock(inode);
2704         return offset;
2705 }
2706
2707 static long shmem_fallocate(struct file *file, int mode, loff_t offset,
2708                                                          loff_t len)
2709 {
2710         struct inode *inode = file_inode(file);
2711         struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
2712         struct shmem_inode_info *info = SHMEM_I(inode);
2713         struct shmem_falloc shmem_falloc;
2714         pgoff_t start, index, end;
2715         int error;
2716
2717         if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2718                 return -EOPNOTSUPP;
2719
2720         inode_lock(inode);
2721
2722         if (mode & FALLOC_FL_PUNCH_HOLE) {
2723                 struct address_space *mapping = file->f_mapping;
2724                 loff_t unmap_start = round_up(offset, PAGE_SIZE);
2725                 loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
2726                 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
2727
2728                 /* protected by i_mutex */
2729                 if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
2730                         error = -EPERM;
2731                         goto out;
2732                 }
2733
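                     /*
                     * Publish the punched range: shmem_fault() will make
                     * racing faults in [start, next) wait on this waitq
                     * until we are finished.
                     */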
2734                 shmem_falloc.waitq = &shmem_falloc_waitq;
2735                 shmem_falloc.start = (u64)unmap_start >> PAGE_SHIFT;
2736                 shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
2737                 spin_lock(&inode->i_lock);
2738                 inode->i_private = &shmem_falloc;
2739                 spin_unlock(&inode->i_lock);
2740
2741                 if ((u64)unmap_end > (u64)unmap_start)
2742                         unmap_mapping_range(mapping, unmap_start,
2743                                             1 + unmap_end - unmap_start, 0);
2744                 shmem_truncate_range(inode, offset, offset + len - 1);
2745                 /* No need to unmap again: hole-punching leaves COWed pages */
2746
2747                 spin_lock(&inode->i_lock);
2748                 inode->i_private = NULL;
2749                 wake_up_all(&shmem_falloc_waitq);
2750                 WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head));
2751                 spin_unlock(&inode->i_lock);
2752                 error = 0;
2753                 goto out;
2754         }
2755
2756         /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
2757         error = inode_newsize_ok(inode, offset + len);
2758         if (error)
2759                 goto out;
2760
2761         if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
2762                 error = -EPERM;
2763                 goto out;
2764         }
2765
2766         start = offset >> PAGE_SHIFT;
2767         end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
2768         /* Try to avoid a swapstorm if len is impossible to satisfy */
2769         if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
2770                 error = -ENOSPC;
2771                 goto out;
2772         }
2773
2774         shmem_falloc.waitq = NULL;
2775         shmem_falloc.start = start;
2776         shmem_falloc.next  = start;
2777         shmem_falloc.nr_falloced = 0;
2778         shmem_falloc.nr_unswapped = 0;
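             /*
             * nr_falloced counts pages we instantiate below; shmem_writepage()
             * bumps nr_unswapped when reclaim pushes those same pages straight
             * back out to swap, which is our cue to give up with -ENOMEM.
             */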
2779         spin_lock(&inode->i_lock);
2780         inode->i_private = &shmem_falloc;
2781         spin_unlock(&inode->i_lock);
2782
2783         for (index = start; index < end; index++) {
2784                 struct page *page;
2785
2786                 /*
2787                  * Good, the fallocate(2) manpage permits EINTR: we may have
2788                  * been interrupted because we are using up too much memory.
2789                  */
2790                 if (signal_pending(current))
2791                         error = -EINTR;
2792                 else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
2793                         error = -ENOMEM;
2794                 else
2795                         error = shmem_getpage(inode, index, &page, SGP_FALLOC);
2796                 if (error) {
2797                         /* Remove the !PageUptodate pages we added */
2798                         if (index > start) {
2799                                 shmem_undo_range(inode,
2800                                     (loff_t)start << PAGE_SHIFT,
2801                                     ((loff_t)index << PAGE_SHIFT) - 1, true);
2802                         }
2803                         goto undone;
2804                 }
2805
2806                 /*
2807                  * Inform shmem_writepage() how far we have reached.
2808                  * No need for lock or barrier: we have the page lock.
2809                  */
2810                 shmem_falloc.next++;
2811                 if (!PageUptodate(page))
2812                         shmem_falloc.nr_falloced++;
2813
2814                 /*
2815                  * If !PageUptodate, leave it that way so that freeable pages
2816                  * can be recognized if we need to rollback on error later.
2817                  * But set_page_dirty so that memory pressure will swap rather
2818                  * than free the pages we are allocating (and SGP_CACHE pages
2819                  * might still be clean: we now need to mark those dirty too).
2820                  */
2821                 set_page_dirty(page);
2822                 unlock_page(page);
2823                 put_page(page);
2824                 cond_resched();
2825         }
2826
2827         if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
2828                 i_size_write(inode, offset + len);
2829         inode->i_ctime = current_time(inode);
2830 undone:
2831         spin_lock(&inode->i_lock);
2832         inode->i_private = NULL;
2833         spin_unlock(&inode->i_lock);
2834 out:
2835         inode_unlock(inode);
2836         return error;
2837 }
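
/*
 * The loop above may return -EINTR when a pending signal interrupts a long
 * preallocation, as the comment notes fallocate(2) permits.  A minimal
 * userspace sketch (path and length are hypothetical) of a caller that
 * simply retries on EINTR:
 *
 *	#define _GNU_SOURCE
 *	#include <errno.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int prealloc_tmpfs_file(const char *path, off_t len)
 *	{
 *		int fd = open(path, O_RDWR | O_CREAT, 0600);
 *		int ret;
 *
 *		if (fd < 0)
 *			return -1;
 *		do {
 *			// mode 0: allocate blocks and extend i_size if needed
 *			ret = fallocate(fd, 0, 0, len);
 *		} while (ret != 0 && errno == EINTR);
 *		close(fd);
 *		return ret;
 *	}
 */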
2838
2839 static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
2840 {
2841         struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
2842
2843         buf->f_type = TMPFS_MAGIC;
2844         buf->f_bsize = PAGE_SIZE;
2845         buf->f_namelen = NAME_MAX;
2846         if (sbinfo->max_blocks) {
2847                 buf->f_blocks = sbinfo->max_blocks;
2848                 buf->f_bavail =
2849                 buf->f_bfree  = sbinfo->max_blocks -
2850                                 percpu_counter_sum(&sbinfo->used_blocks);
2851         }
2852         if (sbinfo->max_inodes) {
2853                 buf->f_files = sbinfo->max_inodes;
2854                 buf->f_ffree = sbinfo->free_inodes;
2855         }
2856         /* else leave those fields 0 like simple_statfs */
2857
2858         buf->f_fsid = uuid_to_fsid(dentry->d_sb->s_uuid.b);
2859
2860         return 0;
2861 }
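
/*
 * shmem_statfs() above fills f_blocks/f_bfree/f_bavail only when the mount
 * is size-limited; an unlimited (e.g. internal) mount leaves them zero, as
 * simple_statfs does.  A small userspace sketch (mount point hypothetical)
 * reading those values back with statfs(2):
 *
 *	#include <stdio.h>
 *	#include <sys/vfs.h>
 *
 *	int main(void)
 *	{
 *		struct statfs st;
 *
 *		if (statfs("/dev/shm", &st) != 0)
 *			return 1;
 *		printf("bsize=%ld blocks=%llu bfree=%llu bavail=%llu\n",
 *		       (long)st.f_bsize,
 *		       (unsigned long long)st.f_blocks,
 *		       (unsigned long long)st.f_bfree,
 *		       (unsigned long long)st.f_bavail);
 *		return 0;
 *	}
 */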
2862
2863 /*
2864  * File creation. Allocate an inode, and we're done.
2865  */
2866 static int
2867 shmem_mknod(struct user_namespace *mnt_userns, struct inode *dir,
2868             struct dentry *dentry, umode_t mode, dev_t dev)
2869 {
2870         struct inode *inode;
2871         int error = -ENOSPC;
2872
2873         inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
2874         if (inode) {
2875                 error = simple_acl_create(dir, inode);
2876                 if (error)
2877                         goto out_iput;
2878                 error = security_inode_init_security(inode, dir,
2879                                                      &dentry->d_name,
2880                                                      shmem_initxattrs, NULL);
2881                 if (error && error != -EOPNOTSUPP)
2882                         goto out_iput;
2883
2884                 error = 0;
2885                 dir->i_size += BOGO_DIRENT_SIZE;
2886                 dir->i_ctime = dir->i_mtime = current_time(dir);
2887                 d_instantiate(dentry, inode);
2888                 dget(dentry); /* Extra count - pin the dentry in core */
2889         }
2890         return error;
2891 out_iput:
2892         iput(inode);
2893         return error;
2894 }
2895
2896 static int
2897 shmem_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
2898               struct dentry *dentry, umode_t mode)
2899 {
2900         struct inode *inode;
2901         int error = -ENOSPC;
2902
2903         inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE);
2904         if (inode) {
2905                 error = security_inode_init_security(inode, dir,
2906                                                      NULL,
2907                                                      shmem_initxattrs, NULL);
2908                 if (error && error != -EOPNOTSUPP)
2909                         goto out_iput;
2910                 error = simple_acl_create(dir, inode);
2911                 if (error)
2912                         goto out_iput;
2913                 d_tmpfile(dentry, inode);
2914         }
2915         return error;
2916 out_iput:
2917         iput(inode);
2918         return error;
2919 }
2920
2921 static int shmem_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
2922                        struct dentry *dentry, umode_t mode)
2923 {
2924         int error;
2925
2926         if ((error = shmem_mknod(&init_user_ns, dir, dentry,
2927                                  mode | S_IFDIR, 0)))
2928                 return error;
2929         inc_nlink(dir);
2930         return 0;
2931 }
2932
2933 static int shmem_create(struct user_namespace *mnt_userns, struct inode *dir,
2934                         struct dentry *dentry, umode_t mode, bool excl)
2935 {
2936         return shmem_mknod(&init_user_ns, dir, dentry, mode | S_IFREG, 0);
2937 }
2938
2939 /*
2940  * Link a file.
2941  */
2942 static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
2943 {
2944         struct inode *inode = d_inode(old_dentry);
2945         int ret = 0;
2946
2947         /*
2948          * No ordinary (disk based) filesystem counts links as inodes;
2949          * but each new link needs a new dentry, pinning lowmem, and
2950          * tmpfs dentries cannot be pruned until they are unlinked.
2951          * But if an O_TMPFILE file is linked into the tmpfs, the
2952          * first link must skip that, to get the accounting right.
2953          */
2954         if (inode->i_nlink) {
2955                 ret = shmem_reserve_inode(inode->i_sb, NULL);
2956                 if (ret)
2957                         goto out;
2958         }
2959
2960         dir->i_size += BOGO_DIRENT_SIZE;
2961         inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
2962         inc_nlink(inode);
2963         ihold(inode);   /* New dentry reference */
2964         dget(dentry);           /* Extra pinning count for the created dentry */
2965         d_instantiate(dentry, inode);
2966 out:
2967         return ret;
2968 }
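
/*
 * The i_nlink == 0 case above is the O_TMPFILE path mentioned in the
 * comment: the file was created unnamed by shmem_tmpfile() and is being
 * given its first name here.  A minimal userspace sketch (paths are
 * hypothetical, /mnt assumed to be a tmpfs mount):
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int make_tmpfile_visible(void)
 *	{
 *		char proc[64];
 *		int fd = open("/mnt", O_TMPFILE | O_RDWR, 0600);
 *
 *		if (fd < 0)
 *			return -1;
 *		// Works without privilege as long as O_EXCL was not used.
 *		snprintf(proc, sizeof(proc), "/proc/self/fd/%d", fd);
 *		if (linkat(AT_FDCWD, proc, AT_FDCWD, "/mnt/now-visible",
 *			   AT_SYMLINK_FOLLOW) != 0) {
 *			close(fd);
 *			return -1;
 *		}
 *		close(fd);
 *		return 0;
 *	}
 */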
2969
2970 static int shmem_unlink(struct inode *dir, struct dentry *dentry)
2971 {
2972         struct inode *inode = d_inode(dentry);
2973
2974         if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
2975                 shmem_free_inode(inode->i_sb);
2976
2977         dir->i_size -= BOGO_DIRENT_SIZE;
2978         inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
2979         drop_nlink(inode);
2980         dput(dentry);   /* Undo the count from "create" - this does all the work */
2981         return 0;
2982 }
2983
2984 static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
2985 {
2986         if (!simple_empty(dentry))
2987                 return -ENOTEMPTY;
2988
2989         drop_nlink(d_inode(dentry));
2990         drop_nlink(dir);
2991         return shmem_unlink(dir, dentry);
2992 }
2993
2994 static int shmem_exchange(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
2995 {
2996         bool old_is_dir = d_is_dir(old_dentry);
2997         bool new_is_dir = d_is_dir(new_dentry);
2998
2999         if (old_dir != new_dir && old_is_dir != new_is_dir) {
3000                 if (old_is_dir) {
3001                         drop_nlink(old_dir);
3002                         inc_nlink(new_dir);
3003                 } else {
3004                         drop_nlink(new_dir);
3005                         inc_nlink(old_dir);
3006                 }
3007         }
3008         old_dir->i_ctime = old_dir->i_mtime =
3009         new_dir->i_ctime = new_dir->i_mtime =
3010         d_inode(old_dentry)->i_ctime =
3011         d_inode(new_dentry)->i_ctime = current_time(old_dir);
3012
3013         return 0;
3014 }
3015
3016 static int shmem_whiteout(struct user_namespace *mnt_userns,
3017                           struct inode *old_dir, struct dentry *old_dentry)
3018 {
3019         struct dentry *whiteout;
3020         int error;
3021
3022         whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name);
3023         if (!whiteout)
3024                 return -ENOMEM;
3025
3026         error = shmem_mknod(&init_user_ns, old_dir, whiteout,
3027                             S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
3028         dput(whiteout);
3029         if (error)
3030                 return error;
3031
3032         /*
3033          * Cheat and hash the whiteout while the old dentry is still in
3034          * place, instead of playing games with FS_RENAME_DOES_D_MOVE.
3035          *
3036          * d_lookup() will consistently find one of them at this point,
3037          * not sure which one, but that isn't even important.
3038          */
3039         d_rehash(whiteout);
3040         return 0;
3041 }
3042
3043 /*
3044  * The VFS layer already does all the dentry stuff for rename,
3045  * we just have to decrement the usage count for the target if
3046  * it exists so that the VFS layer correctly frees it when it
3047  * gets overwritten.
3048  */
3049 static int shmem_rename2(struct user_namespace *mnt_userns,
3050                          struct inode *old_dir, struct dentry *old_dentry,
3051                          struct inode *new_dir, struct dentry *new_dentry,
3052                          unsigned int flags)
3053 {
3054         struct inode *inode = d_inode(old_dentry);
3055         int they_are_dirs = S_ISDIR(inode->i_mode);
3056
3057         if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
3058                 return -EINVAL;
3059
3060         if (flags & RENAME_EXCHANGE)
3061                 return shmem_exchange(old_dir, old_dentry, new_dir, new_dentry);
3062
3063         if (!simple_empty(new_dentry))
3064                 return -ENOTEMPTY;
3065
3066         if (flags & RENAME_WHITEOUT) {
3067                 int error;
3068
3069                 error = shmem_whiteout(&init_user_ns, old_dir, old_dentry);
3070                 if (error)
3071                         return error;
3072         }
3073
3074         if (d_really_is_positive(new_dentry)) {
3075                 (void) shmem_unlink(new_dir, new_dentry);
3076                 if (they_are_dirs) {
3077                         drop_nlink(d_inode(new_dentry));
3078                         drop_nlink(old_dir);
3079                 }
3080         } else if (they_are_dirs) {
3081                 drop_nlink(old_dir);
3082                 inc_nlink(new_dir);
3083         }
3084
3085         old_dir->i_size -= BOGO_DIRENT_SIZE;
3086         new_dir->i_size += BOGO_DIRENT_SIZE;
3087         old_dir->i_ctime = old_dir->i_mtime =
3088         new_dir->i_ctime = new_dir->i_mtime =
3089         inode->i_ctime = current_time(old_dir);
3090         return 0;
3091 }
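
/*
 * The flag handling above follows renameat2(2): RENAME_NOREPLACE refuses to
 * clobber an existing target, RENAME_EXCHANGE swaps the two entries, and
 * RENAME_WHITEOUT (used chiefly by overlayfs) leaves a whiteout behind.  A
 * small userspace sketch (paths hypothetical; the RENAME_* constants need
 * _GNU_SOURCE with glibc >= 2.28, or <linux/fs.h>) exercising the exchange
 * case:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *
 *	int swap_entries(void)
 *	{
 *		if (renameat2(AT_FDCWD, "/mnt/a", AT_FDCWD, "/mnt/b",
 *			      RENAME_EXCHANGE) != 0) {
 *			perror("renameat2");
 *			return -1;
 *		}
 *		return 0;
 *	}
 */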
3092
3093 static int shmem_symlink(struct user_namespace *mnt_userns, struct inode *dir,
3094                          struct dentry *dentry, const char *symname)
3095 {
3096         int error;
3097         int len;
3098         struct inode *inode;
3099         struct page *page;
3100
3101         len = strlen(symname) + 1;
3102         if (len > PAGE_SIZE)
3103                 return -ENAMETOOLONG;
3104
3105         inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK | 0777, 0,
3106                                 VM_NORESERVE);
3107         if (!inode)
3108                 return -ENOSPC;
3109
3110         error = security_inode_init_security(inode, dir, &dentry->d_name,
3111                                              shmem_initxattrs, NULL);
3112         if (error && error != -EOPNOTSUPP) {
3113                 iput(inode);
3114                 return error;
3115         }
3116
3117         inode->i_size = len-1;
3118         if (len <= SHORT_SYMLINK_LEN) {
3119                 inode->i_link = kmemdup(symname, len, GFP_KERNEL);
3120                 if (!inode->i_link) {
3121                         iput(inode);
3122                         return -ENOMEM;
3123                 }
3124                 inode->i_op = &shmem_short_symlink_operations;
3125         } else {
3126                 inode_nohighmem(inode);
3127                 error = shmem_getpage(inode, 0, &page, SGP_WRITE);
3128                 if (error) {
3129                         iput(inode);
3130                         return error;
3131                 }
3132                 inode->i_mapping->a_ops = &shmem_aops;
3133                 inode->i_op = &shmem_symlink_inode_operations;
3134                 memcpy(page_address(page), symname, len);
3135                 SetPageUptodate(page);
3136                 set_page_dirty(page);
3137                 unlock_page(page);
3138                 put_page(page);
3139         }
3140         dir->i_size += BOGO_DIRENT_SIZE;
3141         dir->i_ctime = dir->i_mtime = current_time(dir);
3142         d_instantiate(dentry, inode);
3143         dget(dentry);
3144         return 0;
3145 }
3146
3147 static void shmem_put_link(void *arg)
3148 {
3149         mark_page_accessed(arg);
3150         put_page(arg);
3151 }
3152
3153 static const char *shmem_get_link(struct dentry *dentry,
3154                                   struct inode *inode,
3155                                   struct delayed_call *done)
3156 {
3157         struct page *page = NULL;
3158         int error;
3159         if (!dentry) {
3160                 page = find_get_page(inode->i_mapping, 0);
3161                 if (!page)
3162                         return ERR_PTR(-ECHILD);
3163                 if (!PageUptodate(page)) {
3164                         put_page(page);
3165                         return ERR_PTR(-ECHILD);
3166                 }
3167         } else {
3168                 error = shmem_getpage(inode, 0, &page, SGP_READ);
3169                 if (error)
3170                         return ERR_PTR(error);
3171                 unlock_page(page);
3172         }
3173         set_delayed_call(done, shmem_put_link, page);
3174         return page_address(page);
3175 }
3176
3177 #ifdef CONFIG_TMPFS_XATTR
3178 /*
3179  * Superblocks without xattr inode operations may get some security.* xattr
3180  * support from the LSM "for free". As soon as we have any other xattrs
3181  * like ACLs, we also need to implement the security.* handlers at
3182  * filesystem level, though.
3183  */
3184
3185 /*
3186  * Callback for security_inode_init_security() for acquiring xattrs.
3187  */
3188 static int shmem_initxattrs(struct inode *inode,
3189                             const struct xattr *xattr_array,
3190                             void *fs_info)
3191 {
3192         struct shmem_inode_info *info = SHMEM_I(inode);
3193         const struct xattr *xattr;
3194         struct simple_xattr *new_xattr;
3195         size_t len;
3196
3197         for (xattr = xattr_array; xattr->name != NULL; xattr++) {
3198                 new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
3199                 if (!new_xattr)
3200                         return -ENOMEM;
3201
3202                 len = strlen(xattr->name) + 1;
3203                 new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
3204                                           GFP_KERNEL);
3205                 if (!new_xattr->name) {
3206                         kvfree(new_xattr);
3207                         return -ENOMEM;
3208                 }
3209
3210                 memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
3211                        XATTR_SECURITY_PREFIX_LEN);
3212                 memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
3213                        xattr->name, len);
3214
3215                 simple_xattr_list_add(&info->xattrs, new_xattr);
3216         }
3217
3218         return 0;
3219 }
3220
3221 static int shmem_xattr_handler_get(const struct xattr_handler *handler,
3222                                    struct dentry *unused, struct inode *inode,
3223                                    const char *name, void *buffer, size_t size)
3224 {
3225         struct shmem_inode_info *info = SHMEM_I(inode);
3226
3227         name = xattr_full_name(handler, name);
3228         return simple_xattr_get(&info->xattrs, name, buffer, size);
3229 }
3230
3231 static int shmem_xattr_handler_set(const struct xattr_handler *handler,
3232                                    struct user_namespace *mnt_userns,
3233                                    struct dentry *unused, struct inode *inode,
3234                                    const char *name, const void *value,
3235                                    size_t size, int flags)
3236 {
3237         struct shmem_inode_info *info = SHMEM_I(inode);
3238
3239         name = xattr_full_name(handler, name);
3240         return simple_xattr_set(&info->xattrs, name, value, size, flags, NULL);
3241 }
3242
3243 static const struct xattr_handler shmem_security_xattr_handler = {
3244         .prefix = XATTR_SECURITY_PREFIX,
3245         .get = shmem_xattr_handler_get,
3246         .set = shmem_xattr_handler_set,
3247 };
3248
3249 static const struct xattr_handler shmem_trusted_xattr_handler = {
3250         .prefix = XATTR_TRUSTED_PREFIX,
3251         .get = shmem_xattr_handler_get,
3252         .set = shmem_xattr_handler_set,
3253 };
3254
3255 static const struct xattr_handler *shmem_xattr_handlers[] = {
3256 #ifdef CONFIG_TMPFS_POSIX_ACL
3257         &posix_acl_access_xattr_handler,
3258         &posix_acl_default_xattr_handler,
3259 #endif
3260         &shmem_security_xattr_handler,
3261         &shmem_trusted_xattr_handler,
3262         NULL
3263 };
3264
3265 static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
3266 {
3267         struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
3268         return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size);
3269 }
3270 #endif /* CONFIG_TMPFS_XATTR */
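
/*
 * With CONFIG_TMPFS_XATTR the handlers above serve the "trusted." and
 * "security." namespaces (and, with CONFIG_TMPFS_POSIX_ACL, the ACL
 * attributes); "user." xattrs are not offered here.  A minimal userspace
 * sketch (path and value hypothetical; trusted.* needs CAP_SYS_ADMIN):
 *
 *	#include <stdio.h>
 *	#include <sys/xattr.h>
 *
 *	int tag_file(void)
 *	{
 *		char buf[16];
 *
 *		if (setxattr("/mnt/file", "trusted.example", "42", 2, 0) != 0) {
 *			perror("setxattr");
 *			return -1;
 *		}
 *		return getxattr("/mnt/file", "trusted.example",
 *				buf, sizeof(buf)) < 0 ? -1 : 0;
 *	}
 */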
3271
3272 static const struct inode_operations shmem_short_symlink_operations = {
3273         .get_link       = simple_get_link,
3274 #ifdef CONFIG_TMPFS_XATTR
3275         .listxattr      = shmem_listxattr,
3276 #endif
3277 };
3278
3279 static const struct inode_operations shmem_symlink_inode_operations = {
3280         .get_link       = shmem_get_link,
3281 #ifdef CONFIG_TMPFS_XATTR
3282         .listxattr      = shmem_listxattr,
3283 #endif
3284 };
3285
3286 static struct dentry *shmem_get_parent(struct dentry *child)
3287 {
3288         return ERR_PTR(-ESTALE);
3289 }
3290
3291 static int shmem_match(struct inode *ino, void *vfh)
3292 {
3293         __u32 *fh = vfh;
3294         __u64 inum = fh[2];
3295         inum = (inum << 32) | fh[1];
3296         return ino->i_ino == inum && fh[0] == ino->i_generation;
3297 }
3298
3299 /* Find any alias of inode, but prefer a hashed alias */
3300 static struct dentry *shmem_find_alias(struct inode *inode)
3301 {
3302         struct dentry *alias = d_find_alias(inode);
3303
3304         return alias ?: d_find_any_alias(inode);
3305 }
3306
3307
3308 static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
3309                 struct fid *fid, int fh_len, int fh_type)
3310 {
3311         struct inode *inode;
3312         struct dentry *dentry = NULL;
3313         u64 inum;
3314
3315         if (fh_len < 3)
3316                 return NULL;
3317
3318         inum = fid->raw[2];
3319         inum = (inum << 32) | fid->raw[1];
3320
3321         inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
3322                         shmem_match, fid->raw);
3323         if (inode) {
3324                 dentry = shmem_find_alias(inode);
3325                 iput(inode);
3326         }
3327
3328         return dentry;
3329 }
3330
3331 static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
3332                                 struct inode *parent)
3333 {
3334         if (*len < 3) {
3335                 *len = 3;
3336                 return FILEID_INVALID;
3337         }
3338
3339         if (inode_unhashed(inode)) {
3340                 /* Unfortunately insert_inode_hash is not idempotent,
3341                  * so as we hash inodes here rather than at creation
3342                  * time, we need a lock to ensure we only try
3343                  * to do it once
3344                  */
3345                 static DEFINE_SPINLOCK(lock);
3346                 spin_lock(&lock);
3347                 if (inode_unhashed(inode))
3348                         __insert_inode_hash(inode,
3349                                             inode->i_ino + inode->i_generation);
3350                 spin_unlock(&lock);
3351         }
3352
3353         fh[0] = inode->i_generation;
3354         fh[1] = inode->i_ino;
3355         fh[2] = ((__u64)inode->i_ino) >> 32;
3356
3357         *len = 3;
3358         return 1;
3359 }
3360
3361 static const struct export_operations shmem_export_ops = {
3362         .get_parent     = shmem_get_parent,
3363         .encode_fh      = shmem_encode_fh,
3364         .fh_to_dentry   = shmem_fh_to_dentry,
3365 };
3366
3367 enum shmem_param {
3368         Opt_gid,
3369         Opt_huge,
3370         Opt_mode,
3371         Opt_mpol,
3372         Opt_nr_blocks,
3373         Opt_nr_inodes,
3374         Opt_size,
3375         Opt_uid,
3376         Opt_inode32,
3377         Opt_inode64,
3378 };
3379
3380 static const struct constant_table shmem_param_enums_huge[] = {
3381         {"never",       SHMEM_HUGE_NEVER },
3382         {"always",      SHMEM_HUGE_ALWAYS },
3383         {"within_size", SHMEM_HUGE_WITHIN_SIZE },
3384         {"advise",      SHMEM_HUGE_ADVISE },
3385         {}
3386 };
3387
3388 const struct fs_parameter_spec shmem_fs_parameters[] = {
3389         fsparam_u32   ("gid",           Opt_gid),
3390         fsparam_enum  ("huge",          Opt_huge,  shmem_param_enums_huge),
3391         fsparam_u32oct("mode",          Opt_mode),
3392         fsparam_string("mpol",          Opt_mpol),
3393         fsparam_string("nr_blocks",     Opt_nr_blocks),
3394         fsparam_string("nr_inodes",     Opt_nr_inodes),
3395         fsparam_string("size",          Opt_size),
3396         fsparam_u32   ("uid",           Opt_uid),
3397         fsparam_flag  ("inode32",       Opt_inode32),
3398         fsparam_flag  ("inode64",       Opt_inode64),
3399         {}
3400 };
3401
3402 static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
3403 {
3404         struct shmem_options *ctx = fc->fs_private;
3405         struct fs_parse_result result;
3406         unsigned long long size;
3407         char *rest;
3408         int opt;
3409
3410         opt = fs_parse(fc, shmem_fs_parameters, param, &result);
3411         if (opt < 0)
3412                 return opt;
3413
3414         switch (opt) {
3415         case Opt_size:
3416                 size = memparse(param->string, &rest);
3417                 if (*rest == '%') {
3418                         size <<= PAGE_SHIFT;
3419                         size *= totalram_pages();
3420                         do_div(size, 100);
3421                         rest++;
3422                 }
3423                 if (*rest)
3424                         goto bad_value;
3425                 ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE);
3426                 ctx->seen |= SHMEM_SEEN_BLOCKS;
3427                 break;
3428         case Opt_nr_blocks:
3429                 ctx->blocks = memparse(param->string, &rest);
3430                 if (*rest)
3431                         goto bad_value;
3432                 ctx->seen |= SHMEM_SEEN_BLOCKS;
3433                 break;
3434         case Opt_nr_inodes:
3435                 ctx->inodes = memparse(param->string, &rest);
3436                 if (*rest)
3437                         goto bad_value;
3438                 ctx->seen |= SHMEM_SEEN_INODES;
3439                 break;
3440         case Opt_mode:
3441                 ctx->mode = result.uint_32 & 07777;
3442                 break;
3443         case Opt_uid:
3444                 ctx->uid = make_kuid(current_user_ns(), result.uint_32);
3445                 if (!uid_valid(ctx->uid))
3446                         goto bad_value;
3447                 break;
3448         case Opt_gid:
3449                 ctx->gid = make_kgid(current_user_ns(), result.uint_32);
3450                 if (!gid_valid(ctx->gid))
3451                         goto bad_value;
3452                 break;
3453         case Opt_huge:
3454                 ctx->huge = result.uint_32;
3455                 if (ctx->huge != SHMEM_HUGE_NEVER &&
3456                     !(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
3457                       has_transparent_hugepage()))
3458                         goto unsupported_parameter;
3459                 ctx->seen |= SHMEM_SEEN_HUGE;
3460                 break;
3461         case Opt_mpol:
3462                 if (IS_ENABLED(CONFIG_NUMA)) {
3463                         mpol_put(ctx->mpol);
3464                         ctx->mpol = NULL;
3465                         if (mpol_parse_str(param->string, &ctx->mpol))
3466                                 goto bad_value;
3467                         break;
3468                 }
3469                 goto unsupported_parameter;
3470         case Opt_inode32:
3471                 ctx->full_inums = false;
3472                 ctx->seen |= SHMEM_SEEN_INUMS;
3473                 break;
3474         case Opt_inode64:
3475                 if (sizeof(ino_t) < 8) {
3476                         return invalfc(fc,
3477                                        "Cannot use inode64 with <64bit inums in kernel\n");
3478                 }
3479                 ctx->full_inums = true;
3480                 ctx->seen |= SHMEM_SEEN_INUMS;
3481                 break;
3482         }
3483         return 0;
3484
3485 unsupported_parameter:
3486         return invalfc(fc, "Unsupported parameter '%s'", param->key);
3487 bad_value:
3488         return invalfc(fc, "Bad value for '%s'", param->key);
3489 }
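
/*
 * The parser above consumes the option string handed to mount(2) (via
 * shmem_parse_options below) or to fsconfig(2).  A minimal userspace sketch
 * (target directory hypothetical) of a size-limited mount; "size=50%" takes
 * the percentage branch above, and "huge=" is only accepted on kernels with
 * CONFIG_TRANSPARENT_HUGEPAGE, as checked above:
 *
 *	#include <stdio.h>
 *	#include <sys/mount.h>
 *
 *	int mount_scratch(void)
 *	{
 *		if (mount("tmpfs", "/mnt/scratch", "tmpfs", 0,
 *			  "size=50%,nr_inodes=1024,mode=1777") != 0) {
 *			perror("mount");
 *			return -1;
 *		}
 *		return 0;
 *	}
 */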
3490
3491 static int shmem_parse_options(struct fs_context *fc, void *data)
3492 {
3493         char *options = data;
3494
3495         if (options) {
3496                 int err = security_sb_eat_lsm_opts(options, &fc->security);
3497                 if (err)
3498                         return err;
3499         }
3500
3501         while (options != NULL) {
3502                 char *this_char = options;
3503                 for (;;) {
3504                         /*
3505                          * NUL-terminate this option: unfortunately,
3506                          * mount options form a comma-separated list,
3507                          * but mpol's nodelist may also contain commas.
3508                          */
3509                         options = strchr(options, ',');
3510                         if (options == NULL)
3511                                 break;
3512                         options++;
3513                         if (!isdigit(*options)) {
3514                                 options[-1] = '\0';
3515                                 break;
3516                         }
3517                 }
3518                 if (*this_char) {
3519                         char *value = strchr(this_char, '=');
3520                         size_t len = 0;
3521                         int err;
3522
3523                         if (value) {
3524                                 *value++ = '\0';
3525                                 len = strlen(value);
3526                         }
3527                         err = vfs_parse_fs_string(fc, this_char, value, len);
3528                         if (err < 0)
3529                                 return err;
3530                 }
3531         }
3532         return 0;
3533 }
3534
3535 /*
3536  * Reconfigure a shmem filesystem.
3537  *
3538  * Note that we disallow change from limited->unlimited blocks/inodes while any
3539  * are in use; but we must separately disallow unlimited->limited, because in
3540  * that case we have no record of how much is already in use.
3541  */
3542 static int shmem_reconfigure(struct fs_context *fc)
3543 {
3544         struct shmem_options *ctx = fc->fs_private;
3545         struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb);
3546         unsigned long inodes;
3547         const char *err;
3548
3549         spin_lock(&sbinfo->stat_lock);
3550         inodes = sbinfo->max_inodes - sbinfo->free_inodes;
3551         if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) {
3552                 if (!sbinfo->max_blocks) {
3553                         err = "Cannot retroactively limit size";
3554                         goto out;
3555                 }
3556                 if (percpu_counter_compare(&sbinfo->used_blocks,
3557                                            ctx->blocks) > 0) {
3558                         err = "Too small a size for current use";
3559                         goto out;
3560                 }
3561         }
3562         if ((ctx->seen & SHMEM_SEEN_INODES) && ctx->inodes) {
3563                 if (!sbinfo->max_inodes) {
3564                         err = "Cannot retroactively limit inodes";
3565                         goto out;
3566                 }
3567                 if (ctx->inodes < inodes) {
3568                         err = "Too few inodes for current use";
3569                         goto out;
3570                 }
3571         }
3572
3573         if ((ctx->seen & SHMEM_SEEN_INUMS) && !ctx->full_inums &&
3574             sbinfo->next_ino > UINT_MAX) {
3575                 err = "Current inum too high to switch to 32-bit inums";
3576                 goto out;
3577         }
3578
3579         if (ctx->seen & SHMEM_SEEN_HUGE)
3580                 sbinfo->huge = ctx->huge;
3581         if (ctx->seen & SHMEM_SEEN_INUMS)
3582                 sbinfo->full_inums = ctx->full_inums;
3583         if (ctx->seen & SHMEM_SEEN_BLOCKS)
3584                 sbinfo->max_blocks  = ctx->blocks;
3585         if (ctx->seen & SHMEM_SEEN_INODES) {
3586                 sbinfo->max_inodes  = ctx->inodes;
3587                 sbinfo->free_inodes = ctx->inodes - inodes;
3588         }
3589
3590         /*
3591          * Preserve previous mempolicy unless mpol remount option was specified.
3592          */
3593         if (ctx->mpol) {
3594                 mpol_put(sbinfo->mpol);
3595                 sbinfo->mpol = ctx->mpol;       /* transfers initial ref */
3596                 ctx->mpol = NULL;
3597         }
3598         spin_unlock(&sbinfo->stat_lock);
3599         return 0;
3600 out:
3601         spin_unlock(&sbinfo->stat_lock);
3602         return invalfc(fc, "%s", err);
3603 }
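
/*
 * Resizing an already-limited mount goes through the checks above; an
 * unlimited mount cannot retroactively be limited, and, as the comment
 * above notes, a limited one cannot be made unlimited while anything is in
 * use.  Sketch of a remount-resize from userspace (mount point and size
 * hypothetical):
 *
 *	#include <stdio.h>
 *	#include <sys/mount.h>
 *
 *	int grow_tmpfs(void)
 *	{
 *		if (mount(NULL, "/mnt/scratch", NULL, MS_REMOUNT, "size=2g") != 0) {
 *			perror("remount");
 *			return -1;
 *		}
 *		return 0;
 *	}
 */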
3604
3605 static int shmem_show_options(struct seq_file *seq, struct dentry *root)
3606 {
3607         struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);
3608
3609         if (sbinfo->max_blocks != shmem_default_max_blocks())
3610                 seq_printf(seq, ",size=%luk",
3611                         sbinfo->max_blocks << (PAGE_SHIFT - 10));
3612         if (sbinfo->max_inodes != shmem_default_max_inodes())
3613                 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
3614         if (sbinfo->mode != (0777 | S_ISVTX))
3615                 seq_printf(seq, ",mode=%03ho", sbinfo->mode);
3616         if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
3617                 seq_printf(seq, ",uid=%u",
3618                                 from_kuid_munged(&init_user_ns, sbinfo->uid));
3619         if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
3620                 seq_printf(seq, ",gid=%u",
3621                                 from_kgid_munged(&init_user_ns, sbinfo->gid));
3622
3623         /*
3624          * Showing inode{64,32} might be useful even if it's the system default,
3625          * since then people don't have to resort to checking both here and
3626          * /proc/config.gz to confirm 64-bit inums were successfully applied
3627          * (which may not even exist if IKCONFIG_PROC isn't enabled).
3628          *
3629          * We hide it when inode64 isn't the default and we are using 32-bit
3630          * inodes, since that probably just means the feature isn't even under
3631          * consideration.
3632          *
3633          * As such:
3634          *
3635          *                     +-----------------+-----------------+
3636          *                     | TMPFS_INODE64=y | TMPFS_INODE64=n |
3637          *  +------------------+-----------------+-----------------+
3638          *  | full_inums=true  | show            | show            |
3639          *  | full_inums=false | show            | hide            |
3640          *  +------------------+-----------------+-----------------+
3641          *
3642          */
3643         if (IS_ENABLED(CONFIG_TMPFS_INODE64) || sbinfo->full_inums)
3644                 seq_printf(seq, ",inode%d", (sbinfo->full_inums ? 64 : 32));
3645 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3646         /* Rightly or wrongly, show huge mount option unmasked by shmem_huge */
3647         if (sbinfo->huge)
3648                 seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge));
3649 #endif
3650         shmem_show_mpol(seq, sbinfo->mpol);
3651         return 0;
3652 }
3653
3654 #endif /* CONFIG_TMPFS */
3655
3656 static void shmem_put_super(struct super_block *sb)
3657 {
3658         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
3659
3660         free_percpu(sbinfo->ino_batch);
3661         percpu_counter_destroy(&sbinfo->used_blocks);
3662         mpol_put(sbinfo->mpol);
3663         kfree(sbinfo);
3664         sb->s_fs_info = NULL;
3665 }
3666
3667 static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
3668 {
3669         struct shmem_options *ctx = fc->fs_private;
3670         struct inode *inode;
3671         struct shmem_sb_info *sbinfo;
3672         int err = -ENOMEM;
3673
3674         /* Round up to L1_CACHE_BYTES to resist false sharing */
3675         sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
3676                                 L1_CACHE_BYTES), GFP_KERNEL);
3677         if (!sbinfo)
3678                 return -ENOMEM;
3679
3680         sb->s_fs_info = sbinfo;
3681
3682 #ifdef CONFIG_TMPFS
3683         /*
3684          * By default we only allow half of the physical RAM per
3685          * tmpfs instance, limiting inodes to one per page of lowmem;
3686          * but the internal instance is left unlimited.
3687          */
3688         if (!(sb->s_flags & SB_KERNMOUNT)) {
3689                 if (!(ctx->seen & SHMEM_SEEN_BLOCKS))
3690                         ctx->blocks = shmem_default_max_blocks();
3691                 if (!(ctx->seen & SHMEM_SEEN_INODES))
3692                         ctx->inodes = shmem_default_max_inodes();
3693                 if (!(ctx->seen & SHMEM_SEEN_INUMS))
3694                         ctx->full_inums = IS_ENABLED(CONFIG_TMPFS_INODE64);
3695         } else {
3696                 sb->s_flags |= SB_NOUSER;
3697         }
3698         sb->s_export_op = &shmem_export_ops;
3699         sb->s_flags |= SB_NOSEC;
3700 #else
3701         sb->s_flags |= SB_NOUSER;
3702 #endif
3703         sbinfo->max_blocks = ctx->blocks;
3704         sbinfo->free_inodes = sbinfo->max_inodes = ctx->inodes;
3705         if (sb->s_flags & SB_KERNMOUNT) {
3706                 sbinfo->ino_batch = alloc_percpu(ino_t);
3707                 if (!sbinfo->ino_batch)
3708                         goto failed;
3709         }
3710         sbinfo->uid = ctx->uid;
3711         sbinfo->gid = ctx->gid;
3712         sbinfo->full_inums = ctx->full_inums;
3713         sbinfo->mode = ctx->mode;
3714         sbinfo->huge = ctx->huge;
3715         sbinfo->mpol = ctx->mpol;
3716         ctx->mpol = NULL;
3717
3718         spin_lock_init(&sbinfo->stat_lock);
3719         if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
3720                 goto failed;
3721         spin_lock_init(&sbinfo->shrinklist_lock);
3722         INIT_LIST_HEAD(&sbinfo->shrinklist);
3723
3724         sb->s_maxbytes = MAX_LFS_FILESIZE;
3725         sb->s_blocksize = PAGE_SIZE;
3726         sb->s_blocksize_bits = PAGE_SHIFT;
3727         sb->s_magic = TMPFS_MAGIC;
3728         sb->s_op = &shmem_ops;
3729         sb->s_time_gran = 1;
3730 #ifdef CONFIG_TMPFS_XATTR
3731         sb->s_xattr = shmem_xattr_handlers;
3732 #endif
3733 #ifdef CONFIG_TMPFS_POSIX_ACL
3734         sb->s_flags |= SB_POSIXACL;
3735 #endif
3736         uuid_gen(&sb->s_uuid);
3737
3738         inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
3739         if (!inode)
3740                 goto failed;
3741         inode->i_uid = sbinfo->uid;
3742         inode->i_gid = sbinfo->gid;
3743         sb->s_root = d_make_root(inode);
3744         if (!sb->s_root)
3745                 goto failed;
3746         return 0;
3747
3748 failed:
3749         shmem_put_super(sb);
3750         return err;
3751 }
3752
3753 static int shmem_get_tree(struct fs_context *fc)
3754 {
3755         return get_tree_nodev(fc, shmem_fill_super);
3756 }
3757
3758 static void shmem_free_fc(struct fs_context *fc)
3759 {
3760         struct shmem_options *ctx = fc->fs_private;
3761
3762         if (ctx) {
3763                 mpol_put(ctx->mpol);
3764                 kfree(ctx);
3765         }
3766 }
3767
3768 static const struct fs_context_operations shmem_fs_context_ops = {
3769         .free                   = shmem_free_fc,
3770         .get_tree               = shmem_get_tree,
3771 #ifdef CONFIG_TMPFS
3772         .parse_monolithic       = shmem_parse_options,
3773         .parse_param            = shmem_parse_one,
3774         .reconfigure            = shmem_reconfigure,
3775 #endif
3776 };
3777
3778 static struct kmem_cache *shmem_inode_cachep;
3779
3780 static struct inode *shmem_alloc_inode(struct super_block *sb)
3781 {
3782         struct shmem_inode_info *info;
3783         info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
3784         if (!info)
3785                 return NULL;
3786         return &info->vfs_inode;
3787 }
3788
3789 static void shmem_free_in_core_inode(struct inode *inode)
3790 {
3791         if (S_ISLNK(inode->i_mode))
3792                 kfree(inode->i_link);
3793         kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
3794 }
3795
3796 static void shmem_destroy_inode(struct inode *inode)
3797 {
3798         if (S_ISREG(inode->i_mode))
3799                 mpol_free_shared_policy(&SHMEM_I(inode)->policy);
3800 }
3801
3802 static void shmem_init_inode(void *foo)
3803 {
3804         struct shmem_inode_info *info = foo;
3805         inode_init_once(&info->vfs_inode);
3806 }
3807
3808 static void shmem_init_inodecache(void)
3809 {
3810         shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
3811                                 sizeof(struct shmem_inode_info),
3812                                 0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode);
3813 }
3814
3815 static void shmem_destroy_inodecache(void)
3816 {
3817         kmem_cache_destroy(shmem_inode_cachep);
3818 }
3819
3820 const struct address_space_operations shmem_aops = {
3821         .writepage      = shmem_writepage,
3822         .set_page_dirty = __set_page_dirty_no_writeback,
3823 #ifdef CONFIG_TMPFS
3824         .write_begin    = shmem_write_begin,
3825         .write_end      = shmem_write_end,
3826 #endif
3827 #ifdef CONFIG_MIGRATION
3828         .migratepage    = migrate_page,
3829 #endif
3830         .error_remove_page = generic_error_remove_page,
3831 };
3832 EXPORT_SYMBOL(shmem_aops);
3833
3834 static const struct file_operations shmem_file_operations = {
3835         .mmap           = shmem_mmap,
3836         .get_unmapped_area = shmem_get_unmapped_area,
3837 #ifdef CONFIG_TMPFS
3838         .llseek         = shmem_file_llseek,
3839         .read_iter      = shmem_file_read_iter,
3840         .write_iter     = generic_file_write_iter,
3841         .fsync          = noop_fsync,
3842         .splice_read    = generic_file_splice_read,
3843         .splice_write   = iter_file_splice_write,
3844         .fallocate      = shmem_fallocate,
3845 #endif
3846 };
3847
3848 static const struct inode_operations shmem_inode_operations = {
3849         .getattr        = shmem_getattr,
3850         .setattr        = shmem_setattr,
3851 #ifdef CONFIG_TMPFS_XATTR
3852         .listxattr      = shmem_listxattr,
3853         .set_acl        = simple_set_acl,
3854 #endif
3855 };
3856
3857 static const struct inode_operations shmem_dir_inode_operations = {
3858 #ifdef CONFIG_TMPFS
3859         .create         = shmem_create,
3860         .lookup         = simple_lookup,
3861         .link           = shmem_link,
3862         .unlink         = shmem_unlink,
3863         .symlink        = shmem_symlink,
3864         .mkdir          = shmem_mkdir,
3865         .rmdir          = shmem_rmdir,
3866         .mknod          = shmem_mknod,
3867         .rename         = shmem_rename2,
3868         .tmpfile        = shmem_tmpfile,
3869 #endif
3870 #ifdef CONFIG_TMPFS_XATTR
3871         .listxattr      = shmem_listxattr,
3872 #endif
3873 #ifdef CONFIG_TMPFS_POSIX_ACL
3874         .setattr        = shmem_setattr,
3875         .set_acl        = simple_set_acl,
3876 #endif
3877 };
3878
3879 static const struct inode_operations shmem_special_inode_operations = {
3880 #ifdef CONFIG_TMPFS_XATTR
3881         .listxattr      = shmem_listxattr,
3882 #endif
3883 #ifdef CONFIG_TMPFS_POSIX_ACL
3884         .setattr        = shmem_setattr,
3885         .set_acl        = simple_set_acl,
3886 #endif
3887 };
3888
3889 static const struct super_operations shmem_ops = {
3890         .alloc_inode    = shmem_alloc_inode,
3891         .free_inode     = shmem_free_in_core_inode,
3892         .destroy_inode  = shmem_destroy_inode,
3893 #ifdef CONFIG_TMPFS
3894         .statfs         = shmem_statfs,
3895         .show_options   = shmem_show_options,
3896 #endif
3897         .evict_inode    = shmem_evict_inode,
3898         .drop_inode     = generic_delete_inode,
3899         .put_super      = shmem_put_super,
3900 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3901         .nr_cached_objects      = shmem_unused_huge_count,
3902         .free_cached_objects    = shmem_unused_huge_scan,
3903 #endif
3904 };
3905
3906 static const struct vm_operations_struct shmem_vm_ops = {
3907         .fault          = shmem_fault,
3908         .map_pages      = filemap_map_pages,
3909 #ifdef CONFIG_NUMA
3910         .set_policy     = shmem_set_policy,
3911         .get_policy     = shmem_get_policy,
3912 #endif
3913 };
3914
3915 int shmem_init_fs_context(struct fs_context *fc)
3916 {
3917         struct shmem_options *ctx;
3918
3919         ctx = kzalloc(sizeof(struct shmem_options), GFP_KERNEL);
3920         if (!ctx)
3921                 return -ENOMEM;
3922
3923         ctx->mode = 0777 | S_ISVTX;
3924         ctx->uid = current_fsuid();
3925         ctx->gid = current_fsgid();
3926
3927         fc->fs_private = ctx;
3928         fc->ops = &shmem_fs_context_ops;
3929         return 0;
3930 }
3931
3932 static struct file_system_type shmem_fs_type = {
3933         .owner          = THIS_MODULE,
3934         .name           = "tmpfs",
3935         .init_fs_context = shmem_init_fs_context,
3936 #ifdef CONFIG_TMPFS
3937         .parameters     = shmem_fs_parameters,
3938 #endif
3939         .kill_sb        = kill_litter_super,
3940         .fs_flags       = FS_USERNS_MOUNT | FS_THP_SUPPORT,
3941 };
3942
3943 int __init shmem_init(void)
3944 {
3945         int error;
3946
3947         shmem_init_inodecache();
3948
3949         error = register_filesystem(&shmem_fs_type);
3950         if (error) {
3951                 pr_err("Could not register tmpfs\n");
3952                 goto out2;
3953         }
3954
3955         shm_mnt = kern_mount(&shmem_fs_type);
3956         if (IS_ERR(shm_mnt)) {
3957                 error = PTR_ERR(shm_mnt);
3958                 pr_err("Could not kern_mount tmpfs\n");
3959                 goto out1;
3960         }
3961
3962 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3963         if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
3964                 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
3965         else
3966                 shmem_huge = 0; /* just in case it was patched */
3967 #endif
3968         return 0;
3969
3970 out1:
3971         unregister_filesystem(&shmem_fs_type);
3972 out2:
3973         shmem_destroy_inodecache();
3974         shm_mnt = ERR_PTR(error);
3975         return error;
3976 }
3977
3978 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
3979 static ssize_t shmem_enabled_show(struct kobject *kobj,
3980                                   struct kobj_attribute *attr, char *buf)
3981 {
3982         static const int values[] = {
3983                 SHMEM_HUGE_ALWAYS,
3984                 SHMEM_HUGE_WITHIN_SIZE,
3985                 SHMEM_HUGE_ADVISE,
3986                 SHMEM_HUGE_NEVER,
3987                 SHMEM_HUGE_DENY,
3988                 SHMEM_HUGE_FORCE,
3989         };
3990         int len = 0;
3991         int i;
3992
3993         for (i = 0; i < ARRAY_SIZE(values); i++) {
3994                 len += sysfs_emit_at(buf, len,
3995                                      shmem_huge == values[i] ? "%s[%s]" : "%s%s",
3996                                      i ? " " : "",
3997                                      shmem_format_huge(values[i]));
3998         }
3999
4000         len += sysfs_emit_at(buf, len, "\n");
4001
4002         return len;
4003 }
4004
4005 static ssize_t shmem_enabled_store(struct kobject *kobj,
4006                 struct kobj_attribute *attr, const char *buf, size_t count)
4007 {
4008         char tmp[16];
4009         int huge;
4010
4011         if (count + 1 > sizeof(tmp))
4012                 return -EINVAL;
4013         memcpy(tmp, buf, count);
4014         tmp[count] = '\0';
4015         if (count && tmp[count - 1] == '\n')
4016                 tmp[count - 1] = '\0';
4017
4018         huge = shmem_parse_huge(tmp);
4019         if (huge == -EINVAL)
4020                 return -EINVAL;
4021         if (!has_transparent_hugepage() &&
4022                         huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY)
4023                 return -EINVAL;
4024
4025         shmem_huge = huge;
4026         if (shmem_huge > SHMEM_HUGE_DENY)
4027                 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
4028         return count;
4029 }
4030
4031 struct kobj_attribute shmem_enabled_attr =
4032         __ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store);
4033 #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */
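
/*
 * The attribute above is wired up under
 * /sys/kernel/mm/transparent_hugepage/shmem_enabled.  A small userspace
 * sketch (needs root) selecting the "within_size" policy, which the store
 * routine parses with shmem_parse_huge():
 *
 *	#include <stdio.h>
 *
 *	int set_shmem_huge_policy(void)
 *	{
 *		FILE *f = fopen("/sys/kernel/mm/transparent_hugepage/shmem_enabled",
 *				"w");
 *
 *		if (!f)
 *			return -1;
 *		fputs("within_size", f);
 *		return fclose(f) == 0 ? 0 : -1;
 *	}
 */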
4034
4035 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4036 bool shmem_huge_enabled(struct vm_area_struct *vma)
4037 {
4038         struct inode *inode = file_inode(vma->vm_file);
4039         struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
4040         loff_t i_size;
4041         pgoff_t off;
4042
4043         if ((vma->vm_flags & VM_NOHUGEPAGE) ||
4044             test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
4045                 return false;
4046         if (shmem_huge == SHMEM_HUGE_FORCE)
4047                 return true;
4048         if (shmem_huge == SHMEM_HUGE_DENY)
4049                 return false;
4050         switch (sbinfo->huge) {
4051                 case SHMEM_HUGE_NEVER:
4052                         return false;
4053                 case SHMEM_HUGE_ALWAYS:
4054                         return true;
4055                 case SHMEM_HUGE_WITHIN_SIZE:
4056                         off = round_up(vma->vm_pgoff, HPAGE_PMD_NR);
4057                         i_size = round_up(i_size_read(inode), PAGE_SIZE);
4058                         if (i_size >= HPAGE_PMD_SIZE &&
4059                                         i_size >> PAGE_SHIFT >= off)
4060                                 return true;
4061                         fallthrough;
4062                 case SHMEM_HUGE_ADVISE:
4063                         /* TODO: implement fadvise() hints */
4064                         return (vma->vm_flags & VM_HUGEPAGE);
4065                 default:
4066                         VM_BUG_ON(1);
4067                         return false;
4068         }
4069 }
4070 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
4071
4072 #else /* !CONFIG_SHMEM */
4073
4074 /*
4075  * tiny-shmem: simple shmemfs and tmpfs using ramfs code
4076  *
4077  * This is intended for small systems where the benefits of the full
4078  * shmem code (swap-backed and resource-limited) are outweighed by
4079  * their complexity. On systems without swap this code should be
4080  * effectively equivalent, but much lighter weight.
4081  */
4082
4083 static struct file_system_type shmem_fs_type = {
4084         .name           = "tmpfs",
4085         .init_fs_context = ramfs_init_fs_context,
4086         .parameters     = ramfs_fs_parameters,
4087         .kill_sb        = kill_litter_super,
4088         .fs_flags       = FS_USERNS_MOUNT,
4089 };
4090
4091 int __init shmem_init(void)
4092 {
4093         BUG_ON(register_filesystem(&shmem_fs_type) != 0);
4094
4095         shm_mnt = kern_mount(&shmem_fs_type);
4096         BUG_ON(IS_ERR(shm_mnt));
4097
4098         return 0;
4099 }
4100
4101 int shmem_unuse(unsigned int type, bool frontswap,
4102                 unsigned long *fs_pages_to_unuse)
4103 {
4104         return 0;
4105 }
4106
4107 int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
4108 {
4109         return 0;
4110 }
4111
4112 void shmem_unlock_mapping(struct address_space *mapping)
4113 {
4114 }
4115
4116 #ifdef CONFIG_MMU
4117 unsigned long shmem_get_unmapped_area(struct file *file,
4118                                       unsigned long addr, unsigned long len,
4119                                       unsigned long pgoff, unsigned long flags)
4120 {
4121         return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
4122 }
4123 #endif
4124
4125 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
4126 {
4127         truncate_inode_pages_range(inode->i_mapping, lstart, lend);
4128 }
4129 EXPORT_SYMBOL_GPL(shmem_truncate_range);
4130
4131 #define shmem_vm_ops                            generic_file_vm_ops
4132 #define shmem_file_operations                   ramfs_file_operations
4133 #define shmem_get_inode(sb, dir, mode, dev, flags)      ramfs_get_inode(sb, dir, mode, dev)
4134 #define shmem_acct_size(flags, size)            0
4135 #define shmem_unacct_size(flags, size)          do {} while (0)
4136
4137 #endif /* CONFIG_SHMEM */
4138
4139 /* common code */
4140
4141 static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, loff_t size,
4142                                        unsigned long flags, unsigned int i_flags)
4143 {
4144         struct inode *inode;
4145         struct file *res;
4146
4147         if (IS_ERR(mnt))
4148                 return ERR_CAST(mnt);
4149
4150         if (size < 0 || size > MAX_LFS_FILESIZE)
4151                 return ERR_PTR(-EINVAL);
4152
4153         if (shmem_acct_size(flags, size))
4154                 return ERR_PTR(-ENOMEM);
4155
4156         inode = shmem_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0,
4157                                 flags);
4158         if (unlikely(!inode)) {
4159                 shmem_unacct_size(flags, size);
4160                 return ERR_PTR(-ENOSPC);
4161         }
4162         inode->i_flags |= i_flags;
4163         inode->i_size = size;
4164         clear_nlink(inode);     /* It is unlinked */
4165         res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
4166         if (!IS_ERR(res))
4167                 res = alloc_file_pseudo(inode, mnt, name, O_RDWR,
4168                                 &shmem_file_operations);
4169         if (IS_ERR(res))
4170                 iput(inode);
4171         return res;
4172 }
4173
4174 /**
4175  * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
4176  *      kernel internal.  There will be NO LSM permission checks against the
4177  *      underlying inode.  So users of this interface must do LSM checks at a
4178  *      higher layer.  The users are the big_key and shm implementations.  LSM
4179  *      checks are provided at the key or shm level rather than the inode.
4180  * @name: name for dentry (to be seen in /proc/<pid>/maps)
4181  * @size: size to be set for the file
4182  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4183  */
4184 struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
4185 {
4186         return __shmem_file_setup(shm_mnt, name, size, flags, S_PRIVATE);
4187 }
4188
4189 /**
4190  * shmem_file_setup - get an unlinked file living in tmpfs
4191  * @name: name for dentry (to be seen in /proc/<pid>/maps)
4192  * @size: size to be set for the file
4193  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4194  */
4195 struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
4196 {
4197         return __shmem_file_setup(shm_mnt, name, size, flags, 0);
4198 }
4199 EXPORT_SYMBOL_GPL(shmem_file_setup);
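
/*
 * In-kernel callers typically hold the returned struct file for the life of
 * their object and drop it with fput().  A minimal sketch of such a caller
 * (the name and size used here are hypothetical):
 *
 *	struct file *backing;
 *
 *	backing = shmem_file_setup("my-driver-buffer", SZ_1M, VM_NORESERVE);
 *	if (IS_ERR(backing))
 *		return PTR_ERR(backing);
 *	// use backing->f_mapping as the object's page cache
 *	fput(backing);	// when the object is torn down
 */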
4200
4201 /**
4202  * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs
4203  * @mnt: the tmpfs mount where the file will be created
4204  * @name: name for dentry (to be seen in /proc/<pid>/maps)
4205  * @size: size to be set for the file
4206  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4207  */
4208 struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, const char *name,
4209                                        loff_t size, unsigned long flags)
4210 {
4211         return __shmem_file_setup(mnt, name, size, flags, 0);
4212 }
4213 EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt);
4214
4215 /**
4216  * shmem_zero_setup - setup a shared anonymous mapping
4217  * @vma: the vma to be mmapped is prepared by do_mmap
4218  */
4219 int shmem_zero_setup(struct vm_area_struct *vma)
4220 {
4221         struct file *file;
4222         loff_t size = vma->vm_end - vma->vm_start;
4223
4224         /*
4225          * Cloning a new file under mmap_lock leads to a lock ordering conflict
4226          * between XFS directory reading and selinux: since this file is only
4227          * accessible to the user through its mapping, use S_PRIVATE flag to
4228          * bypass file security, in the same way as shmem_kernel_file_setup().
4229          */
4230         file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags);
4231         if (IS_ERR(file))
4232                 return PTR_ERR(file);
4233
4234         if (vma->vm_file)
4235                 fput(vma->vm_file);
4236         vma->vm_file = file;
4237         vma->vm_ops = &shmem_vm_ops;
4238
4239         if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
4240                         ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
4241                         (vma->vm_end & HPAGE_PMD_MASK)) {
4242                 khugepaged_enter(vma, vma->vm_flags);
4243         }
4244
4245         return 0;
4246 }
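
/*
 * shmem_zero_setup() is what backs a MAP_SHARED | MAP_ANONYMOUS mapping, so
 * the simplest way to exercise it from userspace is (length hypothetical):
 *
 *	#include <sys/mman.h>
 *
 *	void *map_shared_zero(size_t len)
 *	{
 *		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			       MAP_SHARED | MAP_ANONYMOUS, -1, 0);
 *
 *		return p == MAP_FAILED ? NULL : p;
 *	}
 */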
4247
4248 /**
4249  * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
4250  * @mapping:    the page's address_space
4251  * @index:      the page index
4252  * @gfp:        the page allocator flags to use if allocating
4253  *
4254  * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
4255  * with any new page allocations done using the specified allocation flags.
4256  * But read_cache_page_gfp() uses the ->readpage() method, which does not
4257  * suit tmpfs, since it may have pages in swapcache, and needs to find those
4258  * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
4259  *
4260  * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
4261  * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
4262  */
4263 struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
4264                                          pgoff_t index, gfp_t gfp)
4265 {
4266 #ifdef CONFIG_SHMEM
4267         struct inode *inode = mapping->host;
4268         struct page *page;
4269         int error;
4270
4271         BUG_ON(!shmem_mapping(mapping));
4272         error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE,
4273                                   gfp, NULL, NULL, NULL);
4274         if (error)
4275                 page = ERR_PTR(error);
4276         else
4277                 unlock_page(page);
4278         return page;
4279 #else
4280         /*
4281          * The tiny !SHMEM case uses ramfs without swap
4282          */
4283         return read_cache_page_gfp(mapping, index, gfp);
4284 #endif
4285 }
4286 EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
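
/*
 * A sketch of the gem-driver style use described above (the mapping, index
 * and gfp tweaks are the caller's; the names here are hypothetical):
 *
 *	struct page *page;
 *	gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NORETRY | __GFP_NOWARN;
 *
 *	page = shmem_read_mapping_page_gfp(mapping, index, gfp);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	// the page comes back uptodate and unlocked, with a reference held
 *	put_page(page);	// drop that reference when done
 */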