ext4: Move orphan inode handling into a separate file
[linux-2.6-microblaze.git] / fs / ext4 / super.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  linux/fs/ext4/super.c
4  *
5  * Copyright (C) 1992, 1993, 1994, 1995
6  * Remy Card (card@masi.ibp.fr)
7  * Laboratoire MASI - Institut Blaise Pascal
8  * Universite Pierre et Marie Curie (Paris VI)
9  *
10  *  from
11  *
12  *  linux/fs/minix/inode.c
13  *
14  *  Copyright (C) 1991, 1992  Linus Torvalds
15  *
16  *  Big-endian to little-endian byte-swapping/bitmaps by
17  *        David S. Miller (davem@caip.rutgers.edu), 1995
18  */
19
20 #include <linux/module.h>
21 #include <linux/string.h>
22 #include <linux/fs.h>
23 #include <linux/time.h>
24 #include <linux/vmalloc.h>
25 #include <linux/slab.h>
26 #include <linux/init.h>
27 #include <linux/blkdev.h>
28 #include <linux/backing-dev.h>
29 #include <linux/parser.h>
30 #include <linux/buffer_head.h>
31 #include <linux/exportfs.h>
32 #include <linux/vfs.h>
33 #include <linux/random.h>
34 #include <linux/mount.h>
35 #include <linux/namei.h>
36 #include <linux/quotaops.h>
37 #include <linux/seq_file.h>
38 #include <linux/ctype.h>
39 #include <linux/log2.h>
40 #include <linux/crc16.h>
41 #include <linux/dax.h>
42 #include <linux/cleancache.h>
43 #include <linux/uaccess.h>
44 #include <linux/iversion.h>
45 #include <linux/unicode.h>
46 #include <linux/part_stat.h>
47 #include <linux/kthread.h>
48 #include <linux/freezer.h>
49
50 #include "ext4.h"
51 #include "ext4_extents.h"       /* Needed for trace points definition */
52 #include "ext4_jbd2.h"
53 #include "xattr.h"
54 #include "acl.h"
55 #include "mballoc.h"
56 #include "fsmap.h"
57
58 #define CREATE_TRACE_POINTS
59 #include <trace/events/ext4.h>
60
/* State for the lazy inode-table-init background thread. */
static struct ext4_lazy_init *ext4_li_info;
static DEFINE_MUTEX(ext4_li_mtx);
/* Rate limiter for mount-time messages. */
static struct ratelimit_state ext4_mount_msg_ratelimit;

/* Forward declarations for functions defined later in this file. */
static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
			     unsigned long journal_devnum);
static int ext4_show_options(struct seq_file *seq, struct dentry *root);
static void ext4_update_super(struct super_block *sb);
static int ext4_commit_super(struct super_block *sb);
static int ext4_mark_recovery_complete(struct super_block *sb,
					struct ext4_super_block *es);
static int ext4_clear_journal_err(struct super_block *sb,
				  struct ext4_super_block *es);
static int ext4_sync_fs(struct super_block *sb, int wait);
static int ext4_remount(struct super_block *sb, int *flags, char *data);
static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf);
static int ext4_unfreeze(struct super_block *sb);
static int ext4_freeze(struct super_block *sb);
static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
		       const char *dev_name, void *data);
static inline int ext2_feature_set_ok(struct super_block *sb);
static inline int ext3_feature_set_ok(struct super_block *sb);
static void ext4_destroy_lazyinit_thread(void);
static void ext4_unregister_li_request(struct super_block *sb);
static void ext4_clear_request_list(void);
static struct inode *ext4_get_journal_inode(struct super_block *sb,
					    unsigned int journal_inum);
88
89 /*
90  * Lock ordering
91  *
92  * Note the difference between i_mmap_sem (EXT4_I(inode)->i_mmap_sem) and
93  * i_mmap_rwsem (inode->i_mmap_rwsem)!
94  *
95  * page fault path:
96  * mmap_lock -> sb_start_pagefault -> i_mmap_sem (r) -> transaction start ->
97  *   page lock -> i_data_sem (rw)
98  *
99  * buffered write path:
100  * sb_start_write -> i_mutex -> mmap_lock
101  * sb_start_write -> i_mutex -> transaction start -> page lock ->
102  *   i_data_sem (rw)
103  *
104  * truncate:
105  * sb_start_write -> i_mutex -> i_mmap_sem (w) -> i_mmap_rwsem (w) -> page lock
106  * sb_start_write -> i_mutex -> i_mmap_sem (w) -> transaction start ->
107  *   i_data_sem (rw)
108  *
109  * direct IO:
110  * sb_start_write -> i_mutex -> mmap_lock
111  * sb_start_write -> i_mutex -> transaction start -> i_data_sem (rw)
112  *
113  * writepages:
114  * transaction start -> page lock(s) -> i_data_sem (rw)
115  */
116
#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
/*
 * When no separate ext2 driver is configured and ext4 is asked to serve
 * ext2 filesystems, register an "ext2" filesystem type whose mount path
 * goes through ext4_mount().
 */
static struct file_system_type ext2_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ext2",
	.mount		= ext4_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext2");
MODULE_ALIAS("ext2");
/* True when the device was mounted through the "ext2" personality. */
#define IS_EXT2_SB(sb) ((sb)->s_bdev->bd_holder == &ext2_fs_type)
#else
#define IS_EXT2_SB(sb) (0)
#endif
131
132
/*
 * ext4 always provides the "ext3" filesystem type; mounts of it are
 * handled by ext4_mount() just like native ext4 mounts.
 */
static struct file_system_type ext3_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ext3",
	.mount		= ext4_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext3");
MODULE_ALIAS("ext3");
/* True when the device was mounted through the "ext3" personality. */
#define IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type)
143
144
/*
 * Low-level helper: submit a read for an already-locked buffer.
 *
 * The caller must hold the buffer lock; completion (and unlock) happens
 * in the end_io handler.  get_bh() pins the buffer for the duration of
 * the I/O.
 */
static inline void __ext4_read_bh(struct buffer_head *bh, int op_flags,
				  bh_end_io_t *end_io)
{
	/*
	 * buffer's verified bit is no longer valid after reading from
	 * disk again due to write out error, clear it to make sure we
	 * recheck the buffer contents.
	 */
	clear_buffer_verified(bh);

	/* Fall back to the generic sync-read completion if none given. */
	bh->b_end_io = end_io ? end_io : end_buffer_read_sync;
	get_bh(bh);
	submit_bh(REQ_OP_READ, op_flags, bh);
}
159
160 void ext4_read_bh_nowait(struct buffer_head *bh, int op_flags,
161                          bh_end_io_t *end_io)
162 {
163         BUG_ON(!buffer_locked(bh));
164
165         if (ext4_buffer_uptodate(bh)) {
166                 unlock_buffer(bh);
167                 return;
168         }
169         __ext4_read_bh(bh, op_flags, end_io);
170 }
171
172 int ext4_read_bh(struct buffer_head *bh, int op_flags, bh_end_io_t *end_io)
173 {
174         BUG_ON(!buffer_locked(bh));
175
176         if (ext4_buffer_uptodate(bh)) {
177                 unlock_buffer(bh);
178                 return 0;
179         }
180
181         __ext4_read_bh(bh, op_flags, end_io);
182
183         wait_on_buffer(bh);
184         if (buffer_uptodate(bh))
185                 return 0;
186         return -EIO;
187 }
188
189 int ext4_read_bh_lock(struct buffer_head *bh, int op_flags, bool wait)
190 {
191         if (trylock_buffer(bh)) {
192                 if (wait)
193                         return ext4_read_bh(bh, op_flags, NULL);
194                 ext4_read_bh_nowait(bh, op_flags, NULL);
195                 return 0;
196         }
197         if (wait) {
198                 wait_on_buffer(bh);
199                 if (buffer_uptodate(bh))
200                         return 0;
201                 return -EIO;
202         }
203         return 0;
204 }
205
206 /*
207  * This works like __bread_gfp() except it uses ERR_PTR for error
208  * returns.  Currently with sb_bread it's impossible to distinguish
209  * between ENOMEM and EIO situations (since both result in a NULL
210  * return.
211  */
212 static struct buffer_head *__ext4_sb_bread_gfp(struct super_block *sb,
213                                                sector_t block, int op_flags,
214                                                gfp_t gfp)
215 {
216         struct buffer_head *bh;
217         int ret;
218
219         bh = sb_getblk_gfp(sb, block, gfp);
220         if (bh == NULL)
221                 return ERR_PTR(-ENOMEM);
222         if (ext4_buffer_uptodate(bh))
223                 return bh;
224
225         ret = ext4_read_bh_lock(bh, REQ_META | op_flags, true);
226         if (ret) {
227                 put_bh(bh);
228                 return ERR_PTR(ret);
229         }
230         return bh;
231 }
232
/* Read a metadata block, allowing movable page allocation; see
 * __ext4_sb_bread_gfp() for error semantics (ERR_PTR on failure). */
struct buffer_head *ext4_sb_bread(struct super_block *sb, sector_t block,
				   int op_flags)
{
	return __ext4_sb_bread_gfp(sb, block, op_flags, __GFP_MOVABLE);
}
238
/* Like ext4_sb_bread() but with no extra gfp flags, so the page is not
 * allocated as movable; returns ERR_PTR on failure. */
struct buffer_head *ext4_sb_bread_unmovable(struct super_block *sb,
					    sector_t block)
{
	return __ext4_sb_bread_gfp(sb, block, 0, 0);
}
244
245 void ext4_sb_breadahead_unmovable(struct super_block *sb, sector_t block)
246 {
247         struct buffer_head *bh = sb_getblk_gfp(sb, block, 0);
248
249         if (likely(bh)) {
250                 ext4_read_bh_lock(bh, REQ_RAHEAD, false);
251                 brelse(bh);
252         }
253 }
254
/*
 * Verify the superblock advertises a checksum algorithm we support.
 * Returns 1 when metadata checksums are not enabled at all, or when the
 * advertised type is crc32c (the only type we implement); 0 otherwise.
 */
static int ext4_verify_csum_type(struct super_block *sb,
				 struct ext4_super_block *es)
{
	if (!ext4_has_feature_metadata_csum(sb))
		return 1;

	return es->s_checksum_type == EXT4_CRC32C_CHKSUM;
}
263
/*
 * Compute the superblock checksum: a checksum of the superblock
 * contents up to (but not including) the s_checksum field itself,
 * returned in little-endian on-disk form.
 */
static __le32 ext4_superblock_csum(struct super_block *sb,
				   struct ext4_super_block *es)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int offset = offsetof(struct ext4_super_block, s_checksum);
	__u32 csum;

	csum = ext4_chksum(sbi, ~0, (char *)es, offset);

	return cpu_to_le32(csum);
}
275
/*
 * Check the stored superblock checksum against a freshly computed one.
 * Trivially succeeds (returns 1) when metadata checksums are disabled.
 */
static int ext4_superblock_csum_verify(struct super_block *sb,
				       struct ext4_super_block *es)
{
	if (!ext4_has_metadata_csum(sb))
		return 1;

	return es->s_checksum == ext4_superblock_csum(sb, es);
}
284
/*
 * Recompute and store the superblock checksum.  No-op when metadata
 * checksums are disabled on this filesystem.
 */
void ext4_superblock_csum_set(struct super_block *sb)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	if (!ext4_has_metadata_csum(sb))
		return;

	es->s_checksum = ext4_superblock_csum(sb, es);
}
294
295 ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
296                                struct ext4_group_desc *bg)
297 {
298         return le32_to_cpu(bg->bg_block_bitmap_lo) |
299                 (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
300                  (ext4_fsblk_t)le32_to_cpu(bg->bg_block_bitmap_hi) << 32 : 0);
301 }
302
303 ext4_fsblk_t ext4_inode_bitmap(struct super_block *sb,
304                                struct ext4_group_desc *bg)
305 {
306         return le32_to_cpu(bg->bg_inode_bitmap_lo) |
307                 (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
308                  (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_bitmap_hi) << 32 : 0);
309 }
310
311 ext4_fsblk_t ext4_inode_table(struct super_block *sb,
312                               struct ext4_group_desc *bg)
313 {
314         return le32_to_cpu(bg->bg_inode_table_lo) |
315                 (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
316                  (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_table_hi) << 32 : 0);
317 }
318
319 __u32 ext4_free_group_clusters(struct super_block *sb,
320                                struct ext4_group_desc *bg)
321 {
322         return le16_to_cpu(bg->bg_free_blocks_count_lo) |
323                 (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
324                  (__u32)le16_to_cpu(bg->bg_free_blocks_count_hi) << 16 : 0);
325 }
326
327 __u32 ext4_free_inodes_count(struct super_block *sb,
328                               struct ext4_group_desc *bg)
329 {
330         return le16_to_cpu(bg->bg_free_inodes_count_lo) |
331                 (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
332                  (__u32)le16_to_cpu(bg->bg_free_inodes_count_hi) << 16 : 0);
333 }
334
335 __u32 ext4_used_dirs_count(struct super_block *sb,
336                               struct ext4_group_desc *bg)
337 {
338         return le16_to_cpu(bg->bg_used_dirs_count_lo) |
339                 (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
340                  (__u32)le16_to_cpu(bg->bg_used_dirs_count_hi) << 16 : 0);
341 }
342
343 __u32 ext4_itable_unused_count(struct super_block *sb,
344                               struct ext4_group_desc *bg)
345 {
346         return le16_to_cpu(bg->bg_itable_unused_lo) |
347                 (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
348                  (__u32)le16_to_cpu(bg->bg_itable_unused_hi) << 16 : 0);
349 }
350
/* Store the block-bitmap location, splitting into lo/hi on 64-bit layouts. */
void ext4_block_bitmap_set(struct super_block *sb,
			   struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_block_bitmap_lo = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_block_bitmap_hi = cpu_to_le32(blk >> 32);
}
358
/* Store the inode-bitmap location, splitting into lo/hi on 64-bit layouts. */
void ext4_inode_bitmap_set(struct super_block *sb,
			   struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_inode_bitmap_lo  = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_inode_bitmap_hi = cpu_to_le32(blk >> 32);
}
366
/* Store the inode-table start block, splitting into lo/hi on 64-bit layouts. */
void ext4_inode_table_set(struct super_block *sb,
			  struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_inode_table_lo = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_inode_table_hi = cpu_to_le32(blk >> 32);
}
374
/* Store the free-cluster count, splitting into lo/hi on 64-bit layouts. */
void ext4_free_group_clusters_set(struct super_block *sb,
				  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_free_blocks_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_free_blocks_count_hi = cpu_to_le16(count >> 16);
}
382
/* Store the free-inode count, splitting into lo/hi on 64-bit layouts. */
void ext4_free_inodes_set(struct super_block *sb,
			  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_free_inodes_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_free_inodes_count_hi = cpu_to_le16(count >> 16);
}
390
/* Store the used-directories count, splitting into lo/hi on 64-bit layouts. */
void ext4_used_dirs_set(struct super_block *sb,
			  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_used_dirs_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_used_dirs_count_hi = cpu_to_le16(count >> 16);
}
398
/* Store the unused-inode-table count, splitting into lo/hi on 64-bit layouts. */
void ext4_itable_unused_set(struct super_block *sb,
			  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_itable_unused_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_itable_unused_hi = cpu_to_le16(count >> 16);
}
406
/*
 * Store a timestamp split across a 32-bit little-endian low word and an
 * 8-bit high byte (40 usable bits).  Values outside 0..2^40-1 are
 * clamped to the representable range.
 */
static void __ext4_update_tstamp(__le32 *lo, __u8 *hi, time64_t now)
{
	now = clamp_val(now, 0, (1ull << 40) - 1);

	*lo = cpu_to_le32(lower_32_bits(now));
	*hi = upper_32_bits(now);
}
414
/* Reassemble a 40-bit split timestamp (lo word + hi byte) into time64_t. */
static time64_t __ext4_get_tstamp(__le32 *lo, __u8 *hi)
{
	return ((time64_t)(*hi) << 32) + le32_to_cpu(*lo);
}
/* Record the current wall-clock time into a split superblock tstamp field. */
#define ext4_update_tstamp(es, tstamp) \
	__ext4_update_tstamp(&(es)->tstamp, &(es)->tstamp ## _hi, \
			     ktime_get_real_seconds())
/* Read back a split superblock tstamp field as a time64_t. */
#define ext4_get_tstamp(es, tstamp) \
	__ext4_get_tstamp(&(es)->tstamp, &(es)->tstamp ## _hi)
424
/*
 * The del_gendisk() function uninitializes the disk-specific data
 * structures, including the bdi structure, without telling anyone
 * else.  Once this happens, any attempt to call mark_buffer_dirty()
 * (for example, by ext4_commit_super), will cause a kernel OOPS.
 * This is a kludge to prevent these oops until we can put in a proper
 * hook in del_gendisk() to inform the VFS and file system layers.
 */
static int block_device_ejected(struct super_block *sb)
{
	struct inode *bd_inode = sb->s_bdev->bd_inode;
	struct backing_dev_info *bdi = inode_to_bdi(bd_inode);

	/* A torn-down bdi has no device attached. */
	return bdi->dev == NULL;
}
440
/*
 * jbd2 commit callback: runs after a transaction commits.  First hand
 * back data blocks freed in this transaction, then invoke every
 * ext4_journal_cb_entry queued on the transaction's private list.
 */
static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
{
	struct super_block		*sb = journal->j_private;
	struct ext4_sb_info		*sbi = EXT4_SB(sb);
	int				error = is_journal_aborted(journal);
	struct ext4_journal_cb_entry	*jce;

	BUG_ON(txn->t_state == T_FINISHED);

	ext4_process_freed_data(sb, txn->t_tid);

	spin_lock(&sbi->s_md_lock);
	while (!list_empty(&txn->t_private_list)) {
		jce = list_entry(txn->t_private_list.next,
				 struct ext4_journal_cb_entry, jce_list);
		list_del_init(&jce->jce_list);
		/* Drop the lock around the callback invocation; the entry
		 * has already been unlinked, so concurrent list walkers
		 * can't see it. */
		spin_unlock(&sbi->s_md_lock);
		jce->jce_func(sb, jce, error);
		spin_lock(&sbi->s_md_lock);
	}
	spin_unlock(&sbi->s_md_lock);
}
463
/*
 * This writepage callback for write_cache_pages()
 * takes care of a few cases after page cleaning.
 *
 * write_cache_pages() already checks for dirty pages
 * and calls clear_page_dirty_for_io(), which we want,
 * to write protect the pages.
 *
 * However, we may have to redirty a page (see below.)
 *
 * Note that no actual writeback is done here: we always return
 * AOP_WRITEPAGE_ACTIVATE so write_cache_pages() leaves the page
 * (possibly redirtied) alone.
 */
static int ext4_journalled_writepage_callback(struct page *page,
					      struct writeback_control *wbc,
					      void *data)
{
	/* The committing transaction, passed through from the caller. */
	transaction_t *transaction = (transaction_t *) data;
	struct buffer_head *bh, *head;
	struct journal_head *jh;

	bh = head = page_buffers(page);
	do {
		/*
		 * We have to redirty a page in these cases:
		 * 1) If buffer is dirty, it means the page was dirty because it
		 * contains a buffer that needs checkpointing. So the dirty bit
		 * needs to be preserved so that checkpointing writes the buffer
		 * properly.
		 * 2) If buffer is not part of the committing transaction
		 * (we may have just accidentally come across this buffer because
		 * inode range tracking is not exact) or if the currently running
		 * transaction already contains this buffer as well, dirty bit
		 * needs to be preserved so that the buffer gets writeprotected
		 * properly on running transaction's commit.
		 */
		jh = bh2jh(bh);
		if (buffer_dirty(bh) ||
		    (jh && (jh->b_transaction != transaction ||
			    jh->b_next_transaction))) {
			redirty_page_for_writepage(wbc, page);
			goto out;
		}
	} while ((bh = bh->b_this_page) != head);

out:
	return AOP_WRITEPAGE_ACTIVATE;
}
509
/*
 * Write-protect the dirty range of a data=journal inode for commit:
 * walk the dirty pages in [i_dirty_start, i_dirty_end] through
 * write_cache_pages() with our callback, which cleans or redirties
 * pages as appropriate without submitting I/O.
 */
static int ext4_journalled_submit_inode_data_buffers(struct jbd2_inode *jinode)
{
	struct address_space *mapping = jinode->i_vfs_inode->i_mapping;
	struct writeback_control wbc = {
		.sync_mode =  WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = jinode->i_dirty_start,
		.range_end = jinode->i_dirty_end,
	};

	return write_cache_pages(mapping, &wbc,
				 ext4_journalled_writepage_callback,
				 jinode->i_transaction);
}
524
525 static int ext4_journal_submit_inode_data_buffers(struct jbd2_inode *jinode)
526 {
527         int ret;
528
529         if (ext4_should_journal_data(jinode->i_vfs_inode))
530                 ret = ext4_journalled_submit_inode_data_buffers(jinode);
531         else
532                 ret = jbd2_journal_submit_inode_data_buffers(jinode);
533
534         return ret;
535 }
536
537 static int ext4_journal_finish_inode_data_buffers(struct jbd2_inode *jinode)
538 {
539         int ret = 0;
540
541         if (!ext4_should_journal_data(jinode->i_vfs_inode))
542                 ret = jbd2_journal_finish_inode_data_buffers(jinode);
543
544         return ret;
545 }
546
547 static bool system_going_down(void)
548 {
549         return system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF
550                 || system_state == SYSTEM_RESTART;
551 }
552
/* One mapping from an in-kernel errno to its on-disk EXT4_ERR_* code. */
struct ext4_err_translation {
	int code;	/* EXT4_ERR_* value stored in the superblock */
	int errno;	/* corresponding kernel errno */
};

/* Build a table entry; relies on EXT4_ERR_<name> mirroring errno <name>. */
#define EXT4_ERR_TRANSLATE(err) { .code = EXT4_ERR_##err, .errno = err }
559
/* Errnos we know how to persist in the superblock's error fields;
 * anything else becomes EXT4_ERR_UNKNOWN (see ext4_errno_to_code()). */
static struct ext4_err_translation err_translation[] = {
	EXT4_ERR_TRANSLATE(EIO),
	EXT4_ERR_TRANSLATE(ENOMEM),
	EXT4_ERR_TRANSLATE(EFSBADCRC),
	EXT4_ERR_TRANSLATE(EFSCORRUPTED),
	EXT4_ERR_TRANSLATE(ENOSPC),
	EXT4_ERR_TRANSLATE(ENOKEY),
	EXT4_ERR_TRANSLATE(EROFS),
	EXT4_ERR_TRANSLATE(EFBIG),
	EXT4_ERR_TRANSLATE(EEXIST),
	EXT4_ERR_TRANSLATE(ERANGE),
	EXT4_ERR_TRANSLATE(EOVERFLOW),
	EXT4_ERR_TRANSLATE(EBUSY),
	EXT4_ERR_TRANSLATE(ENOTDIR),
	EXT4_ERR_TRANSLATE(ENOTEMPTY),
	EXT4_ERR_TRANSLATE(ESHUTDOWN),
	EXT4_ERR_TRANSLATE(EFAULT),
};
578
579 static int ext4_errno_to_code(int errno)
580 {
581         int i;
582
583         for (i = 0; i < ARRAY_SIZE(err_translation); i++)
584                 if (err_translation[i].errno == errno)
585                         return err_translation[i].code;
586         return EXT4_ERR_UNKNOWN;
587 }
588
/*
 * Record details of a filesystem error in the in-memory superblock
 * info, updating the "last error" fields unconditionally and the
 * "first error" fields only the first time.  The data is flushed to the
 * on-disk superblock later (see ext4_update_super()).
 */
static void save_error_info(struct super_block *sb, int error,
			    __u32 ino, __u64 block,
			    const char *func, unsigned int line)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/* We default to EFSCORRUPTED error... */
	if (error == 0)
		error = EFSCORRUPTED;

	spin_lock(&sbi->s_error_lock);
	sbi->s_add_error_count++;
	sbi->s_last_error_code = error;
	sbi->s_last_error_line = line;
	sbi->s_last_error_ino = ino;
	sbi->s_last_error_block = block;
	sbi->s_last_error_func = func;
	sbi->s_last_error_time = ktime_get_real_seconds();
	/* A zero first_error_time means no error was recorded before. */
	if (!sbi->s_first_error_time) {
		sbi->s_first_error_code = error;
		sbi->s_first_error_line = line;
		sbi->s_first_error_ino = ino;
		sbi->s_first_error_block = block;
		sbi->s_first_error_func = func;
		sbi->s_first_error_time = sbi->s_last_error_time;
	}
	spin_unlock(&sbi->s_error_lock);
}
617
/* Deal with the reporting of failure conditions on a filesystem such as
 * inconsistencies detected or read IO failures.
 *
 * On ext2, we can store the error state of the filesystem in the
 * superblock.  That is not possible on ext4, because we may have other
 * write ordering constraints on the superblock which prevent us from
 * writing it out straight away; and given that the journal is about to
 * be aborted, we can't rely on the current, or future, transactions to
 * write out the superblock safely.
 *
 * We'll just use the jbd2_journal_abort() error code to record an error in
 * the journal instead.  On recovery, the journal will complain about
 * that error until we've noted it down and cleared it.
 *
 * If force_ro is set, we unconditionally force the filesystem into an
 * ABORT|READONLY state, unless the error response on the fs has been set to
 * panic in which case we take the easy way out and panic immediately. This is
 * used to deal with unrecoverable failures such as journal IO errors or ENOMEM
 * at a critical moment in log management.
 */
static void ext4_handle_error(struct super_block *sb, bool force_ro, int error,
			      __u32 ino, __u64 block,
			      const char *func, unsigned int line)
{
	journal_t *journal = EXT4_SB(sb)->s_journal;
	/* errors=continue lets the fs keep running unless the caller forces RO. */
	bool continue_fs = !force_ro && test_opt(sb, ERRORS_CONT);

	EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
	if (test_opt(sb, WARN_ON_ERROR))
		WARN_ON_ONCE(1);

	/* Not continuing: abort the journal so no further changes commit. */
	if (!continue_fs && !sb_rdonly(sb)) {
		ext4_set_mount_flag(sb, EXT4_MF_FS_ABORTED);
		if (journal)
			jbd2_journal_abort(journal, -EIO);
	}

	if (!bdev_read_only(sb->s_bdev)) {
		save_error_info(sb, error, ino, block, func, line);
		/*
		 * In case the fs should keep running, we need to writeout
		 * superblock through the journal. Due to lock ordering
		 * constraints, it may not be safe to do it right here so we
		 * defer superblock flushing to a workqueue.
		 */
		if (continue_fs)
			schedule_work(&EXT4_SB(sb)->s_error_work);
		else
			ext4_commit_super(sb);
	}

	/*
	 * We force ERRORS_RO behavior when system is rebooting. Otherwise we
	 * could panic during 'reboot -f' as the underlying device got already
	 * disabled.
	 */
	if (test_opt(sb, ERRORS_PANIC) && !system_going_down()) {
		panic("EXT4-fs (device %s): panic forced after error\n",
			sb->s_id);
	}

	if (sb_rdonly(sb) || continue_fs)
		return;

	ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
	/*
	 * Make sure updated value of ->s_mount_flags will be visible before
	 * ->s_flags update
	 */
	smp_wmb();
	sb->s_flags |= SB_RDONLY;
}
690
/*
 * Deferred work (scheduled from ext4_handle_error()) that flushes the
 * stashed error information to the on-disk superblock, preferably
 * through the journal, falling back to a direct write.
 */
static void flush_stashed_error_work(struct work_struct *work)
{
	struct ext4_sb_info *sbi = container_of(work, struct ext4_sb_info,
						s_error_work);
	journal_t *journal = sbi->s_journal;
	handle_t *handle;

	/*
	 * If the journal is still running, we have to write out superblock
	 * through the journal to avoid collisions of other journalled sb
	 * updates.
	 *
	 * We use directly jbd2 functions here to avoid recursing back into
	 * ext4 error handling code during handling of previous errors.
	 */
	if (!sb_rdonly(sbi->s_sb) && journal) {
		struct buffer_head *sbh = sbi->s_sbh;
		/* One buffer credit: only the superblock is modified. */
		handle = jbd2_journal_start(journal, 1);
		if (IS_ERR(handle))
			goto write_directly;
		if (jbd2_journal_get_write_access(handle, sbh)) {
			jbd2_journal_stop(handle);
			goto write_directly;
		}
		ext4_update_super(sbi->s_sb);
		/* Clear a stale write error so the buffer can be redirtied. */
		if (buffer_write_io_error(sbh) || !buffer_uptodate(sbh)) {
			ext4_msg(sbi->s_sb, KERN_ERR, "previous I/O error to "
				 "superblock detected");
			clear_buffer_write_io_error(sbh);
			set_buffer_uptodate(sbh);
		}

		if (jbd2_journal_dirty_metadata(handle, sbh)) {
			jbd2_journal_stop(handle);
			goto write_directly;
		}
		jbd2_journal_stop(handle);
		ext4_notify_error_sysfs(sbi);
		return;
	}
write_directly:
	/*
	 * Write through journal failed. Write sb directly to get error info
	 * out and hope for the best.
	 */
	ext4_commit_super(sbi->s_sb);
	ext4_notify_error_sysfs(sbi);
}
739
/* Rate-limit error message output using the per-superblock state;
 * evaluates true when a message should be emitted now. */
#define ext4_error_ratelimit(sb)					\
		___ratelimit(&(EXT4_SB(sb)->s_err_ratelimit_state),	\
			     "EXT4-fs error")
743
/*
 * Report a filesystem-level error: log a rate-limited message with the
 * caller's location and then invoke the error policy via
 * ext4_handle_error().  No-op after a forced shutdown.
 */
void __ext4_error(struct super_block *sb, const char *function,
		  unsigned int line, bool force_ro, int error, __u64 block,
		  const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
		return;

	trace_ext4_error(sb, function, line);
	if (ext4_error_ratelimit(sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		printk(KERN_CRIT
		       "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n",
		       sb->s_id, function, line, current->comm, &vaf);
		va_end(args);
	}
	/* ino == 0: this error is not tied to a particular inode. */
	ext4_handle_error(sb, force_ro, error, 0, block, function, line);
}
766
/*
 * Report an error tied to a specific inode (and optionally a block):
 * log a rate-limited message including the inode number, then run the
 * error policy.  No-op after a forced shutdown.
 */
void __ext4_error_inode(struct inode *inode, const char *function,
			unsigned int line, ext4_fsblk_t block, int error,
			const char *fmt, ...)
{
	va_list args;
	struct va_format vaf;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return;

	trace_ext4_error(inode->i_sb, function, line);
	if (ext4_error_ratelimit(inode->i_sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		/* block == 0 means "no particular block" and is omitted. */
		if (block)
			printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
			       "inode #%lu: block %llu: comm %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       block, current->comm, &vaf);
		else
			printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
			       "inode #%lu: comm %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       current->comm, &vaf);
		va_end(args);
	}
	ext4_handle_error(inode->i_sb, false, error, inode->i_ino, block,
			  function, line);
}
797
/*
 * Report an error tied to an open file: like __ext4_error_inode() but
 * also logs the file's path (best effort; "(unknown)" if it cannot be
 * resolved into the local buffer).  Always reported as EFSCORRUPTED.
 */
void __ext4_error_file(struct file *file, const char *function,
		       unsigned int line, ext4_fsblk_t block,
		       const char *fmt, ...)
{
	va_list args;
	struct va_format vaf;
	struct inode *inode = file_inode(file);
	char pathname[80], *path;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return;

	trace_ext4_error(inode->i_sb, function, line);
	if (ext4_error_ratelimit(inode->i_sb)) {
		path = file_path(file, pathname, sizeof(pathname));
		if (IS_ERR(path))
			path = "(unknown)";
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		/* block == 0 means "no particular block" and is omitted. */
		if (block)
			printk(KERN_CRIT
			       "EXT4-fs error (device %s): %s:%d: inode #%lu: "
			       "block %llu: comm %s: path %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       block, current->comm, path, &vaf);
		else
			printk(KERN_CRIT
			       "EXT4-fs error (device %s): %s:%d: inode #%lu: "
			       "comm %s: path %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       current->comm, path, &vaf);
		va_end(args);
	}
	ext4_handle_error(inode->i_sb, false, EFSCORRUPTED, inode->i_ino, block,
			  function, line);
}
835
836 const char *ext4_decode_error(struct super_block *sb, int errno,
837                               char nbuf[16])
838 {
839         char *errstr = NULL;
840
841         switch (errno) {
842         case -EFSCORRUPTED:
843                 errstr = "Corrupt filesystem";
844                 break;
845         case -EFSBADCRC:
846                 errstr = "Filesystem failed CRC";
847                 break;
848         case -EIO:
849                 errstr = "IO failure";
850                 break;
851         case -ENOMEM:
852                 errstr = "Out of memory";
853                 break;
854         case -EROFS:
855                 if (!sb || (EXT4_SB(sb)->s_journal &&
856                             EXT4_SB(sb)->s_journal->j_flags & JBD2_ABORT))
857                         errstr = "Journal has aborted";
858                 else
859                         errstr = "Readonly filesystem";
860                 break;
861         default:
862                 /* If the caller passed in an extra buffer for unknown
863                  * errors, textualise them now.  Else we just return
864                  * NULL. */
865                 if (nbuf) {
866                         /* Check for truncated error codes... */
867                         if (snprintf(nbuf, 16, "error %d", -errno) >= 0)
868                                 errstr = nbuf;
869                 }
870                 break;
871         }
872
873         return errstr;
874 }
875
/* __ext4_std_error decodes expected errors from journaling functions
 * automatically and invokes the appropriate error response.  */

void __ext4_std_error(struct super_block *sb, const char *function,
                      unsigned int line, int errno)
{
        char nbuf[16];
        const char *errstr;

        /* Nothing to do once the filesystem has been forcibly shut down. */
        if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
                return;

        /* Special case: if the error is EROFS, and we're not already
         * inside a transaction, then there's really no point in logging
         * an error. */
        if (errno == -EROFS && journal_current_handle() == NULL && sb_rdonly(sb))
                return;

        if (ext4_error_ratelimit(sb)) {
                errstr = ext4_decode_error(sb, errno, nbuf);
                printk(KERN_CRIT "EXT4-fs error (device %s) in %s:%d: %s\n",
                       sb->s_id, function, line, errstr);
        }

        /* Negate to hand ext4_handle_error() a positive error code. */
        ext4_handle_error(sb, false, -errno, 0, 0, function, line);
}
902
/*
 * Print a ratelimited, per-superblock message with the given printk
 * @prefix (a KERN_* level string).  Also bumps s_msg_count for sysfs
 * accounting, even when the message itself is suppressed.
 */
void __ext4_msg(struct super_block *sb,
                const char *prefix, const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        atomic_inc(&EXT4_SB(sb)->s_msg_count);
        if (!___ratelimit(&(EXT4_SB(sb)->s_msg_ratelimit_state), "EXT4-fs"))
                return;

        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;
        printk("%sEXT4-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
        va_end(args);
}
919
/*
 * Count a warning (for sysfs) and return non-zero if it may be printed
 * under the per-superblock warning ratelimit.
 */
static int ext4_warning_ratelimit(struct super_block *sb)
{
        atomic_inc(&EXT4_SB(sb)->s_warning_count);
        return ___ratelimit(&(EXT4_SB(sb)->s_warning_ratelimit_state),
                            "EXT4-fs warning");
}
926
/*
 * Print a ratelimited warning for this filesystem, tagged with the
 * reporting function and line.  Unlike the error paths, warnings do not
 * alter filesystem state.
 */
void __ext4_warning(struct super_block *sb, const char *function,
                    unsigned int line, const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        if (!ext4_warning_ratelimit(sb))
                return;

        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;
        printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: %pV\n",
               sb->s_id, function, line, &vaf);
        va_end(args);
}
943
/*
 * Like __ext4_warning(), but additionally identifies the inode and the
 * current task in the message.
 */
void __ext4_warning_inode(const struct inode *inode, const char *function,
                          unsigned int line, const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        if (!ext4_warning_ratelimit(inode->i_sb))
                return;

        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;
        printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: "
               "inode #%lu: comm %s: %pV\n", inode->i_sb->s_id,
               function, line, inode->i_ino, current->comm, &vaf);
        va_end(args);
}
961
/*
 * Report an error found while holding a block group's bitlock.  With
 * errors=continue the error info is saved and written back from a
 * workqueue without dropping the lock; otherwise the group lock is
 * dropped around ext4_handle_error() (which may abort the journal or
 * remount read-only) and re-taken before returning, as the sparse
 * annotations below indicate.
 */
void __ext4_grp_locked_error(const char *function, unsigned int line,
                             struct super_block *sb, ext4_group_t grp,
                             unsigned long ino, ext4_fsblk_t block,
                             const char *fmt, ...)
__releases(bitlock)
__acquires(bitlock)
{
        struct va_format vaf;
        va_list args;

        if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
                return;

        trace_ext4_error(sb, function, line);
        if (ext4_error_ratelimit(sb)) {
                va_start(args, fmt);
                vaf.fmt = fmt;
                vaf.va = &args;
                printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u, ",
                       sb->s_id, function, line, grp);
                if (ino)
                        printk(KERN_CONT "inode %lu: ", ino);
                if (block)
                        printk(KERN_CONT "block %llu:",
                               (unsigned long long) block);
                printk(KERN_CONT "%pV\n", &vaf);
                va_end(args);
        }

        if (test_opt(sb, ERRORS_CONT)) {
                if (test_opt(sb, WARN_ON_ERROR))
                        WARN_ON_ONCE(1);
                EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
                /* Cannot write the superblock from under the bitlock;
                 * punt the on-disk update to the error workqueue. */
                if (!bdev_read_only(sb->s_bdev)) {
                        save_error_info(sb, EFSCORRUPTED, ino, block, function,
                                        line);
                        schedule_work(&EXT4_SB(sb)->s_error_work);
                }
                return;
        }
        ext4_unlock_group(sb, grp);
        ext4_handle_error(sb, false, EFSCORRUPTED, ino, block, function, line);
        /*
         * We only get here in the ERRORS_RO case; relocking the group
         * may be dangerous, but nothing bad will happen since the
         * filesystem will have already been marked read/only and the
         * journal has been aborted.  Callers who might want to
         * distinguish between the ERRORS_CONT and ERRORS_RO case, and
         * perhaps return more aggressively from the ext4 function in
         * question with a more appropriate error code, should check the
         * mount options themselves.
         */
        ext4_lock_group(sb, grp);
        return;
}
1018
/*
 * Mark a group's block and/or inode bitmap as corrupted (per @flags).
 * On the first corruption of each bitmap, the group's free counts are
 * subtracted from the global percpu counters so the corrupted space is
 * no longer advertised as available.  The test-and-set makes repeated
 * calls for the same group idempotent.
 */
void ext4_mark_group_bitmap_corrupted(struct super_block *sb,
                                     ext4_group_t group,
                                     unsigned int flags)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct ext4_group_info *grp = ext4_get_group_info(sb, group);
        struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL);
        int ret;

        if (flags & EXT4_GROUP_INFO_BBITMAP_CORRUPT) {
                ret = ext4_test_and_set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT,
                                            &grp->bb_state);
                if (!ret)
                        percpu_counter_sub(&sbi->s_freeclusters_counter,
                                           grp->bb_free);
        }

        if (flags & EXT4_GROUP_INFO_IBITMAP_CORRUPT) {
                ret = ext4_test_and_set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT,
                                            &grp->bb_state);
                if (!ret && gdp) {
                        int count;

                        count = ext4_free_inodes_count(sb, gdp);
                        percpu_counter_sub(&sbi->s_freeinodes_counter,
                                           count);
                }
        }
}
1048
/*
 * Upgrade an old (GOOD_OLD_REV) superblock to EXT4_DYNAMIC_REV when a
 * feature flag requires it.  No-op if the revision is already dynamic.
 * Only the fields that had fixed values in the old revision are filled
 * in; everything else is left for e2fsck.
 */
void ext4_update_dynamic_rev(struct super_block *sb)
{
        struct ext4_super_block *es = EXT4_SB(sb)->s_es;

        if (le32_to_cpu(es->s_rev_level) > EXT4_GOOD_OLD_REV)
                return;

        ext4_warning(sb,
                     "updating to rev %d because of new feature flag, "
                     "running e2fsck is recommended",
                     EXT4_DYNAMIC_REV);

        es->s_first_ino = cpu_to_le32(EXT4_GOOD_OLD_FIRST_INO);
        es->s_inode_size = cpu_to_le16(EXT4_GOOD_OLD_INODE_SIZE);
        es->s_rev_level = cpu_to_le32(EXT4_DYNAMIC_REV);
        /* leave es->s_feature_*compat flags alone */
        /* es->s_uuid will be set by e2fsck if empty */

        /*
         * The rest of the superblock fields should be zero, and if not it
         * means they are likely already in use, so leave them alone.  We
         * can leave it up to e2fsck to clean up any inconsistencies there.
         */
}
1073
1074 /*
1075  * Open the external journal device
1076  */
1077 static struct block_device *ext4_blkdev_get(dev_t dev, struct super_block *sb)
1078 {
1079         struct block_device *bdev;
1080
1081         bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb);
1082         if (IS_ERR(bdev))
1083                 goto fail;
1084         return bdev;
1085
1086 fail:
1087         ext4_msg(sb, KERN_ERR,
1088                  "failed to open journal device unknown-block(%u,%u) %ld",
1089                  MAJOR(dev), MINOR(dev), PTR_ERR(bdev));
1090         return NULL;
1091 }
1092
1093 /*
1094  * Release the journal device
1095  */
/* Drop the exclusive reference taken by ext4_blkdev_get(). */
static void ext4_blkdev_put(struct block_device *bdev)
{
        blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}
1100
1101 static void ext4_blkdev_remove(struct ext4_sb_info *sbi)
1102 {
1103         struct block_device *bdev;
1104         bdev = sbi->s_journal_bdev;
1105         if (bdev) {
1106                 ext4_blkdev_put(bdev);
1107                 sbi->s_journal_bdev = NULL;
1108         }
1109 }
1110
/* Map an s_orphan list node back to its VFS inode. */
static inline struct inode *orphan_list_entry(struct list_head *l)
{
        return &list_entry(l, struct ext4_inode_info, i_orphan)->vfs_inode;
}
1115
/*
 * Debug helper: print the on-disk orphan list head and every inode left
 * on the in-memory orphan list.  Called from ext4_put_super() when the
 * list is unexpectedly non-empty at unmount.
 */
static void dump_orphan_list(struct super_block *sb, struct ext4_sb_info *sbi)
{
        struct list_head *l;

        ext4_msg(sb, KERN_ERR, "sb orphan head is %d",
                 le32_to_cpu(sbi->s_es->s_last_orphan));

        printk(KERN_ERR "sb_info orphan list:\n");
        list_for_each(l, &sbi->s_orphan) {
                struct inode *inode = orphan_list_entry(l);
                printk(KERN_ERR "  "
                       "inode %s:%lu at %p: mode %o, nlink %d, next %d\n",
                       inode->i_sb->s_id, inode->i_ino, inode,
                       inode->i_mode, inode->i_nlink,
                       NEXT_ORPHAN(inode));
        }
}
1133
1134 #ifdef CONFIG_QUOTA
1135 static int ext4_quota_off(struct super_block *sb, int type);
1136
/*
 * Turn off all quota types at unmount time.  Uses ext4's own quota_off
 * so the per-inode quota flags etc. get cleared as well.
 */
static inline void ext4_quota_off_umount(struct super_block *sb)
{
        int type;

        /* Use our quota_off function to clear inode flags etc. */
        for (type = 0; type < EXT4_MAXQUOTAS; type++)
                ext4_quota_off(sb, type);
}
1145
1146 /*
1147  * This is a helper function which is used in the mount/remount
1148  * codepaths (which holds s_umount) to fetch the quota file name.
1149  */
/*
 * Fetch the quota file name for @type.  Safe only while holding
 * s_umount (mount/remount paths), which the lockdep expression checks.
 */
static inline char *get_qf_name(struct super_block *sb,
                                struct ext4_sb_info *sbi,
                                int type)
{
        return rcu_dereference_protected(sbi->s_qf_names[type],
                                         lockdep_is_held(&sb->s_umount));
}
1157 #else
/* No-op stub when quota support is compiled out. */
static inline void ext4_quota_off_umount(struct super_block *sb)
{
}
1161 #endif
1162
/*
 * Tear down the filesystem at unmount.  The ordering below is
 * deliberate: background users (lazy init, error work, writeback
 * conversion, sysfs) are stopped before the journal is destroyed, the
 * journal before the superblock is finalized and written, and in-memory
 * caches/counters are released before the underlying block devices.
 */
static void ext4_put_super(struct super_block *sb)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct ext4_super_block *es = sbi->s_es;
        struct buffer_head **group_desc;
        struct flex_groups **flex_groups;
        int aborted = 0;
        int i, err;

        ext4_unregister_li_request(sb);
        ext4_quota_off_umount(sb);

        flush_work(&sbi->s_error_work);
        destroy_workqueue(sbi->rsv_conversion_wq);

        /*
         * Unregister sysfs before destroying jbd2 journal.
         * Since we could still access attr_journal_task attribute via sysfs
         * path which could have sbi->s_journal->j_task as NULL
         */
        ext4_unregister_sysfs(sb);

        if (sbi->s_journal) {
                aborted = is_journal_aborted(sbi->s_journal);
                err = jbd2_journal_destroy(sbi->s_journal);
                sbi->s_journal = NULL;
                /* An already-aborted journal is expected to fail here. */
                if ((err < 0) && !aborted) {
                        ext4_abort(sb, -err, "Couldn't clean up the journal");
                }
        }

        ext4_es_unregister_shrinker(sbi);
        del_timer_sync(&sbi->s_err_report);
        ext4_release_system_zone(sb);
        ext4_mb_release(sb);
        ext4_ext_release(sb);

        /*
         * Only a clean (non-aborted, writable) unmount may clear the
         * needs-recovery flag and record the final mount state.
         */
        if (!sb_rdonly(sb) && !aborted) {
                ext4_clear_feature_journal_needs_recovery(sb);
                es->s_state = cpu_to_le16(sbi->s_mount_state);
        }
        if (!sb_rdonly(sb))
                ext4_commit_super(sb);

        /* Free RCU-published group descriptor and flex group arrays. */
        rcu_read_lock();
        group_desc = rcu_dereference(sbi->s_group_desc);
        for (i = 0; i < sbi->s_gdb_count; i++)
                brelse(group_desc[i]);
        kvfree(group_desc);
        flex_groups = rcu_dereference(sbi->s_flex_groups);
        if (flex_groups) {
                for (i = 0; i < sbi->s_flex_groups_allocated; i++)
                        kvfree(flex_groups[i]);
                kvfree(flex_groups);
        }
        rcu_read_unlock();
        percpu_counter_destroy(&sbi->s_freeclusters_counter);
        percpu_counter_destroy(&sbi->s_freeinodes_counter);
        percpu_counter_destroy(&sbi->s_dirs_counter);
        percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
        percpu_counter_destroy(&sbi->s_sra_exceeded_retry_limit);
        percpu_free_rwsem(&sbi->s_writepages_rwsem);
#ifdef CONFIG_QUOTA
        for (i = 0; i < EXT4_MAXQUOTAS; i++)
                kfree(get_qf_name(sb, sbi, i));
#endif

        /* Debugging code just in case the in-memory inode orphan list
         * isn't empty.  The on-disk one can be non-empty if we've
         * detected an error and taken the fs readonly, but the
         * in-memory list had better be clean by this point. */
        if (!list_empty(&sbi->s_orphan))
                dump_orphan_list(sb, sbi);
        ASSERT(list_empty(&sbi->s_orphan));

        sync_blockdev(sb->s_bdev);
        invalidate_bdev(sb->s_bdev);
        if (sbi->s_journal_bdev && sbi->s_journal_bdev != sb->s_bdev) {
                /*
                 * Invalidate the journal device's buffers.  We don't want them
                 * floating about in memory - the physical journal device may
                 * hotswapped, and it breaks the `ro-after' testing code.
                 */
                sync_blockdev(sbi->s_journal_bdev);
                invalidate_bdev(sbi->s_journal_bdev);
                ext4_blkdev_remove(sbi);
        }

        ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
        sbi->s_ea_inode_cache = NULL;

        ext4_xattr_destroy_cache(sbi->s_ea_block_cache);
        sbi->s_ea_block_cache = NULL;

        ext4_stop_mmpd(sbi);

        brelse(sbi->s_sbh);
        sb->s_fs_info = NULL;
        /*
         * Now that we are completely done shutting down the
         * superblock, we need to actually destroy the kobject.
         */
        kobject_put(&sbi->s_kobj);
        wait_for_completion(&sbi->s_kobj_unregister);
        if (sbi->s_chksum_driver)
                crypto_free_shash(sbi->s_chksum_driver);
        kfree(sbi->s_blockgroup_lock);
        fs_put_dax(sbi->s_daxdev);
        fscrypt_free_dummy_policy(&sbi->s_dummy_enc_policy);
#ifdef CONFIG_UNICODE
        utf8_unload(sb->s_encoding);
#endif
        kfree(sbi);
}
1277
1278 static struct kmem_cache *ext4_inode_cachep;
1279
1280 /*
1281  * Called inside transaction, so use GFP_NOFS
1282  */
/*
 * Allocate and initialize the per-mount state of an ext4 in-core inode.
 * GFP_NOFS because this can be called from within a transaction.  Fields
 * that must survive across inode lifetimes (e.g. i_orphan) are set up
 * once in init_once(), not here.  Returns NULL on allocation failure.
 */
static struct inode *ext4_alloc_inode(struct super_block *sb)
{
        struct ext4_inode_info *ei;

        ei = kmem_cache_alloc(ext4_inode_cachep, GFP_NOFS);
        if (!ei)
                return NULL;

        inode_set_iversion(&ei->vfs_inode, 1);
        spin_lock_init(&ei->i_raw_lock);
        INIT_LIST_HEAD(&ei->i_prealloc_list);
        atomic_set(&ei->i_prealloc_active, 0);
        spin_lock_init(&ei->i_prealloc_lock);
        ext4_es_init_tree(&ei->i_es_tree);
        rwlock_init(&ei->i_es_lock);
        INIT_LIST_HEAD(&ei->i_es_list);
        ei->i_es_all_nr = 0;
        ei->i_es_shk_nr = 0;
        ei->i_es_shrink_lblk = 0;
        ei->i_reserved_data_blocks = 0;
        spin_lock_init(&(ei->i_block_reservation_lock));
        ext4_init_pending_tree(&ei->i_pending_tree);
#ifdef CONFIG_QUOTA
        ei->i_reserved_quota = 0;
        memset(&ei->i_dquot, 0, sizeof(ei->i_dquot));
#endif
        ei->jinode = NULL;
        INIT_LIST_HEAD(&ei->i_rsv_conversion_list);
        spin_lock_init(&ei->i_completed_io_lock);
        ei->i_sync_tid = 0;
        ei->i_datasync_tid = 0;
        atomic_set(&ei->i_unwritten, 0);
        INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work);
        ext4_fc_init_inode(&ei->vfs_inode);
        mutex_init(&ei->i_fc_lock);
        return &ei->vfs_inode;
}
1320
/*
 * Decide whether an inode with zero refcount should be evicted rather
 * than cached: the generic VFS policy first, then fscrypt's (which may
 * want encrypted inodes dropped once the key is gone).
 */
static int ext4_drop_inode(struct inode *inode)
{
        int drop = generic_drop_inode(inode);

        if (!drop)
                drop = fscrypt_drop_inode(inode);

        trace_ext4_drop_inode(inode, drop);
        return drop;
}
1331
/*
 * Final (RCU-delayed) release of an in-core inode back to the slab.
 * An inode still on the fast-commit list here indicates a bug, so warn
 * before freeing regardless.
 */
static void ext4_free_in_core_inode(struct inode *inode)
{
        fscrypt_free_inode(inode);
        if (!list_empty(&(EXT4_I(inode)->i_fc_list))) {
                pr_warn("%s: inode %ld still in fc list",
                        __func__, inode->i_ino);
        }
        kmem_cache_free(ext4_inode_cachep, EXT4_I(inode));
}
1341
/*
 * Sanity check at inode destruction: an inode must not still be on the
 * orphan list.  If it is, dump the ext4_inode_info and a stack trace to
 * aid debugging; the actual freeing happens in ext4_free_in_core_inode().
 */
static void ext4_destroy_inode(struct inode *inode)
{
        if (!list_empty(&(EXT4_I(inode)->i_orphan))) {
                ext4_msg(inode->i_sb, KERN_ERR,
                         "Inode %lu (%p): orphan list check failed!",
                         inode->i_ino, EXT4_I(inode));
                print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 4,
                                EXT4_I(inode), sizeof(struct ext4_inode_info),
                                true);
                dump_stack();
        }
}
1354
/*
 * Slab constructor: runs once per slab object, not on every allocation.
 * Only state that is guaranteed to be restored to this form before the
 * object is freed (empty orphan list, unlocked semaphores) belongs here;
 * per-use initialization lives in ext4_alloc_inode().
 */
static void init_once(void *foo)
{
        struct ext4_inode_info *ei = (struct ext4_inode_info *) foo;

        INIT_LIST_HEAD(&ei->i_orphan);
        init_rwsem(&ei->xattr_sem);
        init_rwsem(&ei->i_data_sem);
        init_rwsem(&ei->i_mmap_sem);
        inode_init_once(&ei->vfs_inode);
        ext4_fc_init_inode(&ei->vfs_inode);
}
1366
1367 static int __init init_inodecache(void)
1368 {
1369         ext4_inode_cachep = kmem_cache_create_usercopy("ext4_inode_cache",
1370                                 sizeof(struct ext4_inode_info), 0,
1371                                 (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|
1372                                         SLAB_ACCOUNT),
1373                                 offsetof(struct ext4_inode_info, i_data),
1374                                 sizeof_field(struct ext4_inode_info, i_data),
1375                                 init_once);
1376         if (ext4_inode_cachep == NULL)
1377                 return -ENOMEM;
1378         return 0;
1379 }
1380
/* Destroy the ext4 inode slab cache at module unload. */
static void destroy_inodecache(void)
{
        /*
         * Make sure all delayed rcu free inodes are flushed before we
         * destroy cache.
         */
        rcu_barrier();
        kmem_cache_destroy(ext4_inode_cachep);
}
1390
/*
 * Detach all ext4 state from an inode being evicted: fast-commit
 * tracking, buffers, preallocations, the extent-status tree, quota, the
 * jbd2 inode, and crypto/verity state.  Ordering matters: clear_inode()
 * must run before the allocation/extent state is discarded.
 */
void ext4_clear_inode(struct inode *inode)
{
        ext4_fc_del(inode);
        invalidate_inode_buffers(inode);
        clear_inode(inode);
        ext4_discard_preallocations(inode, 0);
        ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
        dquot_drop(inode);
        if (EXT4_I(inode)->jinode) {
                jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode),
                                               EXT4_I(inode)->jinode);
                jbd2_free_inode(EXT4_I(inode)->jinode);
                EXT4_I(inode)->jinode = NULL;
        }
        fscrypt_put_encryption_info(inode);
        fsverity_cleanup_inode(inode);
}
1408
/*
 * Look up an inode from an NFS file handle.  EXT4_IGET_HANDLE rejects
 * obviously-bogus inode numbers; a generation mismatch means the handle
 * is stale (the inode number was reused).  Returns the inode or an
 * ERR_PTR (-ESTALE on generation mismatch).
 */
static struct inode *ext4_nfs_get_inode(struct super_block *sb,
                                        u64 ino, u32 generation)
{
        struct inode *inode;

        /*
         * Currently we don't know the generation for parent directory, so
         * a generation of 0 means "accept any"
         */
        inode = ext4_iget(sb, ino, EXT4_IGET_HANDLE);
        if (IS_ERR(inode))
                return ERR_CAST(inode);
        if (generation && inode->i_generation != generation) {
                iput(inode);
                return ERR_PTR(-ESTALE);
        }

        return inode;
}
1428
/* NFS export op: decode a file handle into the target dentry. */
static struct dentry *ext4_fh_to_dentry(struct super_block *sb, struct fid *fid,
                                        int fh_len, int fh_type)
{
        return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
                                    ext4_nfs_get_inode);
}
1435
/* NFS export op: decode a file handle into the parent dentry. */
static struct dentry *ext4_fh_to_parent(struct super_block *sb, struct fid *fid,
                                        int fh_len, int fh_type)
{
        return generic_fh_to_parent(sb, fid, fh_len, fh_type,
                                    ext4_nfs_get_inode);
}
1442
/*
 * NFS export op: synchronously flush an inode's metadata so the server
 * can guarantee stability to the client.
 */
static int ext4_nfs_commit_metadata(struct inode *inode)
{
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_ALL
        };

        trace_ext4_nfs_commit_metadata(inode);
        return ext4_write_inode(inode, &wbc);
}
1452
1453 #ifdef CONFIG_FS_ENCRYPTION
/* fscrypt op: read the inode's encryption context xattr into @ctx. */
static int ext4_get_context(struct inode *inode, void *ctx, size_t len)
{
        return ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION,
                                 EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, ctx, len);
}
1459
/*
 * fscrypt op: store the encryption context xattr on an inode and flip it
 * to the encrypted state.  If @fs_data carries a journal handle, the
 * context is written inside the caller's (inode-creation) transaction;
 * otherwise a fresh transaction is started here, with ENOSPC retry.
 * Returns 0 on success or a negative errno.
 */
static int ext4_set_context(struct inode *inode, const void *ctx, size_t len,
                                                        void *fs_data)
{
        handle_t *handle = fs_data;
        int res, res2, credits, retries = 0;

        /*
         * Encrypting the root directory is not allowed because e2fsck expects
         * lost+found to exist and be unencrypted, and encrypting the root
         * directory would imply encrypting the lost+found directory as well as
         * the filename "lost+found" itself.
         */
        if (inode->i_ino == EXT4_ROOT_INO)
                return -EPERM;

        if (WARN_ON_ONCE(IS_DAX(inode) && i_size_read(inode)))
                return -EINVAL;

        if (ext4_test_inode_flag(inode, EXT4_INODE_DAX))
                return -EOPNOTSUPP;

        /* Encrypted inodes cannot use inline data; migrate it out first. */
        res = ext4_convert_inline_data(inode);
        if (res)
                return res;

        /*
         * If a journal handle was specified, then the encryption context is
         * being set on a new inode via inheritance and is part of a larger
         * transaction to create the inode.  Otherwise the encryption context is
         * being set on an existing inode in its own transaction.  Only in the
         * latter case should the "retry on ENOSPC" logic be used.
         */

        if (handle) {
                res = ext4_xattr_set_handle(handle, inode,
                                            EXT4_XATTR_INDEX_ENCRYPTION,
                                            EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
                                            ctx, len, 0);
                if (!res) {
                        ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
                        ext4_clear_inode_state(inode,
                                        EXT4_STATE_MAY_INLINE_DATA);
                        /*
                         * Update inode->i_flags - S_ENCRYPTED will be enabled,
                         * S_DAX may be disabled
                         */
                        ext4_set_inode_flags(inode, false);
                }
                return res;
        }

        res = dquot_initialize(inode);
        if (res)
                return res;
retry:
        res = ext4_xattr_set_credits(inode, len, false /* is_create */,
                                     &credits);
        if (res)
                return res;

        handle = ext4_journal_start(inode, EXT4_HT_MISC, credits);
        if (IS_ERR(handle))
                return PTR_ERR(handle);

        res = ext4_xattr_set_handle(handle, inode, EXT4_XATTR_INDEX_ENCRYPTION,
                                    EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
                                    ctx, len, 0);
        if (!res) {
                ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
                /*
                 * Update inode->i_flags - S_ENCRYPTED will be enabled,
                 * S_DAX may be disabled
                 */
                ext4_set_inode_flags(inode, false);
                res = ext4_mark_inode_dirty(handle, inode);
                if (res)
                        EXT4_ERROR_INODE(inode, "Failed to mark inode dirty");
        }
        res2 = ext4_journal_stop(handle);

        if (res == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
                goto retry;
        if (!res)
                res = res2;
        return res;
}
1546
/* fscrypt op: return the test_dummy_encryption mount-option policy, if any. */
static const union fscrypt_policy *ext4_get_dummy_policy(struct super_block *sb)
{
        return EXT4_SB(sb)->s_dummy_enc_policy.policy;
}
1551
/* fscrypt op: report whether the stable_inodes feature flag is set. */
static bool ext4_has_stable_inodes(struct super_block *sb)
{
        return ext4_has_feature_stable_inodes(sb);
}
1556
/*
 * fscrypt op: report the bit widths of inode numbers and logical block
 * numbers, needed by IV_INO_LBLK_* encryption policies.
 */
static void ext4_get_ino_and_lblk_bits(struct super_block *sb,
                                       int *ino_bits_ret, int *lblk_bits_ret)
{
        *ino_bits_ret = 8 * sizeof(EXT4_SB(sb)->s_es->s_inodes_count);
        *lblk_bits_ret = 8 * sizeof(ext4_lblk_t);
}
1563
/* fscrypt operations vector hooked up via fscrypt_set_ops() at mount. */
static const struct fscrypt_operations ext4_cryptops = {
        .key_prefix             = "ext4:",
        .get_context            = ext4_get_context,
        .set_context            = ext4_set_context,
        .get_dummy_policy       = ext4_get_dummy_policy,
        .empty_dir              = ext4_empty_dir,
        .max_namelen            = EXT4_NAME_LEN,
        .has_stable_inodes      = ext4_has_stable_inodes,
        .get_ino_and_lblk_bits  = ext4_get_ino_and_lblk_bits,
};
1574 #endif
1575
1576 #ifdef CONFIG_QUOTA
1577 static const char * const quotatypes[] = INITQFNAMES;
1578 #define QTYPE2NAME(t) (quotatypes[t])
1579
1580 static int ext4_write_dquot(struct dquot *dquot);
1581 static int ext4_acquire_dquot(struct dquot *dquot);
1582 static int ext4_release_dquot(struct dquot *dquot);
1583 static int ext4_mark_dquot_dirty(struct dquot *dquot);
1584 static int ext4_write_info(struct super_block *sb, int type);
1585 static int ext4_quota_on(struct super_block *sb, int type, int format_id,
1586                          const struct path *path);
1587 static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
1588                                size_t len, loff_t off);
1589 static ssize_t ext4_quota_write(struct super_block *sb, int type,
1590                                 const char *data, size_t len, loff_t off);
1591 static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
1592                              unsigned int flags);
1593
/* Return the inode's dquot pointer array for the generic quota code. */
static struct dquot **ext4_get_dquots(struct inode *inode)
{
        return EXT4_I(inode)->i_dquot;
}
1598
/*
 * dquot operations: ext4-specific hooks wrap the generic dquot code so
 * quota updates go through the journal; allocation/destruction and
 * next-id iteration use the generic implementations.
 */
static const struct dquot_operations ext4_quota_operations = {
        .get_reserved_space     = ext4_get_reserved_space,
        .write_dquot            = ext4_write_dquot,
        .acquire_dquot          = ext4_acquire_dquot,
        .release_dquot          = ext4_release_dquot,
        .mark_dirty             = ext4_mark_dquot_dirty,
        .write_info             = ext4_write_info,
        .alloc_dquot            = dquot_alloc,
        .destroy_dquot          = dquot_destroy,
        .get_projid             = ext4_get_projid,
        .get_inode_usage        = ext4_get_inode_usage,
        .get_next_id            = dquot_get_next_id,
};
1612
/*
 * quotactl(2) entry points; only on/off are ext4-specific, the rest are
 * the generic dquot implementations.
 */
static const struct quotactl_ops ext4_qctl_operations = {
	.quota_on	= ext4_quota_on,
	.quota_off	= ext4_quota_off,
	.quota_sync	= dquot_quota_sync,
	.get_state	= dquot_get_state,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.set_dqblk	= dquot_set_dqblk,
	.get_nextdqblk	= dquot_get_next_dqblk,
};
1623 #endif
1624
/* Superblock operations installed for ext4 (also used for ext2/ext3 mounts). */
static const struct super_operations ext4_sops = {
	.alloc_inode	= ext4_alloc_inode,
	.free_inode	= ext4_free_in_core_inode,
	.destroy_inode	= ext4_destroy_inode,
	.write_inode	= ext4_write_inode,
	.dirty_inode	= ext4_dirty_inode,
	.drop_inode	= ext4_drop_inode,
	.evict_inode	= ext4_evict_inode,
	.put_super	= ext4_put_super,
	.sync_fs	= ext4_sync_fs,
	.freeze_fs	= ext4_freeze,
	.unfreeze_fs	= ext4_unfreeze,
	.statfs		= ext4_statfs,
	.remount_fs	= ext4_remount,
	.show_options	= ext4_show_options,
#ifdef CONFIG_QUOTA
	.quota_read	= ext4_quota_read,
	.quota_write	= ext4_quota_write,
	.get_dquots	= ext4_get_dquots,
#endif
};
1646
/* NFS export operations (file handle <-> dentry translation). */
static const struct export_operations ext4_export_ops = {
	.fh_to_dentry = ext4_fh_to_dentry,
	.fh_to_parent = ext4_fh_to_parent,
	.get_parent = ext4_get_parent,
	.commit_metadata = ext4_nfs_commit_metadata,
};
1653
/*
 * Token values for the mount options recognized by ext4; matched against
 * option strings via the "tokens" match_table below.  Opt_removed collects
 * options that are accepted but ignored for backward compatibility.
 */
enum {
	Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
	Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic, Opt_err_ro,
	Opt_nouid32, Opt_debug, Opt_removed,
	Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl,
	Opt_auto_da_alloc, Opt_noauto_da_alloc, Opt_noload,
	Opt_commit, Opt_min_batch_time, Opt_max_batch_time, Opt_journal_dev,
	Opt_journal_path, Opt_journal_checksum, Opt_journal_async_commit,
	Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
	Opt_data_err_abort, Opt_data_err_ignore, Opt_test_dummy_encryption,
	Opt_inlinecrypt,
	Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
	Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
	Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err,
	Opt_usrquota, Opt_grpquota, Opt_prjquota, Opt_i_version,
	Opt_dax, Opt_dax_always, Opt_dax_inode, Opt_dax_never,
	Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_warn_on_error,
	Opt_nowarn_on_error, Opt_mblk_io_submit,
	Opt_lazytime, Opt_nolazytime, Opt_debug_want_extra_isize,
	Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
	Opt_inode_readahead_blks, Opt_journal_ioprio,
	Opt_dioread_nolock, Opt_dioread_lock,
	Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
	Opt_max_dir_size_kb, Opt_nojournal_checksum, Opt_nombcache,
	Opt_no_prefetch_block_bitmaps, Opt_mb_optimize_scan,
#ifdef CONFIG_EXT4_DEBUG
	Opt_fc_debug_max_replay, Opt_fc_debug_force
#endif
};
1683
/*
 * Mount option string patterns, mapping each option (with any "%u"/"%s"
 * argument) to its token.  Must stay in sync with the enum above; the
 * table is terminated by the {Opt_err, NULL} sentinel.
 */
static const match_table_t tokens = {
	{Opt_bsd_df, "bsddf"},
	{Opt_minix_df, "minixdf"},
	{Opt_grpid, "grpid"},
	{Opt_grpid, "bsdgroups"},
	{Opt_nogrpid, "nogrpid"},
	{Opt_nogrpid, "sysvgroups"},
	{Opt_resgid, "resgid=%u"},
	{Opt_resuid, "resuid=%u"},
	{Opt_sb, "sb=%u"},
	{Opt_err_cont, "errors=continue"},
	{Opt_err_panic, "errors=panic"},
	{Opt_err_ro, "errors=remount-ro"},
	{Opt_nouid32, "nouid32"},
	{Opt_debug, "debug"},
	{Opt_removed, "oldalloc"},
	{Opt_removed, "orlov"},
	{Opt_user_xattr, "user_xattr"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_acl, "acl"},
	{Opt_noacl, "noacl"},
	{Opt_noload, "norecovery"},
	{Opt_noload, "noload"},
	{Opt_removed, "nobh"},
	{Opt_removed, "bh"},
	{Opt_commit, "commit=%u"},
	{Opt_min_batch_time, "min_batch_time=%u"},
	{Opt_max_batch_time, "max_batch_time=%u"},
	{Opt_journal_dev, "journal_dev=%u"},
	{Opt_journal_path, "journal_path=%s"},
	{Opt_journal_checksum, "journal_checksum"},
	{Opt_nojournal_checksum, "nojournal_checksum"},
	{Opt_journal_async_commit, "journal_async_commit"},
	{Opt_abort, "abort"},
	{Opt_data_journal, "data=journal"},
	{Opt_data_ordered, "data=ordered"},
	{Opt_data_writeback, "data=writeback"},
	{Opt_data_err_abort, "data_err=abort"},
	{Opt_data_err_ignore, "data_err=ignore"},
	{Opt_offusrjquota, "usrjquota="},
	{Opt_usrjquota, "usrjquota=%s"},
	{Opt_offgrpjquota, "grpjquota="},
	{Opt_grpjquota, "grpjquota=%s"},
	{Opt_jqfmt_vfsold, "jqfmt=vfsold"},
	{Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
	{Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
	{Opt_grpquota, "grpquota"},
	{Opt_noquota, "noquota"},
	{Opt_quota, "quota"},
	{Opt_usrquota, "usrquota"},
	{Opt_prjquota, "prjquota"},
	{Opt_barrier, "barrier=%u"},
	{Opt_barrier, "barrier"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_i_version, "i_version"},
	{Opt_dax, "dax"},
	{Opt_dax_always, "dax=always"},
	{Opt_dax_inode, "dax=inode"},
	{Opt_dax_never, "dax=never"},
	{Opt_stripe, "stripe=%u"},
	{Opt_delalloc, "delalloc"},
	{Opt_warn_on_error, "warn_on_error"},
	{Opt_nowarn_on_error, "nowarn_on_error"},
	{Opt_lazytime, "lazytime"},
	{Opt_nolazytime, "nolazytime"},
	{Opt_debug_want_extra_isize, "debug_want_extra_isize=%u"},
	{Opt_nodelalloc, "nodelalloc"},
	{Opt_removed, "mblk_io_submit"},
	{Opt_removed, "nomblk_io_submit"},
	{Opt_block_validity, "block_validity"},
	{Opt_noblock_validity, "noblock_validity"},
	{Opt_inode_readahead_blks, "inode_readahead_blks=%u"},
	{Opt_journal_ioprio, "journal_ioprio=%u"},
	{Opt_auto_da_alloc, "auto_da_alloc=%u"},
	{Opt_auto_da_alloc, "auto_da_alloc"},
	{Opt_noauto_da_alloc, "noauto_da_alloc"},
	{Opt_dioread_nolock, "dioread_nolock"},
	{Opt_dioread_lock, "nodioread_nolock"},
	{Opt_dioread_lock, "dioread_lock"},
	{Opt_discard, "discard"},
	{Opt_nodiscard, "nodiscard"},
	{Opt_init_itable, "init_itable=%u"},
	{Opt_init_itable, "init_itable"},
	{Opt_noinit_itable, "noinit_itable"},
#ifdef CONFIG_EXT4_DEBUG
	{Opt_fc_debug_force, "fc_debug_force"},
	{Opt_fc_debug_max_replay, "fc_debug_max_replay=%u"},
#endif
	{Opt_max_dir_size_kb, "max_dir_size_kb=%u"},
	{Opt_test_dummy_encryption, "test_dummy_encryption=%s"},
	{Opt_test_dummy_encryption, "test_dummy_encryption"},
	{Opt_inlinecrypt, "inlinecrypt"},
	{Opt_nombcache, "nombcache"},
	{Opt_nombcache, "no_mbcache"},	/* for backward compatibility */
	{Opt_removed, "prefetch_block_bitmaps"},
	{Opt_no_prefetch_block_bitmaps, "no_prefetch_block_bitmaps"},
	{Opt_mb_optimize_scan, "mb_optimize_scan=%d"},
	{Opt_removed, "check=none"},	/* mount option from ext2/3 */
	{Opt_removed, "nocheck"},	/* mount option from ext2/3 */
	{Opt_removed, "reservation"},	/* mount option from ext2/3 */
	{Opt_removed, "noreservation"},	/* mount option from ext2/3 */
	{Opt_removed, "journal=%u"},	/* mount option from ext2/3 */
	{Opt_err, NULL},
};
1788
1789 static ext4_fsblk_t get_sb_block(void **data)
1790 {
1791         ext4_fsblk_t    sb_block;
1792         char            *options = (char *) *data;
1793
1794         if (!options || strncmp(options, "sb=", 3) != 0)
1795                 return 1;       /* Default location */
1796
1797         options += 3;
1798         /* TODO: use simple_strtoll with >32bit ext4 */
1799         sb_block = simple_strtoul(options, &options, 0);
1800         if (*options && *options != ',') {
1801                 printk(KERN_ERR "EXT4-fs: Invalid sb specification: %s\n",
1802                        (char *) *data);
1803                 return 1;
1804         }
1805         if (*options == ',')
1806                 options++;
1807         *data = (void *) options;
1808
1809         return sb_block;
1810 }
1811
/* Default journal commit I/O priority: best-effort class, level 3. */
#define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
/* "Not specified" sentinel for the mb_optimize_scan mount option. */
#define DEFAULT_MB_OPTIMIZE_SCAN	(-1)

/* printk format for warning about mount options scheduled for removal. */
static const char deprecated_msg[] =
	"Mount option \"%s\" will be removed by %s\n"
	"Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
1818
1819 #ifdef CONFIG_QUOTA
/*
 * Record the journaled quota file name for @qtype from the "usrjquota="/
 * "grpjquota=" mount option.  Returns 1 on success (or when the option is
 * ignored because the QUOTA feature is enabled), -1 on error.  The name is
 * published via RCU into sbi->s_qf_names[].
 */
static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	char *qname, *old_qname = get_qf_name(sb, sbi, qtype);
	int ret = -1;

	/* A name can only be set while quota is off; changing it later races
	 * with the running quota machinery. */
	if (sb_any_quota_loaded(sb) && !old_qname) {
		ext4_msg(sb, KERN_ERR,
			"Cannot change journaled "
			"quota options when quota turned on");
		return -1;
	}
	/* With the QUOTA feature, quota files are hidden inodes and these
	 * legacy options are meaningless — accept but ignore them. */
	if (ext4_has_feature_quota(sb)) {
		ext4_msg(sb, KERN_INFO, "Journaled quota options "
			 "ignored when QUOTA feature is enabled");
		return 1;
	}
	qname = match_strdup(args);
	if (!qname) {
		ext4_msg(sb, KERN_ERR,
			"Not enough memory for storing quotafile name");
		return -1;
	}
	/* Re-specifying the identical name is a no-op; a different name while
	 * one is already set is an error. */
	if (old_qname) {
		if (strcmp(old_qname, qname) == 0)
			ret = 1;
		else
			ext4_msg(sb, KERN_ERR,
				 "%s quota file already specified",
				 QTYPE2NAME(qtype));
		goto errout;
	}
	if (strchr(qname, '/')) {
		ext4_msg(sb, KERN_ERR,
			"quotafile must be on filesystem root");
		goto errout;
	}
	rcu_assign_pointer(sbi->s_qf_names[qtype], qname);
	set_opt(sb, QUOTA);
	return 1;
errout:
	kfree(qname);
	return ret;
}
1864
1865 static int clear_qf_name(struct super_block *sb, int qtype)
1866 {
1867
1868         struct ext4_sb_info *sbi = EXT4_SB(sb);
1869         char *old_qname = get_qf_name(sb, sbi, qtype);
1870
1871         if (sb_any_quota_loaded(sb) && old_qname) {
1872                 ext4_msg(sb, KERN_ERR, "Cannot change journaled quota options"
1873                         " when quota turned on");
1874                 return -1;
1875         }
1876         rcu_assign_pointer(sbi->s_qf_names[qtype], NULL);
1877         synchronize_rcu();
1878         kfree(old_qname);
1879         return 1;
1880 }
1881 #endif
1882
/* Flags describing how each entry in ext4_mount_opts[] is handled. */
#define MOPT_SET	0x0001	/* set the mount_opt bits */
#define MOPT_CLEAR	0x0002	/* clear the mount_opt bits */
#define MOPT_NOSUPPORT	0x0004	/* recognized but unsupported in this build */
#define MOPT_EXPLICIT	0x0008	/* remember the option was given explicitly */
#define MOPT_CLEAR_ERR	0x0010	/* clear ERRORS_MASK before setting */
#define MOPT_GTE0	0x0020	/* numeric argument must be >= 0 */
#ifdef CONFIG_QUOTA
#define MOPT_Q		0
#define MOPT_QFMT	0x0040	/* mount_opt is a journaled quota format */
#else
/* Without CONFIG_QUOTA, all quota options are unsupported. */
#define MOPT_Q		MOPT_NOSUPPORT
#define MOPT_QFMT	MOPT_NOSUPPORT
#endif
#define MOPT_DATAJ	0x0080	/* data journaling mode option */
#define MOPT_NO_EXT2	0x0100	/* invalid when mounted as ext2 */
#define MOPT_NO_EXT3	0x0200	/* invalid when mounted as ext3 */
#define MOPT_EXT4_ONLY	(MOPT_NO_EXT2 | MOPT_NO_EXT3)
#define MOPT_STRING	0x0400	/* argument is a string, not an int */
#define MOPT_SKIP	0x0800	/* skip in show_options (handled specially) */
#define MOPT_2		0x1000	/* bits live in s_mount_opt2, not s_mount_opt */
1903
/*
 * Per-token handling table: which s_mount_opt/s_mount_opt2 bits a token
 * affects and the MOPT_* flags controlling validation and application.
 * Terminated by the {Opt_err, 0, 0} sentinel.
 */
static const struct mount_opts {
	int	token;		/* Opt_* value from the enum above */
	int	mount_opt;	/* EXT4_MOUNT(2)_* bits, or format id */
	int	flags;		/* MOPT_* handling flags */
} ext4_mount_opts[] = {
	{Opt_minix_df, EXT4_MOUNT_MINIX_DF, MOPT_SET},
	{Opt_bsd_df, EXT4_MOUNT_MINIX_DF, MOPT_CLEAR},
	{Opt_grpid, EXT4_MOUNT_GRPID, MOPT_SET},
	{Opt_nogrpid, EXT4_MOUNT_GRPID, MOPT_CLEAR},
	{Opt_block_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_SET},
	{Opt_noblock_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_CLEAR},
	{Opt_dioread_nolock, EXT4_MOUNT_DIOREAD_NOLOCK,
	 MOPT_EXT4_ONLY | MOPT_SET},
	{Opt_dioread_lock, EXT4_MOUNT_DIOREAD_NOLOCK,
	 MOPT_EXT4_ONLY | MOPT_CLEAR},
	{Opt_discard, EXT4_MOUNT_DISCARD, MOPT_SET},
	{Opt_nodiscard, EXT4_MOUNT_DISCARD, MOPT_CLEAR},
	{Opt_delalloc, EXT4_MOUNT_DELALLOC,
	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
	{Opt_nodelalloc, EXT4_MOUNT_DELALLOC,
	 MOPT_EXT4_ONLY | MOPT_CLEAR},
	{Opt_warn_on_error, EXT4_MOUNT_WARN_ON_ERROR, MOPT_SET},
	{Opt_nowarn_on_error, EXT4_MOUNT_WARN_ON_ERROR, MOPT_CLEAR},
	{Opt_nojournal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
	 MOPT_EXT4_ONLY | MOPT_CLEAR},
	{Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
	{Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT |
				    EXT4_MOUNT_JOURNAL_CHECKSUM),
	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
	{Opt_noload, EXT4_MOUNT_NOLOAD, MOPT_NO_EXT2 | MOPT_SET},
	{Opt_err_panic, EXT4_MOUNT_ERRORS_PANIC, MOPT_SET | MOPT_CLEAR_ERR},
	{Opt_err_ro, EXT4_MOUNT_ERRORS_RO, MOPT_SET | MOPT_CLEAR_ERR},
	{Opt_err_cont, EXT4_MOUNT_ERRORS_CONT, MOPT_SET | MOPT_CLEAR_ERR},
	{Opt_data_err_abort, EXT4_MOUNT_DATA_ERR_ABORT,
	 MOPT_NO_EXT2},
	{Opt_data_err_ignore, EXT4_MOUNT_DATA_ERR_ABORT,
	 MOPT_NO_EXT2},
	{Opt_barrier, EXT4_MOUNT_BARRIER, MOPT_SET},
	{Opt_nobarrier, EXT4_MOUNT_BARRIER, MOPT_CLEAR},
	{Opt_noauto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_SET},
	{Opt_auto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_CLEAR},
	{Opt_noinit_itable, EXT4_MOUNT_INIT_INODE_TABLE, MOPT_CLEAR},
	{Opt_commit, 0, MOPT_GTE0},
	{Opt_max_batch_time, 0, MOPT_GTE0},
	{Opt_min_batch_time, 0, MOPT_GTE0},
	{Opt_inode_readahead_blks, 0, MOPT_GTE0},
	{Opt_init_itable, 0, MOPT_GTE0},
	{Opt_dax, EXT4_MOUNT_DAX_ALWAYS, MOPT_SET | MOPT_SKIP},
	{Opt_dax_always, EXT4_MOUNT_DAX_ALWAYS,
		MOPT_EXT4_ONLY | MOPT_SET | MOPT_SKIP},
	{Opt_dax_inode, EXT4_MOUNT2_DAX_INODE,
		MOPT_EXT4_ONLY | MOPT_SET | MOPT_SKIP},
	{Opt_dax_never, EXT4_MOUNT2_DAX_NEVER,
		MOPT_EXT4_ONLY | MOPT_SET | MOPT_SKIP},
	{Opt_stripe, 0, MOPT_GTE0},
	{Opt_resuid, 0, MOPT_GTE0},
	{Opt_resgid, 0, MOPT_GTE0},
	{Opt_journal_dev, 0, MOPT_NO_EXT2 | MOPT_GTE0},
	{Opt_journal_path, 0, MOPT_NO_EXT2 | MOPT_STRING},
	{Opt_journal_ioprio, 0, MOPT_NO_EXT2 | MOPT_GTE0},
	{Opt_data_journal, EXT4_MOUNT_JOURNAL_DATA, MOPT_NO_EXT2 | MOPT_DATAJ},
	{Opt_data_ordered, EXT4_MOUNT_ORDERED_DATA, MOPT_NO_EXT2 | MOPT_DATAJ},
	{Opt_data_writeback, EXT4_MOUNT_WRITEBACK_DATA,
	 MOPT_NO_EXT2 | MOPT_DATAJ},
	{Opt_user_xattr, EXT4_MOUNT_XATTR_USER, MOPT_SET},
	{Opt_nouser_xattr, EXT4_MOUNT_XATTR_USER, MOPT_CLEAR},
#ifdef CONFIG_EXT4_FS_POSIX_ACL
	{Opt_acl, EXT4_MOUNT_POSIX_ACL, MOPT_SET},
	{Opt_noacl, EXT4_MOUNT_POSIX_ACL, MOPT_CLEAR},
#else
	{Opt_acl, 0, MOPT_NOSUPPORT},
	{Opt_noacl, 0, MOPT_NOSUPPORT},
#endif
	{Opt_nouid32, EXT4_MOUNT_NO_UID32, MOPT_SET},
	{Opt_debug, EXT4_MOUNT_DEBUG, MOPT_SET},
	{Opt_debug_want_extra_isize, 0, MOPT_GTE0},
	{Opt_quota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA, MOPT_SET | MOPT_Q},
	{Opt_usrquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA,
							MOPT_SET | MOPT_Q},
	{Opt_grpquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_GRPQUOTA,
							MOPT_SET | MOPT_Q},
	{Opt_prjquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_PRJQUOTA,
							MOPT_SET | MOPT_Q},
	{Opt_noquota, (EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA |
		       EXT4_MOUNT_GRPQUOTA | EXT4_MOUNT_PRJQUOTA),
							MOPT_CLEAR | MOPT_Q},
	{Opt_usrjquota, 0, MOPT_Q | MOPT_STRING},
	{Opt_grpjquota, 0, MOPT_Q | MOPT_STRING},
	{Opt_offusrjquota, 0, MOPT_Q},
	{Opt_offgrpjquota, 0, MOPT_Q},
	{Opt_jqfmt_vfsold, QFMT_VFS_OLD, MOPT_QFMT},
	{Opt_jqfmt_vfsv0, QFMT_VFS_V0, MOPT_QFMT},
	{Opt_jqfmt_vfsv1, QFMT_VFS_V1, MOPT_QFMT},
	{Opt_max_dir_size_kb, 0, MOPT_GTE0},
	{Opt_test_dummy_encryption, 0, MOPT_STRING},
	{Opt_nombcache, EXT4_MOUNT_NO_MBCACHE, MOPT_SET},
	{Opt_no_prefetch_block_bitmaps, EXT4_MOUNT_NO_PREFETCH_BLOCK_BITMAPS,
	 MOPT_SET},
	{Opt_mb_optimize_scan, EXT4_MOUNT2_MB_OPTIMIZE_SCAN, MOPT_GTE0},
#ifdef CONFIG_EXT4_DEBUG
	{Opt_fc_debug_force, EXT4_MOUNT2_JOURNAL_FAST_COMMIT,
	 MOPT_SET | MOPT_2 | MOPT_EXT4_ONLY},
	{Opt_fc_debug_max_replay, 0, MOPT_GTE0},
#endif
	{Opt_err, 0, 0}
};
2011
2012 #ifdef CONFIG_UNICODE
/* Map of on-disk encoding magic numbers to casefolding encoding names. */
static const struct ext4_sb_encodings {
	__u16 magic;	/* value stored in es->s_encoding */
	char *name;	/* encoding name passed to utf8_load() */
	char *version;	/* unicode version string */
} ext4_sb_encoding_map[] = {
	{EXT4_ENC_UTF8_12_1, "utf8", "12.1.0"},
};
2020
2021 static int ext4_sb_read_encoding(const struct ext4_super_block *es,
2022                                  const struct ext4_sb_encodings **encoding,
2023                                  __u16 *flags)
2024 {
2025         __u16 magic = le16_to_cpu(es->s_encoding);
2026         int i;
2027
2028         for (i = 0; i < ARRAY_SIZE(ext4_sb_encoding_map); i++)
2029                 if (magic == ext4_sb_encoding_map[i].magic)
2030                         break;
2031
2032         if (i >= ARRAY_SIZE(ext4_sb_encoding_map))
2033                 return -EINVAL;
2034
2035         *encoding = &ext4_sb_encoding_map[i];
2036         *flags = le16_to_cpu(es->s_encoding_flags);
2037
2038         return 0;
2039 }
2040 #endif
2041
/*
 * Handle the test_dummy_encryption mount option.  Returns 1 on success
 * (or when fscrypt support is compiled out and the option is ignored),
 * -1 on error.  @arg is the optional "=value" substring.
 */
static int ext4_set_test_dummy_encryption(struct super_block *sb,
					  const char *opt,
					  const substring_t *arg,
					  bool is_remount)
{
#ifdef CONFIG_FS_ENCRYPTION
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int err;

	/*
	 * This mount option is just for testing, and it's not worthwhile to
	 * implement the extra complexity (e.g. RCU protection) that would be
	 * needed to allow it to be set or changed during remount.  We do allow
	 * it to be specified during remount, but only if there is no change.
	 */
	if (is_remount && !sbi->s_dummy_enc_policy.policy) {
		ext4_msg(sb, KERN_WARNING,
			 "Can't set test_dummy_encryption on remount");
		return -1;
	}
	err = fscrypt_set_test_dummy_encryption(sb, arg->from,
						&sbi->s_dummy_enc_policy);
	if (err) {
		/* -EEXIST here means a *different* policy was already set. */
		if (err == -EEXIST)
			ext4_msg(sb, KERN_WARNING,
				 "Can't change test_dummy_encryption on remount");
		else if (err == -EINVAL)
			ext4_msg(sb, KERN_WARNING,
				 "Value of option \"%s\" is unrecognized", opt);
		else
			ext4_msg(sb, KERN_WARNING,
				 "Error processing option \"%s\" [%d]",
				 opt, err);
		return -1;
	}
	ext4_msg(sb, KERN_WARNING, "Test dummy encryption mode enabled");
#else
	ext4_msg(sb, KERN_WARNING,
		 "Test dummy encryption mount option ignored");
#endif
	return 1;
}
2084
/* Values accumulated while parsing mount options, applied after parsing. */
struct ext4_parsed_options {
	unsigned long journal_devnum;	/* external journal device number */
	unsigned int journal_ioprio;	/* journal commit thread I/O priority */
	int mb_optimize_scan;		/* mb_optimize_scan=; -1 if unset */
};
2090
2091 static int handle_mount_opt(struct super_block *sb, char *opt, int token,
2092                             substring_t *args, struct ext4_parsed_options *parsed_opts,
2093                             int is_remount)
2094 {
2095         struct ext4_sb_info *sbi = EXT4_SB(sb);
2096         const struct mount_opts *m;
2097         kuid_t uid;
2098         kgid_t gid;
2099         int arg = 0;
2100
2101 #ifdef CONFIG_QUOTA
2102         if (token == Opt_usrjquota)
2103                 return set_qf_name(sb, USRQUOTA, &args[0]);
2104         else if (token == Opt_grpjquota)
2105                 return set_qf_name(sb, GRPQUOTA, &args[0]);
2106         else if (token == Opt_offusrjquota)
2107                 return clear_qf_name(sb, USRQUOTA);
2108         else if (token == Opt_offgrpjquota)
2109                 return clear_qf_name(sb, GRPQUOTA);
2110 #endif
2111         switch (token) {
2112         case Opt_noacl:
2113         case Opt_nouser_xattr:
2114                 ext4_msg(sb, KERN_WARNING, deprecated_msg, opt, "3.5");
2115                 break;
2116         case Opt_sb:
2117                 return 1;       /* handled by get_sb_block() */
2118         case Opt_removed:
2119                 ext4_msg(sb, KERN_WARNING, "Ignoring removed %s option", opt);
2120                 return 1;
2121         case Opt_abort:
2122                 ext4_set_mount_flag(sb, EXT4_MF_FS_ABORTED);
2123                 return 1;
2124         case Opt_i_version:
2125                 sb->s_flags |= SB_I_VERSION;
2126                 return 1;
2127         case Opt_lazytime:
2128                 sb->s_flags |= SB_LAZYTIME;
2129                 return 1;
2130         case Opt_nolazytime:
2131                 sb->s_flags &= ~SB_LAZYTIME;
2132                 return 1;
2133         case Opt_inlinecrypt:
2134 #ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
2135                 sb->s_flags |= SB_INLINECRYPT;
2136 #else
2137                 ext4_msg(sb, KERN_ERR, "inline encryption not supported");
2138 #endif
2139                 return 1;
2140         }
2141
2142         for (m = ext4_mount_opts; m->token != Opt_err; m++)
2143                 if (token == m->token)
2144                         break;
2145
2146         if (m->token == Opt_err) {
2147                 ext4_msg(sb, KERN_ERR, "Unrecognized mount option \"%s\" "
2148                          "or missing value", opt);
2149                 return -1;
2150         }
2151
2152         if ((m->flags & MOPT_NO_EXT2) && IS_EXT2_SB(sb)) {
2153                 ext4_msg(sb, KERN_ERR,
2154                          "Mount option \"%s\" incompatible with ext2", opt);
2155                 return -1;
2156         }
2157         if ((m->flags & MOPT_NO_EXT3) && IS_EXT3_SB(sb)) {
2158                 ext4_msg(sb, KERN_ERR,
2159                          "Mount option \"%s\" incompatible with ext3", opt);
2160                 return -1;
2161         }
2162
2163         if (args->from && !(m->flags & MOPT_STRING) && match_int(args, &arg))
2164                 return -1;
2165         if (args->from && (m->flags & MOPT_GTE0) && (arg < 0))
2166                 return -1;
2167         if (m->flags & MOPT_EXPLICIT) {
2168                 if (m->mount_opt & EXT4_MOUNT_DELALLOC) {
2169                         set_opt2(sb, EXPLICIT_DELALLOC);
2170                 } else if (m->mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) {
2171                         set_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM);
2172                 } else
2173                         return -1;
2174         }
2175         if (m->flags & MOPT_CLEAR_ERR)
2176                 clear_opt(sb, ERRORS_MASK);
2177         if (token == Opt_noquota && sb_any_quota_loaded(sb)) {
2178                 ext4_msg(sb, KERN_ERR, "Cannot change quota "
2179                          "options when quota turned on");
2180                 return -1;
2181         }
2182
2183         if (m->flags & MOPT_NOSUPPORT) {
2184                 ext4_msg(sb, KERN_ERR, "%s option not supported", opt);
2185         } else if (token == Opt_commit) {
2186                 if (arg == 0)
2187                         arg = JBD2_DEFAULT_MAX_COMMIT_AGE;
2188                 else if (arg > INT_MAX / HZ) {
2189                         ext4_msg(sb, KERN_ERR,
2190                                  "Invalid commit interval %d, "
2191                                  "must be smaller than %d",
2192                                  arg, INT_MAX / HZ);
2193                         return -1;
2194                 }
2195                 sbi->s_commit_interval = HZ * arg;
2196         } else if (token == Opt_debug_want_extra_isize) {
2197                 if ((arg & 1) ||
2198                     (arg < 4) ||
2199                     (arg > (sbi->s_inode_size - EXT4_GOOD_OLD_INODE_SIZE))) {
2200                         ext4_msg(sb, KERN_ERR,
2201                                  "Invalid want_extra_isize %d", arg);
2202                         return -1;
2203                 }
2204                 sbi->s_want_extra_isize = arg;
2205         } else if (token == Opt_max_batch_time) {
2206                 sbi->s_max_batch_time = arg;
2207         } else if (token == Opt_min_batch_time) {
2208                 sbi->s_min_batch_time = arg;
2209         } else if (token == Opt_inode_readahead_blks) {
2210                 if (arg && (arg > (1 << 30) || !is_power_of_2(arg))) {
2211                         ext4_msg(sb, KERN_ERR,
2212                                  "EXT4-fs: inode_readahead_blks must be "
2213                                  "0 or a power of 2 smaller than 2^31");
2214                         return -1;
2215                 }
2216                 sbi->s_inode_readahead_blks = arg;
2217         } else if (token == Opt_init_itable) {
2218                 set_opt(sb, INIT_INODE_TABLE);
2219                 if (!args->from)
2220                         arg = EXT4_DEF_LI_WAIT_MULT;
2221                 sbi->s_li_wait_mult = arg;
2222         } else if (token == Opt_max_dir_size_kb) {
2223                 sbi->s_max_dir_size_kb = arg;
2224 #ifdef CONFIG_EXT4_DEBUG
2225         } else if (token == Opt_fc_debug_max_replay) {
2226                 sbi->s_fc_debug_max_replay = arg;
2227 #endif
2228         } else if (token == Opt_stripe) {
2229                 sbi->s_stripe = arg;
2230         } else if (token == Opt_resuid) {
2231                 uid = make_kuid(current_user_ns(), arg);
2232                 if (!uid_valid(uid)) {
2233                         ext4_msg(sb, KERN_ERR, "Invalid uid value %d", arg);
2234                         return -1;
2235                 }
2236                 sbi->s_resuid = uid;
2237         } else if (token == Opt_resgid) {
2238                 gid = make_kgid(current_user_ns(), arg);
2239                 if (!gid_valid(gid)) {
2240                         ext4_msg(sb, KERN_ERR, "Invalid gid value %d", arg);
2241                         return -1;
2242                 }
2243                 sbi->s_resgid = gid;
2244         } else if (token == Opt_journal_dev) {
2245                 if (is_remount) {
2246                         ext4_msg(sb, KERN_ERR,
2247                                  "Cannot specify journal on remount");
2248                         return -1;
2249                 }
2250                 parsed_opts->journal_devnum = arg;
2251         } else if (token == Opt_journal_path) {
2252                 char *journal_path;
2253                 struct inode *journal_inode;
2254                 struct path path;
2255                 int error;
2256
2257                 if (is_remount) {
2258                         ext4_msg(sb, KERN_ERR,
2259                                  "Cannot specify journal on remount");
2260                         return -1;
2261                 }
2262                 journal_path = match_strdup(&args[0]);
2263                 if (!journal_path) {
2264                         ext4_msg(sb, KERN_ERR, "error: could not dup "
2265                                 "journal device string");
2266                         return -1;
2267                 }
2268
2269                 error = kern_path(journal_path, LOOKUP_FOLLOW, &path);
2270                 if (error) {
2271                         ext4_msg(sb, KERN_ERR, "error: could not find "
2272                                 "journal device path: error %d", error);
2273                         kfree(journal_path);
2274                         return -1;
2275                 }
2276
2277                 journal_inode = d_inode(path.dentry);
2278                 if (!S_ISBLK(journal_inode->i_mode)) {
2279                         ext4_msg(sb, KERN_ERR, "error: journal path %s "
2280                                 "is not a block device", journal_path);
2281                         path_put(&path);
2282                         kfree(journal_path);
2283                         return -1;
2284                 }
2285
2286                 parsed_opts->journal_devnum = new_encode_dev(journal_inode->i_rdev);
2287                 path_put(&path);
2288                 kfree(journal_path);
2289         } else if (token == Opt_journal_ioprio) {
2290                 if (arg > 7) {
2291                         ext4_msg(sb, KERN_ERR, "Invalid journal IO priority"
2292                                  " (must be 0-7)");
2293                         return -1;
2294                 }
2295                 parsed_opts->journal_ioprio =
2296                         IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, arg);
2297         } else if (token == Opt_test_dummy_encryption) {
2298                 return ext4_set_test_dummy_encryption(sb, opt, &args[0],
2299                                                       is_remount);
2300         } else if (m->flags & MOPT_DATAJ) {
2301                 if (is_remount) {
2302                         if (!sbi->s_journal)
2303                                 ext4_msg(sb, KERN_WARNING, "Remounting file system with no journal so ignoring journalled data option");
2304                         else if (test_opt(sb, DATA_FLAGS) != m->mount_opt) {
2305                                 ext4_msg(sb, KERN_ERR,
2306                                          "Cannot change data mode on remount");
2307                                 return -1;
2308                         }
2309                 } else {
2310                         clear_opt(sb, DATA_FLAGS);
2311                         sbi->s_mount_opt |= m->mount_opt;
2312                 }
2313 #ifdef CONFIG_QUOTA
2314         } else if (m->flags & MOPT_QFMT) {
2315                 if (sb_any_quota_loaded(sb) &&
2316                     sbi->s_jquota_fmt != m->mount_opt) {
2317                         ext4_msg(sb, KERN_ERR, "Cannot change journaled "
2318                                  "quota options when quota turned on");
2319                         return -1;
2320                 }
2321                 if (ext4_has_feature_quota(sb)) {
2322                         ext4_msg(sb, KERN_INFO,
2323                                  "Quota format mount options ignored "
2324                                  "when QUOTA feature is enabled");
2325                         return 1;
2326                 }
2327                 sbi->s_jquota_fmt = m->mount_opt;
2328 #endif
2329         } else if (token == Opt_dax || token == Opt_dax_always ||
2330                    token == Opt_dax_inode || token == Opt_dax_never) {
2331 #ifdef CONFIG_FS_DAX
2332                 switch (token) {
2333                 case Opt_dax:
2334                 case Opt_dax_always:
2335                         if (is_remount &&
2336                             (!(sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) ||
2337                              (sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_NEVER))) {
2338                         fail_dax_change_remount:
2339                                 ext4_msg(sb, KERN_ERR, "can't change "
2340                                          "dax mount option while remounting");
2341                                 return -1;
2342                         }
2343                         if (is_remount &&
2344                             (test_opt(sb, DATA_FLAGS) ==
2345                              EXT4_MOUNT_JOURNAL_DATA)) {
2346                                     ext4_msg(sb, KERN_ERR, "can't mount with "
2347                                              "both data=journal and dax");
2348                                     return -1;
2349                         }
2350                         ext4_msg(sb, KERN_WARNING,
2351                                 "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
2352                         sbi->s_mount_opt |= EXT4_MOUNT_DAX_ALWAYS;
2353                         sbi->s_mount_opt2 &= ~EXT4_MOUNT2_DAX_NEVER;
2354                         break;
2355                 case Opt_dax_never:
2356                         if (is_remount &&
2357                             (!(sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_NEVER) ||
2358                              (sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS)))
2359                                 goto fail_dax_change_remount;
2360                         sbi->s_mount_opt2 |= EXT4_MOUNT2_DAX_NEVER;
2361                         sbi->s_mount_opt &= ~EXT4_MOUNT_DAX_ALWAYS;
2362                         break;
2363                 case Opt_dax_inode:
2364                         if (is_remount &&
2365                             ((sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) ||
2366                              (sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_NEVER) ||
2367                              !(sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_INODE)))
2368                                 goto fail_dax_change_remount;
2369                         sbi->s_mount_opt &= ~EXT4_MOUNT_DAX_ALWAYS;
2370                         sbi->s_mount_opt2 &= ~EXT4_MOUNT2_DAX_NEVER;
2371                         /* Strictly for printing options */
2372                         sbi->s_mount_opt2 |= EXT4_MOUNT2_DAX_INODE;
2373                         break;
2374                 }
2375 #else
2376                 ext4_msg(sb, KERN_INFO, "dax option not supported");
2377                 sbi->s_mount_opt2 |= EXT4_MOUNT2_DAX_NEVER;
2378                 sbi->s_mount_opt &= ~EXT4_MOUNT_DAX_ALWAYS;
2379                 return -1;
2380 #endif
2381         } else if (token == Opt_data_err_abort) {
2382                 sbi->s_mount_opt |= m->mount_opt;
2383         } else if (token == Opt_data_err_ignore) {
2384                 sbi->s_mount_opt &= ~m->mount_opt;
2385         } else if (token == Opt_mb_optimize_scan) {
2386                 if (arg != 0 && arg != 1) {
2387                         ext4_msg(sb, KERN_WARNING,
2388                                  "mb_optimize_scan should be set to 0 or 1.");
2389                         return -1;
2390                 }
2391                 parsed_opts->mb_optimize_scan = arg;
2392         } else {
2393                 if (!args->from)
2394                         arg = 1;
2395                 if (m->flags & MOPT_CLEAR)
2396                         arg = !arg;
2397                 else if (unlikely(!(m->flags & MOPT_SET))) {
2398                         ext4_msg(sb, KERN_WARNING,
2399                                  "buggy handling of option %s", opt);
2400                         WARN_ON(1);
2401                         return -1;
2402                 }
2403                 if (m->flags & MOPT_2) {
2404                         if (arg != 0)
2405                                 sbi->s_mount_opt2 |= m->mount_opt;
2406                         else
2407                                 sbi->s_mount_opt2 &= ~m->mount_opt;
2408                 } else {
2409                         if (arg != 0)
2410                                 sbi->s_mount_opt |= m->mount_opt;
2411                         else
2412                                 sbi->s_mount_opt &= ~m->mount_opt;
2413                 }
2414         }
2415         return 1;
2416 }
2417
/*
 * Parse a comma-separated mount-option string and apply each option to
 * the superblock / *ret_opts via handle_mount_opt().  After parsing,
 * cross-check option combinations that can only be validated once the
 * whole string has been seen (quota format mixing, dioread_nolock).
 *
 * Returns 1 on success, 0 on any parse or consistency error (errors are
 * reported with ext4_msg() before returning).
 */
static int parse_options(char *options, struct super_block *sb,
			 struct ext4_parsed_options *ret_opts,
			 int is_remount)
{
	struct ext4_sb_info __maybe_unused *sbi = EXT4_SB(sb);
	char *p, __maybe_unused *usr_qf_name, __maybe_unused *grp_qf_name;
	substring_t args[MAX_OPT_ARGS];
	int token;

	/* No options at all is trivially valid */
	if (!options)
		return 1;

	while ((p = strsep(&options, ",")) != NULL) {
		if (!*p)
			continue;
		/*
		 * Initialize args struct so we know whether arg was
		 * found; some options take optional arguments.
		 */
		args[0].to = args[0].from = NULL;
		token = match_token(p, tokens, args);
		if (handle_mount_opt(sb, p, token, args, ret_opts,
				     is_remount) < 0)
			return 0;
	}
#ifdef CONFIG_QUOTA
	/*
	 * We do the test below only for project quotas. 'usrquota' and
	 * 'grpquota' mount options are allowed even without quota feature
	 * to support legacy quotas in quota files.
	 */
	if (test_opt(sb, PRJQUOTA) && !ext4_has_feature_project(sb)) {
		ext4_msg(sb, KERN_ERR, "Project quota feature not enabled. "
			 "Cannot enable project quota enforcement.");
		return 0;
	}
	usr_qf_name = get_qf_name(sb, sbi, USRQUOTA);
	grp_qf_name = get_qf_name(sb, sbi, GRPQUOTA);
	if (usr_qf_name || grp_qf_name) {
		/*
		 * Journaled quota (usrjquota=/grpjquota=) supersedes the
		 * plain usrquota/grpquota flags; clear the matching flag
		 * so only genuinely mixed old/new usage is rejected below.
		 */
		if (test_opt(sb, USRQUOTA) && usr_qf_name)
			clear_opt(sb, USRQUOTA);

		if (test_opt(sb, GRPQUOTA) && grp_qf_name)
			clear_opt(sb, GRPQUOTA);

		if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) {
			ext4_msg(sb, KERN_ERR, "old and new quota "
					"format mixing");
			return 0;
		}

		/* A journaled quota file requires an explicit jqfmt= */
		if (!sbi->s_jquota_fmt) {
			ext4_msg(sb, KERN_ERR, "journaled quota format "
					"not specified");
			return 0;
		}
	}
#endif
	if (test_opt(sb, DIOREAD_NOLOCK)) {
		int blocksize =
			BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
		/* Warn (but allow) dioread_nolock on sub-page block sizes */
		if (blocksize < PAGE_SIZE)
			ext4_msg(sb, KERN_WARNING, "Warning: mounting with an "
				 "experimental mount option 'dioread_nolock' "
				 "for blocksize < PAGE_SIZE");
	}
	return 1;
}
2486
/*
 * Emit the quota-related mount options (jqfmt=, usrjquota=, grpjquota=)
 * for this superblock into @seq.  Compiled out when CONFIG_QUOTA is off.
 */
static inline void ext4_show_quota_options(struct seq_file *seq,
					   struct super_block *sb)
{
#if defined(CONFIG_QUOTA)
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	char *uname, *gname;

	if (sbi->s_jquota_fmt) {
		const char *fmt;

		if (sbi->s_jquota_fmt == QFMT_VFS_OLD)
			fmt = "vfsold";
		else if (sbi->s_jquota_fmt == QFMT_VFS_V0)
			fmt = "vfsv0";
		else if (sbi->s_jquota_fmt == QFMT_VFS_V1)
			fmt = "vfsv1";
		else
			fmt = "";
		seq_printf(seq, ",jqfmt=%s", fmt);
	}

	/* s_qf_names[] is RCU-protected; hold the read lock while printing */
	rcu_read_lock();
	uname = rcu_dereference(sbi->s_qf_names[USRQUOTA]);
	gname = rcu_dereference(sbi->s_qf_names[GRPQUOTA]);
	if (uname)
		seq_show_option(seq, "usrjquota", uname);
	if (gname)
		seq_show_option(seq, "grpjquota", gname);
	rcu_read_unlock();
#endif
}
2521
2522 static const char *token2str(int token)
2523 {
2524         const struct match_token *t;
2525
2526         for (t = tokens; t->token != Opt_err; t++)
2527                 if (t->token == token && !strchr(t->pattern, '='))
2528                         break;
2529         return t->pattern;
2530 }
2531
/*
 * Emit this superblock's mount options into @seq.
 *
 * Show an option if
 *  - it's set to a non-default value OR
 *  - if the per-sb default is different from the global default
 *
 * With @nodefs set (used by /proc/fs/ext4/<dev>/options), every option is
 * printed unconditionally, one per line; otherwise only deviations from
 * the defaults are printed, comma-separated (used by /proc/mounts).
 * Always returns 0.
 */
static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
			      int nodefs)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	int def_errors, def_mount_opt = sbi->s_def_mount_opt;
	const struct mount_opts *m;
	/* nodefs output is newline-separated, normal output comma-separated */
	char sep = nodefs ? '\n' : ',';

#define SEQ_OPTS_PUTS(str) seq_printf(seq, "%c" str, sep)
#define SEQ_OPTS_PRINT(str, arg) seq_printf(seq, "%c" str, sep, arg)

	if (sbi->s_sb_block != 1)
		SEQ_OPTS_PRINT("sb=%llu", sbi->s_sb_block);

	/* Walk the option table for simple set/clear flags */
	for (m = ext4_mount_opts; m->token != Opt_err; m++) {
		int want_set = m->flags & MOPT_SET;
		if (((m->flags & (MOPT_SET|MOPT_CLEAR)) == 0) ||
		    (m->flags & MOPT_CLEAR_ERR) || m->flags & MOPT_SKIP)
			continue;
		if (!nodefs && !(m->mount_opt & (sbi->s_mount_opt ^ def_mount_opt)))
			continue; /* skip if same as the default */
		if ((want_set &&
		     (sbi->s_mount_opt & m->mount_opt) != m->mount_opt) ||
		    (!want_set && (sbi->s_mount_opt & m->mount_opt)))
			continue; /* select Opt_noFoo vs Opt_Foo */
		SEQ_OPTS_PRINT("%s", token2str(m->token));
	}

	if (nodefs || !uid_eq(sbi->s_resuid, make_kuid(&init_user_ns, EXT4_DEF_RESUID)) ||
	    le16_to_cpu(es->s_def_resuid) != EXT4_DEF_RESUID)
		SEQ_OPTS_PRINT("resuid=%u",
				from_kuid_munged(&init_user_ns, sbi->s_resuid));
	if (nodefs || !gid_eq(sbi->s_resgid, make_kgid(&init_user_ns, EXT4_DEF_RESGID)) ||
	    le16_to_cpu(es->s_def_resgid) != EXT4_DEF_RESGID)
		SEQ_OPTS_PRINT("resgid=%u",
				from_kgid_munged(&init_user_ns, sbi->s_resgid));
	/* -1 makes every errors= comparison below fail, forcing output */
	def_errors = nodefs ? -1 : le16_to_cpu(es->s_errors);
	if (test_opt(sb, ERRORS_RO) && def_errors != EXT4_ERRORS_RO)
		SEQ_OPTS_PUTS("errors=remount-ro");
	if (test_opt(sb, ERRORS_CONT) && def_errors != EXT4_ERRORS_CONTINUE)
		SEQ_OPTS_PUTS("errors=continue");
	if (test_opt(sb, ERRORS_PANIC) && def_errors != EXT4_ERRORS_PANIC)
		SEQ_OPTS_PUTS("errors=panic");
	if (nodefs || sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ)
		SEQ_OPTS_PRINT("commit=%lu", sbi->s_commit_interval / HZ);
	if (nodefs || sbi->s_min_batch_time != EXT4_DEF_MIN_BATCH_TIME)
		SEQ_OPTS_PRINT("min_batch_time=%u", sbi->s_min_batch_time);
	if (nodefs || sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME)
		SEQ_OPTS_PRINT("max_batch_time=%u", sbi->s_max_batch_time);
	if (sb->s_flags & SB_I_VERSION)
		SEQ_OPTS_PUTS("i_version");
	if (nodefs || sbi->s_stripe)
		SEQ_OPTS_PRINT("stripe=%lu", sbi->s_stripe);
	if (nodefs || EXT4_MOUNT_DATA_FLAGS &
			(sbi->s_mount_opt ^ def_mount_opt)) {
		if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
			SEQ_OPTS_PUTS("data=journal");
		else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
			SEQ_OPTS_PUTS("data=ordered");
		else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
			SEQ_OPTS_PUTS("data=writeback");
	}
	if (nodefs ||
	    sbi->s_inode_readahead_blks != EXT4_DEF_INODE_READAHEAD_BLKS)
		SEQ_OPTS_PRINT("inode_readahead_blks=%u",
			       sbi->s_inode_readahead_blks);

	if (test_opt(sb, INIT_INODE_TABLE) && (nodefs ||
		       (sbi->s_li_wait_mult != EXT4_DEF_LI_WAIT_MULT)))
		SEQ_OPTS_PRINT("init_itable=%u", sbi->s_li_wait_mult);
	if (nodefs || sbi->s_max_dir_size_kb)
		SEQ_OPTS_PRINT("max_dir_size_kb=%u", sbi->s_max_dir_size_kb);
	if (test_opt(sb, DATA_ERR_ABORT))
		SEQ_OPTS_PUTS("data_err=abort");

	fscrypt_show_test_dummy_encryption(seq, sep, sb);

	if (sb->s_flags & SB_INLINECRYPT)
		SEQ_OPTS_PUTS("inlinecrypt");

	/* DAX: plain "dax" for ext2-compat mounts, dax=<mode> otherwise */
	if (test_opt(sb, DAX_ALWAYS)) {
		if (IS_EXT2_SB(sb))
			SEQ_OPTS_PUTS("dax");
		else
			SEQ_OPTS_PUTS("dax=always");
	} else if (test_opt2(sb, DAX_NEVER)) {
		SEQ_OPTS_PUTS("dax=never");
	} else if (test_opt2(sb, DAX_INODE)) {
		SEQ_OPTS_PUTS("dax=inode");
	}
	ext4_show_quota_options(seq, sb);
	return 0;
}
2631
2632 static int ext4_show_options(struct seq_file *seq, struct dentry *root)
2633 {
2634         return _ext4_show_options(seq, root->d_sb, 0);
2635 }
2636
2637 int ext4_seq_options_show(struct seq_file *seq, void *offset)
2638 {
2639         struct super_block *sb = seq->private;
2640         int rc;
2641
2642         seq_puts(seq, sb_rdonly(sb) ? "ro" : "rw");
2643         rc = _ext4_show_options(seq, sb, 1);
2644         seq_puts(seq, "\n");
2645         return rc;
2646 }
2647
/*
 * Final mount-time superblock setup: sanity-warn about unchecked/errored
 * filesystems, bump the mount count, stamp the mount time, and (for r/w
 * mounts) write the superblock back to disk.
 *
 * Returns 0 on success, -EROFS if the revision level is unsupported, or
 * the error from ext4_commit_super().
 */
static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
			    int read_only)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int err = 0;

	if (le32_to_cpu(es->s_rev_level) > EXT4_MAX_SUPP_REV) {
		ext4_msg(sb, KERN_ERR, "revision level too high, "
			 "forcing read-only mode");
		err = -EROFS;
		goto done;
	}
	/* Read-only mounts must not modify the on-disk superblock */
	if (read_only)
		goto done;
	if (!(sbi->s_mount_state & EXT4_VALID_FS))
		ext4_msg(sb, KERN_WARNING, "warning: mounting unchecked fs, "
			 "running e2fsck is recommended");
	else if (sbi->s_mount_state & EXT4_ERROR_FS)
		ext4_msg(sb, KERN_WARNING,
			 "warning: mounting fs with errors, "
			 "running e2fsck is recommended");
	/* s_max_mnt_count <= 0 means "no mount-count based checking" */
	else if ((__s16) le16_to_cpu(es->s_max_mnt_count) > 0 &&
		 le16_to_cpu(es->s_mnt_count) >=
		 (unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count))
		ext4_msg(sb, KERN_WARNING,
			 "warning: maximal mount count reached, "
			 "running e2fsck is recommended");
	else if (le32_to_cpu(es->s_checkinterval) &&
		 (ext4_get_tstamp(es, s_lastcheck) +
		  le32_to_cpu(es->s_checkinterval) <= ktime_get_real_seconds()))
		ext4_msg(sb, KERN_WARNING,
			 "warning: checktime reached, "
			 "running e2fsck is recommended");
	/*
	 * Without a journal, clear EXT4_VALID_FS now; it is restored on
	 * clean unmount so a crash leaves the fs marked as needing fsck.
	 */
	if (!sbi->s_journal)
		es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
	if (!(__s16) le16_to_cpu(es->s_max_mnt_count))
		es->s_max_mnt_count = cpu_to_le16(EXT4_DFL_MAX_MNT_COUNT);
	le16_add_cpu(&es->s_mnt_count, 1);
	ext4_update_tstamp(es, s_mtime);
	if (sbi->s_journal)
		ext4_set_feature_journal_needs_recovery(sb);

	err = ext4_commit_super(sb);
done:
	if (test_opt(sb, DEBUG))
		printk(KERN_INFO "[EXT4 FS bs=%lu, gc=%u, "
				"bpg=%lu, ipg=%lu, mo=%04x, mo2=%04x]\n",
			sb->s_blocksize,
			sbi->s_groups_count,
			EXT4_BLOCKS_PER_GROUP(sb),
			EXT4_INODES_PER_GROUP(sb),
			sbi->s_mount_opt, sbi->s_mount_opt2);

	cleancache_init_fs(sb);
	return err;
}
2704
/*
 * Grow the RCU-managed array of flex_groups pointers so it can hold the
 * flex group containing block group @ngroup - 1.  Each flex_groups entry
 * is allocated separately so readers holding the old array stay valid;
 * the pointer array itself is republished with rcu_assign_pointer() and
 * the old one freed via ext4_kvfree_array_rcu().
 *
 * Returns 0 on success (including the no-op cases), -ENOMEM on
 * allocation failure.
 */
int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct flex_groups **old_groups, **new_groups;
	int size, i, j;

	/* flex_bg disabled: nothing to allocate */
	if (!sbi->s_log_groups_per_flex)
		return 0;

	size = ext4_flex_group(sbi, ngroup - 1) + 1;
	if (size <= sbi->s_flex_groups_allocated)
		return 0;

	new_groups = kvzalloc(roundup_pow_of_two(size *
			      sizeof(*sbi->s_flex_groups)), GFP_KERNEL);
	if (!new_groups) {
		ext4_msg(sb, KERN_ERR,
			 "not enough memory for %d flex group pointers", size);
		return -ENOMEM;
	}
	for (i = sbi->s_flex_groups_allocated; i < size; i++) {
		new_groups[i] = kvzalloc(roundup_pow_of_two(
					 sizeof(struct flex_groups)),
					 GFP_KERNEL);
		if (!new_groups[i]) {
			/* Unwind only the entries allocated in this call */
			for (j = sbi->s_flex_groups_allocated; j < i; j++)
				kvfree(new_groups[j]);
			kvfree(new_groups);
			ext4_msg(sb, KERN_ERR,
				 "not enough memory for %d flex groups", size);
			return -ENOMEM;
		}
	}
	/* Copy the still-live old pointers, then publish the new array */
	rcu_read_lock();
	old_groups = rcu_dereference(sbi->s_flex_groups);
	if (old_groups)
		memcpy(new_groups, old_groups,
		       (sbi->s_flex_groups_allocated *
			sizeof(struct flex_groups *)));
	rcu_read_unlock();
	rcu_assign_pointer(sbi->s_flex_groups, new_groups);
	sbi->s_flex_groups_allocated = size;
	if (old_groups)
		ext4_kvfree_array_rcu(old_groups);
	return 0;
}
2751
2752 static int ext4_fill_flex_info(struct super_block *sb)
2753 {
2754         struct ext4_sb_info *sbi = EXT4_SB(sb);
2755         struct ext4_group_desc *gdp = NULL;
2756         struct flex_groups *fg;
2757         ext4_group_t flex_group;
2758         int i, err;
2759
2760         sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex;
2761         if (sbi->s_log_groups_per_flex < 1 || sbi->s_log_groups_per_flex > 31) {
2762                 sbi->s_log_groups_per_flex = 0;
2763                 return 1;
2764         }
2765
2766         err = ext4_alloc_flex_bg_array(sb, sbi->s_groups_count);
2767         if (err)
2768                 goto failed;
2769
2770         for (i = 0; i < sbi->s_groups_count; i++) {
2771                 gdp = ext4_get_group_desc(sb, i, NULL);
2772
2773                 flex_group = ext4_flex_group(sbi, i);
2774                 fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
2775                 atomic_add(ext4_free_inodes_count(sb, gdp), &fg->free_inodes);
2776                 atomic64_add(ext4_free_group_clusters(sb, gdp),
2777                              &fg->free_clusters);
2778                 atomic_add(ext4_used_dirs_count(sb, gdp), &fg->used_dirs);
2779         }
2780
2781         return 1;
2782 failed:
2783         return 0;
2784 }
2785
/*
 * Compute the checksum of group descriptor @gdp for @block_group.
 *
 * With metadata_csum the full crc32c covers the group number and the
 * whole descriptor (with the bg_checksum field treated as zero), folded
 * to 16 bits.  Otherwise, with gdt_csum, a crc16 over the fs UUID, the
 * group number, and the descriptor (skipping bg_checksum) is used.
 * Returns 0 if neither checksum feature is enabled.
 */
static __le16 ext4_group_desc_csum(struct super_block *sb, __u32 block_group,
				   struct ext4_group_desc *gdp)
{
	int offset = offsetof(struct ext4_group_desc, bg_checksum);
	__u16 crc = 0;
	__le32 le_group = cpu_to_le32(block_group);
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (ext4_has_metadata_csum(sbi->s_sb)) {
		/* Use new metadata_csum algorithm */
		__u32 csum32;
		__u16 dummy_csum = 0;

		csum32 = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&le_group,
				     sizeof(le_group));
		csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp, offset);
		/* Checksum the bg_checksum field itself as zero */
		csum32 = ext4_chksum(sbi, csum32, (__u8 *)&dummy_csum,
				     sizeof(dummy_csum));
		offset += sizeof(dummy_csum);
		/* Cover the 64-bit tail of the descriptor, if present */
		if (offset < sbi->s_desc_size)
			csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp + offset,
					     sbi->s_desc_size - offset);

		crc = csum32 & 0xFFFF;
		goto out;
	}

	/* old crc16 code */
	if (!ext4_has_feature_gdt_csum(sb))
		return 0;

	crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid));
	crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group));
	crc = crc16(crc, (__u8 *)gdp, offset);
	offset += sizeof(gdp->bg_checksum); /* skip checksum */
	/* for checksum of struct ext4_group_desc do the rest...*/
	if (ext4_has_feature_64bit(sb) &&
	    offset < le16_to_cpu(sbi->s_es->s_desc_size))
		crc = crc16(crc, (__u8 *)gdp + offset,
			    le16_to_cpu(sbi->s_es->s_desc_size) -
				offset);

out:
	return cpu_to_le16(crc);
}
2831
2832 int ext4_group_desc_csum_verify(struct super_block *sb, __u32 block_group,
2833                                 struct ext4_group_desc *gdp)
2834 {
2835         if (ext4_has_group_desc_csum(sb) &&
2836             (gdp->bg_checksum != ext4_group_desc_csum(sb, block_group, gdp)))
2837                 return 0;
2838
2839         return 1;
2840 }
2841
2842 void ext4_group_desc_csum_set(struct super_block *sb, __u32 block_group,
2843                               struct ext4_group_desc *gdp)
2844 {
2845         if (!ext4_has_group_desc_csum(sb))
2846                 return;
2847         gdp->bg_checksum = ext4_group_desc_csum(sb, block_group, gdp);
2848 }
2849
/*
 * Called at mount-time, super-block is locked.
 *
 * Validate every group descriptor: the block/inode bitmaps and inode
 * table must lie inside their group (or anywhere in the fs for flex_bg),
 * must not overlap the superblock or group descriptor blocks, and the
 * descriptor checksum must verify.  On a read-only mount, overlap and
 * checksum problems are reported but tolerated; location-out-of-group is
 * always fatal.  *first_not_zeroed receives the first group whose inode
 * table has not been zeroed (s_groups_count if all are zeroed).
 *
 * Returns 1 if the descriptors are acceptable, 0 otherwise.
 */
static int ext4_check_descriptors(struct super_block *sb,
				  ext4_fsblk_t sb_block,
				  ext4_group_t *first_not_zeroed)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
	ext4_fsblk_t last_block;
	/* Last block occupied by the primary group descriptor table */
	ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0);
	ext4_fsblk_t block_bitmap;
	ext4_fsblk_t inode_bitmap;
	ext4_fsblk_t inode_table;
	int flexbg_flag = 0;
	ext4_group_t i, grp = sbi->s_groups_count;

	if (ext4_has_feature_flex_bg(sb))
		flexbg_flag = 1;

	ext4_debug("Checking group descriptors");

	for (i = 0; i < sbi->s_groups_count; i++) {
		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);

		/* With flex_bg, metadata may live anywhere in the fs */
		if (i == sbi->s_groups_count - 1 || flexbg_flag)
			last_block = ext4_blocks_count(sbi->s_es) - 1;
		else
			last_block = first_block +
				(EXT4_BLOCKS_PER_GROUP(sb) - 1);

		/* Remember the first group with an un-zeroed inode table */
		if ((grp == sbi->s_groups_count) &&
		   !(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
			grp = i;

		block_bitmap = ext4_block_bitmap(sb, gdp);
		if (block_bitmap == sb_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Block bitmap for group %u overlaps "
				 "superblock", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		if (block_bitmap >= sb_block + 1 &&
		    block_bitmap <= last_bg_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Block bitmap for group %u overlaps "
				 "block group descriptors", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		if (block_bitmap < first_block || block_bitmap > last_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
			       "Block bitmap for group %u not in group "
			       "(block %llu)!", i, block_bitmap);
			return 0;
		}
		inode_bitmap = ext4_inode_bitmap(sb, gdp);
		if (inode_bitmap == sb_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Inode bitmap for group %u overlaps "
				 "superblock", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		if (inode_bitmap >= sb_block + 1 &&
		    inode_bitmap <= last_bg_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Inode bitmap for group %u overlaps "
				 "block group descriptors", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		if (inode_bitmap < first_block || inode_bitmap > last_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
			       "Inode bitmap for group %u not in group "
			       "(block %llu)!", i, inode_bitmap);
			return 0;
		}
		inode_table = ext4_inode_table(sb, gdp);
		if (inode_table == sb_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Inode table for group %u overlaps "
				 "superblock", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		if (inode_table >= sb_block + 1 &&
		    inode_table <= last_bg_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Inode table for group %u overlaps "
				 "block group descriptors", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		/* The inode table spans s_itb_per_group blocks */
		if (inode_table < first_block ||
		    inode_table + sbi->s_itb_per_group - 1 > last_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
			       "Inode table for group %u not in group "
			       "(block %llu)!", i, inode_table);
			return 0;
		}
		ext4_lock_group(sb, i);
		if (!ext4_group_desc_csum_verify(sb, i, gdp)) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Checksum for group %u failed (%u!=%u)",
				 i, le16_to_cpu(ext4_group_desc_csum(sb, i,
				     gdp)), le16_to_cpu(gdp->bg_checksum));
			if (!sb_rdonly(sb)) {
				ext4_unlock_group(sb, i);
				return 0;
			}
		}
		ext4_unlock_group(sb, i);
		if (!flexbg_flag)
			first_block += EXT4_BLOCKS_PER_GROUP(sb);
	}
	if (NULL != first_not_zeroed)
		*first_not_zeroed = grp;
	return 1;
}
2969
2970 /*
2971  * Maximal extent format file size.
2972  * Resulting logical blkno at s_maxbytes must fit in our on-disk
2973  * extent format containers, within a sector_t, and within i_blocks
2974  * in the vfs.  ext4 inode has 48 bits of i_block in fsblock units,
2975  * so that won't be a limiting factor.
2976  *
2977  * However there is other limiting factor. We do store extents in the form
2978  * of starting block and length, hence the resulting length of the extent
2979  * covering maximum file size must fit into on-disk format containers as
2980  * well. Given that length is always by 1 unit bigger than max unit (because
2981  * we count 0 as well) we have to lower the s_maxbytes by one fs block.
2982  *
2983  * Note, this does *not* consider any metadata overhead for vfs i_blocks.
2984  */
2985 static loff_t ext4_max_size(int blkbits, int has_huge_files)
2986 {
2987         loff_t res;
2988         loff_t upper_limit = MAX_LFS_FILESIZE;
2989
2990         BUILD_BUG_ON(sizeof(blkcnt_t) < sizeof(u64));
2991
2992         if (!has_huge_files) {
2993                 upper_limit = (1LL << 32) - 1;
2994
2995                 /* total blocks in file system block size */
2996                 upper_limit >>= (blkbits - 9);
2997                 upper_limit <<= blkbits;
2998         }
2999
3000         /*
3001          * 32-bit extent-start container, ee_block. We lower the maxbytes
3002          * by one fs block, so ee_len can cover the extent of maximum file
3003          * size
3004          */
3005         res = (1LL << 32) - 1;
3006         res <<= blkbits;
3007
3008         /* Sanity check against vm- & vfs- imposed limits */
3009         if (res > upper_limit)
3010                 res = upper_limit;
3011
3012         return res;
3013 }
3014
3015 /*
3016  * Maximal bitmap file size.  There is a direct, and {,double-,triple-}indirect
3017  * block limit, and also a limit of (2^48 - 1) 512-byte sectors in i_blocks.
3018  * We need to be 1 filesystem block less than the 2^48 sector limit.
3019  */
3020 static loff_t ext4_max_bitmap_size(int bits, int has_huge_files)
3021 {
3022         loff_t res = EXT4_NDIR_BLOCKS;
3023         int meta_blocks;
3024         loff_t upper_limit;
3025         /* This is calculated to be the largest file size for a dense, block
3026          * mapped file such that the file's total number of 512-byte sectors,
3027          * including data and all indirect blocks, does not exceed (2^48 - 1).
3028          *
3029          * __u32 i_blocks_lo and _u16 i_blocks_high represent the total
3030          * number of 512-byte sectors of the file.
3031          */
3032
3033         if (!has_huge_files) {
3034                 /*
3035                  * !has_huge_files or implies that the inode i_block field
3036                  * represents total file blocks in 2^32 512-byte sectors ==
3037                  * size of vfs inode i_blocks * 8
3038                  */
3039                 upper_limit = (1LL << 32) - 1;
3040
3041                 /* total blocks in file system block size */
3042                 upper_limit >>= (bits - 9);
3043
3044         } else {
3045                 /*
3046                  * We use 48 bit ext4_inode i_blocks
3047                  * With EXT4_HUGE_FILE_FL set the i_blocks
3048                  * represent total number of blocks in
3049                  * file system block size
3050                  */
3051                 upper_limit = (1LL << 48) - 1;
3052
3053         }
3054
3055         /* indirect blocks */
3056         meta_blocks = 1;
3057         /* double indirect blocks */
3058         meta_blocks += 1 + (1LL << (bits-2));
3059         /* tripple indirect blocks */
3060         meta_blocks += 1 + (1LL << (bits-2)) + (1LL << (2*(bits-2)));
3061
3062         upper_limit -= meta_blocks;
3063         upper_limit <<= bits;
3064
3065         res += 1LL << (bits-2);
3066         res += 1LL << (2*(bits-2));
3067         res += 1LL << (3*(bits-2));
3068         res <<= bits;
3069         if (res > upper_limit)
3070                 res = upper_limit;
3071
3072         if (res > MAX_LFS_FILESIZE)
3073                 res = MAX_LFS_FILESIZE;
3074
3075         return res;
3076 }
3077
3078 static ext4_fsblk_t descriptor_loc(struct super_block *sb,
3079                                    ext4_fsblk_t logical_sb_block, int nr)
3080 {
3081         struct ext4_sb_info *sbi = EXT4_SB(sb);
3082         ext4_group_t bg, first_meta_bg;
3083         int has_super = 0;
3084
3085         first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);
3086
3087         if (!ext4_has_feature_meta_bg(sb) || nr < first_meta_bg)
3088                 return logical_sb_block + nr + 1;
3089         bg = sbi->s_desc_per_block * nr;
3090         if (ext4_bg_has_super(sb, bg))
3091                 has_super = 1;
3092
3093         /*
3094          * If we have a meta_bg fs with 1k blocks, group 0's GDT is at
3095          * block 2, not 1.  If s_first_data_block == 0 (bigalloc is enabled
3096          * on modern mke2fs or blksize > 1k on older mke2fs) then we must
3097          * compensate.
3098          */
3099         if (sb->s_blocksize == 1024 && nr == 0 &&
3100             le32_to_cpu(sbi->s_es->s_first_data_block) == 0)
3101                 has_super++;
3102
3103         return (has_super + ext4_group_first_block_no(sb, bg));
3104 }
3105
3106 /**
3107  * ext4_get_stripe_size: Get the stripe size.
3108  * @sbi: In memory super block info
3109  *
3110  * If we have specified it via mount option, then
3111  * use the mount option value. If the value specified at mount time is
3112  * greater than the blocks per group use the super block value.
3113  * If the super block value is greater than blocks per group return 0.
 * The allocator needs it to be less than the blocks per group.
3115  *
3116  */
3117 static unsigned long ext4_get_stripe_size(struct ext4_sb_info *sbi)
3118 {
3119         unsigned long stride = le16_to_cpu(sbi->s_es->s_raid_stride);
3120         unsigned long stripe_width =
3121                         le32_to_cpu(sbi->s_es->s_raid_stripe_width);
3122         int ret;
3123
3124         if (sbi->s_stripe && sbi->s_stripe <= sbi->s_blocks_per_group)
3125                 ret = sbi->s_stripe;
3126         else if (stripe_width && stripe_width <= sbi->s_blocks_per_group)
3127                 ret = stripe_width;
3128         else if (stride && stride <= sbi->s_blocks_per_group)
3129                 ret = stride;
3130         else
3131                 ret = 0;
3132
3133         /*
3134          * If the stripe width is 1, this makes no sense and
3135          * we set it to 0 to turn off stripe handling code.
3136          */
3137         if (ret <= 1)
3138                 ret = 0;
3139
3140         return ret;
3141 }
3142
3143 /*
3144  * Check whether this filesystem can be mounted based on
3145  * the features present and the RDONLY/RDWR mount requested.
3146  * Returns 1 if this filesystem can be mounted as requested,
3147  * 0 if it cannot be.
3148  */
int ext4_feature_set_ok(struct super_block *sb, int readonly)
{
	/* Unknown incompat features make the fs unmountable even read-only. */
	if (ext4_has_unknown_ext4_incompat_features(sb)) {
		ext4_msg(sb, KERN_ERR,
			"Couldn't mount because of "
			"unsupported optional features (%x)",
			(le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_incompat) &
			~EXT4_FEATURE_INCOMPAT_SUPP));
		return 0;
	}

#ifndef CONFIG_UNICODE
	/* Casefolding needs the kernel's UTF-8 support compiled in. */
	if (ext4_has_feature_casefold(sb)) {
		ext4_msg(sb, KERN_ERR,
			 "Filesystem with casefold feature cannot be "
			 "mounted without CONFIG_UNICODE");
		return 0;
	}
#endif

	/* Everything below only matters for a read-write mount. */
	if (readonly)
		return 1;

	/* The "readonly" feature flag forces a read-only mount. */
	if (ext4_has_feature_readonly(sb)) {
		ext4_msg(sb, KERN_INFO, "filesystem is read-only");
		sb->s_flags |= SB_RDONLY;
		return 1;
	}

	/* Check that feature set is OK for a read-write mount */
	if (ext4_has_unknown_ext4_ro_compat_features(sb)) {
		ext4_msg(sb, KERN_ERR, "couldn't mount RDWR because of "
			 "unsupported optional features (%x)",
			 (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_ro_compat) &
				~EXT4_FEATURE_RO_COMPAT_SUPP));
		return 0;
	}
	/* bigalloc is only supported together with extents. */
	if (ext4_has_feature_bigalloc(sb) && !ext4_has_feature_extents(sb)) {
		ext4_msg(sb, KERN_ERR,
			 "Can't support bigalloc feature without "
			 "extents feature\n");
		return 0;
	}

#if !IS_ENABLED(CONFIG_QUOTA) || !IS_ENABLED(CONFIG_QFMT_V2)
	/* Quota features need kernel quota support for a RW mount. */
	if (!readonly && (ext4_has_feature_quota(sb) ||
			  ext4_has_feature_project(sb))) {
		ext4_msg(sb, KERN_ERR,
			 "The kernel was not built with CONFIG_QUOTA and CONFIG_QFMT_V2");
		return 0;
	}
#endif  /* CONFIG_QUOTA */
	return 1;
}
3203
3204 /*
3205  * This function is called once a day if we have errors logged
3206  * on the file system
3207  */
static void print_daily_error_info(struct timer_list *t)
{
	struct ext4_sb_info *sbi = from_timer(sbi, t, s_err_report);
	struct super_block *sb = sbi->s_sb;
	struct ext4_super_block *es = sbi->s_es;

	/* Total number of errors recorded since the last fsck. */
	if (es->s_error_count)
		/* fsck newer than v1.41.13 is needed to clean this condition. */
		ext4_msg(sb, KERN_NOTICE, "error count since last fsck: %u",
			 le32_to_cpu(es->s_error_count));
	/* Report the oldest recorded error, if one was saved. */
	if (es->s_first_error_time) {
		printk(KERN_NOTICE "EXT4-fs (%s): initial error at time %llu: %.*s:%d",
		       sb->s_id,
		       ext4_get_tstamp(es, s_first_error_time),
		       (int) sizeof(es->s_first_error_func),
		       es->s_first_error_func,
		       le32_to_cpu(es->s_first_error_line));
		if (es->s_first_error_ino)
			printk(KERN_CONT ": inode %u",
			       le32_to_cpu(es->s_first_error_ino));
		if (es->s_first_error_block)
			printk(KERN_CONT ": block %llu", (unsigned long long)
			       le64_to_cpu(es->s_first_error_block));
		printk(KERN_CONT "\n");
	}
	/* ...and the most recent one. */
	if (es->s_last_error_time) {
		printk(KERN_NOTICE "EXT4-fs (%s): last error at time %llu: %.*s:%d",
		       sb->s_id,
		       ext4_get_tstamp(es, s_last_error_time),
		       (int) sizeof(es->s_last_error_func),
		       es->s_last_error_func,
		       le32_to_cpu(es->s_last_error_line));
		if (es->s_last_error_ino)
			printk(KERN_CONT ": inode %u",
			       le32_to_cpu(es->s_last_error_ino));
		if (es->s_last_error_block)
			printk(KERN_CONT ": block %llu", (unsigned long long)
			       le64_to_cpu(es->s_last_error_block));
		printk(KERN_CONT "\n");
	}
	/* Re-arm the timer so the reminder repeats while errors persist. */
	mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ);  /* Once a day */
}
3250
3251 /* Find next suitable group and run ext4_init_inode_table */
static int ext4_run_li_request(struct ext4_li_request *elr)
{
	struct ext4_group_desc *gdp = NULL;
	struct super_block *sb = elr->lr_super;
	ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
	ext4_group_t group = elr->lr_next_group;
	unsigned long timeout = 0;
	unsigned int prefetch_ios = 0;
	int ret = 0;

	if (elr->lr_mode == EXT4_LI_MODE_PREFETCH_BBITMAP) {
		/* Issue the next batch of block bitmap reads. */
		elr->lr_next_group = ext4_mb_prefetch(sb, group,
				EXT4_SB(sb)->s_mb_prefetch, &prefetch_ios);
		if (prefetch_ios)
			ext4_mb_prefetch_fini(sb, elr->lr_next_group,
					      prefetch_ios);
		trace_ext4_prefetch_bitmaps(sb, group, elr->lr_next_group,
					    prefetch_ios);
		/*
		 * The prefetch cursor did not advance: prefetching is
		 * done.  Either switch this request over to inode
		 * table zeroing, or report completion (ret == 1).
		 */
		if (group >= elr->lr_next_group) {
			ret = 1;
			if (elr->lr_first_not_zeroed != ngroups &&
			    !sb_rdonly(sb) && test_opt(sb, INIT_INODE_TABLE)) {
				elr->lr_next_group = elr->lr_first_not_zeroed;
				elr->lr_mode = EXT4_LI_MODE_ITABLE;
				ret = 0;
			}
		}
		return ret;
	}

	/*
	 * ITABLE mode: find the next group whose inode table has not
	 * been zeroed yet.
	 */
	for (; group < ngroups; group++) {
		gdp = ext4_get_group_desc(sb, group, NULL);
		if (!gdp) {
			ret = 1;
			break;
		}

		if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
			break;
	}

	/* All groups done (or descriptor lookup failed): request complete. */
	if (group >= ngroups)
		ret = 1;

	if (!ret) {
		timeout = jiffies;
		ret = ext4_init_inode_table(sb, group,
					    elr->lr_timeout ? 0 : 1);
		trace_ext4_lazy_itable_init(sb, group);
		/*
		 * First pass: derive the per-group wait from how long
		 * the zeroing took, scaled by s_li_wait_mult.
		 */
		if (elr->lr_timeout == 0) {
			timeout = (jiffies - timeout) *
				EXT4_SB(elr->lr_super)->s_li_wait_mult;
			elr->lr_timeout = timeout;
		}
		elr->lr_next_sched = jiffies + elr->lr_timeout;
		elr->lr_next_group = group + 1;
	}
	return ret;
}
3311
3312 /*
3313  * Remove lr_request from the list_request and free the
3314  * request structure. Should be called with li_list_mtx held
3315  */
3316 static void ext4_remove_li_request(struct ext4_li_request *elr)
3317 {
3318         if (!elr)
3319                 return;
3320
3321         list_del(&elr->lr_request);
3322         EXT4_SB(elr->lr_super)->s_li_request = NULL;
3323         kfree(elr);
3324 }
3325
3326 static void ext4_unregister_li_request(struct super_block *sb)
3327 {
3328         mutex_lock(&ext4_li_mtx);
3329         if (!ext4_li_info) {
3330                 mutex_unlock(&ext4_li_mtx);
3331                 return;
3332         }
3333
3334         mutex_lock(&ext4_li_info->li_list_mtx);
3335         ext4_remove_li_request(EXT4_SB(sb)->s_li_request);
3336         mutex_unlock(&ext4_li_info->li_list_mtx);
3337         mutex_unlock(&ext4_li_mtx);
3338 }
3339
3340 static struct task_struct *ext4_lazyinit_task;
3341
3342 /*
3343  * This is the function where ext4lazyinit thread lives. It walks
3344  * through the request list searching for next scheduled filesystem.
3345  * When such a fs is found, run the lazy initialization request
 * (ext4_run_li_request) and keep track of the time spent in this
3347  * function. Based on that time we compute next schedule time of
3348  * the request. When walking through the list is complete, compute
3349  * next waking time and put itself into sleep.
3350  */
static int ext4_lazyinit_thread(void *arg)
{
	struct ext4_lazy_init *eli = (struct ext4_lazy_init *)arg;
	struct list_head *pos, *n;
	struct ext4_li_request *elr;
	unsigned long next_wakeup, cur;

	BUG_ON(NULL == eli);

cont_thread:
	while (true) {
		/* Earliest deadline seen this pass; MAX means "none". */
		next_wakeup = MAX_JIFFY_OFFSET;

		mutex_lock(&eli->li_list_mtx);
		if (list_empty(&eli->li_request_list)) {
			mutex_unlock(&eli->li_list_mtx);
			goto exit_thread;
		}
		list_for_each_safe(pos, n, &eli->li_request_list) {
			int err = 0;
			int progress = 0;
			elr = list_entry(pos, struct ext4_li_request,
					 lr_request);

			/* Not due yet: just record its deadline. */
			if (time_before(jiffies, elr->lr_next_sched)) {
				if (time_before(elr->lr_next_sched, next_wakeup))
					next_wakeup = elr->lr_next_sched;
				continue;
			}
			/*
			 * Only run the request if we can take both
			 * s_umount (keeps the sb alive) and a write
			 * reference non-blockingly; otherwise skip and
			 * retry later.
			 */
			if (down_read_trylock(&elr->lr_super->s_umount)) {
				if (sb_start_write_trylock(elr->lr_super)) {
					progress = 1;
					/*
					 * We hold sb->s_umount, sb can not
					 * be removed from the list, it is
					 * now safe to drop li_list_mtx
					 */
					mutex_unlock(&eli->li_list_mtx);
					err = ext4_run_li_request(elr);
					sb_end_write(elr->lr_super);
					mutex_lock(&eli->li_list_mtx);
					/*
					 * Re-read the successor: the list
					 * may have changed while
					 * li_list_mtx was dropped.
					 */
					n = pos->next;
				}
				up_read((&elr->lr_super->s_umount));
			}
			/* error, remove the lazy_init job */
			if (err) {
				ext4_remove_li_request(elr);
				continue;
			}
			if (!progress) {
				/* Locks unavailable; retry at a
				 * randomized later time. */
				elr->lr_next_sched = jiffies +
					(prandom_u32()
					 % (EXT4_DEF_LI_MAX_START_DELAY * HZ));
			}
			if (time_before(elr->lr_next_sched, next_wakeup))
				next_wakeup = elr->lr_next_sched;
		}
		mutex_unlock(&eli->li_list_mtx);

		try_to_freeze();

		cur = jiffies;
		/* Something is already due (or nothing scheduled): loop. */
		if ((time_after_eq(cur, next_wakeup)) ||
		    (MAX_JIFFY_OFFSET == next_wakeup)) {
			cond_resched();
			continue;
		}

		/* Sleep until the earliest pending deadline. */
		schedule_timeout_interruptible(next_wakeup - cur);

		if (kthread_should_stop()) {
			ext4_clear_request_list();
			goto exit_thread;
		}
	}

exit_thread:
	/*
	 * It looks like the request list is empty, but we need
	 * to check it under the li_list_mtx lock, to prevent any
	 * additions into it, and of course we should lock ext4_li_mtx
	 * to atomically free the list and ext4_li_info, because at
	 * this point another ext4 filesystem could be registering
	 * new one.
	 */
	mutex_lock(&ext4_li_mtx);
	mutex_lock(&eli->li_list_mtx);
	if (!list_empty(&eli->li_request_list)) {
		mutex_unlock(&eli->li_list_mtx);
		mutex_unlock(&ext4_li_mtx);
		goto cont_thread;
	}
	mutex_unlock(&eli->li_list_mtx);
	kfree(ext4_li_info);
	ext4_li_info = NULL;
	mutex_unlock(&ext4_li_mtx);

	return 0;
}
3451
3452 static void ext4_clear_request_list(void)
3453 {
3454         struct list_head *pos, *n;
3455         struct ext4_li_request *elr;
3456
3457         mutex_lock(&ext4_li_info->li_list_mtx);
3458         list_for_each_safe(pos, n, &ext4_li_info->li_request_list) {
3459                 elr = list_entry(pos, struct ext4_li_request,
3460                                  lr_request);
3461                 ext4_remove_li_request(elr);
3462         }
3463         mutex_unlock(&ext4_li_info->li_list_mtx);
3464 }
3465
3466 static int ext4_run_lazyinit_thread(void)
3467 {
3468         ext4_lazyinit_task = kthread_run(ext4_lazyinit_thread,
3469                                          ext4_li_info, "ext4lazyinit");
3470         if (IS_ERR(ext4_lazyinit_task)) {
3471                 int err = PTR_ERR(ext4_lazyinit_task);
3472                 ext4_clear_request_list();
3473                 kfree(ext4_li_info);
3474                 ext4_li_info = NULL;
3475                 printk(KERN_CRIT "EXT4-fs: error %d creating inode table "
3476                                  "initialization thread\n",
3477                                  err);
3478                 return err;
3479         }
3480         ext4_li_info->li_state |= EXT4_LAZYINIT_RUNNING;
3481         return 0;
3482 }
3483
3484 /*
 * Check whether it makes sense to run the itable init. thread or not.
3486  * If there is at least one uninitialized inode table, return
3487  * corresponding group number, else the loop goes through all
3488  * groups and return total number of groups.
3489  */
3490 static ext4_group_t ext4_has_uninit_itable(struct super_block *sb)
3491 {
3492         ext4_group_t group, ngroups = EXT4_SB(sb)->s_groups_count;
3493         struct ext4_group_desc *gdp = NULL;
3494
3495         if (!ext4_has_group_desc_csum(sb))
3496                 return ngroups;
3497
3498         for (group = 0; group < ngroups; group++) {
3499                 gdp = ext4_get_group_desc(sb, group, NULL);
3500                 if (!gdp)
3501                         continue;
3502
3503                 if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
3504                         break;
3505         }
3506
3507         return group;
3508 }
3509
3510 static int ext4_li_info_new(void)
3511 {
3512         struct ext4_lazy_init *eli = NULL;
3513
3514         eli = kzalloc(sizeof(*eli), GFP_KERNEL);
3515         if (!eli)
3516                 return -ENOMEM;
3517
3518         INIT_LIST_HEAD(&eli->li_request_list);
3519         mutex_init(&eli->li_list_mtx);
3520
3521         eli->li_state |= EXT4_LAZYINIT_QUIT;
3522
3523         ext4_li_info = eli;
3524
3525         return 0;
3526 }
3527
3528 static struct ext4_li_request *ext4_li_request_new(struct super_block *sb,
3529                                             ext4_group_t start)
3530 {
3531         struct ext4_li_request *elr;
3532
3533         elr = kzalloc(sizeof(*elr), GFP_KERNEL);
3534         if (!elr)
3535                 return NULL;
3536
3537         elr->lr_super = sb;
3538         elr->lr_first_not_zeroed = start;
3539         if (test_opt(sb, NO_PREFETCH_BLOCK_BITMAPS)) {
3540                 elr->lr_mode = EXT4_LI_MODE_ITABLE;
3541                 elr->lr_next_group = start;
3542         } else {
3543                 elr->lr_mode = EXT4_LI_MODE_PREFETCH_BBITMAP;
3544         }
3545
3546         /*
3547          * Randomize first schedule time of the request to
3548          * spread the inode table initialization requests
3549          * better.
3550          */
3551         elr->lr_next_sched = jiffies + (prandom_u32() %
3552                                 (EXT4_DEF_LI_MAX_START_DELAY * HZ));
3553         return elr;
3554 }
3555
/*
 * Register a lazy-init request for @sb so the ext4lazyinit thread can
 * prefetch block bitmaps and/or zero uninitialized inode tables,
 * starting the thread if it is not already running.
 * @first_not_zeroed: first group whose inode table still needs zeroing
 * (ngroups if there is none).  Returns 0 or a negative errno.
 */
int ext4_register_li_request(struct super_block *sb,
			     ext4_group_t first_not_zeroed)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_li_request *elr = NULL;
	ext4_group_t ngroups = sbi->s_groups_count;
	int ret = 0;

	mutex_lock(&ext4_li_mtx);
	if (sbi->s_li_request != NULL) {
		/*
		 * Reset timeout so it can be computed again, because
		 * s_li_wait_mult might have changed.
		 */
		sbi->s_li_request->lr_timeout = 0;
		goto out;
	}

	/*
	 * With bitmap prefetching disabled, there is nothing to do when
	 * no itable needs zeroing, the fs is read-only, or itable
	 * initialization was turned off by mount option.
	 */
	if (test_opt(sb, NO_PREFETCH_BLOCK_BITMAPS) &&
	    (first_not_zeroed == ngroups || sb_rdonly(sb) ||
	     !test_opt(sb, INIT_INODE_TABLE)))
		goto out;

	elr = ext4_li_request_new(sb, first_not_zeroed);
	if (!elr) {
		ret = -ENOMEM;
		goto out;
	}

	/* First registration ever: create the global lazy-init state. */
	if (NULL == ext4_li_info) {
		ret = ext4_li_info_new();
		if (ret)
			goto out;
	}

	mutex_lock(&ext4_li_info->li_list_mtx);
	list_add(&elr->lr_request, &ext4_li_info->li_request_list);
	mutex_unlock(&ext4_li_info->li_list_mtx);

	sbi->s_li_request = elr;
	/*
	 * set elr to NULL here since it has been inserted to
	 * the request_list and the removal and free of it is
	 * handled by ext4_clear_request_list from now on.
	 */
	elr = NULL;

	if (!(ext4_li_info->li_state & EXT4_LAZYINIT_RUNNING)) {
		ret = ext4_run_lazyinit_thread();
		if (ret)
			goto out;
	}
out:
	mutex_unlock(&ext4_li_mtx);
	/* On failure, elr (if still owned by us) must be freed here. */
	if (ret)
		kfree(elr);
	return ret;
}
3614
3615 /*
3616  * We do not need to lock anything since this is called on
3617  * module unload.
3618  */
3619 static void ext4_destroy_lazyinit_thread(void)
3620 {
3621         /*
3622          * If thread exited earlier
3623          * there's nothing to be done.
3624          */
3625         if (!ext4_li_info || !ext4_lazyinit_task)
3626                 return;
3627
3628         kthread_stop(ext4_lazyinit_task);
3629 }
3630
/*
 * Select and apply the jbd2 checksum feature flags that match this
 * filesystem's checksum configuration and mount options.  Returns the
 * value produced by jbd2_journal_set_features() when features had to
 * be set, else 1.
 */
static int set_journal_csum_feature_set(struct super_block *sb)
{
	int ret = 1;
	int compat, incompat;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (ext4_has_metadata_csum(sb)) {
		/* journal checksum v3 */
		compat = 0;
		incompat = JBD2_FEATURE_INCOMPAT_CSUM_V3;
	} else {
		/* journal checksum v1 */
		compat = JBD2_FEATURE_COMPAT_CHECKSUM;
		incompat = 0;
	}

	/* Start from a clean slate: drop all checksum-related flags. */
	jbd2_journal_clear_features(sbi->s_journal,
			JBD2_FEATURE_COMPAT_CHECKSUM, 0,
			JBD2_FEATURE_INCOMPAT_CSUM_V3 |
			JBD2_FEATURE_INCOMPAT_CSUM_V2);
	if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
		/* Async commit: enable it along with the chosen checksums. */
		ret = jbd2_journal_set_features(sbi->s_journal,
				compat, 0,
				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT |
				incompat);
	} else if (test_opt(sb, JOURNAL_CHECKSUM)) {
		/* Checksums only; make sure async commit is off. */
		ret = jbd2_journal_set_features(sbi->s_journal,
				compat, 0,
				incompat);
		jbd2_journal_clear_features(sbi->s_journal, 0, 0,
				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
	} else {
		/* Neither option: async commit must be cleared too. */
		jbd2_journal_clear_features(sbi->s_journal, 0, 0,
				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
	}

	return ret;
}
3669
3670 /*
3671  * Note: calculating the overhead so we can be compatible with
3672  * historical BSD practice is quite difficult in the face of
3673  * clusters/bigalloc.  This is because multiple metadata blocks from
3674  * different block group can end up in the same allocation cluster.
3675  * Calculating the exact overhead in the face of clustered allocation
3676  * requires either O(all block bitmaps) in memory or O(number of block
3677  * groups**2) in time.  We will still calculate the superblock for
3678  * older file systems --- and if we come across with a bigalloc file
3679  * system with zero in s_overhead_clusters the estimate will be close to
3680  * correct especially for very large cluster sizes --- but for newer
3681  * file systems, it's better to calculate this figure once at mkfs
3682  * time, and store it in the superblock.  If the superblock value is
3683  * present (even for non-bigalloc file systems), we will use it.
3684  */
static int count_overhead(struct super_block *sb, ext4_group_t grp,
			  char *buf)
{
	struct ext4_sb_info	*sbi = EXT4_SB(sb);
	struct ext4_group_desc	*gdp;
	ext4_fsblk_t		first_block, last_block, b;
	ext4_group_t		i, ngroups = ext4_get_groups_count(sb);
	int			s, j, count = 0;

	/*
	 * Without bigalloc, the overhead of a group is simply its backup
	 * superblock (if any), descriptor blocks, inode table blocks,
	 * plus 2 (block and inode bitmaps).
	 */
	if (!ext4_has_feature_bigalloc(sb))
		return (ext4_bg_has_super(sb, grp) + ext4_bg_num_gdb(sb, grp) +
			sbi->s_itb_per_group + 2);

	/*
	 * With bigalloc, metadata of *any* group may land in this
	 * group's block range, so mark used clusters in @buf and count
	 * them at the end.  @buf must be a zeroed page.
	 */
	first_block = le32_to_cpu(sbi->s_es->s_first_data_block) +
		(grp * EXT4_BLOCKS_PER_GROUP(sb));
	last_block = first_block + EXT4_BLOCKS_PER_GROUP(sb) - 1;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		/* Block bitmap of group i, if it falls inside grp's range. */
		b = ext4_block_bitmap(sb, gdp);
		if (b >= first_block && b <= last_block) {
			ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
			count++;
		}
		/* Same for the inode bitmap. */
		b = ext4_inode_bitmap(sb, gdp);
		if (b >= first_block && b <= last_block) {
			ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
			count++;
		}
		/* And each block of the inode table. */
		b = ext4_inode_table(sb, gdp);
		if (b >= first_block && b + sbi->s_itb_per_group <= last_block)
			for (j = 0; j < sbi->s_itb_per_group; j++, b++) {
				int c = EXT4_B2C(sbi, b - first_block);
				ext4_set_bit(c, buf);
				count++;
			}
		if (i != grp)
			continue;
		/*
		 * For grp itself, also account the backup superblock and
		 * the group descriptor blocks at the start of the group.
		 */
		s = 0;
		if (ext4_bg_has_super(sb, grp)) {
			ext4_set_bit(s++, buf);
			count++;
		}
		j = ext4_bg_num_gdb(sb, grp);
		if (s + j > EXT4_BLOCKS_PER_GROUP(sb)) {
			ext4_error(sb, "Invalid number of block group "
				   "descriptor blocks: %d", j);
			j = EXT4_BLOCKS_PER_GROUP(sb) - s;
		}
		count += j;
		for (; j > 0; j--)
			ext4_set_bit(EXT4_B2C(sbi, s++), buf);
	}
	if (!count)
		return 0;
	/* Overhead in clusters = clusters per group minus free bits in buf. */
	return EXT4_CLUSTERS_PER_GROUP(sb) -
		ext4_count_free(buf, EXT4_CLUSTERS_PER_GROUP(sb) / 8);
}
3742
/*
 * Compute the filesystem's metadata overhead (in clusters) and stash it
 * in sbi->s_overhead.  Returns 0 on success, -ENOMEM if the scratch page
 * cannot be allocated.
 */
int ext4_calculate_overhead(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct inode *j_inode;
	unsigned int j_blocks, j_inum = le32_to_cpu(es->s_journal_inum);
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
	ext4_fsblk_t overhead = 0;
	/* Zeroed scratch page, reused as a per-group bitmap by count_overhead(). */
	char *buf = (char *) get_zeroed_page(GFP_NOFS);

	if (!buf)
		return -ENOMEM;

	/*
	 * Compute the overhead (FS structures).  This is constant
	 * for a given filesystem unless the number of block groups
	 * changes so we cache the previous value until it does.
	 */

	/*
	 * All of the blocks before first_data_block are overhead
	 */
	overhead = EXT4_B2C(sbi, le32_to_cpu(es->s_first_data_block));

	/*
	 * Add the overhead found in each block group
	 */
	for (i = 0; i < ngroups; i++) {
		int blks;

		blks = count_overhead(sb, i, buf);
		overhead += blks;
		/* Re-zero the scratch bitmap only if count_overhead() dirtied it. */
		if (blks)
			memset(buf, 0, PAGE_SIZE);
		/* ngroups can be large; yield the CPU between groups. */
		cond_resched();
	}

	/*
	 * Add the internal journal blocks whether the journal has been
	 * loaded or not
	 */
	if (sbi->s_journal && !sbi->s_journal_bdev)
		overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_total_len);
	else if (ext4_has_feature_journal(sb) && !sbi->s_journal && j_inum) {
		/* j_inum for internal journal is non-zero */
		j_inode = ext4_get_journal_inode(sb, j_inum);
		if (j_inode) {
			j_blocks = j_inode->i_size >> sb->s_blocksize_bits;
			overhead += EXT4_NUM_B2C(sbi, j_blocks);
			iput(j_inode);
		} else {
			/* Best effort: the computed overhead simply omits the journal. */
			ext4_msg(sb, KERN_ERR, "can't get journal size");
		}
	}
	sbi->s_overhead = overhead;
	/*
	 * Publish s_overhead before any later stores become visible;
	 * presumably paired with a read-side barrier in consumers of
	 * s_overhead (e.g. the statfs path) -- TODO confirm pairing.
	 */
	smp_wmb();
	free_page((unsigned long) buf);
	return 0;
}
3805
3806 static void ext4_set_resv_clusters(struct super_block *sb)
3807 {
3808         ext4_fsblk_t resv_clusters;
3809         struct ext4_sb_info *sbi = EXT4_SB(sb);
3810
3811         /*
3812          * There's no need to reserve anything when we aren't using extents.
3813          * The space estimates are exact, there are no unwritten extents,
3814          * hole punching doesn't need new metadata... This is needed especially
3815          * to keep ext2/3 backward compatibility.
3816          */
3817         if (!ext4_has_feature_extents(sb))
3818                 return;
3819         /*
3820          * By default we reserve 2% or 4096 clusters, whichever is smaller.
3821          * This should cover the situations where we can not afford to run
3822          * out of space like for example punch hole, or converting
3823          * unwritten extents in delalloc path. In most cases such
3824          * allocation would require 1, or 2 blocks, higher numbers are
3825          * very rare.
3826          */
3827         resv_clusters = (ext4_blocks_count(sbi->s_es) >>
3828                          sbi->s_cluster_bits);
3829
3830         do_div(resv_clusters, 50);
3831         resv_clusters = min_t(ext4_fsblk_t, resv_clusters, 4096);
3832
3833         atomic64_set(&sbi->s_resv_clusters, resv_clusters);
3834 }
3835
static const char *ext4_quota_mode(struct super_block *sb)
{
#ifdef CONFIG_QUOTA
	/* Quota is not usable on this filesystem at all. */
	if (!ext4_quota_capable(sb))
		return "none";

	/* With a loaded journal, quota updates may themselves be journalled. */
	return (EXT4_SB(sb)->s_journal && ext4_is_quota_journalled(sb)) ?
		"journalled" : "writeback";
#else
	return "disabled";
#endif
}
3850
3851 static void ext4_setup_csum_trigger(struct super_block *sb,
3852                                     enum ext4_journal_trigger_type type,
3853                                     void (*trigger)(
3854                                         struct jbd2_buffer_trigger_type *type,
3855                                         struct buffer_head *bh,
3856                                         void *mapped_data,
3857                                         size_t size))
3858 {
3859         struct ext4_sb_info *sbi = EXT4_SB(sb);
3860
3861         sbi->s_journal_triggers[type].sb = sb;
3862         sbi->s_journal_triggers[type].tr_triggers.t_frozen = trigger;
3863 }
3864
3865 static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3866 {
3867         struct dax_device *dax_dev = fs_dax_get_by_bdev(sb->s_bdev);
3868         char *orig_data = kstrdup(data, GFP_KERNEL);
3869         struct buffer_head *bh, **group_desc;
3870         struct ext4_super_block *es = NULL;
3871         struct ext4_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
3872         struct flex_groups **flex_groups;
3873         ext4_fsblk_t block;
3874         ext4_fsblk_t sb_block = get_sb_block(&data);
3875         ext4_fsblk_t logical_sb_block;
3876         unsigned long offset = 0;
3877         unsigned long def_mount_opts;
3878         struct inode *root;
3879         const char *descr;
3880         int ret = -ENOMEM;
3881         int blocksize, clustersize;
3882         unsigned int db_count;
3883         unsigned int i;
3884         int needs_recovery, has_huge_files;
3885         __u64 blocks_count;
3886         int err = 0;
3887         ext4_group_t first_not_zeroed;
3888         struct ext4_parsed_options parsed_opts;
3889
3890         /* Set defaults for the variables that will be set during parsing */
3891         parsed_opts.journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
3892         parsed_opts.journal_devnum = 0;
3893         parsed_opts.mb_optimize_scan = DEFAULT_MB_OPTIMIZE_SCAN;
3894
3895         if ((data && !orig_data) || !sbi)
3896                 goto out_free_base;
3897
3898         sbi->s_daxdev = dax_dev;
3899         sbi->s_blockgroup_lock =
3900                 kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
3901         if (!sbi->s_blockgroup_lock)
3902                 goto out_free_base;
3903
3904         sb->s_fs_info = sbi;
3905         sbi->s_sb = sb;
3906         sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS;
3907         sbi->s_sb_block = sb_block;
3908         sbi->s_sectors_written_start =
3909                 part_stat_read(sb->s_bdev, sectors[STAT_WRITE]);
3910
3911         /* Cleanup superblock name */
3912         strreplace(sb->s_id, '/', '!');
3913
3914         /* -EINVAL is default */
3915         ret = -EINVAL;
3916         blocksize = sb_min_blocksize(sb, EXT4_MIN_BLOCK_SIZE);
3917         if (!blocksize) {
3918                 ext4_msg(sb, KERN_ERR, "unable to set blocksize");
3919                 goto out_fail;
3920         }
3921
3922         /*
3923          * The ext4 superblock will not be buffer aligned for other than 1kB
3924          * block sizes.  We need to calculate the offset from buffer start.
3925          */
3926         if (blocksize != EXT4_MIN_BLOCK_SIZE) {
3927                 logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
3928                 offset = do_div(logical_sb_block, blocksize);
3929         } else {
3930                 logical_sb_block = sb_block;
3931         }
3932
3933         bh = ext4_sb_bread_unmovable(sb, logical_sb_block);
3934         if (IS_ERR(bh)) {
3935                 ext4_msg(sb, KERN_ERR, "unable to read superblock");
3936                 ret = PTR_ERR(bh);
3937                 goto out_fail;
3938         }
3939         /*
3940          * Note: s_es must be initialized as soon as possible because
3941          *       some ext4 macro-instructions depend on its value
3942          */
3943         es = (struct ext4_super_block *) (bh->b_data + offset);
3944         sbi->s_es = es;
3945         sb->s_magic = le16_to_cpu(es->s_magic);
3946         if (sb->s_magic != EXT4_SUPER_MAGIC)
3947                 goto cantfind_ext4;
3948         sbi->s_kbytes_written = le64_to_cpu(es->s_kbytes_written);
3949
3950         /* Warn if metadata_csum and gdt_csum are both set. */
3951         if (ext4_has_feature_metadata_csum(sb) &&
3952             ext4_has_feature_gdt_csum(sb))
3953                 ext4_warning(sb, "metadata_csum and uninit_bg are "
3954                              "redundant flags; please run fsck.");
3955
3956         /* Check for a known checksum algorithm */
3957         if (!ext4_verify_csum_type(sb, es)) {
3958                 ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
3959                          "unknown checksum algorithm.");
3960                 silent = 1;
3961                 goto cantfind_ext4;
3962         }
3963
3964         /* Load the checksum driver */
3965         sbi->s_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
3966         if (IS_ERR(sbi->s_chksum_driver)) {
3967                 ext4_msg(sb, KERN_ERR, "Cannot load crc32c driver.");
3968                 ret = PTR_ERR(sbi->s_chksum_driver);
3969                 sbi->s_chksum_driver = NULL;
3970                 goto failed_mount;
3971         }
3972
3973         /* Check superblock checksum */
3974         if (!ext4_superblock_csum_verify(sb, es)) {
3975                 ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
3976                          "invalid superblock checksum.  Run e2fsck?");
3977                 silent = 1;
3978                 ret = -EFSBADCRC;
3979                 goto cantfind_ext4;
3980         }
3981
3982         /* Precompute checksum seed for all metadata */
3983         if (ext4_has_feature_csum_seed(sb))
3984                 sbi->s_csum_seed = le32_to_cpu(es->s_checksum_seed);
3985         else if (ext4_has_metadata_csum(sb) || ext4_has_feature_ea_inode(sb))
3986                 sbi->s_csum_seed = ext4_chksum(sbi, ~0, es->s_uuid,
3987                                                sizeof(es->s_uuid));
3988
3989         /* Set defaults before we parse the mount options */
3990         def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
3991         set_opt(sb, INIT_INODE_TABLE);
3992         if (def_mount_opts & EXT4_DEFM_DEBUG)
3993                 set_opt(sb, DEBUG);
3994         if (def_mount_opts & EXT4_DEFM_BSDGROUPS)
3995                 set_opt(sb, GRPID);
3996         if (def_mount_opts & EXT4_DEFM_UID16)
3997                 set_opt(sb, NO_UID32);
3998         /* xattr user namespace & acls are now defaulted on */
3999         set_opt(sb, XATTR_USER);
4000 #ifdef CONFIG_EXT4_FS_POSIX_ACL
4001         set_opt(sb, POSIX_ACL);
4002 #endif
4003         if (ext4_has_feature_fast_commit(sb))
4004                 set_opt2(sb, JOURNAL_FAST_COMMIT);
4005         /* don't forget to enable journal_csum when metadata_csum is enabled. */
4006         if (ext4_has_metadata_csum(sb))
4007                 set_opt(sb, JOURNAL_CHECKSUM);
4008
4009         if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
4010                 set_opt(sb, JOURNAL_DATA);
4011         else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED)
4012                 set_opt(sb, ORDERED_DATA);
4013         else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK)
4014                 set_opt(sb, WRITEBACK_DATA);
4015
4016         if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_PANIC)
4017                 set_opt(sb, ERRORS_PANIC);
4018         else if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_CONTINUE)
4019                 set_opt(sb, ERRORS_CONT);
4020         else
4021                 set_opt(sb, ERRORS_RO);
4022         /* block_validity enabled by default; disable with noblock_validity */
4023         set_opt(sb, BLOCK_VALIDITY);
4024         if (def_mount_opts & EXT4_DEFM_DISCARD)
4025                 set_opt(sb, DISCARD);
4026
4027         sbi->s_resuid = make_kuid(&init_user_ns, le16_to_cpu(es->s_def_resuid));
4028         sbi->s_resgid = make_kgid(&init_user_ns, le16_to_cpu(es->s_def_resgid));
4029         sbi->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE * HZ;
4030         sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME;
4031         sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME;
4032
4033         if ((def_mount_opts & EXT4_DEFM_NOBARRIER) == 0)
4034                 set_opt(sb, BARRIER);
4035
4036         /*
4037          * enable delayed allocation by default
4038          * Use -o nodelalloc to turn it off
4039          */
4040         if (!IS_EXT3_SB(sb) && !IS_EXT2_SB(sb) &&
4041             ((def_mount_opts & EXT4_DEFM_NODELALLOC) == 0))
4042                 set_opt(sb, DELALLOC);
4043
4044         /*
4045          * set default s_li_wait_mult for lazyinit, for the case there is
4046          * no mount option specified.
4047          */
4048         sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;
4049
4050         if (le32_to_cpu(es->s_log_block_size) >
4051             (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
4052                 ext4_msg(sb, KERN_ERR,
4053                          "Invalid log block size: %u",
4054                          le32_to_cpu(es->s_log_block_size));
4055                 goto failed_mount;
4056         }
4057         if (le32_to_cpu(es->s_log_cluster_size) >
4058             (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
4059                 ext4_msg(sb, KERN_ERR,
4060                          "Invalid log cluster size: %u",
4061                          le32_to_cpu(es->s_log_cluster_size));
4062                 goto failed_mount;
4063         }
4064
4065         blocksize = EXT4_MIN_BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
4066
4067         if (blocksize == PAGE_SIZE)
4068                 set_opt(sb, DIOREAD_NOLOCK);
4069
4070         if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) {
4071                 sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE;
4072                 sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO;
4073         } else {
4074                 sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
4075                 sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
4076                 if (sbi->s_first_ino < EXT4_GOOD_OLD_FIRST_INO) {
4077                         ext4_msg(sb, KERN_ERR, "invalid first ino: %u",
4078                                  sbi->s_first_ino);
4079                         goto failed_mount;
4080                 }
4081                 if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) ||
4082                     (!is_power_of_2(sbi->s_inode_size)) ||
4083                     (sbi->s_inode_size > blocksize)) {
4084                         ext4_msg(sb, KERN_ERR,
4085                                "unsupported inode size: %d",
4086                                sbi->s_inode_size);
4087                         ext4_msg(sb, KERN_ERR, "blocksize: %d", blocksize);
4088                         goto failed_mount;
4089                 }
4090                 /*
4091                  * i_atime_extra is the last extra field available for
4092                  * [acm]times in struct ext4_inode. Checking for that
4093                  * field should suffice to ensure we have extra space
4094                  * for all three.
4095                  */
4096                 if (sbi->s_inode_size >= offsetof(struct ext4_inode, i_atime_extra) +
4097                         sizeof(((struct ext4_inode *)0)->i_atime_extra)) {
4098                         sb->s_time_gran = 1;
4099                         sb->s_time_max = EXT4_EXTRA_TIMESTAMP_MAX;
4100                 } else {
4101                         sb->s_time_gran = NSEC_PER_SEC;
4102                         sb->s_time_max = EXT4_NON_EXTRA_TIMESTAMP_MAX;
4103                 }
4104                 sb->s_time_min = EXT4_TIMESTAMP_MIN;
4105         }
4106         if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) {
4107                 sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
4108                         EXT4_GOOD_OLD_INODE_SIZE;
4109                 if (ext4_has_feature_extra_isize(sb)) {
4110                         unsigned v, max = (sbi->s_inode_size -
4111                                            EXT4_GOOD_OLD_INODE_SIZE);
4112
4113                         v = le16_to_cpu(es->s_want_extra_isize);
4114                         if (v > max) {
4115                                 ext4_msg(sb, KERN_ERR,
4116                                          "bad s_want_extra_isize: %d", v);
4117                                 goto failed_mount;
4118                         }
4119                         if (sbi->s_want_extra_isize < v)
4120                                 sbi->s_want_extra_isize = v;
4121
4122                         v = le16_to_cpu(es->s_min_extra_isize);
4123                         if (v > max) {
4124                                 ext4_msg(sb, KERN_ERR,
4125                                          "bad s_min_extra_isize: %d", v);
4126                                 goto failed_mount;
4127                         }
4128                         if (sbi->s_want_extra_isize < v)
4129                                 sbi->s_want_extra_isize = v;
4130                 }
4131         }
4132
4133         if (sbi->s_es->s_mount_opts[0]) {
4134                 char *s_mount_opts = kstrndup(sbi->s_es->s_mount_opts,
4135                                               sizeof(sbi->s_es->s_mount_opts),
4136                                               GFP_KERNEL);
4137                 if (!s_mount_opts)
4138                         goto failed_mount;
4139                 if (!parse_options(s_mount_opts, sb, &parsed_opts, 0)) {
4140                         ext4_msg(sb, KERN_WARNING,
4141                                  "failed to parse options in superblock: %s",
4142                                  s_mount_opts);
4143                 }
4144                 kfree(s_mount_opts);
4145         }
4146         sbi->s_def_mount_opt = sbi->s_mount_opt;
4147         if (!parse_options((char *) data, sb, &parsed_opts, 0))
4148                 goto failed_mount;
4149
4150 #ifdef CONFIG_UNICODE
4151         if (ext4_has_feature_casefold(sb) && !sb->s_encoding) {
4152                 const struct ext4_sb_encodings *encoding_info;
4153                 struct unicode_map *encoding;
4154                 __u16 encoding_flags;
4155
4156                 if (ext4_sb_read_encoding(es, &encoding_info,
4157                                           &encoding_flags)) {
4158                         ext4_msg(sb, KERN_ERR,
4159                                  "Encoding requested by superblock is unknown");
4160                         goto failed_mount;
4161                 }
4162
4163                 encoding = utf8_load(encoding_info->version);
4164                 if (IS_ERR(encoding)) {
4165                         ext4_msg(sb, KERN_ERR,
4166                                  "can't mount with superblock charset: %s-%s "
4167                                  "not supported by the kernel. flags: 0x%x.",
4168                                  encoding_info->name, encoding_info->version,
4169                                  encoding_flags);
4170                         goto failed_mount;
4171                 }
4172                 ext4_msg(sb, KERN_INFO,"Using encoding defined by superblock: "
4173                          "%s-%s with flags 0x%hx", encoding_info->name,
4174                          encoding_info->version?:"\b", encoding_flags);
4175
4176                 sb->s_encoding = encoding;
4177                 sb->s_encoding_flags = encoding_flags;
4178         }
4179 #endif
4180
4181         if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
4182                 printk_once(KERN_WARNING "EXT4-fs: Warning: mounting with data=journal disables delayed allocation, dioread_nolock, O_DIRECT and fast_commit support!\n");
4183                 /* can't mount with both data=journal and dioread_nolock. */
4184                 clear_opt(sb, DIOREAD_NOLOCK);
4185                 clear_opt2(sb, JOURNAL_FAST_COMMIT);
4186                 if (test_opt2(sb, EXPLICIT_DELALLOC)) {
4187                         ext4_msg(sb, KERN_ERR, "can't mount with "
4188                                  "both data=journal and delalloc");
4189                         goto failed_mount;
4190                 }
4191                 if (test_opt(sb, DAX_ALWAYS)) {
4192                         ext4_msg(sb, KERN_ERR, "can't mount with "
4193                                  "both data=journal and dax");
4194                         goto failed_mount;
4195                 }
4196                 if (ext4_has_feature_encrypt(sb)) {
4197                         ext4_msg(sb, KERN_WARNING,
4198                                  "encrypted files will use data=ordered "
4199                                  "instead of data journaling mode");
4200                 }
4201                 if (test_opt(sb, DELALLOC))
4202                         clear_opt(sb, DELALLOC);
4203         } else {
4204                 sb->s_iflags |= SB_I_CGROUPWB;
4205         }
4206
4207         sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
4208                 (test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);
4209
4210         if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV &&
4211             (ext4_has_compat_features(sb) ||
4212              ext4_has_ro_compat_features(sb) ||
4213              ext4_has_incompat_features(sb)))
4214                 ext4_msg(sb, KERN_WARNING,
4215                        "feature flags set on rev 0 fs, "
4216                        "running e2fsck is recommended");
4217
4218         if (es->s_creator_os == cpu_to_le32(EXT4_OS_HURD)) {
4219                 set_opt2(sb, HURD_COMPAT);
4220                 if (ext4_has_feature_64bit(sb)) {
4221                         ext4_msg(sb, KERN_ERR,
4222                                  "The Hurd can't support 64-bit file systems");
4223                         goto failed_mount;
4224                 }
4225
4226                 /*
4227                  * ea_inode feature uses l_i_version field which is not
4228                  * available in HURD_COMPAT mode.
4229                  */
4230                 if (ext4_has_feature_ea_inode(sb)) {
4231                         ext4_msg(sb, KERN_ERR,
4232                                  "ea_inode feature is not supported for Hurd");
4233                         goto failed_mount;
4234                 }
4235         }
4236
4237         if (IS_EXT2_SB(sb)) {
4238                 if (ext2_feature_set_ok(sb))
4239                         ext4_msg(sb, KERN_INFO, "mounting ext2 file system "
4240                                  "using the ext4 subsystem");
4241                 else {
4242                         /*
4243                          * If we're probing be silent, if this looks like
4244                          * it's actually an ext[34] filesystem.
4245                          */
4246                         if (silent && ext4_feature_set_ok(sb, sb_rdonly(sb)))
4247                                 goto failed_mount;
4248                         ext4_msg(sb, KERN_ERR, "couldn't mount as ext2 due "
4249                                  "to feature incompatibilities");
4250                         goto failed_mount;
4251                 }
4252         }
4253
4254         if (IS_EXT3_SB(sb)) {
4255                 if (ext3_feature_set_ok(sb))
4256                         ext4_msg(sb, KERN_INFO, "mounting ext3 file system "
4257                                  "using the ext4 subsystem");
4258                 else {
4259                         /*
4260                          * If we're probing be silent, if this looks like
4261                          * it's actually an ext4 filesystem.
4262                          */
4263                         if (silent && ext4_feature_set_ok(sb, sb_rdonly(sb)))
4264                                 goto failed_mount;
4265                         ext4_msg(sb, KERN_ERR, "couldn't mount as ext3 due "
4266                                  "to feature incompatibilities");
4267                         goto failed_mount;
4268                 }
4269         }
4270
4271         /*
4272          * Check feature flags regardless of the revision level, since we
4273          * previously didn't change the revision level when setting the flags,
4274          * so there is a chance incompat flags are set on a rev 0 filesystem.
4275          */
4276         if (!ext4_feature_set_ok(sb, (sb_rdonly(sb))))
4277                 goto failed_mount;
4278
4279         if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (blocksize / 4)) {
4280                 ext4_msg(sb, KERN_ERR,
4281                          "Number of reserved GDT blocks insanely large: %d",
4282                          le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks));
4283                 goto failed_mount;
4284         }
4285
4286         if (bdev_dax_supported(sb->s_bdev, blocksize))
4287                 set_bit(EXT4_FLAGS_BDEV_IS_DAX, &sbi->s_ext4_flags);
4288
4289         if (sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) {
4290                 if (ext4_has_feature_inline_data(sb)) {
4291                         ext4_msg(sb, KERN_ERR, "Cannot use DAX on a filesystem"
4292                                         " that may contain inline data");
4293                         goto failed_mount;
4294                 }
4295                 if (!test_bit(EXT4_FLAGS_BDEV_IS_DAX, &sbi->s_ext4_flags)) {
4296                         ext4_msg(sb, KERN_ERR,
4297                                 "DAX unsupported by block device.");
4298                         goto failed_mount;
4299                 }
4300         }
4301
4302         if (ext4_has_feature_encrypt(sb) && es->s_encryption_level) {
4303                 ext4_msg(sb, KERN_ERR, "Unsupported encryption level %d",
4304                          es->s_encryption_level);
4305                 goto failed_mount;
4306         }
4307
4308         if (sb->s_blocksize != blocksize) {
4309                 /*
4310                  * bh must be released before kill_bdev(), otherwise
4311                  * it won't be freed and its page also. kill_bdev()
4312                  * is called by sb_set_blocksize().
4313                  */
4314                 brelse(bh);
4315                 /* Validate the filesystem blocksize */
4316                 if (!sb_set_blocksize(sb, blocksize)) {
4317                         ext4_msg(sb, KERN_ERR, "bad block size %d",
4318                                         blocksize);
4319                         bh = NULL;
4320                         goto failed_mount;
4321                 }
4322
4323                 logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
4324                 offset = do_div(logical_sb_block, blocksize);
4325                 bh = ext4_sb_bread_unmovable(sb, logical_sb_block);
4326                 if (IS_ERR(bh)) {
4327                         ext4_msg(sb, KERN_ERR,
4328                                "Can't read superblock on 2nd try");
4329                         ret = PTR_ERR(bh);
4330                         bh = NULL;
4331                         goto failed_mount;
4332                 }
4333                 es = (struct ext4_super_block *)(bh->b_data + offset);
4334                 sbi->s_es = es;
4335                 if (es->s_magic != cpu_to_le16(EXT4_SUPER_MAGIC)) {
4336                         ext4_msg(sb, KERN_ERR,
4337                                "Magic mismatch, very weird!");
4338                         goto failed_mount;
4339                 }
4340         }
4341
4342         has_huge_files = ext4_has_feature_huge_file(sb);
4343         sbi->s_bitmap_maxbytes = ext4_max_bitmap_size(sb->s_blocksize_bits,
4344                                                       has_huge_files);
4345         sb->s_maxbytes = ext4_max_size(sb->s_blocksize_bits, has_huge_files);
4346
4347         sbi->s_desc_size = le16_to_cpu(es->s_desc_size);
4348         if (ext4_has_feature_64bit(sb)) {
4349                 if (sbi->s_desc_size < EXT4_MIN_DESC_SIZE_64BIT ||
4350                     sbi->s_desc_size > EXT4_MAX_DESC_SIZE ||
4351                     !is_power_of_2(sbi->s_desc_size)) {
4352                         ext4_msg(sb, KERN_ERR,
4353                                "unsupported descriptor size %lu",
4354                                sbi->s_desc_size);
4355                         goto failed_mount;
4356                 }
4357         } else
4358                 sbi->s_desc_size = EXT4_MIN_DESC_SIZE;
4359
4360         sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
4361         sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
4362
4363         sbi->s_inodes_per_block = blocksize / EXT4_INODE_SIZE(sb);
4364         if (sbi->s_inodes_per_block == 0)
4365                 goto cantfind_ext4;
4366         if (sbi->s_inodes_per_group < sbi->s_inodes_per_block ||
4367             sbi->s_inodes_per_group > blocksize * 8) {
4368                 ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu\n",
4369                          sbi->s_inodes_per_group);
4370                 goto failed_mount;
4371         }
4372         sbi->s_itb_per_group = sbi->s_inodes_per_group /
4373                                         sbi->s_inodes_per_block;
4374         sbi->s_desc_per_block = blocksize / EXT4_DESC_SIZE(sb);
4375         sbi->s_sbh = bh;
4376         sbi->s_mount_state = le16_to_cpu(es->s_state);
4377         sbi->s_addr_per_block_bits = ilog2(EXT4_ADDR_PER_BLOCK(sb));
4378         sbi->s_desc_per_block_bits = ilog2(EXT4_DESC_PER_BLOCK(sb));
4379
4380         for (i = 0; i < 4; i++)
4381                 sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
4382         sbi->s_def_hash_version = es->s_def_hash_version;
4383         if (ext4_has_feature_dir_index(sb)) {
4384                 i = le32_to_cpu(es->s_flags);
4385                 if (i & EXT2_FLAGS_UNSIGNED_HASH)
4386                         sbi->s_hash_unsigned = 3;
4387                 else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
4388 #ifdef __CHAR_UNSIGNED__
4389                         if (!sb_rdonly(sb))
4390                                 es->s_flags |=
4391                                         cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
4392                         sbi->s_hash_unsigned = 3;
4393 #else
4394                         if (!sb_rdonly(sb))
4395                                 es->s_flags |=
4396                                         cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
4397 #endif
4398                 }
4399         }
4400
4401         /* Handle clustersize */
4402         clustersize = BLOCK_SIZE << le32_to_cpu(es->s_log_cluster_size);
4403         if (ext4_has_feature_bigalloc(sb)) {
4404                 if (clustersize < blocksize) {
4405                         ext4_msg(sb, KERN_ERR,
4406                                  "cluster size (%d) smaller than "
4407                                  "block size (%d)", clustersize, blocksize);
4408                         goto failed_mount;
4409                 }
4410                 sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) -
4411                         le32_to_cpu(es->s_log_block_size);
4412                 sbi->s_clusters_per_group =
4413                         le32_to_cpu(es->s_clusters_per_group);
4414                 if (sbi->s_clusters_per_group > blocksize * 8) {
4415                         ext4_msg(sb, KERN_ERR,
4416                                  "#clusters per group too big: %lu",
4417                                  sbi->s_clusters_per_group);
4418                         goto failed_mount;
4419                 }
4420                 if (sbi->s_blocks_per_group !=
4421                     (sbi->s_clusters_per_group * (clustersize / blocksize))) {
4422                         ext4_msg(sb, KERN_ERR, "blocks per group (%lu) and "
4423                                  "clusters per group (%lu) inconsistent",
4424                                  sbi->s_blocks_per_group,
4425                                  sbi->s_clusters_per_group);
4426                         goto failed_mount;
4427                 }
4428         } else {
4429                 if (clustersize != blocksize) {
4430                         ext4_msg(sb, KERN_ERR,
4431                                  "fragment/cluster size (%d) != "
4432                                  "block size (%d)", clustersize, blocksize);
4433                         goto failed_mount;
4434                 }
4435                 if (sbi->s_blocks_per_group > blocksize * 8) {
4436                         ext4_msg(sb, KERN_ERR,
4437                                  "#blocks per group too big: %lu",
4438                                  sbi->s_blocks_per_group);
4439                         goto failed_mount;
4440                 }
4441                 sbi->s_clusters_per_group = sbi->s_blocks_per_group;
4442                 sbi->s_cluster_bits = 0;
4443         }
4444         sbi->s_cluster_ratio = clustersize / blocksize;
4445
4446         /* Do we have standard group size of clustersize * 8 blocks ? */
4447         if (sbi->s_blocks_per_group == clustersize << 3)
4448                 set_opt2(sb, STD_GROUP_SIZE);
4449
4450         /*
4451          * Test whether we have more sectors than will fit in sector_t,
4452          * and whether the max offset is addressable by the page cache.
4453          */
4454         err = generic_check_addressable(sb->s_blocksize_bits,
4455                                         ext4_blocks_count(es));
4456         if (err) {
4457                 ext4_msg(sb, KERN_ERR, "filesystem"
4458                          " too large to mount safely on this system");
4459                 goto failed_mount;
4460         }
4461
4462         if (EXT4_BLOCKS_PER_GROUP(sb) == 0)
4463                 goto cantfind_ext4;
4464
4465         /* check blocks count against device size */
4466         blocks_count = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits;
4467         if (blocks_count && ext4_blocks_count(es) > blocks_count) {
4468                 ext4_msg(sb, KERN_WARNING, "bad geometry: block count %llu "
4469                        "exceeds size of device (%llu blocks)",
4470                        ext4_blocks_count(es), blocks_count);
4471                 goto failed_mount;
4472         }
4473
4474         /*
4475          * It makes no sense for the first data block to be beyond the end
4476          * of the filesystem.
4477          */
4478         if (le32_to_cpu(es->s_first_data_block) >= ext4_blocks_count(es)) {
4479                 ext4_msg(sb, KERN_WARNING, "bad geometry: first data "
4480                          "block %u is beyond end of filesystem (%llu)",
4481                          le32_to_cpu(es->s_first_data_block),
4482                          ext4_blocks_count(es));
4483                 goto failed_mount;
4484         }
4485         if ((es->s_first_data_block == 0) && (es->s_log_block_size == 0) &&
4486             (sbi->s_cluster_ratio == 1)) {
4487                 ext4_msg(sb, KERN_WARNING, "bad geometry: first data "
4488                          "block is 0 with a 1k block and cluster size");
4489                 goto failed_mount;
4490         }
4491
4492         blocks_count = (ext4_blocks_count(es) -
4493                         le32_to_cpu(es->s_first_data_block) +
4494                         EXT4_BLOCKS_PER_GROUP(sb) - 1);
4495         do_div(blocks_count, EXT4_BLOCKS_PER_GROUP(sb));
4496         if (blocks_count > ((uint64_t)1<<32) - EXT4_DESC_PER_BLOCK(sb)) {
4497                 ext4_msg(sb, KERN_WARNING, "groups count too large: %llu "
4498                        "(block count %llu, first data block %u, "
4499                        "blocks per group %lu)", blocks_count,
4500                        ext4_blocks_count(es),
4501                        le32_to_cpu(es->s_first_data_block),
4502                        EXT4_BLOCKS_PER_GROUP(sb));
4503                 goto failed_mount;
4504         }
4505         sbi->s_groups_count = blocks_count;
4506         sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
4507                         (EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
4508         if (((u64)sbi->s_groups_count * sbi->s_inodes_per_group) !=
4509             le32_to_cpu(es->s_inodes_count)) {
4510                 ext4_msg(sb, KERN_ERR, "inodes count not valid: %u vs %llu",
4511                          le32_to_cpu(es->s_inodes_count),
4512                          ((u64)sbi->s_groups_count * sbi->s_inodes_per_group));
4513                 ret = -EINVAL;
4514                 goto failed_mount;
4515         }
4516         db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
4517                    EXT4_DESC_PER_BLOCK(sb);
4518         if (ext4_has_feature_meta_bg(sb)) {
4519                 if (le32_to_cpu(es->s_first_meta_bg) > db_count) {
4520                         ext4_msg(sb, KERN_WARNING,
4521                                  "first meta block group too large: %u "
4522                                  "(group descriptor block count %u)",
4523                                  le32_to_cpu(es->s_first_meta_bg), db_count);
4524                         goto failed_mount;
4525                 }
4526         }
4527         rcu_assign_pointer(sbi->s_group_desc,
4528                            kvmalloc_array(db_count,
4529                                           sizeof(struct buffer_head *),
4530                                           GFP_KERNEL));
4531         if (sbi->s_group_desc == NULL) {
4532                 ext4_msg(sb, KERN_ERR, "not enough memory");
4533                 ret = -ENOMEM;
4534                 goto failed_mount;
4535         }
4536
4537         bgl_lock_init(sbi->s_blockgroup_lock);
4538
4539         /* Pre-read the descriptors into the buffer cache */
4540         for (i = 0; i < db_count; i++) {
4541                 block = descriptor_loc(sb, logical_sb_block, i);
4542                 ext4_sb_breadahead_unmovable(sb, block);
4543         }
4544
4545         for (i = 0; i < db_count; i++) {
4546                 struct buffer_head *bh;
4547
4548                 block = descriptor_loc(sb, logical_sb_block, i);
4549                 bh = ext4_sb_bread_unmovable(sb, block);
4550                 if (IS_ERR(bh)) {
4551                         ext4_msg(sb, KERN_ERR,
4552                                "can't read group descriptor %d", i);
4553                         db_count = i;
4554                         ret = PTR_ERR(bh);
4555                         goto failed_mount2;
4556                 }
4557                 rcu_read_lock();
4558                 rcu_dereference(sbi->s_group_desc)[i] = bh;
4559                 rcu_read_unlock();
4560         }
4561         sbi->s_gdb_count = db_count;
4562         if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
4563                 ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
4564                 ret = -EFSCORRUPTED;
4565                 goto failed_mount2;
4566         }
4567
4568         timer_setup(&sbi->s_err_report, print_daily_error_info, 0);
4569         spin_lock_init(&sbi->s_error_lock);
4570         INIT_WORK(&sbi->s_error_work, flush_stashed_error_work);
4571
4572         /* Register extent status tree shrinker */
4573         if (ext4_es_register_shrinker(sbi))
4574                 goto failed_mount3;
4575
4576         sbi->s_stripe = ext4_get_stripe_size(sbi);
4577         sbi->s_extent_max_zeroout_kb = 32;
4578
4579         /*
4580          * set up enough so that it can read an inode
4581          */
4582         sb->s_op = &ext4_sops;
4583         sb->s_export_op = &ext4_export_ops;
4584         sb->s_xattr = ext4_xattr_handlers;
4585 #ifdef CONFIG_FS_ENCRYPTION
4586         sb->s_cop = &ext4_cryptops;
4587 #endif
4588 #ifdef CONFIG_FS_VERITY
4589         sb->s_vop = &ext4_verityops;
4590 #endif
4591 #ifdef CONFIG_QUOTA
4592         sb->dq_op = &ext4_quota_operations;
4593         if (ext4_has_feature_quota(sb))
4594                 sb->s_qcop = &dquot_quotactl_sysfile_ops;
4595         else
4596                 sb->s_qcop = &ext4_qctl_operations;
4597         sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
4598 #endif
4599         memcpy(&sb->s_uuid, es->s_uuid, sizeof(es->s_uuid));
4600
4601         INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
4602         mutex_init(&sbi->s_orphan_lock);
4603
4604         /* Initialize fast commit stuff */
4605         atomic_set(&sbi->s_fc_subtid, 0);
4606         atomic_set(&sbi->s_fc_ineligible_updates, 0);
4607         INIT_LIST_HEAD(&sbi->s_fc_q[FC_Q_MAIN]);
4608         INIT_LIST_HEAD(&sbi->s_fc_q[FC_Q_STAGING]);
4609         INIT_LIST_HEAD(&sbi->s_fc_dentry_q[FC_Q_MAIN]);
4610         INIT_LIST_HEAD(&sbi->s_fc_dentry_q[FC_Q_STAGING]);
4611         sbi->s_fc_bytes = 0;
4612         ext4_clear_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
4613         ext4_clear_mount_flag(sb, EXT4_MF_FC_COMMITTING);
4614         spin_lock_init(&sbi->s_fc_lock);
4615         memset(&sbi->s_fc_stats, 0, sizeof(sbi->s_fc_stats));
4616         sbi->s_fc_replay_state.fc_regions = NULL;
4617         sbi->s_fc_replay_state.fc_regions_size = 0;
4618         sbi->s_fc_replay_state.fc_regions_used = 0;
4619         sbi->s_fc_replay_state.fc_regions_valid = 0;
4620         sbi->s_fc_replay_state.fc_modified_inodes = NULL;
4621         sbi->s_fc_replay_state.fc_modified_inodes_size = 0;
4622         sbi->s_fc_replay_state.fc_modified_inodes_used = 0;
4623
4624         sb->s_root = NULL;
4625
4626         needs_recovery = (es->s_last_orphan != 0 ||
4627                           ext4_has_feature_journal_needs_recovery(sb));
4628
4629         if (ext4_has_feature_mmp(sb) && !sb_rdonly(sb))
4630                 if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block)))
4631                         goto failed_mount3a;
4632
4633         /*
4634          * The first inode we look at is the journal inode.  Don't try
4635          * root first: it may be modified in the journal!
4636          */
4637         if (!test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb)) {
4638                 err = ext4_load_journal(sb, es, parsed_opts.journal_devnum);
4639                 if (err)
4640                         goto failed_mount3a;
4641         } else if (test_opt(sb, NOLOAD) && !sb_rdonly(sb) &&
4642                    ext4_has_feature_journal_needs_recovery(sb)) {
4643                 ext4_msg(sb, KERN_ERR, "required journal recovery "
4644                        "suppressed and not mounted read-only");
4645                 goto failed_mount_wq;
4646         } else {
4647                 /* Nojournal mode, all journal mount options are illegal */
4648                 if (test_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM)) {
4649                         ext4_msg(sb, KERN_ERR, "can't mount with "
4650                                  "journal_checksum, fs mounted w/o journal");
4651                         goto failed_mount_wq;
4652                 }
4653                 if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
4654                         ext4_msg(sb, KERN_ERR, "can't mount with "
4655                                  "journal_async_commit, fs mounted w/o journal");
4656                         goto failed_mount_wq;
4657                 }
4658                 if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) {
4659                         ext4_msg(sb, KERN_ERR, "can't mount with "
4660                                  "commit=%lu, fs mounted w/o journal",
4661                                  sbi->s_commit_interval / HZ);
4662                         goto failed_mount_wq;
4663                 }
4664                 if (EXT4_MOUNT_DATA_FLAGS &
4665                     (sbi->s_mount_opt ^ sbi->s_def_mount_opt)) {
4666                         ext4_msg(sb, KERN_ERR, "can't mount with "
4667                                  "data=, fs mounted w/o journal");
4668                         goto failed_mount_wq;
4669                 }
4670                 sbi->s_def_mount_opt &= ~EXT4_MOUNT_JOURNAL_CHECKSUM;
4671                 clear_opt(sb, JOURNAL_CHECKSUM);
4672                 clear_opt(sb, DATA_FLAGS);
4673                 clear_opt2(sb, JOURNAL_FAST_COMMIT);
4674                 sbi->s_journal = NULL;
4675                 needs_recovery = 0;
4676                 goto no_journal;
4677         }
4678
4679         if (ext4_has_feature_64bit(sb) &&
4680             !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0,
4681                                        JBD2_FEATURE_INCOMPAT_64BIT)) {
4682                 ext4_msg(sb, KERN_ERR, "Failed to set 64-bit journal feature");
4683                 goto failed_mount_wq;
4684         }
4685
4686         if (!set_journal_csum_feature_set(sb)) {
4687                 ext4_msg(sb, KERN_ERR, "Failed to set journal checksum "
4688                          "feature set");
4689                 goto failed_mount_wq;
4690         }
4691
4692         if (test_opt2(sb, JOURNAL_FAST_COMMIT) &&
4693                 !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0,
4694                                           JBD2_FEATURE_INCOMPAT_FAST_COMMIT)) {
4695                 ext4_msg(sb, KERN_ERR,
4696                         "Failed to set fast commit journal feature");
4697                 goto failed_mount_wq;
4698         }
4699
4700         /* We have now updated the journal if required, so we can
4701          * validate the data journaling mode. */
4702         switch (test_opt(sb, DATA_FLAGS)) {
4703         case 0:
4704                 /* No mode set, assume a default based on the journal
4705                  * capabilities: ORDERED_DATA if the journal can
4706                  * cope, else JOURNAL_DATA
4707                  */
4708                 if (jbd2_journal_check_available_features
4709                     (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) {
4710                         set_opt(sb, ORDERED_DATA);
4711                         sbi->s_def_mount_opt |= EXT4_MOUNT_ORDERED_DATA;
4712                 } else {
4713                         set_opt(sb, JOURNAL_DATA);
4714                         sbi->s_def_mount_opt |= EXT4_MOUNT_JOURNAL_DATA;
4715                 }
4716                 break;
4717
4718         case EXT4_MOUNT_ORDERED_DATA:
4719         case EXT4_MOUNT_WRITEBACK_DATA:
4720                 if (!jbd2_journal_check_available_features
4721                     (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) {
4722                         ext4_msg(sb, KERN_ERR, "Journal does not support "
4723                                "requested data journaling mode");
4724                         goto failed_mount_wq;
4725                 }
4726                 break;
4727         default:
4728                 break;
4729         }
4730
4731         if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA &&
4732             test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
4733                 ext4_msg(sb, KERN_ERR, "can't mount with "
4734                         "journal_async_commit in data=ordered mode");
4735                 goto failed_mount_wq;
4736         }
4737
4738         set_task_ioprio(sbi->s_journal->j_task, parsed_opts.journal_ioprio);
4739
4740         sbi->s_journal->j_submit_inode_data_buffers =
4741                 ext4_journal_submit_inode_data_buffers;
4742         sbi->s_journal->j_finish_inode_data_buffers =
4743                 ext4_journal_finish_inode_data_buffers;
4744
4745 no_journal:
4746         if (!test_opt(sb, NO_MBCACHE)) {
4747                 sbi->s_ea_block_cache = ext4_xattr_create_cache();
4748                 if (!sbi->s_ea_block_cache) {
4749                         ext4_msg(sb, KERN_ERR,
4750                                  "Failed to create ea_block_cache");
4751                         goto failed_mount_wq;
4752                 }
4753
4754                 if (ext4_has_feature_ea_inode(sb)) {
4755                         sbi->s_ea_inode_cache = ext4_xattr_create_cache();
4756                         if (!sbi->s_ea_inode_cache) {
4757                                 ext4_msg(sb, KERN_ERR,
4758                                          "Failed to create ea_inode_cache");
4759                                 goto failed_mount_wq;
4760                         }
4761                 }
4762         }
4763
4764         if (ext4_has_feature_verity(sb) && blocksize != PAGE_SIZE) {
4765                 ext4_msg(sb, KERN_ERR, "Unsupported blocksize for fs-verity");
4766                 goto failed_mount_wq;
4767         }
4768
4769         if (DUMMY_ENCRYPTION_ENABLED(sbi) && !sb_rdonly(sb) &&
4770             !ext4_has_feature_encrypt(sb)) {
4771                 ext4_set_feature_encrypt(sb);
4772                 ext4_commit_super(sb);
4773         }
4774
4775         /*
4776          * Get the # of file system overhead blocks from the
4777          * superblock if present.
4778          */
4779         if (es->s_overhead_clusters)
4780                 sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
4781         else {
4782                 err = ext4_calculate_overhead(sb);
4783                 if (err)
4784                         goto failed_mount_wq;
4785         }
4786
4787         /*
4788          * The maximum number of concurrent works can be high and
4789          * concurrency isn't really necessary.  Limit it to 1.
4790          */
4791         EXT4_SB(sb)->rsv_conversion_wq =
4792                 alloc_workqueue("ext4-rsv-conversion", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
4793         if (!EXT4_SB(sb)->rsv_conversion_wq) {
4794                 printk(KERN_ERR "EXT4-fs: failed to create workqueue\n");
4795                 ret = -ENOMEM;
4796                 goto failed_mount4;
4797         }
4798
4799         /*
4800          * The jbd2_journal_load will have done any necessary log recovery,
4801          * so we can safely mount the rest of the filesystem now.
4802          */
4803
4804         root = ext4_iget(sb, EXT4_ROOT_INO, EXT4_IGET_SPECIAL);
4805         if (IS_ERR(root)) {
4806                 ext4_msg(sb, KERN_ERR, "get root inode failed");
4807                 ret = PTR_ERR(root);
4808                 root = NULL;
4809                 goto failed_mount4;
4810         }
4811         if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
4812                 ext4_msg(sb, KERN_ERR, "corrupt root inode, run e2fsck");
4813                 iput(root);
4814                 goto failed_mount4;
4815         }
4816
4817         sb->s_root = d_make_root(root);
4818         if (!sb->s_root) {
4819                 ext4_msg(sb, KERN_ERR, "get root dentry failed");
4820                 ret = -ENOMEM;
4821                 goto failed_mount4;
4822         }
4823
4824         ret = ext4_setup_super(sb, es, sb_rdonly(sb));
4825         if (ret == -EROFS) {
4826                 sb->s_flags |= SB_RDONLY;
4827                 ret = 0;
4828         } else if (ret)
4829                 goto failed_mount4a;
4830
4831         ext4_set_resv_clusters(sb);
4832
4833         if (test_opt(sb, BLOCK_VALIDITY)) {
4834                 err = ext4_setup_system_zone(sb);
4835                 if (err) {
4836                         ext4_msg(sb, KERN_ERR, "failed to initialize system "
4837                                  "zone (%d)", err);
4838                         goto failed_mount4a;
4839                 }
4840         }
4841         ext4_fc_replay_cleanup(sb);
4842
4843         ext4_ext_init(sb);
4844
4845         /*
4846          * Enable optimize_scan if number of groups is > threshold. This can be
4847          * turned off by passing "mb_optimize_scan=0". This can also be
4848          * turned on forcefully by passing "mb_optimize_scan=1".
4849          */
4850         if (parsed_opts.mb_optimize_scan == 1)
4851                 set_opt2(sb, MB_OPTIMIZE_SCAN);
4852         else if (parsed_opts.mb_optimize_scan == 0)
4853                 clear_opt2(sb, MB_OPTIMIZE_SCAN);
4854         else if (sbi->s_groups_count >= MB_DEFAULT_LINEAR_SCAN_THRESHOLD)
4855                 set_opt2(sb, MB_OPTIMIZE_SCAN);
4856
4857         err = ext4_mb_init(sb);
4858         if (err) {
4859                 ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)",
4860                          err);
4861                 goto failed_mount5;
4862         }
4863
4864         /*
4865          * We can only set up the journal commit callback once
4866          * mballoc is initialized
4867          */
4868         if (sbi->s_journal)
4869                 sbi->s_journal->j_commit_callback =
4870                         ext4_journal_commit_callback;
4871
4872         block = ext4_count_free_clusters(sb);
4873         ext4_free_blocks_count_set(sbi->s_es,
4874                                    EXT4_C2B(sbi, block));
4875         err = percpu_counter_init(&sbi->s_freeclusters_counter, block,
4876                                   GFP_KERNEL);
4877         if (!err) {
4878                 unsigned long freei = ext4_count_free_inodes(sb);
4879                 sbi->s_es->s_free_inodes_count = cpu_to_le32(freei);
4880                 err = percpu_counter_init(&sbi->s_freeinodes_counter, freei,
4881                                           GFP_KERNEL);
4882         }
4883         /*
4884          * Update the checksum after updating free space/inode
4885          * counters.  Otherwise the superblock can have an incorrect
4886          * checksum in the buffer cache until it is written out and
4887          * e2fsprogs programs trying to open a file system immediately
4888          * after it is mounted can fail.
4889          */
4890         ext4_superblock_csum_set(sb);
4891         if (!err)
4892                 err = percpu_counter_init(&sbi->s_dirs_counter,
4893                                           ext4_count_dirs(sb), GFP_KERNEL);
4894         if (!err)
4895                 err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0,
4896                                           GFP_KERNEL);
4897         if (!err)
4898                 err = percpu_counter_init(&sbi->s_sra_exceeded_retry_limit, 0,
4899                                           GFP_KERNEL);
4900         if (!err)
4901                 err = percpu_init_rwsem(&sbi->s_writepages_rwsem);
4902
4903         if (err) {
4904                 ext4_msg(sb, KERN_ERR, "insufficient memory");
4905                 goto failed_mount6;
4906         }
4907
4908         if (ext4_has_feature_flex_bg(sb))
4909                 if (!ext4_fill_flex_info(sb)) {
4910                         ext4_msg(sb, KERN_ERR,
4911                                "unable to initialize "
4912                                "flex_bg meta info!");
4913                         ret = -ENOMEM;
4914                         goto failed_mount6;
4915                 }
4916
4917         err = ext4_register_li_request(sb, first_not_zeroed);
4918         if (err)
4919                 goto failed_mount6;
4920
4921         err = ext4_register_sysfs(sb);
4922         if (err)
4923                 goto failed_mount7;
4924
4925 #ifdef CONFIG_QUOTA
4926         /* Enable quota usage during mount. */
4927         if (ext4_has_feature_quota(sb) && !sb_rdonly(sb)) {
4928                 err = ext4_enable_quotas(sb);
4929                 if (err)
4930                         goto failed_mount8;
4931         }
4932 #endif  /* CONFIG_QUOTA */
4933
4934         /*
4935          * Save the original bdev mapping's wb_err value which could be
4936          * used to detect the metadata async write error.
4937          */
4938         spin_lock_init(&sbi->s_bdev_wb_lock);
4939         errseq_check_and_advance(&sb->s_bdev->bd_inode->i_mapping->wb_err,
4940                                  &sbi->s_bdev_wb_err);
4941         sb->s_bdev->bd_super = sb;
4942         EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS;
4943         ext4_orphan_cleanup(sb, es);
4944         EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS;
4945         if (needs_recovery) {
4946                 ext4_msg(sb, KERN_INFO, "recovery complete");
4947                 err = ext4_mark_recovery_complete(sb, es);
4948                 if (err)
4949                         goto failed_mount8;
4950         }
4951         if (EXT4_SB(sb)->s_journal) {
4952                 if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
4953                         descr = " journalled data mode";
4954                 else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
4955                         descr = " ordered data mode";
4956                 else
4957                         descr = " writeback data mode";
4958         } else
4959                 descr = "out journal";
4960
4961         if (test_opt(sb, DISCARD)) {
4962                 struct request_queue *q = bdev_get_queue(sb->s_bdev);
4963                 if (!blk_queue_discard(q))
4964                         ext4_msg(sb, KERN_WARNING,
4965                                  "mounting with \"discard\" option, but "
4966                                  "the device does not support discard");
4967         }
4968
4969         if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs mount"))
4970                 ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
4971                          "Opts: %.*s%s%s. Quota mode: %s.", descr,
4972                          (int) sizeof(sbi->s_es->s_mount_opts),
4973                          sbi->s_es->s_mount_opts,
4974                          *sbi->s_es->s_mount_opts ? "; " : "", orig_data,
4975                          ext4_quota_mode(sb));
4976
4977         if (es->s_error_count)
4978                 mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */
4979
4980         /* Enable message ratelimiting. Default is 10 messages per 5 secs. */
4981         ratelimit_state_init(&sbi->s_err_ratelimit_state, 5 * HZ, 10);
4982         ratelimit_state_init(&sbi->s_warning_ratelimit_state, 5 * HZ, 10);
4983         ratelimit_state_init(&sbi->s_msg_ratelimit_state, 5 * HZ, 10);
4984         atomic_set(&sbi->s_warning_count, 0);
4985         atomic_set(&sbi->s_msg_count, 0);
4986
4987         kfree(orig_data);
4988         return 0;
4989
4990 cantfind_ext4:
4991         if (!silent)
4992                 ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem");
4993         goto failed_mount;
4994
4995 failed_mount8:
4996         ext4_unregister_sysfs(sb);
4997         kobject_put(&sbi->s_kobj);
4998 failed_mount7:
4999         ext4_unregister_li_request(sb);
5000 failed_mount6:
5001         ext4_mb_release(sb);
5002         rcu_read_lock();
5003         flex_groups = rcu_dereference(sbi->s_flex_groups);
5004         if (flex_groups) {
5005                 for (i = 0; i < sbi->s_flex_groups_allocated; i++)
5006                         kvfree(flex_groups[i]);
5007                 kvfree(flex_groups);
5008         }
5009         rcu_read_unlock();
5010         percpu_counter_destroy(&sbi->s_freeclusters_counter);
5011         percpu_counter_destroy(&sbi->s_freeinodes_counter);
5012         percpu_counter_destroy(&sbi->s_dirs_counter);
5013         percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
5014         percpu_counter_destroy(&sbi->s_sra_exceeded_retry_limit);
5015         percpu_free_rwsem(&sbi->s_writepages_rwsem);
5016 failed_mount5:
5017         ext4_ext_release(sb);
5018         ext4_release_system_zone(sb);
5019 failed_mount4a:
5020         dput(sb->s_root);
5021         sb->s_root = NULL;
5022 failed_mount4:
5023         ext4_msg(sb, KERN_ERR, "mount failed");
5024         if (EXT4_SB(sb)->rsv_conversion_wq)
5025                 destroy_workqueue(EXT4_SB(sb)->rsv_conversion_wq);
5026 failed_mount_wq:
5027         ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
5028         sbi->s_ea_inode_cache = NULL;
5029
5030         ext4_xattr_destroy_cache(sbi->s_ea_block_cache);
5031         sbi->s_ea_block_cache = NULL;
5032
5033         if (sbi->s_journal) {
5034                 jbd2_journal_destroy(sbi->s_journal);
5035                 sbi->s_journal = NULL;
5036         }
5037 failed_mount3a:
5038         ext4_es_unregister_shrinker(sbi);
5039 failed_mount3:
5040         flush_work(&sbi->s_error_work);
5041         del_timer_sync(&sbi->s_err_report);
5042         ext4_stop_mmpd(sbi);
5043 failed_mount2:
5044         rcu_read_lock();
5045         group_desc = rcu_dereference(sbi->s_group_desc);
5046         for (i = 0; i < db_count; i++)
5047                 brelse(group_desc[i]);
5048         kvfree(group_desc);
5049         rcu_read_unlock();
5050 failed_mount:
5051         if (sbi->s_chksum_driver)
5052                 crypto_free_shash(sbi->s_chksum_driver);
5053
5054 #ifdef CONFIG_UNICODE
5055         utf8_unload(sb->s_encoding);
5056 #endif
5057
5058 #ifdef CONFIG_QUOTA
5059         for (i = 0; i < EXT4_MAXQUOTAS; i++)
5060                 kfree(get_qf_name(sb, sbi, i));
5061 #endif
5062         fscrypt_free_dummy_policy(&sbi->s_dummy_enc_policy);
5063         /* ext4_blkdev_remove() calls kill_bdev(), release bh before it. */
5064         brelse(bh);
5065         ext4_blkdev_remove(sbi);
5066 out_fail:
5067         sb->s_fs_info = NULL;
5068         kfree(sbi->s_blockgroup_lock);
5069 out_free_base:
5070         kfree(sbi);
5071         kfree(orig_data);
5072         fs_put_dax(dax_dev);
5073         return err ? err : ret;
5074 }
5075
5076 /*
5077  * Setup any per-fs journal parameters now.  We'll do this both on
5078  * initial mount, once the journal has been initialised but before we've
5079  * done any recovery; and again on any subsequent remount.
5080  */
5081 static void ext4_init_journal_params(struct super_block *sb, journal_t *journal)
5082 {
5083         struct ext4_sb_info *sbi = EXT4_SB(sb);
5084
5085         journal->j_commit_interval = sbi->s_commit_interval;
5086         journal->j_min_batch_time = sbi->s_min_batch_time;
5087         journal->j_max_batch_time = sbi->s_max_batch_time;
5088         ext4_fc_init(sb, journal);
5089
5090         write_lock(&journal->j_state_lock);
5091         if (test_opt(sb, BARRIER))
5092                 journal->j_flags |= JBD2_BARRIER;
5093         else
5094                 journal->j_flags &= ~JBD2_BARRIER;
5095         if (test_opt(sb, DATA_ERR_ABORT))
5096                 journal->j_flags |= JBD2_ABORT_ON_SYNCDATA_ERR;
5097         else
5098                 journal->j_flags &= ~JBD2_ABORT_ON_SYNCDATA_ERR;
5099         write_unlock(&journal->j_state_lock);
5100 }
5101
5102 static struct inode *ext4_get_journal_inode(struct super_block *sb,
5103                                              unsigned int journal_inum)
5104 {
5105         struct inode *journal_inode;
5106
5107         /*
5108          * Test for the existence of a valid inode on disk.  Bad things
5109          * happen if we iget() an unused inode, as the subsequent iput()
5110          * will try to delete it.
5111          */
5112         journal_inode = ext4_iget(sb, journal_inum, EXT4_IGET_SPECIAL);
5113         if (IS_ERR(journal_inode)) {
5114                 ext4_msg(sb, KERN_ERR, "no journal found");
5115                 return NULL;
5116         }
5117         if (!journal_inode->i_nlink) {
5118                 make_bad_inode(journal_inode);
5119                 iput(journal_inode);
5120                 ext4_msg(sb, KERN_ERR, "journal inode is deleted");
5121                 return NULL;
5122         }
5123
5124         jbd_debug(2, "Journal inode found at %p: %lld bytes\n",
5125                   journal_inode, journal_inode->i_size);
5126         if (!S_ISREG(journal_inode->i_mode)) {
5127                 ext4_msg(sb, KERN_ERR, "invalid journal inode");
5128                 iput(journal_inode);
5129                 return NULL;
5130         }
5131         return journal_inode;
5132 }
5133
5134 static journal_t *ext4_get_journal(struct super_block *sb,
5135                                    unsigned int journal_inum)
5136 {
5137         struct inode *journal_inode;
5138         journal_t *journal;
5139
5140         if (WARN_ON_ONCE(!ext4_has_feature_journal(sb)))
5141                 return NULL;
5142
5143         journal_inode = ext4_get_journal_inode(sb, journal_inum);
5144         if (!journal_inode)
5145                 return NULL;
5146
5147         journal = jbd2_journal_init_inode(journal_inode);
5148         if (!journal) {
5149                 ext4_msg(sb, KERN_ERR, "Could not load journal inode");
5150                 iput(journal_inode);
5151                 return NULL;
5152         }
5153         journal->j_private = sb;
5154         ext4_init_journal_params(sb, journal);
5155         return journal;
5156 }
5157
/*
 * Open the external journal device @j_dev, validate its on-disk
 * superblock against @sb, and build a jbd2 journal on it.
 *
 * On success the block device reference is stashed in s_journal_bdev and
 * the initialized journal is returned.  On any failure the bdev (and, if
 * already created, the journal) is released and NULL is returned.
 */
static journal_t *ext4_get_dev_journal(struct super_block *sb,
				       dev_t j_dev)
{
	struct buffer_head *bh;
	journal_t *journal;
	ext4_fsblk_t start;
	ext4_fsblk_t len;
	int hblock, blocksize;
	ext4_fsblk_t sb_block;
	unsigned long offset;
	struct ext4_super_block *es;
	struct block_device *bdev;

	if (WARN_ON_ONCE(!ext4_has_feature_journal(sb)))
		return NULL;

	bdev = ext4_blkdev_get(j_dev, sb);
	if (bdev == NULL)
		return NULL;

	/*
	 * The journal device must be addressable in filesystem-sized
	 * blocks, so its hardware sector size may not exceed blocksize.
	 */
	blocksize = sb->s_blocksize;
	hblock = bdev_logical_block_size(bdev);
	if (blocksize < hblock) {
		ext4_msg(sb, KERN_ERR,
			"blocksize too small for journal device");
		goto out_bdev;
	}

	/*
	 * The ext4 superblock lives at byte offset EXT4_MIN_BLOCK_SIZE
	 * (1024); locate which fs-block it falls in and where inside it.
	 */
	sb_block = EXT4_MIN_BLOCK_SIZE / blocksize;
	offset = EXT4_MIN_BLOCK_SIZE % blocksize;
	set_blocksize(bdev, blocksize);
	if (!(bh = __bread(bdev, sb_block, blocksize))) {
		ext4_msg(sb, KERN_ERR, "couldn't read superblock of "
		       "external journal");
		goto out_bdev;
	}

	/* The device must carry a superblock flagged as JOURNAL_DEV. */
	es = (struct ext4_super_block *) (bh->b_data + offset);
	if ((le16_to_cpu(es->s_magic) != EXT4_SUPER_MAGIC) ||
	    !(le32_to_cpu(es->s_feature_incompat) &
	      EXT4_FEATURE_INCOMPAT_JOURNAL_DEV)) {
		ext4_msg(sb, KERN_ERR, "external journal has "
					"bad superblock");
		brelse(bh);
		goto out_bdev;
	}

	/* Verify the journal superblock checksum when metadata_csum is set. */
	if ((le32_to_cpu(es->s_feature_ro_compat) &
	     EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
	    es->s_checksum != ext4_superblock_csum(sb, es)) {
		ext4_msg(sb, KERN_ERR, "external journal has "
				       "corrupt superblock");
		brelse(bh);
		goto out_bdev;
	}

	/* The journal device must be the one this filesystem points at. */
	if (memcmp(EXT4_SB(sb)->s_es->s_journal_uuid, es->s_uuid, 16)) {
		ext4_msg(sb, KERN_ERR, "journal UUID does not match");
		brelse(bh);
		goto out_bdev;
	}

	/* Journal data starts right after the device's superblock block. */
	len = ext4_blocks_count(es);
	start = sb_block + 1;
	brelse(bh);	/* we're done with the superblock */

	journal = jbd2_journal_init_dev(bdev, sb->s_bdev,
					start, len, blocksize);
	if (!journal) {
		ext4_msg(sb, KERN_ERR, "failed to create device journal");
		goto out_bdev;
	}
	journal->j_private = sb;
	if (ext4_read_bh_lock(journal->j_sb_buffer, REQ_META | REQ_PRIO, true)) {
		ext4_msg(sb, KERN_ERR, "I/O error on journal device");
		goto out_journal;
	}
	/* Sharing one external journal between filesystems is unsupported. */
	if (be32_to_cpu(journal->j_superblock->s_nr_users) != 1) {
		ext4_msg(sb, KERN_ERR, "External journal has more than one "
					"user (unsupported) - %d",
			be32_to_cpu(journal->j_superblock->s_nr_users));
		goto out_journal;
	}
	EXT4_SB(sb)->s_journal_bdev = bdev;
	ext4_init_journal_params(sb, journal);
	return journal;

out_journal:
	jbd2_journal_destroy(journal);
out_bdev:
	ext4_blkdev_put(bdev);
	return NULL;
}
5251
/*
 * Locate, open and load the journal for @sb, then recover/replay it.
 *
 * @journal_devnum is the journal device number given on the command line
 * (0 if none); it overrides the one recorded in the superblock.  On
 * success sbi->s_journal is set and 0 is returned; on failure the journal
 * is destroyed and a negative errno is returned.
 */
static int ext4_load_journal(struct super_block *sb,
			     struct ext4_super_block *es,
			     unsigned long journal_devnum)
{
	journal_t *journal;
	unsigned int journal_inum = le32_to_cpu(es->s_journal_inum);
	dev_t journal_dev;
	int err = 0;
	int really_read_only;
	int journal_dev_ro;

	if (WARN_ON_ONCE(!ext4_has_feature_journal(sb)))
		return -EFSCORRUPTED;

	/* A command-line journal device takes precedence over s_journal_dev. */
	if (journal_devnum &&
	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
		ext4_msg(sb, KERN_INFO, "external journal device major/minor "
			"numbers have changed");
		journal_dev = new_decode_dev(journal_devnum);
	} else
		journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev));

	/* Exactly one of {journal inode, journal device} may be set. */
	if (journal_inum && journal_dev) {
		ext4_msg(sb, KERN_ERR,
			 "filesystem has both journal inode and journal device!");
		return -EINVAL;
	}

	if (journal_inum) {
		journal = ext4_get_journal(sb, journal_inum);
		if (!journal)
			return -EINVAL;
	} else {
		journal = ext4_get_dev_journal(sb, journal_dev);
		if (!journal)
			return -EINVAL;
	}

	/* We cannot replay if either the fs or journal device is read-only. */
	journal_dev_ro = bdev_read_only(journal->j_dev);
	really_read_only = bdev_read_only(sb->s_bdev) | journal_dev_ro;

	if (journal_dev_ro && !sb_rdonly(sb)) {
		ext4_msg(sb, KERN_ERR,
			 "journal device read-only, try mounting with '-o ro'");
		err = -EROFS;
		goto err_out;
	}

	/*
	 * Are we loading a blank journal or performing recovery after a
	 * crash?  For recovery, we need to check in advance whether we
	 * can get read-write access to the device.
	 */
	if (ext4_has_feature_journal_needs_recovery(sb)) {
		if (sb_rdonly(sb)) {
			ext4_msg(sb, KERN_INFO, "INFO: recovery "
					"required on readonly filesystem");
			if (really_read_only) {
				ext4_msg(sb, KERN_ERR, "write access "
					"unavailable, cannot proceed "
					"(try mounting with noload)");
				err = -EROFS;
				goto err_out;
			}
			ext4_msg(sb, KERN_INFO, "write access will "
			       "be enabled during recovery");
		}
	}

	if (!(journal->j_flags & JBD2_BARRIER))
		ext4_msg(sb, KERN_INFO, "barriers disabled");

	/* No recovery needed: discard any stale journal contents. */
	if (!ext4_has_feature_journal_needs_recovery(sb))
		err = jbd2_journal_wipe(journal, !really_read_only);
	if (!err) {
		/*
		 * Journal replay may overwrite the error region of the
		 * in-memory superblock with older on-disk contents; save
		 * it across jbd2_journal_load() and put it back so the
		 * latest error state is not lost.
		 */
		char *save = kmalloc(EXT4_S_ERR_LEN, GFP_KERNEL);
		if (save)
			memcpy(save, ((char *) es) +
			       EXT4_S_ERR_START, EXT4_S_ERR_LEN);
		err = jbd2_journal_load(journal);
		if (save)
			memcpy(((char *) es) + EXT4_S_ERR_START,
			       save, EXT4_S_ERR_LEN);
		kfree(save);
	}

	if (err) {
		ext4_msg(sb, KERN_ERR, "error loading journal");
		goto err_out;
	}

	EXT4_SB(sb)->s_journal = journal;
	err = ext4_clear_journal_err(sb, es);
	if (err) {
		EXT4_SB(sb)->s_journal = NULL;
		jbd2_journal_destroy(journal);
		return err;
	}

	/* Remember the new journal device number in the superblock. */
	if (!really_read_only && journal_devnum &&
	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
		es->s_journal_dev = cpu_to_le32(journal_devnum);

		/* Make sure we flush the recovery flag to disk. */
		ext4_commit_super(sb);
	}

	return 0;

err_out:
	jbd2_journal_destroy(journal);
	return err;
}
5365
5366 /* Copy state of EXT4_SB(sb) into buffer for on-disk superblock */
/* Copy state of EXT4_SB(sb) into buffer for on-disk superblock */
static void ext4_update_super(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct buffer_head *sbh = sbi->s_sbh;

	/* The whole update happens under the buffer lock of s_sbh. */
	lock_buffer(sbh);
	/*
	 * If the file system is mounted read-only, don't update the
	 * superblock write time.  This avoids updating the superblock
	 * write time when we are mounting the root file system
	 * read/only but we need to replay the journal; at that point,
	 * for people who are east of GMT and who make their clock
	 * tick in localtime for Windows bug-for-bug compatibility,
	 * the clock is set in the future, and this will cause e2fsck
	 * to complain and force a full file system check.
	 */
	if (!(sb->s_flags & SB_RDONLY))
		ext4_update_tstamp(es, s_wtime);
	/* Lifetime write stats: sectors (512B) written, converted to KiB. */
	es->s_kbytes_written =
		cpu_to_le64(sbi->s_kbytes_written +
		    ((part_stat_read(sb->s_bdev, sectors[STAT_WRITE]) -
		      sbi->s_sectors_written_start) >> 1));
	/*
	 * The percpu counters may not exist yet during early mount/error
	 * paths, hence the initialized checks.
	 */
	if (percpu_counter_initialized(&sbi->s_freeclusters_counter))
		ext4_free_blocks_count_set(es,
			EXT4_C2B(sbi, percpu_counter_sum_positive(
				&sbi->s_freeclusters_counter)));
	if (percpu_counter_initialized(&sbi->s_freeinodes_counter))
		es->s_free_inodes_count =
			cpu_to_le32(percpu_counter_sum_positive(
				&sbi->s_freeinodes_counter));
	/* Copy error information to the on-disk superblock */
	spin_lock(&sbi->s_error_lock);
	if (sbi->s_add_error_count > 0) {
		es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
		/* Only record "first error" fields once per fs lifetime. */
		if (!es->s_first_error_time && !es->s_first_error_time_hi) {
			__ext4_update_tstamp(&es->s_first_error_time,
					     &es->s_first_error_time_hi,
					     sbi->s_first_error_time);
			/*
			 * NOTE(review): strncpy may leave the fixed-size
			 * on-disk func fields without NUL termination when
			 * the name fills the field - presumably the on-disk
			 * format allows that; confirm before changing.
			 */
			strncpy(es->s_first_error_func, sbi->s_first_error_func,
				sizeof(es->s_first_error_func));
			es->s_first_error_line =
				cpu_to_le32(sbi->s_first_error_line);
			es->s_first_error_ino =
				cpu_to_le32(sbi->s_first_error_ino);
			es->s_first_error_block =
				cpu_to_le64(sbi->s_first_error_block);
			es->s_first_error_errcode =
				ext4_errno_to_code(sbi->s_first_error_code);
		}
		/* "Last error" fields are refreshed on every update. */
		__ext4_update_tstamp(&es->s_last_error_time,
				     &es->s_last_error_time_hi,
				     sbi->s_last_error_time);
		strncpy(es->s_last_error_func, sbi->s_last_error_func,
			sizeof(es->s_last_error_func));
		es->s_last_error_line = cpu_to_le32(sbi->s_last_error_line);
		es->s_last_error_ino = cpu_to_le32(sbi->s_last_error_ino);
		es->s_last_error_block = cpu_to_le64(sbi->s_last_error_block);
		es->s_last_error_errcode =
				ext4_errno_to_code(sbi->s_last_error_code);
		/*
		 * Start the daily error reporting function if it hasn't been
		 * started already
		 */
		if (!es->s_error_count)
			mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ);
		le32_add_cpu(&es->s_error_count, sbi->s_add_error_count);
		sbi->s_add_error_count = 0;
	}
	spin_unlock(&sbi->s_error_lock);

	/* Recompute the superblock checksum over the updated contents. */
	ext4_superblock_csum_set(sb);
	unlock_buffer(sbh);
}
5441
/*
 * Sync the in-memory superblock state to the on-disk superblock buffer
 * and write it out synchronously.  Returns 0 or a negative errno from
 * the buffer write.
 */
static int ext4_commit_super(struct super_block *sb)
{
	struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
	int error = 0;

	if (!sbh)
		return -EINVAL;
	if (block_device_ejected(sb))
		return -ENODEV;

	/* Refresh the buffer contents from EXT4_SB(sb) before writing. */
	ext4_update_super(sb);

	if (buffer_write_io_error(sbh) || !buffer_uptodate(sbh)) {
		/*
		 * Oh, dear.  A previous attempt to write the
		 * superblock failed.  This could happen because the
		 * USB device was yanked out.  Or it could happen to
		 * be a transient write error and maybe the block will
		 * be remapped.  Nothing we can do but to retry the
		 * write and hope for the best.
		 */
		ext4_msg(sb, KERN_ERR, "previous I/O error to "
		       "superblock detected");
		clear_buffer_write_io_error(sbh);
		set_buffer_uptodate(sbh);
	}
	BUFFER_TRACE(sbh, "marking dirty");
	mark_buffer_dirty(sbh);
	/* FUA keeps the superblock durable when barriers are enabled. */
	error = __sync_dirty_buffer(sbh,
		REQ_SYNC | (test_opt(sb, BARRIER) ? REQ_FUA : 0));
	if (buffer_write_io_error(sbh)) {
		ext4_msg(sb, KERN_ERR, "I/O error while writing "
		       "superblock");
		/* Clear the error so the next attempt can retry cleanly. */
		clear_buffer_write_io_error(sbh);
		set_buffer_uptodate(sbh);
	}
	return error;
}
5480
5481 /*
5482  * Have we just finished recovery?  If so, and if we are mounting (or
5483  * remounting) the filesystem readonly, then we will end up with a
5484  * consistent fs on disk.  Record that fact.
5485  */
static int ext4_mark_recovery_complete(struct super_block *sb,
				       struct ext4_super_block *es)
{
	int err;
	journal_t *journal = EXT4_SB(sb)->s_journal;

	if (!ext4_has_feature_journal(sb)) {
		/* A live journal without the feature flag is corruption. */
		if (journal != NULL) {
			ext4_error(sb, "Journal got removed while the fs was "
				   "mounted!");
			return -EFSCORRUPTED;
		}
		return 0;
	}
	/* Block new transactions, then push everything to the fs proper. */
	jbd2_journal_lock_updates(journal);
	err = jbd2_journal_flush(journal, 0);
	if (err < 0)
		goto out;

	/*
	 * Only a read-only fs can be marked clean: clear the
	 * needs_recovery feature and persist it in the superblock.
	 */
	if (ext4_has_feature_journal_needs_recovery(sb) && sb_rdonly(sb)) {
		ext4_clear_feature_journal_needs_recovery(sb);
		ext4_commit_super(sb);
	}
out:
	jbd2_journal_unlock_updates(journal);
	return err;
}
5513
5514 /*
5515  * If we are mounting (or read-write remounting) a filesystem whose journal
5516  * has recorded an error from a previous lifetime, move that error to the
5517  * main filesystem now.
5518  */
static int ext4_clear_journal_err(struct super_block *sb,
				   struct ext4_super_block *es)
{
	journal_t *journal;
	int j_errno;
	const char *errstr;

	if (!ext4_has_feature_journal(sb)) {
		ext4_error(sb, "Journal got removed while the fs was mounted!");
		return -EFSCORRUPTED;
	}

	journal = EXT4_SB(sb)->s_journal;

	/*
	 * Now check for any error status which may have been recorded in the
	 * journal by a prior ext4_error() or ext4_abort()
	 */

	j_errno = jbd2_journal_errno(journal);
	if (j_errno) {
		char nbuf[16];

		errstr = ext4_decode_error(sb, j_errno, nbuf);
		ext4_warning(sb, "Filesystem error recorded "
			     "from previous mount: %s", errstr);
		ext4_warning(sb, "Marking fs in need of filesystem check.");

		/* Move the error to the main fs: in memory and on disk. */
		EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
		es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
		ext4_commit_super(sb);

		/* The error now lives in the fs superblock; reset the
		 * journal's copy so it is not reported again. */
		jbd2_journal_clear_err(journal);
		jbd2_journal_update_sb_errno(journal);
	}
	return 0;
}
5556
5557 /*
5558  * Force the running and committing transactions to commit,
5559  * and wait on the commit.
5560  */
5561 int ext4_force_commit(struct super_block *sb)
5562 {
5563         journal_t *journal;
5564
5565         if (sb_rdonly(sb))
5566                 return 0;
5567
5568         journal = EXT4_SB(sb)->s_journal;
5569         return ext4_journal_force_commit(journal);
5570 }
5571
/*
 * ->sync_fs: flush pending reserved-extent conversions and quota, start
 * (and, if @wait, wait for) a journal commit, and issue a cache-flush
 * barrier when the commit will not send one for us.
 */
static int ext4_sync_fs(struct super_block *sb, int wait)
{
	int ret = 0;
	tid_t target;
	bool needs_barrier = false;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (unlikely(ext4_forced_shutdown(sbi)))
		return 0;

	trace_ext4_sync_fs(sb, wait);
	flush_workqueue(sbi->rsv_conversion_wq);
	/*
	 * Writeback quota in non-journalled quota case - journalled quota has
	 * no dirty dquots
	 */
	dquot_writeback_dquots(sb, -1);
	/*
	 * Data writeback is possible w/o journal transaction, so barrier must
	 * being sent at the end of the function. But we can skip it if
	 * transaction_commit will do it for us.
	 */
	if (sbi->s_journal) {
		target = jbd2_get_latest_transaction(sbi->s_journal);
		if (wait && sbi->s_journal->j_flags & JBD2_BARRIER &&
		    !jbd2_trans_will_send_data_barrier(sbi->s_journal, target))
			needs_barrier = true;

		if (jbd2_journal_start_commit(sbi->s_journal, &target)) {
			if (wait)
				ret = jbd2_log_wait_commit(sbi->s_journal,
							   target);
		}
	} else if (wait && test_opt(sb, BARRIER))
		needs_barrier = true;
	if (needs_barrier) {
		int err;
		err = blkdev_issue_flush(sb->s_bdev);
		/* Report the first failure; don't mask an earlier error. */
		if (!ret)
			ret = err;
	}

	return ret;
}
5616
5617 /*
5618  * LVM calls this function before a (read-only) snapshot is created.  This
5619  * gives us a chance to flush the journal completely and mark the fs clean.
5620  *
5621  * Note that only this function cannot bring a filesystem to be in a clean
5622  * state independently. It relies on upper layer to stop all data & metadata
5623  * modifications.
5624  */
static int ext4_freeze(struct super_block *sb)
{
	int error = 0;
	journal_t *journal;

	if (sb_rdonly(sb))
		return 0;

	journal = EXT4_SB(sb)->s_journal;

	if (journal) {
		/* Now we set up the journal barrier. */
		jbd2_journal_lock_updates(journal);

		/*
		 * Don't clear the needs_recovery flag if we failed to
		 * flush the journal.
		 */
		error = jbd2_journal_flush(journal, 0);
		if (error < 0)
			goto out;

		/* Journal blocked and flushed, clear needs_recovery flag. */
		ext4_clear_feature_journal_needs_recovery(sb);
	}

	/* Persist the (now clean) superblock for the snapshot. */
	error = ext4_commit_super(sb);
out:
	if (journal)
		/* we rely on upper layer to stop further updates */
		jbd2_journal_unlock_updates(journal);
	return error;
}
5658
5659 /*
5660  * Called by LVM after the snapshot is done.  We need to reset the RECOVER
5661  * flag here, even though the filesystem is not technically dirty yet.
5662  */
5663 static int ext4_unfreeze(struct super_block *sb)
5664 {
5665         if (sb_rdonly(sb) || ext4_forced_shutdown(EXT4_SB(sb)))
5666                 return 0;
5667
5668         if (EXT4_SB(sb)->s_journal) {
5669                 /* Reset the needs_recovery flag before the fs is unlocked. */
5670                 ext4_set_feature_journal_needs_recovery(sb);
5671         }
5672
5673         ext4_commit_super(sb);
5674         return 0;
5675 }
5676
5677 /*
5678  * Structure to save mount options for ext4_remount's benefit
5679  */
struct ext4_mount_options {
	unsigned long s_mount_opt;		/* saved sbi->s_mount_opt */
	unsigned long s_mount_opt2;		/* saved sbi->s_mount_opt2 */
	kuid_t s_resuid;			/* saved reserved-blocks uid */
	kgid_t s_resgid;			/* saved reserved-blocks gid */
	unsigned long s_commit_interval;	/* saved journal commit interval */
	u32 s_min_batch_time, s_max_batch_time;	/* saved jbd2 batching times */
#ifdef CONFIG_QUOTA
	int s_jquota_fmt;			/* saved journalled quota format */
	char *s_qf_names[EXT4_MAXQUOTAS];	/* copies of quota file names */
#endif
};
5692
5693 static int ext4_remount(struct super_block *sb, int *flags, char *data)
5694 {
5695         struct ext4_super_block *es;
5696         struct ext4_sb_info *sbi = EXT4_SB(sb);
5697         unsigned long old_sb_flags, vfs_flags;
5698         struct ext4_mount_options old_opts;
5699         int enable_quota = 0;
5700         ext4_group_t g;
5701         int err = 0;
5702 #ifdef CONFIG_QUOTA
5703         int i, j;
5704         char *to_free[EXT4_MAXQUOTAS];
5705 #endif
5706         char *orig_data = kstrdup(data, GFP_KERNEL);
5707         struct ext4_parsed_options parsed_opts;
5708
5709         parsed_opts.journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
5710         parsed_opts.journal_devnum = 0;
5711
5712         if (data && !orig_data)
5713                 return -ENOMEM;
5714
5715         /* Store the original options */
5716         old_sb_flags = sb->s_flags;
5717         old_opts.s_mount_opt = sbi->s_mount_opt;
5718         old_opts.s_mount_opt2 = sbi->s_mount_opt2;
5719         old_opts.s_resuid = sbi->s_resuid;
5720         old_opts.s_resgid = sbi->s_resgid;
5721         old_opts.s_commit_interval = sbi->s_commit_interval;
5722         old_opts.s_min_batch_time = sbi->s_min_batch_time;
5723         old_opts.s_max_batch_time = sbi->s_max_batch_time;
5724 #ifdef CONFIG_QUOTA
5725         old_opts.s_jquota_fmt = sbi->s_jquota_fmt;
5726         for (i = 0; i < EXT4_MAXQUOTAS; i++)
5727                 if (sbi->s_qf_names[i]) {
5728                         char *qf_name = get_qf_name(sb, sbi, i);
5729
5730                         old_opts.s_qf_names[i] = kstrdup(qf_name, GFP_KERNEL);
5731                         if (!old_opts.s_qf_names[i]) {
5732                                 for (j = 0; j < i; j++)
5733                                         kfree(old_opts.s_qf_names[j]);
5734                                 kfree(orig_data);
5735                                 return -ENOMEM;
5736                         }
5737                 } else
5738                         old_opts.s_qf_names[i] = NULL;
5739 #endif
5740         if (sbi->s_journal && sbi->s_journal->j_task->io_context)
5741                 parsed_opts.journal_ioprio =
5742                         sbi->s_journal->j_task->io_context->ioprio;
5743
5744         /*
5745          * Some options can be enabled by ext4 and/or by VFS mount flag
5746          * either way we need to make sure it matches in both *flags and
5747          * s_flags. Copy those selected flags from *flags to s_flags
5748          */
5749         vfs_flags = SB_LAZYTIME | SB_I_VERSION;
5750         sb->s_flags = (sb->s_flags & ~vfs_flags) | (*flags & vfs_flags);
5751
5752         if (!parse_options(data, sb, &parsed_opts, 1)) {
5753                 err = -EINVAL;
5754                 goto restore_opts;
5755         }
5756
5757         if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
5758             test_opt(sb, JOURNAL_CHECKSUM)) {
5759                 ext4_msg(sb, KERN_ERR, "changing journal_checksum "
5760                          "during remount not supported; ignoring");
5761                 sbi->s_mount_opt ^= EXT4_MOUNT_JOURNAL_CHECKSUM;
5762         }
5763
5764         if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
5765                 if (test_opt2(sb, EXPLICIT_DELALLOC)) {
5766                         ext4_msg(sb, KERN_ERR, "can't mount with "
5767                                  "both data=journal and delalloc");
5768                         err = -EINVAL;
5769                         goto restore_opts;
5770                 }
5771                 if (test_opt(sb, DIOREAD_NOLOCK)) {
5772                         ext4_msg(sb, KERN_ERR, "can't mount with "
5773                                  "both data=journal and dioread_nolock");
5774                         err = -EINVAL;
5775                         goto restore_opts;
5776                 }
5777         } else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) {
5778                 if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
5779                         ext4_msg(sb, KERN_ERR, "can't mount with "
5780                                 "journal_async_commit in data=ordered mode");
5781                         err = -EINVAL;
5782                         goto restore_opts;
5783                 }
5784         }
5785
5786         if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_NO_MBCACHE) {
5787                 ext4_msg(sb, KERN_ERR, "can't enable nombcache during remount");
5788                 err = -EINVAL;
5789                 goto restore_opts;
5790         }
5791
5792         if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED))
5793                 ext4_abort(sb, EXT4_ERR_ESHUTDOWN, "Abort forced by user");
5794
5795         sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
5796                 (test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);
5797
5798         es = sbi->s_es;
5799
5800         if (sbi->s_journal) {
5801                 ext4_init_journal_params(sb, sbi->s_journal);
5802                 set_task_ioprio(sbi->s_journal->j_task, parsed_opts.journal_ioprio);
5803         }
5804
5805         /* Flush outstanding errors before changing fs state */
5806         flush_work(&sbi->s_error_work);
5807
5808         if ((bool)(*flags & SB_RDONLY) != sb_rdonly(sb)) {
5809                 if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED)) {
5810                         err = -EROFS;
5811                         goto restore_opts;
5812                 }
5813
5814                 if (*flags & SB_RDONLY) {
5815                         err = sync_filesystem(sb);
5816                         if (err < 0)
5817                                 goto restore_opts;
5818                         err = dquot_suspend(sb, -1);
5819                         if (err < 0)
5820                                 goto restore_opts;
5821
5822                         /*
5823                          * First of all, the unconditional stuff we have to do
5824                          * to disable replay of the journal when we next remount
5825                          */
5826                         sb->s_flags |= SB_RDONLY;
5827
5828                         /*
5829                          * OK, test if we are remounting a valid rw partition
5830                          * readonly, and if so set the rdonly flag and then
5831                          * mark the partition as valid again.
5832                          */
5833                         if (!(es->s_state & cpu_to_le16(EXT4_VALID_FS)) &&
5834                             (sbi->s_mount_state & EXT4_VALID_FS))
5835                                 es->s_state = cpu_to_le16(sbi->s_mount_state);
5836
5837                         if (sbi->s_journal) {
5838                                 /*
5839                                  * We let remount-ro finish even if marking fs
5840                                  * as clean failed...
5841                                  */
5842                                 ext4_mark_recovery_complete(sb, es);
5843                         }
5844                 } else {
5845                         /* Make sure we can mount this feature set readwrite */
5846                         if (ext4_has_feature_readonly(sb) ||
5847                             !ext4_feature_set_ok(sb, 0)) {
5848                                 err = -EROFS;
5849                                 goto restore_opts;
5850                         }
5851                         /*
5852                          * Make sure the group descriptor checksums
5853                          * are sane.  If they aren't, refuse to remount r/w.
5854                          */
5855                         for (g = 0; g < sbi->s_groups_count; g++) {
5856                                 struct ext4_group_desc *gdp =
5857                                         ext4_get_group_desc(sb, g, NULL);
5858
5859                                 if (!ext4_group_desc_csum_verify(sb, g, gdp)) {
5860                                         ext4_msg(sb, KERN_ERR,
5861                "ext4_remount: Checksum for group %u failed (%u!=%u)",
5862                 g, le16_to_cpu(ext4_group_desc_csum(sb, g, gdp)),
5863                                                le16_to_cpu(gdp->bg_checksum));
5864                                         err = -EFSBADCRC;
5865                                         goto restore_opts;
5866                                 }
5867                         }
5868
5869                         /*
5870                          * If we have an unprocessed orphan list hanging
5871                          * around from a previously readonly bdev mount,
5872                          * require a full umount/remount for now.
5873                          */
5874                         if (es->s_last_orphan) {
5875                                 ext4_msg(sb, KERN_WARNING, "Couldn't "
5876                                        "remount RDWR because of unprocessed "
5877                                        "orphan inode list.  Please "
5878                                        "umount/remount instead");
5879                                 err = -EINVAL;
5880                                 goto restore_opts;
5881                         }
5882
5883                         /*
5884                          * Mounting a RDONLY partition read-write, so reread
5885                          * and store the current valid flag.  (It may have
5886                          * been changed by e2fsck since we originally mounted
5887                          * the partition.)
5888                          */
5889                         if (sbi->s_journal) {
5890                                 err = ext4_clear_journal_err(sb, es);
5891                                 if (err)
5892                                         goto restore_opts;
5893                         }
5894                         sbi->s_mount_state = le16_to_cpu(es->s_state);
5895
5896                         err = ext4_setup_super(sb, es, 0);
5897                         if (err)
5898                                 goto restore_opts;
5899
5900                         sb->s_flags &= ~SB_RDONLY;
5901                         if (ext4_has_feature_mmp(sb))
5902                                 if (ext4_multi_mount_protect(sb,
5903                                                 le64_to_cpu(es->s_mmp_block))) {
5904                                         err = -EROFS;
5905                                         goto restore_opts;
5906                                 }
5907                         enable_quota = 1;
5908                 }
5909         }
5910
5911         /*
5912          * Reinitialize lazy itable initialization thread based on
5913          * current settings
5914          */
5915         if (sb_rdonly(sb) || !test_opt(sb, INIT_INODE_TABLE))
5916                 ext4_unregister_li_request(sb);
5917         else {
5918                 ext4_group_t first_not_zeroed;
5919                 first_not_zeroed = ext4_has_uninit_itable(sb);
5920                 ext4_register_li_request(sb, first_not_zeroed);
5921         }
5922
5923         /*
5924          * Handle creation of system zone data early because it can fail.
5925          * Releasing of existing data is done when we are sure remount will
5926          * succeed.
5927          */
5928         if (test_opt(sb, BLOCK_VALIDITY) && !sbi->s_system_blks) {
5929                 err = ext4_setup_system_zone(sb);
5930                 if (err)
5931                         goto restore_opts;
5932         }
5933
5934         if (sbi->s_journal == NULL && !(old_sb_flags & SB_RDONLY)) {
5935                 err = ext4_commit_super(sb);
5936                 if (err)
5937                         goto restore_opts;
5938         }
5939
5940 #ifdef CONFIG_QUOTA
5941         /* Release old quota file names */
5942         for (i = 0; i < EXT4_MAXQUOTAS; i++)
5943                 kfree(old_opts.s_qf_names[i]);
5944         if (enable_quota) {
5945                 if (sb_any_quota_suspended(sb))
5946                         dquot_resume(sb, -1);
5947                 else if (ext4_has_feature_quota(sb)) {
5948                         err = ext4_enable_quotas(sb);
5949                         if (err)
5950                                 goto restore_opts;
5951                 }
5952         }
5953 #endif
5954         if (!test_opt(sb, BLOCK_VALIDITY) && sbi->s_system_blks)
5955                 ext4_release_system_zone(sb);
5956
5957         if (!ext4_has_feature_mmp(sb) || sb_rdonly(sb))
5958                 ext4_stop_mmpd(sbi);
5959
5960         /*
5961          * Some options can be enabled by ext4 and/or by VFS mount flag
5962          * either way we need to make sure it matches in both *flags and
5963          * s_flags. Copy those selected flags from s_flags to *flags
5964          */
5965         *flags = (*flags & ~vfs_flags) | (sb->s_flags & vfs_flags);
5966
5967         ext4_msg(sb, KERN_INFO, "re-mounted. Opts: %s. Quota mode: %s.",
5968                  orig_data, ext4_quota_mode(sb));
5969         kfree(orig_data);
5970         return 0;
5971
5972 restore_opts:
5973         sb->s_flags = old_sb_flags;
5974         sbi->s_mount_opt = old_opts.s_mount_opt;
5975         sbi->s_mount_opt2 = old_opts.s_mount_opt2;
5976         sbi->s_resuid = old_opts.s_resuid;
5977         sbi->s_resgid = old_opts.s_resgid;
5978         sbi->s_commit_interval = old_opts.s_commit_interval;
5979         sbi->s_min_batch_time = old_opts.s_min_batch_time;
5980         sbi->s_max_batch_time = old_opts.s_max_batch_time;
5981         if (!test_opt(sb, BLOCK_VALIDITY) && sbi->s_system_blks)
5982                 ext4_release_system_zone(sb);
5983 #ifdef CONFIG_QUOTA
5984         sbi->s_jquota_fmt = old_opts.s_jquota_fmt;
5985         for (i = 0; i < EXT4_MAXQUOTAS; i++) {
5986                 to_free[i] = get_qf_name(sb, sbi, i);
5987                 rcu_assign_pointer(sbi->s_qf_names[i], old_opts.s_qf_names[i]);
5988         }
5989         synchronize_rcu();
5990         for (i = 0; i < EXT4_MAXQUOTAS; i++)
5991                 kfree(to_free[i]);
5992 #endif
5993         if (!ext4_has_feature_mmp(sb) || sb_rdonly(sb))
5994                 ext4_stop_mmpd(sbi);
5995         kfree(orig_data);
5996         return err;
5997 }
5998
5999 #ifdef CONFIG_QUOTA
6000 static int ext4_statfs_project(struct super_block *sb,
6001                                kprojid_t projid, struct kstatfs *buf)
6002 {
6003         struct kqid qid;
6004         struct dquot *dquot;
6005         u64 limit;
6006         u64 curblock;
6007
6008         qid = make_kqid_projid(projid);
6009         dquot = dqget(sb, qid);
6010         if (IS_ERR(dquot))
6011                 return PTR_ERR(dquot);
6012         spin_lock(&dquot->dq_dqb_lock);
6013
6014         limit = min_not_zero(dquot->dq_dqb.dqb_bsoftlimit,
6015                              dquot->dq_dqb.dqb_bhardlimit);
6016         limit >>= sb->s_blocksize_bits;
6017
6018         if (limit && buf->f_blocks > limit) {
6019                 curblock = (dquot->dq_dqb.dqb_curspace +
6020                             dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits;
6021                 buf->f_blocks = limit;
6022                 buf->f_bfree = buf->f_bavail =
6023                         (buf->f_blocks > curblock) ?
6024                          (buf->f_blocks - curblock) : 0;
6025         }
6026
6027         limit = min_not_zero(dquot->dq_dqb.dqb_isoftlimit,
6028                              dquot->dq_dqb.dqb_ihardlimit);
6029         if (limit && buf->f_files > limit) {
6030                 buf->f_files = limit;
6031                 buf->f_ffree =
6032                         (buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
6033                          (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
6034         }
6035
6036         spin_unlock(&dquot->dq_dqb_lock);
6037         dqput(dquot);
6038         return 0;
6039 }
6040 #endif
6041
6042 static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
6043 {
6044         struct super_block *sb = dentry->d_sb;
6045         struct ext4_sb_info *sbi = EXT4_SB(sb);
6046         struct ext4_super_block *es = sbi->s_es;
6047         ext4_fsblk_t overhead = 0, resv_blocks;
6048         s64 bfree;
6049         resv_blocks = EXT4_C2B(sbi, atomic64_read(&sbi->s_resv_clusters));
6050
6051         if (!test_opt(sb, MINIX_DF))
6052                 overhead = sbi->s_overhead;
6053
6054         buf->f_type = EXT4_SUPER_MAGIC;
6055         buf->f_bsize = sb->s_blocksize;
6056         buf->f_blocks = ext4_blocks_count(es) - EXT4_C2B(sbi, overhead);
6057         bfree = percpu_counter_sum_positive(&sbi->s_freeclusters_counter) -
6058                 percpu_counter_sum_positive(&sbi->s_dirtyclusters_counter);
6059         /* prevent underflow in case that few free space is available */
6060         buf->f_bfree = EXT4_C2B(sbi, max_t(s64, bfree, 0));
6061         buf->f_bavail = buf->f_bfree -
6062                         (ext4_r_blocks_count(es) + resv_blocks);
6063         if (buf->f_bfree < (ext4_r_blocks_count(es) + resv_blocks))
6064                 buf->f_bavail = 0;
6065         buf->f_files = le32_to_cpu(es->s_inodes_count);
6066         buf->f_ffree = percpu_counter_sum_positive(&sbi->s_freeinodes_counter);
6067         buf->f_namelen = EXT4_NAME_LEN;
6068         buf->f_fsid = uuid_to_fsid(es->s_uuid);
6069
6070 #ifdef CONFIG_QUOTA
6071         if (ext4_test_inode_flag(dentry->d_inode, EXT4_INODE_PROJINHERIT) &&
6072             sb_has_quota_limits_enabled(sb, PRJQUOTA))
6073                 ext4_statfs_project(sb, EXT4_I(dentry->d_inode)->i_projid, buf);
6074 #endif
6075         return 0;
6076 }
6077
6078
6079 #ifdef CONFIG_QUOTA
6080
6081 /*
6082  * Helper functions so that transaction is started before we acquire dqio_sem
6083  * to keep correct lock ordering of transaction > dqio_sem
6084  */
/* Return the inode of the quota file backing @dquot's quota type. */
static inline struct inode *dquot_to_inode(struct dquot *dquot)
{
	return sb_dqopt(dquot->dq_sb)->files[dquot->dq_id.type];
}
6089
6090 static int ext4_write_dquot(struct dquot *dquot)
6091 {
6092         int ret, err;
6093         handle_t *handle;
6094         struct inode *inode;
6095
6096         inode = dquot_to_inode(dquot);
6097         handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
6098                                     EXT4_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
6099         if (IS_ERR(handle))
6100                 return PTR_ERR(handle);
6101         ret = dquot_commit(dquot);
6102         err = ext4_journal_stop(handle);
6103         if (!ret)
6104                 ret = err;
6105         return ret;
6106 }
6107
6108 static int ext4_acquire_dquot(struct dquot *dquot)
6109 {
6110         int ret, err;
6111         handle_t *handle;
6112
6113         handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
6114                                     EXT4_QUOTA_INIT_BLOCKS(dquot->dq_sb));
6115         if (IS_ERR(handle))
6116                 return PTR_ERR(handle);
6117         ret = dquot_acquire(dquot);
6118         err = ext4_journal_stop(handle);
6119         if (!ret)
6120                 ret = err;
6121         return ret;
6122 }
6123
6124 static int ext4_release_dquot(struct dquot *dquot)
6125 {
6126         int ret, err;
6127         handle_t *handle;
6128
6129         handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
6130                                     EXT4_QUOTA_DEL_BLOCKS(dquot->dq_sb));
6131         if (IS_ERR(handle)) {
6132                 /* Release dquot anyway to avoid endless cycle in dqput() */
6133                 dquot_release(dquot);
6134                 return PTR_ERR(handle);
6135         }
6136         ret = dquot_release(dquot);
6137         err = ext4_journal_stop(handle);
6138         if (!ret)
6139                 ret = err;
6140         return ret;
6141 }
6142
6143 static int ext4_mark_dquot_dirty(struct dquot *dquot)
6144 {
6145         struct super_block *sb = dquot->dq_sb;
6146
6147         if (ext4_is_quota_journalled(sb)) {
6148                 dquot_mark_dquot_dirty(dquot);
6149                 return ext4_write_dquot(dquot);
6150         } else {
6151                 return dquot_mark_dquot_dirty(dquot);
6152         }
6153 }
6154
6155 static int ext4_write_info(struct super_block *sb, int type)
6156 {
6157         int ret, err;
6158         handle_t *handle;
6159
6160         /* Data block + inode block */
6161         handle = ext4_journal_start(d_inode(sb->s_root), EXT4_HT_QUOTA, 2);
6162         if (IS_ERR(handle))
6163                 return PTR_ERR(handle);
6164         ret = dquot_commit_info(sb, type);
6165         err = ext4_journal_stop(handle);
6166         if (!ret)
6167                 ret = err;
6168         return ret;
6169 }
6170
/*
 * Put @inode's i_data_sem into a separate lockdep subclass so that quota
 * file i_data_sem can legitimately nest inside a regular inode's
 * i_data_sem without triggering false deadlock reports.
 */
static void lockdep_set_quota_inode(struct inode *inode, int subclass)
{
	struct ext4_inode_info *ei = EXT4_I(inode);

	/* The first argument of lockdep_set_subclass has to be
	 * *exactly* the same as the argument to init_rwsem() --- in
	 * this case, in init_once() --- or lockdep gets unhappy
	 * because the name of the lock is set using the
	 * stringification of the argument to init_rwsem().
	 */
	(void) ei;	/* shut up clang warning if !CONFIG_LOCKDEP */
	lockdep_set_subclass(&ei->i_data_sem, subclass);
}
6184
6185 /*
6186  * Standard function to be called on quota_on
6187  */
/*
 * Turn on quotas for a legacy (visible) quota file at @path.
 * Validates the file, flushes the journal if the quota file is in
 * data=journal mode (we bypass the page cache when reading it), tags the
 * inode's i_data_sem for lockdep, and finally marks the quota file
 * NOATIME+IMMUTABLE so userspace cannot tamper with it.
 */
static int ext4_quota_on(struct super_block *sb, int type, int format_id,
			 const struct path *path)
{
	int err;

	if (!test_opt(sb, QUOTA))
		return -EINVAL;

	/* Quotafile not on the same filesystem? */
	if (path->dentry->d_sb != sb)
		return -EXDEV;

	/* Quota already enabled for this file? */
	if (IS_NOQUOTA(d_inode(path->dentry)))
		return -EBUSY;

	/* Journaling quota? */
	if (EXT4_SB(sb)->s_qf_names[type]) {
		/* Quotafile not in fs root? */
		if (path->dentry->d_parent != sb->s_root)
			ext4_msg(sb, KERN_WARNING,
				"Quota file not on filesystem root. "
				"Journaled quota will not work");
		/* Journalled quota writes dquots synchronously, so the
		 * VFS dirty-dquot list is unnecessary overhead. */
		sb_dqopt(sb)->flags |= DQUOT_NOLIST_DIRTY;
	} else {
		/*
		 * Clear the flag just in case mount options changed since
		 * last time.
		 */
		sb_dqopt(sb)->flags &= ~DQUOT_NOLIST_DIRTY;
	}

	/*
	 * When we journal data on quota file, we have to flush journal to see
	 * all updates to the file when we bypass pagecache...
	 */
	if (EXT4_SB(sb)->s_journal &&
	    ext4_should_journal_data(d_inode(path->dentry))) {
		/*
		 * We don't need to lock updates but journal_flush() could
		 * otherwise be livelocked...
		 */
		jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
		err = jbd2_journal_flush(EXT4_SB(sb)->s_journal, 0);
		jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
		if (err)
			return err;
	}

	/* Annotate before enabling: dquot_quota_on() will start doing
	 * quota I/O on this inode, which nests i_data_sem. */
	lockdep_set_quota_inode(path->dentry->d_inode, I_DATA_SEM_QUOTA);
	err = dquot_quota_on(sb, type, format_id, path);
	if (err) {
		lockdep_set_quota_inode(path->dentry->d_inode,
					     I_DATA_SEM_NORMAL);
	} else {
		struct inode *inode = d_inode(path->dentry);
		handle_t *handle;

		/*
		 * Set inode flags to prevent userspace from messing with quota
		 * files. If this fails, we return success anyway since quotas
		 * are already enabled and this is not a hard failure.
		 */
		inode_lock(inode);
		handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1);
		if (IS_ERR(handle))
			goto unlock_inode;
		EXT4_I(inode)->i_flags |= EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL;
		inode_set_flags(inode, S_NOATIME | S_IMMUTABLE,
				S_NOATIME | S_IMMUTABLE);
		err = ext4_mark_inode_dirty(handle, inode);
		ext4_journal_stop(handle);
	unlock_inode:
		inode_unlock(inode);
	}
	return err;
}
6265
/*
 * Enable quotas backed by the hidden quota inodes recorded in the
 * superblock (the "quota" feature), for a single quota @type.
 * Returns -EPERM if no quota inode of that type exists on disk.
 */
static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
			     unsigned int flags)
{
	int err;
	struct inode *qf_inode;
	/* Quota inode numbers come straight from the on-disk superblock */
	unsigned long qf_inums[EXT4_MAXQUOTAS] = {
		le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum)
	};

	BUG_ON(!ext4_has_feature_quota(sb));

	if (!qf_inums[type])
		return -EPERM;

	qf_inode = ext4_iget(sb, qf_inums[type], EXT4_IGET_SPECIAL);
	if (IS_ERR(qf_inode)) {
		ext4_error(sb, "Bad quota inode # %lu", qf_inums[type]);
		return PTR_ERR(qf_inode);
	}

	/* Don't account quota for quota files to avoid recursion */
	qf_inode->i_flags |= S_NOQUOTA;
	lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA);
	err = dquot_load_quota_inode(qf_inode, type, format_id, flags);
	if (err)
		lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL);
	/* dquot_load_quota_inode() grabbed its own reference on success */
	iput(qf_inode);

	return err;
}
6298
6299 /* Enable usage tracking for all quota types. */
6300 int ext4_enable_quotas(struct super_block *sb)
6301 {
6302         int type, err = 0;
6303         unsigned long qf_inums[EXT4_MAXQUOTAS] = {
6304                 le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
6305                 le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum),
6306                 le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum)
6307         };
6308         bool quota_mopt[EXT4_MAXQUOTAS] = {
6309                 test_opt(sb, USRQUOTA),
6310                 test_opt(sb, GRPQUOTA),
6311                 test_opt(sb, PRJQUOTA),
6312         };
6313
6314         sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY;
6315         for (type = 0; type < EXT4_MAXQUOTAS; type++) {
6316                 if (qf_inums[type]) {
6317                         err = ext4_quota_enable(sb, type, QFMT_VFS_V1,
6318                                 DQUOT_USAGE_ENABLED |
6319                                 (quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
6320                         if (err) {
6321                                 ext4_warning(sb,
6322                                         "Failed to enable quota tracking "
6323                                         "(type=%d, err=%d). Please run "
6324                                         "e2fsck to fix.", type, err);
6325                                 for (type--; type >= 0; type--)
6326                                         dquot_quota_off(sb, type);
6327
6328                                 return err;
6329                         }
6330                 }
6331         }
6332         return 0;
6333 }
6334
/*
 * Turn quotas off for @type.  For visible (non-feature) quota files we
 * also drop the NOATIME/IMMUTABLE protection flags and bump mtime/ctime
 * so userspace sees a sane file again; failures past dquot_quota_off()
 * are deliberately not treated as hard errors.
 */
static int ext4_quota_off(struct super_block *sb, int type)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	handle_t *handle;
	int err;

	/* Force all delayed allocation blocks to be allocated.
	 * Caller already holds s_umount sem */
	if (test_opt(sb, DELALLOC))
		sync_filesystem(sb);

	/* No quota file, or it is already going away: plain disable */
	if (!inode || !igrab(inode))
		goto out;

	err = dquot_quota_off(sb, type);
	/* Hidden quota-feature inodes need no flag/time restoration */
	if (err || ext4_has_feature_quota(sb))
		goto out_put;

	inode_lock(inode);
	/*
	 * Update modification times of quota files when userspace can
	 * start looking at them. If we fail, we return success anyway since
	 * this is not a hard failure and quotas are already disabled.
	 */
	handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		goto out_unlock;
	}
	EXT4_I(inode)->i_flags &= ~(EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL);
	inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE);
	inode->i_mtime = inode->i_ctime = current_time(inode);
	err = ext4_mark_inode_dirty(handle, inode);
	ext4_journal_stop(handle);
out_unlock:
	inode_unlock(inode);
out_put:
	/* Quota I/O on this inode is done; restore normal lockdep class */
	lockdep_set_quota_inode(inode, I_DATA_SEM_NORMAL);
	iput(inode);
	return err;
out:
	return dquot_quota_off(sb, type);
}
6378
6379 /* Read data from quotafile - avoid pagecache and such because we cannot afford
6380  * acquiring the locks... As quota files are never truncated and quota code
6381  * itself serializes the operations (and no one else should touch the files)
6382  * we don't have to be afraid of races */
6383 static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
6384                                size_t len, loff_t off)
6385 {
6386         struct inode *inode = sb_dqopt(sb)->files[type];
6387         ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
6388         int offset = off & (sb->s_blocksize - 1);
6389         int tocopy;
6390         size_t toread;
6391         struct buffer_head *bh;
6392         loff_t i_size = i_size_read(inode);
6393
6394         if (off > i_size)
6395                 return 0;
6396         if (off+len > i_size)
6397                 len = i_size-off;
6398         toread = len;
6399         while (toread > 0) {
6400                 tocopy = sb->s_blocksize - offset < toread ?
6401                                 sb->s_blocksize - offset : toread;
6402                 bh = ext4_bread(NULL, inode, blk, 0);
6403                 if (IS_ERR(bh))
6404                         return PTR_ERR(bh);
6405                 if (!bh)        /* A hole? */
6406                         memset(data, 0, tocopy);
6407                 else
6408                         memcpy(data, bh->b_data+offset, tocopy);
6409                 brelse(bh);
6410                 offset = 0;
6411                 toread -= tocopy;
6412                 data += tocopy;
6413                 blk++;
6414         }
6415         return len;
6416 }
6417
/* Write to quotafile (we know the transaction is already started and has
 * enough credits) */
static ssize_t ext4_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
	int err = 0, err2 = 0, offset = off & (sb->s_blocksize - 1);
	int retries = 0;
	struct buffer_head *bh;
	/* Must run inside the caller's already-started handle (see above) */
	handle_t *handle = journal_current_handle();

	if (EXT4_SB(sb)->s_journal && !handle) {
		ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
			" cancelled because transaction is not started",
			(unsigned long long)off, (unsigned long long)len);
		return -EIO;
	}
	/*
	 * Since we account only one data block in transaction credits,
	 * then it is impossible to cross a block boundary.
	 */
	if (sb->s_blocksize - offset < len) {
		ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
			" cancelled because not block aligned",
			(unsigned long long)off, (unsigned long long)len);
		return -EIO;
	}

	/* Allocate the block if needed, retrying on transient ENOSPC */
	do {
		bh = ext4_bread(handle, inode, blk,
				EXT4_GET_BLOCKS_CREATE |
				EXT4_GET_BLOCKS_METADATA_NOFAIL);
	} while (PTR_ERR(bh) == -ENOSPC &&
		 ext4_should_retry_alloc(inode->i_sb, &retries));
	if (IS_ERR(bh))
		return PTR_ERR(bh);
	if (!bh)
		goto out;
	BUFFER_TRACE(bh, "get write access");
	err = ext4_journal_get_write_access(handle, sb, bh, EXT4_JTR_NONE);
	if (err) {
		brelse(bh);
		return err;
	}
	lock_buffer(bh);
	memcpy(bh->b_data+offset, data, len);
	flush_dcache_page(bh->b_page);
	unlock_buffer(bh);
	err = ext4_handle_dirty_metadata(handle, NULL, bh);
	brelse(bh);
out:
	/* Extend the quota file if we wrote past its current end */
	if (inode->i_size < off + len) {
		i_size_write(inode, off + len);
		EXT4_I(inode)->i_disksize = inode->i_size;
		err2 = ext4_mark_inode_dirty(handle, inode);
		if (unlikely(err2 && !err))
			err = err2;
	}
	return err ? err : len;
}
6479 #endif
6480
/* Mount entry point: a thin block-device mount wrapper around
 * ext4_fill_super(). */
static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
		       const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, ext4_fill_super);
}
6486
6487 #if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
/* Also claim the "ext2" filesystem type, so ext4 can mount ext2 images
 * when the standalone ext2 driver is not built.  Failure is non-fatal. */
static inline void register_as_ext2(void)
{
	int err = register_filesystem(&ext2_fs_type);
	if (err)
		printk(KERN_WARNING
		       "EXT4-fs: Unable to register as ext2 (%d)\n", err);
}
6495
/* Undo register_as_ext2() at module unload / init failure. */
static inline void unregister_as_ext2(void)
{
	unregister_filesystem(&ext2_fs_type);
}
6500
/*
 * Return 1 if this filesystem's feature set is something the ext2
 * driver could have handled, i.e. it is safe to service an "ext2"
 * mount with the ext4 driver.
 */
static inline int ext2_feature_set_ok(struct super_block *sb)
{
	/* Any incompat feature beyond ext2's set rules it out entirely. */
	if (ext4_has_unknown_ext2_incompat_features(sb))
		return 0;
	/* ro_compat features only matter for read-write mounts. */
	if (!sb_rdonly(sb) && ext4_has_unknown_ext2_ro_compat_features(sb))
		return 0;
	return 1;
}
6511 #else
/* ext2 is built separately (or ext2 compat is disabled): no-op stubs. */
static inline void register_as_ext2(void) { }
static inline void unregister_as_ext2(void) { }
static inline int ext2_feature_set_ok(struct super_block *sb) { return 0; }
6515 #endif
6516
/* Claim the "ext3" filesystem type; ext4 always services ext3 mounts.
 * Failure is non-fatal. */
static inline void register_as_ext3(void)
{
	int err = register_filesystem(&ext3_fs_type);
	if (err)
		printk(KERN_WARNING
		       "EXT4-fs: Unable to register as ext3 (%d)\n", err);
}
6524
/* Undo register_as_ext3() at module unload / init failure. */
static inline void unregister_as_ext3(void)
{
	unregister_filesystem(&ext3_fs_type);
}
6529
/*
 * Return 1 if this filesystem's feature set is something the ext3
 * driver could have handled, i.e. it is safe to service an "ext3"
 * mount with the ext4 driver.
 */
static inline int ext3_feature_set_ok(struct super_block *sb)
{
	/* Any incompat feature beyond ext3's set rules it out entirely. */
	if (ext4_has_unknown_ext3_incompat_features(sb))
		return 0;
	/* ext3 mandates a journal. */
	if (!ext4_has_feature_journal(sb))
		return 0;
	/* ro_compat features only matter for read-write mounts. */
	if (!sb_rdonly(sb) && ext4_has_unknown_ext3_ro_compat_features(sb))
		return 0;
	return 1;
}
6542
/* Filesystem type registration: block-device backed, supports idmapped
 * mounts (FS_ALLOW_IDMAP). */
static struct file_system_type ext4_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ext4",
	.mount		= ext4_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
};
6550 MODULE_ALIAS_FS("ext4");
6551
6552 /* Shared across all ext4 file systems */
6553 wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ];
6554
/*
 * Module init: set up all global ext4 subsystems, then register the
 * ext2/ext3/ext4 filesystem types.  The error labels unwind strictly in
 * reverse order of initialization; their ordering must be kept in sync
 * with the init sequence (and with ext4_exit_fs()).
 */
static int __init ext4_init_fs(void)
{
	int i, err;

	/* At most one "mounted" message per 30s, burst of 64 */
	ratelimit_state_init(&ext4_mount_msg_ratelimit, 30 * HZ, 64);
	ext4_li_info = NULL;

	/* Build-time check for flags consistency */
	ext4_check_flag_values();

	for (i = 0; i < EXT4_WQ_HASH_SZ; i++)
		init_waitqueue_head(&ext4__ioend_wq[i]);

	err = ext4_init_es();
	if (err)
		return err;

	err = ext4_init_pending();
	if (err)
		goto out7;

	err = ext4_init_post_read_processing();
	if (err)
		goto out6;

	err = ext4_init_pageio();
	if (err)
		goto out5;

	err = ext4_init_system_zone();
	if (err)
		goto out4;

	err = ext4_init_sysfs();
	if (err)
		goto out3;

	err = ext4_init_mballoc();
	if (err)
		goto out2;
	err = init_inodecache();
	if (err)
		goto out1;

	err = ext4_fc_init_dentry_cache();
	if (err)
		goto out05;

	/* Register aliases first so a failure here needs no extra unwind */
	register_as_ext3();
	register_as_ext2();
	err = register_filesystem(&ext4_fs_type);
	if (err)
		goto out;

	return 0;
out:
	unregister_as_ext2();
	unregister_as_ext3();
out05:
	destroy_inodecache();
out1:
	ext4_exit_mballoc();
out2:
	ext4_exit_sysfs();
out3:
	ext4_exit_system_zone();
out4:
	ext4_exit_pageio();
out5:
	ext4_exit_post_read_processing();
out6:
	ext4_exit_pending();
out7:
	ext4_exit_es();

	return err;
}
6632
/*
 * Module exit: stop the lazy-init thread, unregister all filesystem
 * types, then tear down global subsystems in reverse order of
 * ext4_init_fs().
 */
static void __exit ext4_exit_fs(void)
{
	ext4_destroy_lazyinit_thread();
	unregister_as_ext2();
	unregister_as_ext3();
	unregister_filesystem(&ext4_fs_type);
	destroy_inodecache();
	ext4_exit_mballoc();
	ext4_exit_sysfs();
	ext4_exit_system_zone();
	ext4_exit_pageio();
	ext4_exit_post_read_processing();
	ext4_exit_es();
	ext4_exit_pending();
}
6648
6649 MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
6650 MODULE_DESCRIPTION("Fourth Extended Filesystem");
6651 MODULE_LICENSE("GPL");
6652 MODULE_SOFTDEP("pre: crc32c");
6653 module_init(ext4_init_fs)
6654 module_exit(ext4_exit_fs)