From: Linus Torvalds
Date: Thu, 2 Sep 2021 16:37:09 +0000 (-0700)
Subject: Merge tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso...
X-Git-Tag: microblaze-v5.16~130
X-Git-Url: http://git.monstr.eu/?p=linux-2.6-microblaze.git;a=commitdiff_plain;h=111c1aa8cad4a0069dfe98fc093507b5b2cdfda7;hp=-c

Merge tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4

Pull ext4 updates from Ted Ts'o:
 "In addition to some ext4 bug fixes and cleanups, this cycle we add the
  orphan_file feature, which eliminates bottlenecks when doing a large
  number of parallel truncates and file deletions, and move the discard
  operation out of the jbd2 commit thread when using the discard mount
  option, to better support devices with slow discard operations"

* tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4: (23 commits)
  ext4: make the updating inode data procedure atomic
  ext4: remove an unnecessary if statement in __ext4_get_inode_loc()
  ext4: move inode eio simulation behind io completion
  ext4: Improve scalability of ext4 orphan file handling
  ext4: Orphan file documentation
  ext4: Speedup ext4 orphan inode handling
  ext4: Move orphan inode handling into a separate file
  ext4: Support for checksumming from journal triggers
  ext4: fix race writing to an inline_data file while its xattrs are changing
  jbd2: add sparse annotations for add_transaction_credits()
  ext4: fix sparse warnings
  ext4: Make sure quota files are not grabbed accidentally
  ext4: fix e2fsprogs checksum failure for mounted filesystem
  ext4: if zeroout fails fall back to splitting the extent node
  ext4: reduce arguments of ext4_fc_add_dentry_tlv
  ext4: flush background discard kwork when retry allocation
  ext4: get discard out of jbd2 commit kthread context
  ext4: remove the repeated comment of ext4_trim_all_free
  ext4: add new helper interface ext4_try_to_trim_range()
  ext4: remove the 'group' parameter of ext4_trim_extent
  ...
---
111c1aa8cad4a0069dfe98fc093507b5b2cdfda7
diff --combined fs/ext4/ext4.h index 7ebaf66b6e31,98758e8ea7fc..90ff5acaf11f --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@@ -1034,7 -1034,14 +1034,14 @@@ struct ext4_inode_info */ struct rw_semaphore xattr_sem; - struct list_head i_orphan; /* unlinked but open inodes */ + /* + * Inodes with EXT4_STATE_ORPHAN_FILE use i_orphan_idx. Otherwise + * i_orphan is used. + */ + union { + struct list_head i_orphan; /* unlinked but open inodes */ + unsigned int i_orphan_idx; /* Index in orphan file */ + }; /* Fast commit related info */ @@@ -1086,6 -1093,15 +1093,6 @@@ * by other means, so we have i_data_sem. */ struct rw_semaphore i_data_sem; - /* - * i_mmap_sem is for serializing page faults with truncate / punch hole - * operations. We have to make sure that new page cannot be faulted in - * a section of the inode that is being punched. We cannot easily use - * i_data_sem for this since we need protection for the whole punch - * operation and i_data_sem ranks below transaction start so we have - * to occasionally drop it. 
- */ - struct rw_semaphore i_mmap_sem; struct inode vfs_inode; struct jbd2_inode *jinode; @@@ -1419,7 -1435,8 +1426,8 @@@ struct ext4_super_block __u8 s_last_error_errcode; __le16 s_encoding; /* Filename charset encoding */ __le16 s_encoding_flags; /* Filename charset encoding flags */ - __le32 s_reserved[95]; /* Padding to the end of the block */ + __le32 s_orphan_file_inum; /* Inode for tracking orphan inodes */ + __le32 s_reserved[94]; /* Padding to the end of the block */ __le32 s_checksum; /* crc32c(superblock) */ }; @@@ -1438,6 -1455,54 +1446,54 @@@ #define EXT4_ENC_UTF8_12_1 1 + /* Types of ext4 journal triggers */ + enum ext4_journal_trigger_type { + EXT4_JTR_ORPHAN_FILE, + EXT4_JTR_NONE /* This must be the last entry for indexing to work! */ + }; + + #define EXT4_JOURNAL_TRIGGER_COUNT EXT4_JTR_NONE + + struct ext4_journal_trigger { + struct jbd2_buffer_trigger_type tr_triggers; + struct super_block *sb; + }; + + static inline struct ext4_journal_trigger *EXT4_TRIGGER( + struct jbd2_buffer_trigger_type *trigger) + { + return container_of(trigger, struct ext4_journal_trigger, tr_triggers); + } + + #define EXT4_ORPHAN_BLOCK_MAGIC 0x0b10ca04 + + /* Structure at the tail of orphan block */ + struct ext4_orphan_block_tail { + __le32 ob_magic; + __le32 ob_checksum; + }; + + static inline int ext4_inodes_per_orphan_block(struct super_block *sb) + { + return (sb->s_blocksize - sizeof(struct ext4_orphan_block_tail)) / + sizeof(u32); + } + + struct ext4_orphan_block { + atomic_t ob_free_entries; /* Number of free orphan entries in block */ + struct buffer_head *ob_bh; /* Buffer for orphan block */ + }; + + /* + * Info about orphan file. + */ + struct ext4_orphan_info { + int of_blocks; /* Number of orphan blocks in a file */ + __u32 of_csum_seed; /* Checksum seed for orphan file */ + struct ext4_orphan_block *of_binfo; /* Array with info about orphan + * file blocks */ + }; + /* * fourth extended-fs super-block data in memory */ @@@ -1492,9 -1557,11 +1548,11 @@@ struct ext4_sb_info /* Journaling */ struct journal_s *s_journal; - struct list_head s_orphan; - struct mutex s_orphan_lock; unsigned long s_ext4_flags; /* Ext4 superblock flags */ + struct mutex s_orphan_lock; /* Protects on disk list changes */ + struct list_head s_orphan; /* List of orphaned inodes in on disk + list */ + struct ext4_orphan_info s_orphan_info; unsigned long s_commit_interval; u32 s_max_batch_time; u32 s_min_batch_time; @@@ -1527,6 -1594,9 +1585,9 @@@ unsigned int s_mb_free_pending; struct list_head s_freed_data_list; /* List of blocks to be freed after commit completed */ + struct list_head s_discard_list; + struct work_struct s_discard_work; + atomic_t s_retry_alloc_pending; struct rb_root s_mb_avg_fragment_size_root; rwlock_t s_mb_rb_lock; struct list_head *s_mb_largest_free_orders; @@@ -1616,6 -1686,9 +1677,9 @@@ struct mb_cache *s_ea_inode_cache; spinlock_t s_es_lock ____cacheline_aligned_in_smp; + /* Journal triggers for checksum computation */ + struct ext4_journal_trigger s_journal_triggers[EXT4_JOURNAL_TRIGGER_COUNT]; + /* Ratelimit ext4 messages. 
*/ struct ratelimit_state s_err_ratelimit_state; struct ratelimit_state s_warning_ratelimit_state; @@@ -1826,6 -1899,7 +1890,7 @@@ enum EXT4_STATE_LUSTRE_EA_INODE, /* Lustre-style ea_inode */ EXT4_STATE_VERITY_IN_PROGRESS, /* building fs-verity Merkle tree */ EXT4_STATE_FC_COMMITTING, /* Fast commit ongoing */ + EXT4_STATE_ORPHAN_FILE, /* Inode orphaned in orphan file */ }; #define EXT4_INODE_BIT_FNS(name, field, offset) \ @@@ -1927,6 -2001,7 +1992,7 @@@ static inline bool ext4_verity_in_progr */ #define EXT4_FEATURE_COMPAT_FAST_COMMIT 0x0400 #define EXT4_FEATURE_COMPAT_STABLE_INODES 0x0800 + #define EXT4_FEATURE_COMPAT_ORPHAN_FILE 0x1000 /* Orphan file exists */ #define EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER 0x0001 #define EXT4_FEATURE_RO_COMPAT_LARGE_FILE 0x0002 @@@ -1947,6 -2022,8 +2013,8 @@@ #define EXT4_FEATURE_RO_COMPAT_READONLY 0x1000 #define EXT4_FEATURE_RO_COMPAT_PROJECT 0x2000 #define EXT4_FEATURE_RO_COMPAT_VERITY 0x8000 + #define EXT4_FEATURE_RO_COMPAT_ORPHAN_PRESENT 0x10000 /* Orphan file may be + non-empty */ #define EXT4_FEATURE_INCOMPAT_COMPRESSION 0x0001 #define EXT4_FEATURE_INCOMPAT_FILETYPE 0x0002 @@@ -2030,6 -2107,7 +2098,7 @@@ EXT4_FEATURE_COMPAT_FUNCS(dir_index, D EXT4_FEATURE_COMPAT_FUNCS(sparse_super2, SPARSE_SUPER2) EXT4_FEATURE_COMPAT_FUNCS(fast_commit, FAST_COMMIT) EXT4_FEATURE_COMPAT_FUNCS(stable_inodes, STABLE_INODES) + EXT4_FEATURE_COMPAT_FUNCS(orphan_file, ORPHAN_FILE) EXT4_FEATURE_RO_COMPAT_FUNCS(sparse_super, SPARSE_SUPER) EXT4_FEATURE_RO_COMPAT_FUNCS(large_file, LARGE_FILE) @@@ -2044,6 -2122,7 +2113,7 @@@ EXT4_FEATURE_RO_COMPAT_FUNCS(metadata_c EXT4_FEATURE_RO_COMPAT_FUNCS(readonly, READONLY) EXT4_FEATURE_RO_COMPAT_FUNCS(project, PROJECT) EXT4_FEATURE_RO_COMPAT_FUNCS(verity, VERITY) + EXT4_FEATURE_RO_COMPAT_FUNCS(orphan_present, ORPHAN_PRESENT) EXT4_FEATURE_INCOMPAT_FUNCS(compression, COMPRESSION) EXT4_FEATURE_INCOMPAT_FUNCS(filetype, FILETYPE) @@@ -2077,7 -2156,8 +2147,8 @@@ EXT4_FEATURE_INCOMPAT_FUNCS(casefold, EXT4_FEATURE_RO_COMPAT_LARGE_FILE| \ EXT4_FEATURE_RO_COMPAT_BTREE_DIR) - #define EXT4_FEATURE_COMPAT_SUPP EXT4_FEATURE_COMPAT_EXT_ATTR + #define EXT4_FEATURE_COMPAT_SUPP (EXT4_FEATURE_COMPAT_EXT_ATTR| \ + EXT4_FEATURE_COMPAT_ORPHAN_FILE) #define EXT4_FEATURE_INCOMPAT_SUPP (EXT4_FEATURE_INCOMPAT_FILETYPE| \ EXT4_FEATURE_INCOMPAT_RECOVER| \ EXT4_FEATURE_INCOMPAT_META_BG| \ @@@ -2102,7 -2182,8 +2173,8 @@@ EXT4_FEATURE_RO_COMPAT_METADATA_CSUM|\ EXT4_FEATURE_RO_COMPAT_QUOTA |\ EXT4_FEATURE_RO_COMPAT_PROJECT |\ - EXT4_FEATURE_RO_COMPAT_VERITY) + EXT4_FEATURE_RO_COMPAT_VERITY |\ + EXT4_FEATURE_RO_COMPAT_ORPHAN_PRESENT) #define EXTN_FEATURE_FUNCS(ver) \ static inline bool ext4_has_unknown_ext##ver##_compat_features(struct super_block *sb) \ @@@ -2138,6 -2219,8 +2210,8 @@@ static inline bool ext4_has_incompat_fe return (EXT4_SB(sb)->s_es->s_feature_incompat != 0); } + extern int ext4_feature_set_ok(struct super_block *sb, int readonly); + /* * Superblock flags */ @@@ -2150,7 -2233,6 +2224,6 @@@ static inline int ext4_forced_shutdown( return test_bit(EXT4_FLAGS_SHUTDOWN, &sbi->s_ext4_flags); } - /* * Default values for user and/or group using reserved blocks */ @@@ -2911,13 -2993,14 +2984,14 @@@ int ext4_get_block(struct inode *inode int ext4_da_get_block_prep(struct inode *inode, sector_t iblock, struct buffer_head *bh, int create); int ext4_walk_page_buffers(handle_t *handle, + struct inode *inode, struct buffer_head *head, unsigned from, unsigned to, int *partial, - int (*fn)(handle_t *handle, + int (*fn)(handle_t *handle, struct inode *inode, struct 
buffer_head *bh)); - int do_journal_get_write_access(handle_t *handle, + int do_journal_get_write_access(handle_t *handle, struct inode *inode, struct buffer_head *bh); #define FALL_BACK_TO_NONDELALLOC 1 #define CONVERT_INLINE_DATA 2 @@@ -2963,6 -3046,7 +3037,6 @@@ extern int ext4_chunk_trans_blocks(stru extern int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode, loff_t lstart, loff_t lend); extern vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf); -extern vm_fault_t ext4_filemap_fault(struct vm_fault *vmf); extern qsize_t *ext4_get_reserved_space(struct inode *inode); extern int ext4_get_projid(struct inode *inode, kprojid_t *projid); extern void ext4_da_release_space(struct inode *inode, int to_free); @@@ -2996,8 -3080,6 +3070,6 @@@ extern int ext4_init_new_dir(handle_t * struct inode *inode); extern int ext4_dirblock_csum_verify(struct inode *inode, struct buffer_head *bh); - extern int ext4_orphan_add(handle_t *, struct inode *); - extern int ext4_orphan_del(handle_t *, struct inode *); extern int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash, __u32 start_minor_hash, __u32 *next_hash); extern int ext4_search_dir(struct buffer_head *bh, @@@ -3466,6 -3548,7 +3538,7 @@@ static inline bool ext4_is_quota_journa return (ext4_has_feature_quota(sb) || sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]); } + int ext4_enable_quotas(struct super_block *sb); #endif /* @@@ -3727,6 -3810,19 +3800,19 @@@ extern void ext4_stop_mmpd(struct ext4_ /* verity.c */ extern const struct fsverity_operations ext4_verityops; + /* orphan.c */ + extern int ext4_orphan_add(handle_t *, struct inode *); + extern int ext4_orphan_del(handle_t *, struct inode *); + extern void ext4_orphan_cleanup(struct super_block *sb, + struct ext4_super_block *es); + extern void ext4_release_orphan_info(struct super_block *sb); + extern int ext4_init_orphan_info(struct super_block *sb); + extern int ext4_orphan_file_empty(struct super_block *sb); + extern void ext4_orphan_file_block_trigger( + struct jbd2_buffer_trigger_type *triggers, + struct buffer_head *bh, + void *data, size_t size); + /* * Add new method to test whether block and inode bitmaps are properly * initialized. 
With uninit_bg reading the block from disk is not enough diff --combined fs/ext4/extents.c index c33e0a2cb6c3,eb1dd4f024f2..c0de30f25185 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@@ -139,7 -139,8 +139,8 @@@ static int ext4_ext_get_access(handle_ if (path->p_bh) { /* path points to block */ BUFFER_TRACE(path->p_bh, "get_write_access"); - return ext4_journal_get_write_access(handle, path->p_bh); + return ext4_journal_get_write_access(handle, inode->i_sb, + path->p_bh, EXT4_JTR_NONE); } /* path points to leaf/index in inode body */ /* we use in-core data, no need to protect them */ @@@ -1082,7 -1083,8 +1083,8 @@@ static int ext4_ext_split(handle_t *han } lock_buffer(bh); - err = ext4_journal_get_create_access(handle, bh); + err = ext4_journal_get_create_access(handle, inode->i_sb, bh, + EXT4_JTR_NONE); if (err) goto cleanup; @@@ -1160,7 -1162,8 +1162,8 @@@ } lock_buffer(bh); - err = ext4_journal_get_create_access(handle, bh); + err = ext4_journal_get_create_access(handle, inode->i_sb, bh, + EXT4_JTR_NONE); if (err) goto cleanup; @@@ -1286,7 -1289,8 +1289,8 @@@ static int ext4_ext_grow_indepth(handle return -ENOMEM; lock_buffer(bh); - err = ext4_journal_get_create_access(handle, bh); + err = ext4_journal_get_create_access(handle, inode->i_sb, bh, + EXT4_JTR_NONE); if (err) { unlock_buffer(bh); goto out; @@@ -3569,7 -3573,7 +3573,7 @@@ static int ext4_ext_convert_to_initiali split_map.m_len - ee_block); err = ext4_ext_zeroout(inode, &zero_ex1); if (err) - goto out; + goto fallback; split_map.m_len = allocated; } if (split_map.m_lblk - ee_block + split_map.m_len < @@@ -3583,7 -3587,7 +3587,7 @@@ ext4_ext_pblock(ex)); err = ext4_ext_zeroout(inode, &zero_ex2); if (err) - goto out; + goto fallback; } split_map.m_len += split_map.m_lblk - ee_block; @@@ -3592,6 -3596,7 +3596,7 @@@ } } + fallback: err = ext4_split_extent(handle, inode, ppath, &split_map, split_flag, flags); if (err > 0) @@@ -4474,7 -4479,6 +4479,7 @@@ static long ext4_zero_range(struct fil loff_t len, int mode) { struct inode *inode = file_inode(file); + struct address_space *mapping = file->f_mapping; handle_t *handle = NULL; unsigned int max_blocks; loff_t new_size = 0; @@@ -4561,17 -4565,17 +4566,17 @@@ * Prevent page faults from reinstantiating pages we have * released from page cache. */ - down_write(&EXT4_I(inode)->i_mmap_sem); + filemap_invalidate_lock(mapping); ret = ext4_break_layouts(inode); if (ret) { - up_write(&EXT4_I(inode)->i_mmap_sem); + filemap_invalidate_unlock(mapping); goto out_mutex; } ret = ext4_update_disksize_before_punch(inode, offset, len); if (ret) { - up_write(&EXT4_I(inode)->i_mmap_sem); + filemap_invalidate_unlock(mapping); goto out_mutex; } /* Now release the pages and zero block aligned part of pages */ @@@ -4580,7 -4584,7 +4585,7 @@@ ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, flags); - up_write(&EXT4_I(inode)->i_mmap_sem); + filemap_invalidate_unlock(mapping); if (ret) goto out_mutex; } @@@ -5222,7 -5226,6 +5227,7 @@@ out static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len) { struct super_block *sb = inode->i_sb; + struct address_space *mapping = inode->i_mapping; ext4_lblk_t punch_start, punch_stop; handle_t *handle; unsigned int credits; @@@ -5276,7 -5279,7 +5281,7 @@@ * Prevent page faults from reinstantiating pages we have released from * page cache. 
*/ - down_write(&EXT4_I(inode)->i_mmap_sem); + filemap_invalidate_lock(mapping); ret = ext4_break_layouts(inode); if (ret) @@@ -5291,15 -5294,15 +5296,15 @@@ * Write tail of the last page before removed range since it will get * removed from the page cache below. */ - ret = filemap_write_and_wait_range(inode->i_mapping, ioffset, offset); + ret = filemap_write_and_wait_range(mapping, ioffset, offset); if (ret) goto out_mmap; /* * Write data that will be shifted to preserve them when discarding * page cache below. We are also protected from pages becoming dirty - * by i_mmap_sem. + * by i_rwsem and invalidate_lock. */ - ret = filemap_write_and_wait_range(inode->i_mapping, offset + len, + ret = filemap_write_and_wait_range(mapping, offset + len, LLONG_MAX); if (ret) goto out_mmap; @@@ -5352,7 -5355,7 +5357,7 @@@ out_stop ext4_journal_stop(handle); ext4_fc_stop_ineligible(sb); out_mmap: - up_write(&EXT4_I(inode)->i_mmap_sem); + filemap_invalidate_unlock(mapping); out_mutex: inode_unlock(inode); return ret; @@@ -5369,7 -5372,6 +5374,7 @@@ static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len) { struct super_block *sb = inode->i_sb; + struct address_space *mapping = inode->i_mapping; handle_t *handle; struct ext4_ext_path *path; struct ext4_extent *extent; @@@ -5428,7 -5430,7 +5433,7 @@@ * Prevent page faults from reinstantiating pages we have released from * page cache. */ - down_write(&EXT4_I(inode)->i_mmap_sem); + filemap_invalidate_lock(mapping); ret = ext4_break_layouts(inode); if (ret) @@@ -5529,7 -5531,7 +5534,7 @@@ out_stop ext4_journal_stop(handle); ext4_fc_stop_ineligible(sb); out_mmap: - up_write(&EXT4_I(inode)->i_mmap_sem); + filemap_invalidate_unlock(mapping); out_mutex: inode_unlock(inode); return ret; diff --combined fs/ext4/file.c index d3b4ed91aa68,eda12bc50592..ac0e11bbb445 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@@ -704,23 -704,22 +704,23 @@@ static vm_fault_t ext4_dax_huge_fault(s */ bool write = (vmf->flags & FAULT_FLAG_WRITE) && (vmf->vma->vm_flags & VM_SHARED); + struct address_space *mapping = vmf->vma->vm_file->f_mapping; pfn_t pfn; if (write) { sb_start_pagefault(sb); file_update_time(vmf->vma->vm_file); - down_read(&EXT4_I(inode)->i_mmap_sem); + filemap_invalidate_lock_shared(mapping); retry: handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE, EXT4_DATA_TRANS_BLOCKS(sb)); if (IS_ERR(handle)) { - up_read(&EXT4_I(inode)->i_mmap_sem); + filemap_invalidate_unlock_shared(mapping); sb_end_pagefault(sb); return VM_FAULT_SIGBUS; } } else { - down_read(&EXT4_I(inode)->i_mmap_sem); + filemap_invalidate_lock_shared(mapping); } result = dax_iomap_fault(vmf, pe_size, &pfn, &error, &ext4_iomap_ops); if (write) { @@@ -732,10 -731,10 +732,10 @@@ /* Handling synchronous page fault? 
*/ if (result & VM_FAULT_NEEDDSYNC) result = dax_finish_sync_fault(vmf, pe_size, pfn); - up_read(&EXT4_I(inode)->i_mmap_sem); + filemap_invalidate_unlock_shared(mapping); sb_end_pagefault(sb); } else { - up_read(&EXT4_I(inode)->i_mmap_sem); + filemap_invalidate_unlock_shared(mapping); } return result; @@@ -757,7 -756,7 +757,7 @@@ static const struct vm_operations_struc #endif static const struct vm_operations_struct ext4_file_vm_ops = { - .fault = ext4_filemap_fault, + .fault = filemap_fault, .map_pages = filemap_map_pages, .page_mkwrite = ext4_page_mkwrite, }; @@@ -823,7 -822,8 +823,8 @@@ static int ext4_sample_last_mounted(str if (IS_ERR(handle)) goto out; BUFFER_TRACE(sbi->s_sbh, "get_write_access"); - err = ext4_journal_get_write_access(handle, sbi->s_sbh); + err = ext4_journal_get_write_access(handle, sb, sbi->s_sbh, + EXT4_JTR_NONE); if (err) goto out_journal; lock_buffer(sbi->s_sbh); diff --combined fs/ext4/inode.c index 325c038e7b23,62e9165bc69c..d18852d6029c --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@@ -139,7 -139,6 +139,6 @@@ static inline int ext4_begin_ordered_tr static void ext4_invalidatepage(struct page *page, unsigned int offset, unsigned int length); static int __ext4_journalled_writepage(struct page *page, unsigned int len); - static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh); static int ext4_meta_trans_blocks(struct inode *inode, int lblocks, int pextents); @@@ -869,7 -868,8 +868,8 @@@ struct buffer_head *ext4_getblk(handle_ */ lock_buffer(bh); BUFFER_TRACE(bh, "call get_create_access"); - err = ext4_journal_get_create_access(handle, bh); + err = ext4_journal_get_create_access(handle, inode->i_sb, bh, + EXT4_JTR_NONE); if (unlikely(err)) { unlock_buffer(bh); goto errout; @@@ -954,12 -954,12 +954,12 @@@ out_brelse return err; } - int ext4_walk_page_buffers(handle_t *handle, + int ext4_walk_page_buffers(handle_t *handle, struct inode *inode, struct buffer_head *head, unsigned from, unsigned to, int *partial, - int (*fn)(handle_t *handle, + int (*fn)(handle_t *handle, struct inode *inode, struct buffer_head *bh)) { struct buffer_head *bh; @@@ -978,7 -978,7 +978,7 @@@ *partial = 1; continue; } - err = (*fn)(handle, bh); + err = (*fn)(handle, inode, bh); if (!ret) ret = err; } @@@ -1009,7 -1009,7 +1009,7 @@@ * is elevated. We'll still have enough credits for the tiny quotafile * write. 
*/ - int do_journal_get_write_access(handle_t *handle, + int do_journal_get_write_access(handle_t *handle, struct inode *inode, struct buffer_head *bh) { int dirty = buffer_dirty(bh); @@@ -1028,7 -1028,8 +1028,8 @@@ if (dirty) clear_buffer_dirty(bh); BUFFER_TRACE(bh, "get write access"); - ret = ext4_journal_get_write_access(handle, bh); + ret = ext4_journal_get_write_access(handle, inode->i_sb, bh, + EXT4_JTR_NONE); if (!ret && dirty) ret = ext4_handle_dirty_metadata(handle, NULL, bh); return ret; @@@ -1208,8 -1209,8 +1209,8 @@@ retry_journal ret = __block_write_begin(page, pos, len, ext4_get_block); #endif if (!ret && ext4_should_journal_data(inode)) { - ret = ext4_walk_page_buffers(handle, page_buffers(page), - from, to, NULL, + ret = ext4_walk_page_buffers(handle, inode, + page_buffers(page), from, to, NULL, do_journal_get_write_access); } @@@ -1253,7 -1254,8 +1254,8 @@@ } /* For write_end() in data=journal mode */ - static int write_end_fn(handle_t *handle, struct buffer_head *bh) + static int write_end_fn(handle_t *handle, struct inode *inode, + struct buffer_head *bh) { int ret; if (!buffer_mapped(bh) || buffer_freed(bh)) @@@ -1352,6 -1354,7 +1354,7 @@@ errout * to call ext4_handle_dirty_metadata() instead. */ static void ext4_journalled_zero_new_buffers(handle_t *handle, + struct inode *inode, struct page *page, unsigned from, unsigned to) { @@@ -1370,7 -1373,7 +1373,7 @@@ size = min(to, block_end) - start; zero_user(page, start, size); - write_end_fn(handle, bh); + write_end_fn(handle, inode, bh); } clear_buffer_new(bh); } @@@ -1412,13 -1415,13 +1415,13 @@@ static int ext4_journalled_write_end(st copied = ret; } else if (unlikely(copied < len) && !PageUptodate(page)) { copied = 0; - ext4_journalled_zero_new_buffers(handle, page, from, to); + ext4_journalled_zero_new_buffers(handle, inode, page, from, to); } else { if (unlikely(copied < len)) - ext4_journalled_zero_new_buffers(handle, page, + ext4_journalled_zero_new_buffers(handle, inode, page, from + copied, to); - ret = ext4_walk_page_buffers(handle, page_buffers(page), from, - from + copied, &partial, + ret = ext4_walk_page_buffers(handle, inode, page_buffers(page), + from, from + copied, &partial, write_end_fn); if (!partial) SetPageUptodate(page); @@@ -1619,7 -1622,8 +1622,8 @@@ static void ext4_print_free_blocks(stru return; } - static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh) + static int ext4_bh_delay_or_unwritten(handle_t *handle, struct inode *inode, + struct buffer_head *bh) { return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh); } @@@ -1851,13 -1855,15 +1855,15 @@@ int ext4_da_get_block_prep(struct inod return 0; } - static int bget_one(handle_t *handle, struct buffer_head *bh) + static int bget_one(handle_t *handle, struct inode *inode, + struct buffer_head *bh) { get_bh(bh); return 0; } - static int bput_one(handle_t *handle, struct buffer_head *bh) + static int bput_one(handle_t *handle, struct inode *inode, + struct buffer_head *bh) { put_bh(bh); return 0; @@@ -1888,7 -1894,7 +1894,7 @@@ static int __ext4_journalled_writepage( BUG(); goto out; } - ext4_walk_page_buffers(handle, page_bufs, 0, len, + ext4_walk_page_buffers(handle, inode, page_bufs, 0, len, NULL, bget_one); } /* @@@ -1920,11 -1926,11 +1926,11 @@@ if (inline_data) { ret = ext4_mark_inode_dirty(handle, inode); } else { - ret = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL, - do_journal_get_write_access); + ret = ext4_walk_page_buffers(handle, inode, page_bufs, 0, len, + NULL, 
do_journal_get_write_access); - err = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL, - write_end_fn); + err = ext4_walk_page_buffers(handle, inode, page_bufs, 0, len, + NULL, write_end_fn); } if (ret == 0) ret = err; @@@ -1941,7 -1947,7 +1947,7 @@@ out unlock_page(page); out_no_pagelock: if (!inline_data && page_bufs) - ext4_walk_page_buffers(NULL, page_bufs, 0, len, + ext4_walk_page_buffers(NULL, inode, page_bufs, 0, len, NULL, bput_one); brelse(inode_bh); return ret; @@@ -2031,7 -2037,7 +2037,7 @@@ static int ext4_writepage(struct page * * for the extremely common case, this is an optimization that * skips a useless round trip through ext4_bio_write_page(). */ - if (ext4_walk_page_buffers(NULL, page_bufs, 0, len, NULL, + if (ext4_walk_page_buffers(NULL, inode, page_bufs, 0, len, NULL, ext4_bh_delay_or_unwritten)) { redirty_page_for_writepage(wbc, page); if ((current->flags & PF_MEMALLOC) || @@@ -3794,7 -3800,8 +3800,8 @@@ static int __ext4_block_zero_page_range } if (ext4_should_journal_data(inode)) { BUFFER_TRACE(bh, "get write access"); - err = ext4_journal_get_write_access(handle, bh); + err = ext4_journal_get_write_access(handle, inode->i_sb, bh, + EXT4_JTR_NONE); if (err) goto unlock; } @@@ -3950,19 -3957,20 +3957,19 @@@ int ext4_update_disksize_before_punch(s return ret; } -static void ext4_wait_dax_page(struct ext4_inode_info *ei) +static void ext4_wait_dax_page(struct inode *inode) { - up_write(&ei->i_mmap_sem); + filemap_invalidate_unlock(inode->i_mapping); schedule(); - down_write(&ei->i_mmap_sem); + filemap_invalidate_lock(inode->i_mapping); } int ext4_break_layouts(struct inode *inode) { - struct ext4_inode_info *ei = EXT4_I(inode); struct page *page; int error; - if (WARN_ON_ONCE(!rwsem_is_locked(&ei->i_mmap_sem))) + if (WARN_ON_ONCE(!rwsem_is_locked(&inode->i_mapping->invalidate_lock))) return -EINVAL; do { @@@ -3973,7 -3981,7 +3980,7 @@@ error = ___wait_var_event(&page->_refcount, atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE, 0, 0, - ext4_wait_dax_page(ei)); + ext4_wait_dax_page(inode)); } while (error == 0); return error; @@@ -4004,9 -4012,9 +4011,9 @@@ int ext4_punch_hole(struct inode *inode ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA); if (ext4_has_inline_data(inode)) { - down_write(&EXT4_I(inode)->i_mmap_sem); + filemap_invalidate_lock(mapping); ret = ext4_convert_inline_data(inode); - up_write(&EXT4_I(inode)->i_mmap_sem); + filemap_invalidate_unlock(mapping); if (ret) return ret; } @@@ -4057,7 -4065,7 +4064,7 @@@ * Prevent page faults from reinstantiating pages we have released from * page cache. */ - down_write(&EXT4_I(inode)->i_mmap_sem); + filemap_invalidate_lock(mapping); ret = ext4_break_layouts(inode); if (ret) @@@ -4130,7 -4138,7 +4137,7 @@@ out_stop: ext4_journal_stop(handle); out_dio: - up_write(&EXT4_I(inode)->i_mmap_sem); + filemap_invalidate_unlock(mapping); out_mutex: inode_unlock(inode); return ret; @@@ -4329,101 -4337,93 +4336,93 @@@ static int __ext4_get_inode_loc(struct bh = sb_getblk(sb, block); if (unlikely(!bh)) return -ENOMEM; - if (ext4_simulate_fail(sb, EXT4_SIM_INODE_EIO)) - goto simulate_eio; - if (!buffer_uptodate(bh)) { - lock_buffer(bh); + if (ext4_buffer_uptodate(bh)) + goto has_buffer; - if (ext4_buffer_uptodate(bh)) { - /* someone brought it uptodate while we waited */ - unlock_buffer(bh); - goto has_buffer; - } - - /* - * If we have all information of the inode in memory and this - * is the only valid inode in the block, we need not read the - * block. 
- */ - if (in_mem) { - struct buffer_head *bitmap_bh; - int i, start; + lock_buffer(bh); + /* + * If we have all information of the inode in memory and this + * is the only valid inode in the block, we need not read the + * block. + */ + if (in_mem) { + struct buffer_head *bitmap_bh; + int i, start; - start = inode_offset & ~(inodes_per_block - 1); + start = inode_offset & ~(inodes_per_block - 1); - /* Is the inode bitmap in cache? */ - bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp)); - if (unlikely(!bitmap_bh)) - goto make_io; + /* Is the inode bitmap in cache? */ + bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp)); + if (unlikely(!bitmap_bh)) + goto make_io; - /* - * If the inode bitmap isn't in cache then the - * optimisation may end up performing two reads instead - * of one, so skip it. - */ - if (!buffer_uptodate(bitmap_bh)) { - brelse(bitmap_bh); - goto make_io; - } - for (i = start; i < start + inodes_per_block; i++) { - if (i == inode_offset) - continue; - if (ext4_test_bit(i, bitmap_bh->b_data)) - break; - } + /* + * If the inode bitmap isn't in cache then the + * optimisation may end up performing two reads instead + * of one, so skip it. + */ + if (!buffer_uptodate(bitmap_bh)) { brelse(bitmap_bh); - if (i == start + inodes_per_block) { - /* all other inodes are free, so skip I/O */ - memset(bh->b_data, 0, bh->b_size); - set_buffer_uptodate(bh); - unlock_buffer(bh); - goto has_buffer; - } + goto make_io; } + for (i = start; i < start + inodes_per_block; i++) { + if (i == inode_offset) + continue; + if (ext4_test_bit(i, bitmap_bh->b_data)) + break; + } + brelse(bitmap_bh); + if (i == start + inodes_per_block) { + /* all other inodes are free, so skip I/O */ + memset(bh->b_data, 0, bh->b_size); + set_buffer_uptodate(bh); + unlock_buffer(bh); + goto has_buffer; + } + } make_io: - /* - * If we need to do any I/O, try to pre-readahead extra - * blocks from the inode table. - */ - blk_start_plug(&plug); - if (EXT4_SB(sb)->s_inode_readahead_blks) { - ext4_fsblk_t b, end, table; - unsigned num; - __u32 ra_blks = EXT4_SB(sb)->s_inode_readahead_blks; - - table = ext4_inode_table(sb, gdp); - /* s_inode_readahead_blks is always a power of 2 */ - b = block & ~((ext4_fsblk_t) ra_blks - 1); - if (table > b) - b = table; - end = b + ra_blks; - num = EXT4_INODES_PER_GROUP(sb); - if (ext4_has_group_desc_csum(sb)) - num -= ext4_itable_unused_count(sb, gdp); - table += num / inodes_per_block; - if (end > table) - end = table; - while (b <= end) - ext4_sb_breadahead_unmovable(sb, b++); - } + /* + * If we need to do any I/O, try to pre-readahead extra + * blocks from the inode table. + */ + blk_start_plug(&plug); + if (EXT4_SB(sb)->s_inode_readahead_blks) { + ext4_fsblk_t b, end, table; + unsigned num; + __u32 ra_blks = EXT4_SB(sb)->s_inode_readahead_blks; + + table = ext4_inode_table(sb, gdp); + /* s_inode_readahead_blks is always a power of 2 */ + b = block & ~((ext4_fsblk_t) ra_blks - 1); + if (table > b) + b = table; + end = b + ra_blks; + num = EXT4_INODES_PER_GROUP(sb); + if (ext4_has_group_desc_csum(sb)) + num -= ext4_itable_unused_count(sb, gdp); + table += num / inodes_per_block; + if (end > table) + end = table; + while (b <= end) + ext4_sb_breadahead_unmovable(sb, b++); + } - /* - * There are other valid inodes in the buffer, this inode - * has in-inode xattrs, or we don't have this inode in memory. - * Read the block from disk. 
- */ - trace_ext4_load_inode(sb, ino); - ext4_read_bh_nowait(bh, REQ_META | REQ_PRIO, NULL); - blk_finish_plug(&plug); - wait_on_buffer(bh); - if (!buffer_uptodate(bh)) { - simulate_eio: - if (ret_block) - *ret_block = block; - brelse(bh); - return -EIO; - } + /* + * There are other valid inodes in the buffer, this inode + * has in-inode xattrs, or we don't have this inode in memory. + * Read the block from disk. + */ + trace_ext4_load_inode(sb, ino); + ext4_read_bh_nowait(bh, REQ_META | REQ_PRIO, NULL); + blk_finish_plug(&plug); + wait_on_buffer(bh); + ext4_simulate_fail_bh(sb, bh, EXT4_SIM_INODE_EIO); + if (!buffer_uptodate(bh)) { + if (ret_block) + *ret_block = block; + brelse(bh); + return -EIO; } has_buffer: iloc->bh = bh; @@@ -4602,6 -4602,7 +4601,7 @@@ struct inode *__ext4_iget(struct super_ struct ext4_iloc iloc; struct ext4_inode *raw_inode; struct ext4_inode_info *ei; + struct ext4_super_block *es = EXT4_SB(sb)->s_es; struct inode *inode; journal_t *journal = EXT4_SB(sb)->s_journal; long ret; @@@ -4612,9 -4613,13 +4612,13 @@@ projid_t i_projid; if ((!(flags & EXT4_IGET_SPECIAL) && - (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)) || + ((ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO) || + ino == le32_to_cpu(es->s_usr_quota_inum) || + ino == le32_to_cpu(es->s_grp_quota_inum) || + ino == le32_to_cpu(es->s_prj_quota_inum) || + ino == le32_to_cpu(es->s_orphan_file_inum))) || (ino < EXT4_ROOT_INO) || - (ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))) { + (ino > le32_to_cpu(es->s_inodes_count))) { if (flags & EXT4_IGET_HANDLE) return ERR_PTR(-ESTALE); __ext4_error(sb, function, line, false, EFSCORRUPTED, 0, @@@ -4927,8 -4932,14 +4931,14 @@@ static int ext4_inode_blocks_set(handle ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE); return 0; } + + /* + * This should never happen since sb->s_maxbytes should not have + * allowed this, sb->s_maxbytes was set according to the huge_file + * feature in ext4_fill_super(). + */ if (!ext4_has_feature_huge_file(sb)) - return -EFBIG; + return -EFSCORRUPTED; if (i_blocks <= 0xffffffffffffULL) { /* @@@ -5031,16 -5042,14 +5041,14 @@@ static int ext4_do_update_inode(handle_ spin_lock(&ei->i_raw_lock); - /* For fields not tracked in the in-memory inode, - * initialise them to zero for new inodes. */ + /* + * For fields not tracked in the in-memory inode, initialise them + * to zero for new inodes. + */ if (ext4_test_inode_state(inode, EXT4_STATE_NEW)) memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size); err = ext4_inode_blocks_set(handle, raw_inode, ei); - if (err) { - spin_unlock(&ei->i_raw_lock); - goto out_brelse; - } raw_inode->i_mode = cpu_to_le16(inode->i_mode); i_uid = i_uid_read(inode); @@@ -5049,10 -5058,11 +5057,11 @@@ if (!(test_opt(inode->i_sb, NO_UID32))) { raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid)); raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid)); - /* - * Fix up interoperability with old kernels. Otherwise, old inodes get - * re-used with the upper 16 bits of the uid/gid intact - */ + /* + * Fix up interoperability with old kernels. Otherwise, + * old inodes get re-used with the upper 16 bits of the + * uid/gid intact. 
+ */ if (ei->i_dtime && list_empty(&ei->i_orphan)) { raw_inode->i_uid_high = 0; raw_inode->i_gid_high = 0; @@@ -5121,8 -5131,9 +5130,9 @@@ } } - BUG_ON(!ext4_has_feature_project(inode->i_sb) && - i_projid != EXT4_DEF_PROJID); + if (i_projid != EXT4_DEF_PROJID && + !ext4_has_feature_project(inode->i_sb)) + err = err ?: -EFSCORRUPTED; if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE && EXT4_FITS_IN_INODE(raw_inode, ei, i_projid)) @@@ -5130,6 -5141,11 +5140,11 @@@ ext4_inode_csum_set(inode, raw_inode, ei); spin_unlock(&ei->i_raw_lock); + if (err) { + EXT4_ERROR_INODE(inode, "corrupted inode contents"); + goto out_brelse; + } + if (inode->i_sb->s_flags & SB_LAZYTIME) ext4_update_other_inodes_time(inode->i_sb, inode->i_ino, bh->b_data); @@@ -5137,13 -5153,15 +5152,15 @@@ BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); err = ext4_handle_dirty_metadata(handle, NULL, bh); if (err) - goto out_brelse; + goto out_error; ext4_clear_inode_state(inode, EXT4_STATE_NEW); if (set_large_file) { BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get write access"); - err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh); + err = ext4_journal_get_write_access(handle, sb, + EXT4_SB(sb)->s_sbh, + EXT4_JTR_NONE); if (err) - goto out_brelse; + goto out_error; lock_buffer(EXT4_SB(sb)->s_sbh); ext4_set_feature_large_file(sb); ext4_superblock_csum_set(sb); @@@ -5153,9 -5171,10 +5170,10 @@@ EXT4_SB(sb)->s_sbh); } ext4_update_inode_fsync_trans(handle, inode, need_datasync); + out_error: + ext4_std_error(inode->i_sb, err); out_brelse: brelse(bh); - ext4_std_error(inode->i_sb, err); return err; } @@@ -5425,11 -5444,11 +5443,11 @@@ int ext4_setattr(struct user_namespace inode_dio_wait(inode); } - down_write(&EXT4_I(inode)->i_mmap_sem); + filemap_invalidate_lock(inode->i_mapping); rc = ext4_break_layouts(inode); if (rc) { - up_write(&EXT4_I(inode)->i_mmap_sem); + filemap_invalidate_unlock(inode->i_mapping); goto err_out; } @@@ -5505,7 -5524,7 +5523,7 @@@ error = rc; } out_mmap_sem: - up_write(&EXT4_I(inode)->i_mmap_sem); + filemap_invalidate_unlock(inode->i_mapping); } if (!error) { @@@ -5742,7 -5761,8 +5760,8 @@@ ext4_reserve_inode_write(handle_t *hand err = ext4_get_inode_loc(inode, iloc); if (!err) { BUFFER_TRACE(iloc->bh, "get_write_access"); - err = ext4_journal_get_write_access(handle, iloc->bh); + err = ext4_journal_get_write_access(handle, inode->i_sb, + iloc->bh, EXT4_JTR_NONE); if (err) { brelse(iloc->bh); iloc->bh = NULL; @@@ -5865,7 -5885,8 +5884,8 @@@ int ext4_expand_extra_isize(struct inod ext4_write_lock_xattr(inode, &no_expand); BUFFER_TRACE(iloc->bh, "get_write_access"); - error = ext4_journal_get_write_access(handle, iloc->bh); + error = ext4_journal_get_write_access(handle, inode->i_sb, iloc->bh, + EXT4_JTR_NONE); if (error) { brelse(iloc->bh); goto out_unlock; @@@ -5982,10 -6003,10 +6002,10 @@@ int ext4_change_inode_journal_flag(stru * data (and journalled aops don't know how to handle these cases). */ if (val) { - down_write(&EXT4_I(inode)->i_mmap_sem); + filemap_invalidate_lock(inode->i_mapping); err = filemap_write_and_wait(inode->i_mapping); if (err < 0) { - up_write(&EXT4_I(inode)->i_mmap_sem); + filemap_invalidate_unlock(inode->i_mapping); return err; } } @@@ -6018,7 -6039,7 +6038,7 @@@ percpu_up_write(&sbi->s_writepages_rwsem); if (val) - up_write(&EXT4_I(inode)->i_mmap_sem); + filemap_invalidate_unlock(inode->i_mapping); /* Finally we can mark the inode as dirty. 
*/ @@@ -6036,7 -6057,8 +6056,8 @@@ return err; } - static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh) + static int ext4_bh_unmapped(handle_t *handle, struct inode *inode, + struct buffer_head *bh) { return !buffer_mapped(bh); } @@@ -6062,7 -6084,7 +6083,7 @@@ vm_fault_t ext4_page_mkwrite(struct vm_ sb_start_pagefault(inode->i_sb); file_update_time(vma->vm_file); - down_read(&EXT4_I(inode)->i_mmap_sem); + filemap_invalidate_lock_shared(mapping); err = ext4_convert_inline_data(inode); if (err) @@@ -6109,7 -6131,7 +6130,7 @@@ * inode to the transaction's list to writeprotect pages on commit. */ if (page_has_buffers(page)) { - if (!ext4_walk_page_buffers(NULL, page_buffers(page), + if (!ext4_walk_page_buffers(NULL, inode, page_buffers(page), 0, len, NULL, ext4_bh_unmapped)) { /* Wait so that we don't change page under IO */ @@@ -6155,11 -6177,13 +6176,13 @@@ retry_alloc err = __block_write_begin(page, 0, len, ext4_get_block); if (!err) { ret = VM_FAULT_SIGBUS; - if (ext4_walk_page_buffers(handle, page_buffers(page), - 0, len, NULL, do_journal_get_write_access)) + if (ext4_walk_page_buffers(handle, inode, + page_buffers(page), 0, len, NULL, + do_journal_get_write_access)) goto out_error; - if (ext4_walk_page_buffers(handle, page_buffers(page), - 0, len, NULL, write_end_fn)) + if (ext4_walk_page_buffers(handle, inode, + page_buffers(page), 0, len, NULL, + write_end_fn)) goto out_error; if (ext4_jbd2_inode_add_write(handle, inode, page_offset(page), len)) @@@ -6175,7 -6199,7 +6198,7 @@@ out_ret: ret = block_page_mkwrite_return(err); out: - up_read(&EXT4_I(inode)->i_mmap_sem); + filemap_invalidate_unlock_shared(mapping); sb_end_pagefault(inode->i_sb); return ret; out_error: @@@ -6183,3 -6207,15 +6206,3 @@@ ext4_journal_stop(handle); goto out; } - -vm_fault_t ext4_filemap_fault(struct vm_fault *vmf) -{ - struct inode *inode = file_inode(vmf->vma->vm_file); - vm_fault_t ret; - - down_read(&EXT4_I(inode)->i_mmap_sem); - ret = filemap_fault(vmf); - up_read(&EXT4_I(inode)->i_mmap_sem); - - return ret; -} diff --combined fs/ext4/ioctl.c index 4fb5fe083c2b,20aeff88cab6..606dee9e08a3 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c @@@ -148,7 -148,7 +148,7 @@@ static long swap_inode_boot_loader(stru goto journal_err_out; } - down_write(&EXT4_I(inode)->i_mmap_sem); + filemap_invalidate_lock(inode->i_mapping); err = filemap_write_and_wait(inode->i_mapping); if (err) goto err_out; @@@ -256,7 -256,7 +256,7 @@@ err_out1 ext4_double_up_write_data_sem(inode, inode_bl); err_out: - up_write(&EXT4_I(inode)->i_mmap_sem); + filemap_invalidate_unlock(inode->i_mapping); journal_err_out: unlock_two_nondirectories(inode, inode_bl); iput(inode_bl); @@@ -1154,7 -1154,9 +1154,9 @@@ resizefs_out err = PTR_ERR(handle); goto pwsalt_err_exit; } - err = ext4_journal_get_write_access(handle, sbi->s_sbh); + err = ext4_journal_get_write_access(handle, sb, + sbi->s_sbh, + EXT4_JTR_NONE); if (err) goto pwsalt_err_journal; lock_buffer(sbi->s_sbh); diff --combined fs/ext4/super.c index d6df62fc810c,feca816b6bf3..136940af00b8 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@@ -80,7 -80,6 +80,6 @@@ static struct dentry *ext4_mount(struc const char *dev_name, void *data); static inline int ext2_feature_set_ok(struct super_block *sb); static inline int ext3_feature_set_ok(struct super_block *sb); - static int ext4_feature_set_ok(struct super_block *sb, int readonly); static void ext4_destroy_lazyinit_thread(void); static void ext4_unregister_li_request(struct super_block *sb); static void ext4_clear_request_list(void); @@@ -90,9 
-89,12 +89,9 @@@ static struct inode *ext4_get_journal_i /* * Lock ordering * - * Note the difference between i_mmap_sem (EXT4_I(inode)->i_mmap_sem) and - * i_mmap_rwsem (inode->i_mmap_rwsem)! - * * page fault path: - * mmap_lock -> sb_start_pagefault -> i_mmap_sem (r) -> transaction start -> - * page lock -> i_data_sem (rw) + * mmap_lock -> sb_start_pagefault -> invalidate_lock (r) -> transaction start + * -> page lock -> i_data_sem (rw) * * buffered write path: * sb_start_write -> i_mutex -> mmap_lock @@@ -100,9 -102,8 +99,9 @@@ * i_data_sem (rw) * * truncate: - * sb_start_write -> i_mutex -> i_mmap_sem (w) -> i_mmap_rwsem (w) -> page lock - * sb_start_write -> i_mutex -> i_mmap_sem (w) -> transaction start -> + * sb_start_write -> i_mutex -> invalidate_lock (w) -> i_mmap_rwsem (w) -> + * page lock + * sb_start_write -> i_mutex -> invalidate_lock (w) -> transaction start -> * i_data_sem (rw) * * direct IO: @@@ -1173,6 -1174,7 +1172,7 @@@ static void ext4_put_super(struct super flush_work(&sbi->s_error_work); destroy_workqueue(sbi->rsv_conversion_wq); + ext4_release_orphan_info(sb); /* * Unregister sysfs before destroying jbd2 journal. @@@ -1198,6 -1200,7 +1198,7 @@@ if (!sb_rdonly(sb) && !aborted) { ext4_clear_feature_journal_needs_recovery(sb); + ext4_clear_feature_orphan_present(sb); es->s_state = cpu_to_le16(sbi->s_mount_state); } if (!sb_rdonly(sb)) @@@ -1358,6 -1361,7 +1359,6 @@@ static void init_once(void *foo INIT_LIST_HEAD(&ei->i_orphan); init_rwsem(&ei->xattr_sem); init_rwsem(&ei->i_data_sem); - init_rwsem(&ei->i_mmap_sem); inode_init_once(&ei->vfs_inode); ext4_fc_init_inode(&ei->vfs_inode); } @@@ -1582,14 -1586,12 +1583,12 @@@ static int ext4_mark_dquot_dirty(struc static int ext4_write_info(struct super_block *sb, int type); static int ext4_quota_on(struct super_block *sb, int type, int format_id, const struct path *path); - static int ext4_quota_on_mount(struct super_block *sb, int type); static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data, size_t len, loff_t off); static ssize_t ext4_quota_write(struct super_block *sb, int type, const char *data, size_t len, loff_t off); static int ext4_quota_enable(struct super_block *sb, int type, int format_id, unsigned int flags); - static int ext4_enable_quotas(struct super_block *sb); static struct dquot **ext4_get_dquots(struct inode *inode) { @@@ -2684,8 -2686,11 +2683,11 @@@ static int ext4_setup_super(struct supe es->s_max_mnt_count = cpu_to_le16(EXT4_DFL_MAX_MNT_COUNT); le16_add_cpu(&es->s_mnt_count, 1); ext4_update_tstamp(es, s_mtime); - if (sbi->s_journal) + if (sbi->s_journal) { ext4_set_feature_journal_needs_recovery(sb); + if (ext4_has_feature_orphan_file(sb)) + ext4_set_feature_orphan_present(sb); + } err = ext4_commit_super(sb); done: @@@ -2967,169 -2972,6 +2969,6 @@@ static int ext4_check_descriptors(struc return 1; } - /* ext4_orphan_cleanup() walks a singly-linked list of inodes (starting at - * the superblock) which were deleted from all directories, but held open by - * a process at the time of a crash. We walk the list and try to delete these - * inodes at recovery time (only with a read-write filesystem). - * - * In order to keep the orphan inode chain consistent during traversal (in - * case of crash during recovery), we link each inode into the superblock - * orphan list_head and handle it the same way as an inode deletion during - * normal operation (which journals the operations for us). 
- * - * We only do an iget() and an iput() on each inode, which is very safe if we - * accidentally point at an in-use or already deleted inode. The worst that - * can happen in this case is that we get a "bit already cleared" message from - * ext4_free_inode(). The only reason we would point at a wrong inode is if - * e2fsck was run on this filesystem, and it must have already done the orphan - * inode cleanup for us, so we can safely abort without any further action. - */ - static void ext4_orphan_cleanup(struct super_block *sb, - struct ext4_super_block *es) - { - unsigned int s_flags = sb->s_flags; - int ret, nr_orphans = 0, nr_truncates = 0; - #ifdef CONFIG_QUOTA - int quota_update = 0; - int i; - #endif - if (!es->s_last_orphan) { - jbd_debug(4, "no orphan inodes to clean up\n"); - return; - } - - if (bdev_read_only(sb->s_bdev)) { - ext4_msg(sb, KERN_ERR, "write access " - "unavailable, skipping orphan cleanup"); - return; - } - - /* Check if feature set would not allow a r/w mount */ - if (!ext4_feature_set_ok(sb, 0)) { - ext4_msg(sb, KERN_INFO, "Skipping orphan cleanup due to " - "unknown ROCOMPAT features"); - return; - } - - if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) { - /* don't clear list on RO mount w/ errors */ - if (es->s_last_orphan && !(s_flags & SB_RDONLY)) { - ext4_msg(sb, KERN_INFO, "Errors on filesystem, " - "clearing orphan list.\n"); - es->s_last_orphan = 0; - } - jbd_debug(1, "Skipping orphan recovery on fs with errors.\n"); - return; - } - - if (s_flags & SB_RDONLY) { - ext4_msg(sb, KERN_INFO, "orphan cleanup on readonly fs"); - sb->s_flags &= ~SB_RDONLY; - } - #ifdef CONFIG_QUOTA - /* - * Turn on quotas which were not enabled for read-only mounts if - * filesystem has quota feature, so that they are updated correctly. - */ - if (ext4_has_feature_quota(sb) && (s_flags & SB_RDONLY)) { - int ret = ext4_enable_quotas(sb); - - if (!ret) - quota_update = 1; - else - ext4_msg(sb, KERN_ERR, - "Cannot turn on quotas: error %d", ret); - } - - /* Turn on journaled quotas used for old sytle */ - for (i = 0; i < EXT4_MAXQUOTAS; i++) { - if (EXT4_SB(sb)->s_qf_names[i]) { - int ret = ext4_quota_on_mount(sb, i); - - if (!ret) - quota_update = 1; - else - ext4_msg(sb, KERN_ERR, - "Cannot turn on journaled " - "quota: type %d: error %d", i, ret); - } - } - #endif - - while (es->s_last_orphan) { - struct inode *inode; - - /* - * We may have encountered an error during cleanup; if - * so, skip the rest. - */ - if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) { - jbd_debug(1, "Skipping orphan recovery on fs with errors.\n"); - es->s_last_orphan = 0; - break; - } - - inode = ext4_orphan_get(sb, le32_to_cpu(es->s_last_orphan)); - if (IS_ERR(inode)) { - es->s_last_orphan = 0; - break; - } - - list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan); - dquot_initialize(inode); - if (inode->i_nlink) { - if (test_opt(sb, DEBUG)) - ext4_msg(sb, KERN_DEBUG, - "%s: truncating inode %lu to %lld bytes", - __func__, inode->i_ino, inode->i_size); - jbd_debug(2, "truncating inode %lu to %lld bytes\n", - inode->i_ino, inode->i_size); - inode_lock(inode); - truncate_inode_pages(inode->i_mapping, inode->i_size); - ret = ext4_truncate(inode); - if (ret) { - /* - * We need to clean up the in-core orphan list - * manually if ext4_truncate() failed to get a - * transaction handle. 
- */ - ext4_orphan_del(NULL, inode); - ext4_std_error(inode->i_sb, ret); - } - inode_unlock(inode); - nr_truncates++; - } else { - if (test_opt(sb, DEBUG)) - ext4_msg(sb, KERN_DEBUG, - "%s: deleting unreferenced inode %lu", - __func__, inode->i_ino); - jbd_debug(2, "deleting unreferenced inode %lu\n", - inode->i_ino); - nr_orphans++; - } - iput(inode); /* The delete magic happens here! */ - } - - #define PLURAL(x) (x), ((x) == 1) ? "" : "s" - - if (nr_orphans) - ext4_msg(sb, KERN_INFO, "%d orphan inode%s deleted", - PLURAL(nr_orphans)); - if (nr_truncates) - ext4_msg(sb, KERN_INFO, "%d truncate%s cleaned up", - PLURAL(nr_truncates)); - #ifdef CONFIG_QUOTA - /* Turn off quotas if they were enabled for orphan cleanup */ - if (quota_update) { - for (i = 0; i < EXT4_MAXQUOTAS; i++) { - if (sb_dqopt(sb)->files[i]) - dquot_quota_off(sb, i); - } - } - #endif - sb->s_flags = s_flags; /* Restore SB_RDONLY status */ - } - /* * Maximal extent format file size. * Resulting logical blkno at s_maxbytes must fit in our on-disk @@@ -3309,7 -3151,7 +3148,7 @@@ static unsigned long ext4_get_stripe_si * Returns 1 if this filesystem can be mounted as requested, * 0 if it cannot be. */ - static int ext4_feature_set_ok(struct super_block *sb, int readonly) + int ext4_feature_set_ok(struct super_block *sb, int readonly) { if (ext4_has_unknown_ext4_incompat_features(sb)) { ext4_msg(sb, KERN_ERR, @@@ -4011,6 -3853,20 +3850,20 @@@ static const char *ext4_quota_mode(stru #endif } + static void ext4_setup_csum_trigger(struct super_block *sb, + enum ext4_journal_trigger_type type, + void (*trigger)( + struct jbd2_buffer_trigger_type *type, + struct buffer_head *bh, + void *mapped_data, + size_t size)) + { + struct ext4_sb_info *sbi = EXT4_SB(sb); + + sbi->s_journal_triggers[type].sb = sb; + sbi->s_journal_triggers[type].tr_triggers.t_frozen = trigger; + } + static int ext4_fill_super(struct super_block *sb, void *data, int silent) { struct dax_device *dax_dev = fs_dax_get_by_bdev(sb->s_bdev); @@@ -4109,6 -3965,8 +3962,8 @@@ silent = 1; goto cantfind_ext4; } + ext4_setup_csum_trigger(sb, EXT4_JTR_ORPHAN_FILE, + ext4_orphan_file_block_trigger); /* Load the checksum driver */ sbi->s_chksum_driver = crypto_alloc_shash("crc32c", 0, 0); @@@ -4773,6 -4631,7 +4628,7 @@@ sb->s_root = NULL; needs_recovery = (es->s_last_orphan != 0 || + ext4_has_feature_orphan_present(sb) || ext4_has_feature_journal_needs_recovery(sb)); if (ext4_has_feature_mmp(sb) && !sb_rdonly(sb)) @@@ -5029,6 -4888,14 +4885,14 @@@ no_journal err = percpu_counter_init(&sbi->s_freeinodes_counter, freei, GFP_KERNEL); } + /* + * Update the checksum after updating free space/inode + * counters. Otherwise the superblock can have an incorrect + * checksum in the buffer cache until it is written out and + * e2fsprogs programs trying to open a file system immediately + * after it is mounted can fail. + */ + ext4_superblock_csum_set(sb); if (!err) err = percpu_counter_init(&sbi->s_dirs_counter, ext4_count_dirs(sb), GFP_KERNEL); @@@ -5063,12 -4930,15 +4927,15 @@@ if (err) goto failed_mount7; + err = ext4_init_orphan_info(sb); + if (err) + goto failed_mount8; #ifdef CONFIG_QUOTA /* Enable quota usage during mount. 
*/ if (ext4_has_feature_quota(sb) && !sb_rdonly(sb)) { err = ext4_enable_quotas(sb); if (err) - goto failed_mount8; + goto failed_mount9; } #endif /* CONFIG_QUOTA */ @@@ -5087,7 -4957,7 +4954,7 @@@ ext4_msg(sb, KERN_INFO, "recovery complete"); err = ext4_mark_recovery_complete(sb, es); if (err) - goto failed_mount8; + goto failed_mount9; } if (EXT4_SB(sb)->s_journal) { if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) @@@ -5133,6 -5003,8 +5000,8 @@@ cantfind_ext4 ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem"); goto failed_mount; + failed_mount9: + ext4_release_orphan_info(sb); failed_mount8: ext4_unregister_sysfs(sb); kobject_put(&sbi->s_kobj); @@@ -5643,8 -5515,15 +5512,15 @@@ static int ext4_mark_recovery_complete( if (err < 0) goto out; - if (ext4_has_feature_journal_needs_recovery(sb) && sb_rdonly(sb)) { + if (sb_rdonly(sb) && (ext4_has_feature_journal_needs_recovery(sb) || + ext4_has_feature_orphan_present(sb))) { + if (!ext4_orphan_file_empty(sb)) { + ext4_error(sb, "Orphan file not empty on read-only fs."); + err = -EFSCORRUPTED; + goto out; + } ext4_clear_feature_journal_needs_recovery(sb); + ext4_clear_feature_orphan_present(sb); ext4_commit_super(sb); } out: @@@ -5787,6 -5666,8 +5663,8 @@@ static int ext4_freeze(struct super_blo /* Journal blocked and flushed, clear needs_recovery flag. */ ext4_clear_feature_journal_needs_recovery(sb); + if (ext4_orphan_file_empty(sb)) + ext4_clear_feature_orphan_present(sb); } error = ext4_commit_super(sb); @@@ -5809,6 -5690,8 +5687,8 @@@ static int ext4_unfreeze(struct super_b if (EXT4_SB(sb)->s_journal) { /* Reset the needs_recovery flag before the fs is unlocked. */ ext4_set_feature_journal_needs_recovery(sb); + if (ext4_has_feature_orphan_file(sb)) + ext4_set_feature_orphan_present(sb); } ext4_commit_super(sb); @@@ -6012,7 -5895,7 +5892,7 @@@ static int ext4_remount(struct super_bl * around from a previously readonly bdev mount, * require a full umount/remount for now. */ - if (es->s_last_orphan) { + if (es->s_last_orphan || !ext4_orphan_file_empty(sb)) { ext4_msg(sb, KERN_WARNING, "Couldn't " "remount RDWR because of unprocessed " "orphan inode list. Please " @@@ -6309,16 -6192,6 +6189,6 @@@ static int ext4_write_info(struct super return ret; } - /* - * Turn on quotas during mount time - we need to find - * the quota file and such... - */ - static int ext4_quota_on_mount(struct super_block *sb, int type) - { - return dquot_quota_on_mount(sb, get_qf_name(sb, EXT4_SB(sb), type), - EXT4_SB(sb)->s_jquota_fmt, type); - } - static void lockdep_set_quota_inode(struct inode *inode, int subclass) { struct ext4_inode_info *ei = EXT4_I(inode); @@@ -6448,7 -6321,7 +6318,7 @@@ static int ext4_quota_enable(struct sup } /* Enable usage tracking for all quota types. */ - static int ext4_enable_quotas(struct super_block *sb) + int ext4_enable_quotas(struct super_block *sb) { int type, err = 0; unsigned long qf_inums[EXT4_MAXQUOTAS] = { @@@ -6606,7 -6479,7 +6476,7 @@@ static ssize_t ext4_quota_write(struct if (!bh) goto out; BUFFER_TRACE(bh, "get write access"); - err = ext4_journal_get_write_access(handle, bh); + err = ext4_journal_get_write_access(handle, sb, bh, EXT4_JTR_NONE); if (err) { brelse(bh); return err;
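
The ext4.h hunk above defines the on-disk layout of an orphan-file block: an array of 32-bit inode numbers followed by an 8-byte tail carrying EXT4_ORPHAN_BLOCK_MAGIC and a checksum, with ext4_inodes_per_orphan_block() deriving the entry count from the block size. Below is a minimal user-space sketch, not kernel code; plain uint32_t stands in for __le32, and the struct and helper names are mirrored from the diff only for illustration.

#include <stdint.h>
#include <stdio.h>

/* Value taken from the ext4.h hunk above (kept for reference). */
#define EXT4_ORPHAN_BLOCK_MAGIC 0x0b10ca04

/* Tail stored at the end of every orphan-file block (8 bytes). */
struct ext4_orphan_block_tail {
	uint32_t ob_magic;    /* __le32 on disk */
	uint32_t ob_checksum; /* __le32 on disk */
};

/* Mirrors ext4_inodes_per_orphan_block(): 32-bit inode numbers fill
 * the block up to the tail. */
static int inodes_per_orphan_block(unsigned int blocksize)
{
	return (blocksize - sizeof(struct ext4_orphan_block_tail)) /
		sizeof(uint32_t);
}

int main(void)
{
	unsigned int bs;

	for (bs = 1024; bs <= 4096; bs *= 2)
		printf("block size %4u -> %d orphan entries per block\n",
		       bs, inodes_per_orphan_block(bs));
	return 0;
}

For a 4096-byte block this works out to 1022 entries per block, so a small orphan file can track many concurrently orphaned inodes without every truncate or unlink contending on the single superblock-chained orphan list protected by s_orphan_lock.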
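
The series also wires per-superblock jbd2 buffer triggers (struct ext4_journal_trigger, EXT4_TRIGGER(), the s_journal_triggers[] array and ext4_orphan_file_block_trigger()) so that orphan-file block checksums are recomputed from the journal commit path. The following is only a rough user-space model of that container_of pattern; the stand-in types are heavily simplified, and the real jbd2 t_frozen callback also receives a buffer_head, which is omitted here.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Simplified stand-in for the jbd2 trigger type. */
struct jbd2_buffer_trigger_type {
	void (*t_frozen)(struct jbd2_buffer_trigger_type *type,
			 void *mapped_data, size_t size);
};

/* Simplified stand-in for struct super_block. */
struct super_block { const char *s_id; };

struct ext4_journal_trigger {
	struct jbd2_buffer_trigger_type tr_triggers;
	struct super_block *sb;
};

/* Mirrors EXT4_TRIGGER(): recover the ext4 wrapper from the embedded
 * trigger that jbd2 hands back. */
static struct ext4_journal_trigger *
EXT4_TRIGGER(struct jbd2_buffer_trigger_type *trigger)
{
	return container_of(trigger, struct ext4_journal_trigger, tr_triggers);
}

/* Simplified stand-in for ext4_orphan_file_block_trigger(). */
static void orphan_file_trigger(struct jbd2_buffer_trigger_type *type,
				void *mapped_data, size_t size)
{
	struct ext4_journal_trigger *trigger = EXT4_TRIGGER(type);

	printf("recompute checksum over %zu bytes for sb %s\n",
	       size, trigger->sb->s_id);
}

int main(void)
{
	struct super_block sb = { .s_id = "sda1" };
	struct ext4_journal_trigger trig = {
		.tr_triggers = { .t_frozen = orphan_file_trigger },
		.sb = &sb,
	};
	struct jbd2_buffer_trigger_type *seen = &trig.tr_triggers;
	char block[64] = { 0 };

	/* jbd2 only sees the embedded trigger type; the callback climbs
	 * back to the per-superblock context. */
	seen->t_frozen(seen, block, sizeof(block));
	return 0;
}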