1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6 * http://www.samsung.com/
9 #include <linux/f2fs_fs.h>
10 #include <linux/stat.h>
11 #include <linux/buffer_head.h>
12 #include <linux/writeback.h>
13 #include <linux/blkdev.h>
14 #include <linux/falloc.h>
15 #include <linux/types.h>
16 #include <linux/compat.h>
17 #include <linux/uaccess.h>
18 #include <linux/mount.h>
19 #include <linux/pagevec.h>
20 #include <linux/uio.h>
21 #include <linux/uuid.h>
22 #include <linux/file.h>
23 #include <linux/nls.h>
24 #include <linux/sched/signal.h>
33 #include <trace/events/f2fs.h>
35 static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
37 struct inode *inode = file_inode(vmf->vma->vm_file);
40 down_read(&F2FS_I(inode)->i_mmap_sem);
41 ret = filemap_fault(vmf);
42 up_read(&F2FS_I(inode)->i_mmap_sem);
45 f2fs_update_iostat(F2FS_I_SB(inode), APP_MAPPED_READ_IO,
48 trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret);
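/*
 * Note: the fault path above takes i_mmap_sem shared so that page
 * faults exclude truncate/hole-punch (which take it for write), then
 * defers the real work to filemap_fault(); on success it appears to
 * account one block (F2FS_BLKSIZE) of APP_MAPPED_READ_IO.
 */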
53 static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
55 struct page *page = vmf->page;
56 struct inode *inode = file_inode(vmf->vma->vm_file);
57 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
58 struct dnode_of_data dn;
59 bool need_alloc = true;
62 if (unlikely(f2fs_cp_error(sbi))) {
67 if (!f2fs_is_checkpoint_ready(sbi)) {
72 #ifdef CONFIG_F2FS_FS_COMPRESSION
73 if (f2fs_compressed_file(inode)) {
74 int ret = f2fs_is_compressed_cluster(inode, page->index);
80 if (ret < F2FS_I(inode)->i_cluster_size) {
88 /* this must be done outside of any locked page */
90 f2fs_balance_fs(sbi, true);
92 sb_start_pagefault(inode->i_sb);
94 f2fs_bug_on(sbi, f2fs_has_inline_data(inode));
96 file_update_time(vmf->vma->vm_file);
97 down_read(&F2FS_I(inode)->i_mmap_sem);
99 if (unlikely(page->mapping != inode->i_mapping ||
100 page_offset(page) > i_size_read(inode) ||
101 !PageUptodate(page))) {
108 /* block allocation */
109 f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
110 set_new_dnode(&dn, inode, NULL, NULL, 0);
111 err = f2fs_get_block(&dn, page->index);
113 f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
116 #ifdef CONFIG_F2FS_FS_COMPRESSION
118 set_new_dnode(&dn, inode, NULL, NULL, 0);
119 err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
128 f2fs_wait_on_page_writeback(page, DATA, false, true);
130 /* wait for GCed page writeback via META_MAPPING */
131 f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
134 * check to see if the page is mapped already (no holes)
136 if (PageMappedToDisk(page))
139 /* page is wholly or partially inside EOF */
140 if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
141 i_size_read(inode)) {
144 offset = i_size_read(inode) & ~PAGE_MASK;
145 zero_user_segment(page, offset, PAGE_SIZE);
147 set_page_dirty(page);
148 if (!PageUptodate(page))
149 SetPageUptodate(page);
151 f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);
152 f2fs_update_time(sbi, REQ_TIME);
154 trace_f2fs_vm_page_mkwrite(page, DATA);
156 up_read(&F2FS_I(inode)->i_mmap_sem);
158 sb_end_pagefault(inode->i_sb);
160 return block_page_mkwrite_return(err);
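/*
 * Note: the mkwrite path above is, roughly: balance dirty data, take
 * i_mmap_sem shared, revalidate the page against mapping/i_size,
 * allocate a block for the faulting index, wait for any pending
 * writeback (including GCed blocks via META_MAPPING), zero the tail
 * beyond EOF, and dirty the page before returning through
 * block_page_mkwrite_return().
 */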
163 static const struct vm_operations_struct f2fs_file_vm_ops = {
164 .fault = f2fs_filemap_fault,
165 .map_pages = filemap_map_pages,
166 .page_mkwrite = f2fs_vm_page_mkwrite,
169 static int get_parent_ino(struct inode *inode, nid_t *pino)
171 struct dentry *dentry;
174 * Make sure to get the non-deleted alias. The alias associated with
175 * the open file descriptor being fsync()'ed may be deleted already.
177 dentry = d_find_alias(inode);
181 *pino = parent_ino(dentry);
186 static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
188 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
189 enum cp_reason_type cp_reason = CP_NO_NEEDED;
191 if (!S_ISREG(inode->i_mode))
192 cp_reason = CP_NON_REGULAR;
193 else if (f2fs_compressed_file(inode))
194 cp_reason = CP_COMPRESSED;
195 else if (inode->i_nlink != 1)
196 cp_reason = CP_HARDLINK;
197 else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
198 cp_reason = CP_SB_NEED_CP;
199 else if (file_wrong_pino(inode))
200 cp_reason = CP_WRONG_PINO;
201 else if (!f2fs_space_for_roll_forward(sbi))
202 cp_reason = CP_NO_SPC_ROLL;
203 else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
204 cp_reason = CP_NODE_NEED_CP;
205 else if (test_opt(sbi, FASTBOOT))
206 cp_reason = CP_FASTBOOT_MODE;
207 else if (F2FS_OPTION(sbi).active_logs == 2)
208 cp_reason = CP_SPEC_LOG_NUM;
209 else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
210 f2fs_need_dentry_mark(sbi, inode->i_ino) &&
211 f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
213 cp_reason = CP_RECOVER_DIR;
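/*
 * Note: the ladder above is evaluated first-match-wins; any reason
 * other than CP_NO_NEEDED makes fsync fall back to a full checkpoint
 * instead of the cheaper node-chain (roll-forward) path.
 */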
218 static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
220 struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
222 /* but we still need to detect any pending inode updates */
223 if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino))
229 static void try_to_fix_pino(struct inode *inode)
231 struct f2fs_inode_info *fi = F2FS_I(inode);
234 down_write(&fi->i_sem);
235 if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
236 get_parent_ino(inode, &pino)) {
237 f2fs_i_pino_write(inode, pino);
238 file_got_pino(inode);
240 up_write(&fi->i_sem);
243 static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
244 int datasync, bool atomic)
246 struct inode *inode = file->f_mapping->host;
247 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
248 nid_t ino = inode->i_ino;
250 enum cp_reason_type cp_reason = 0;
251 struct writeback_control wbc = {
252 .sync_mode = WB_SYNC_ALL,
253 .nr_to_write = LONG_MAX,
256 unsigned int seq_id = 0;
258 if (unlikely(f2fs_readonly(inode->i_sb) ||
259 is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
262 trace_f2fs_sync_file_enter(inode);
264 if (S_ISDIR(inode->i_mode))
267 /* if fdatasync is triggered, let's do in-place-update */
268 if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
269 set_inode_flag(inode, FI_NEED_IPU);
270 ret = file_write_and_wait_range(file, start, end);
271 clear_inode_flag(inode, FI_NEED_IPU);
274 trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
278 /* if the inode is dirty, write it out so it can always be recovered */
279 if (!f2fs_skip_inode_update(inode, datasync)) {
280 f2fs_write_inode(inode, NULL);
285 * if there is no written data, don't waste time writing recovery info.
287 if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
288 !f2fs_exist_written_data(sbi, ino, APPEND_INO)) {
290 /* it may call write_inode just prior to fsync */
291 if (need_inode_page_update(sbi, ino))
294 if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
295 f2fs_exist_written_data(sbi, ino, UPDATE_INO))
301 * Both fdatasync() and fsync() can be recovered from sudden power-off.
304 down_read(&F2FS_I(inode)->i_sem);
305 cp_reason = need_do_checkpoint(inode);
306 up_read(&F2FS_I(inode)->i_sem);
309 /* all the dirty node pages should be flushed for power-off recovery (POR) */
310 ret = f2fs_sync_fs(inode->i_sb, 1);
313 * We've secured consistency through sync_fs. The following pino
314 * will be used only for fsynced inodes after the checkpoint.
316 try_to_fix_pino(inode);
317 clear_inode_flag(inode, FI_APPEND_WRITE);
318 clear_inode_flag(inode, FI_UPDATE_WRITE);
322 atomic_inc(&sbi->wb_sync_req[NODE]);
323 ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
324 atomic_dec(&sbi->wb_sync_req[NODE]);
328 /* if cp_error was enabled, we should avoid an infinite loop */
329 if (unlikely(f2fs_cp_error(sbi))) {
334 if (f2fs_need_inode_block_update(sbi, ino)) {
335 f2fs_mark_inode_dirty_sync(inode, true);
336 f2fs_write_inode(inode, NULL);
341 * If it's atomic_write, it's just fine to keep write ordering. So
342 * here we don't need to wait for node write completion, since we use
343 * a node chain which serializes node blocks. If one of the node writes
344 * gets reordered, we simply see a broken chain, which stops roll-forward
345 * recovery. It means we'll recover all of the node blocks or none.
349 ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
354 /* once recovery info is written, we don't need to track this */
355 f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
356 clear_inode_flag(inode, FI_APPEND_WRITE);
358 if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
359 ret = f2fs_issue_flush(sbi, inode->i_ino);
361 f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
362 clear_inode_flag(inode, FI_UPDATE_WRITE);
363 f2fs_remove_ino_entry(sbi, ino, FLUSH_INO);
365 f2fs_update_time(sbi, REQ_TIME);
367 trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
368 f2fs_trace_ios(NULL, 1);
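/*
 * Note: in short, f2fs_do_sync_file() writes back the data pages, then
 * either (a) issues a full checkpoint via f2fs_sync_fs() when
 * need_do_checkpoint() found a reason, or (b) flushes just this inode's
 * node pages (f2fs_fsync_node_pages() plus the seq_id wait) and records
 * APPEND_INO/UPDATE_INO entries so roll-forward recovery can replay the
 * file after a crash, finishing with an optional device flush.
 */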
372 int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
374 if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
376 return f2fs_do_sync_file(file, start, end, datasync, false);
379 static pgoff_t __get_first_dirty_index(struct address_space *mapping,
380 pgoff_t pgofs, int whence)
385 if (whence != SEEK_DATA)
388 /* find first dirty page index */
389 nr_pages = find_get_pages_tag(mapping, &pgofs, PAGECACHE_TAG_DIRTY,
398 static bool __found_offset(struct f2fs_sb_info *sbi, block_t blkaddr,
399 pgoff_t dirty, pgoff_t pgofs, int whence)
403 if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
404 __is_valid_data_blkaddr(blkaddr))
408 if (blkaddr == NULL_ADDR)
415 static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
417 struct inode *inode = file->f_mapping->host;
418 loff_t maxbytes = inode->i_sb->s_maxbytes;
419 struct dnode_of_data dn;
420 pgoff_t pgofs, end_offset, dirty;
421 loff_t data_ofs = offset;
427 isize = i_size_read(inode);
431 /* handle inline data case */
432 if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
433 if (whence == SEEK_HOLE)
438 pgofs = (pgoff_t)(offset >> PAGE_SHIFT);
440 dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);
442 for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
443 set_new_dnode(&dn, inode, NULL, NULL, 0);
444 err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
445 if (err && err != -ENOENT) {
447 } else if (err == -ENOENT) {
448 /* direct node does not exist */
449 if (whence == SEEK_DATA) {
450 pgofs = f2fs_get_next_page_offset(&dn, pgofs);
457 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
459 /* find data/hole in dnode block */
460 for (; dn.ofs_in_node < end_offset;
461 dn.ofs_in_node++, pgofs++,
462 data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
465 blkaddr = f2fs_data_blkaddr(&dn);
467 if (__is_valid_data_blkaddr(blkaddr) &&
468 !f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
469 blkaddr, DATA_GENERIC_ENHANCE)) {
474 if (__found_offset(F2FS_I_SB(inode), blkaddr, dirty,
483 if (whence == SEEK_DATA)
486 if (whence == SEEK_HOLE && data_ofs > isize)
489 return vfs_setpos(file, data_ofs, maxbytes);
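/*
 * Note: the SEEK_DATA/SEEK_HOLE walk above visits the inode's direct
 * nodes via f2fs_get_dnode_of_data(LOOKUP_NODE); a missing dnode is a
 * hole spanning up to f2fs_get_next_page_offset() blocks, and a
 * not-yet-mapped page (blkaddr == NEW_ADDR) only counts as data when
 * the page cache is dirty at that exact index.
 */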
495 static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
497 struct inode *inode = file->f_mapping->host;
498 loff_t maxbytes = inode->i_sb->s_maxbytes;
504 return generic_file_llseek_size(file, offset, whence,
505 maxbytes, i_size_read(inode));
510 return f2fs_seek_block(file, offset, whence);
516 static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
518 struct inode *inode = file_inode(file);
521 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
524 if (!f2fs_is_compress_backend_ready(inode))
527 /* we don't need to use inline_data strictly */
528 err = f2fs_convert_inline_inode(inode);
533 vma->vm_ops = &f2fs_file_vm_ops;
534 set_inode_flag(inode, FI_MMAP_FILE);
538 static int f2fs_file_open(struct inode *inode, struct file *filp)
540 int err = fscrypt_file_open(inode, filp);
545 if (!f2fs_is_compress_backend_ready(inode))
548 err = fsverity_file_open(inode, filp);
552 filp->f_mode |= FMODE_NOWAIT;
554 return dquot_file_open(inode, filp);
557 void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
559 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
560 struct f2fs_node *raw_node;
561 int nr_free = 0, ofs = dn->ofs_in_node, len = count;
564 bool compressed_cluster = false;
565 int cluster_index = 0, valid_blocks = 0;
566 int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
567 bool released = !F2FS_I(dn->inode)->i_compr_blocks;
569 if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
570 base = get_extra_isize(dn->inode);
572 raw_node = F2FS_NODE(dn->node_page);
573 addr = blkaddr_in_node(raw_node) + base + ofs;
575 /* Assumption: truncation starts at a cluster boundary */
576 for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
577 block_t blkaddr = le32_to_cpu(*addr);
579 if (f2fs_compressed_file(dn->inode) &&
580 !(cluster_index & (cluster_size - 1))) {
581 if (compressed_cluster)
582 f2fs_i_compr_blocks_update(dn->inode,
583 valid_blocks, false);
584 compressed_cluster = (blkaddr == COMPRESS_ADDR);
588 if (blkaddr == NULL_ADDR)
591 dn->data_blkaddr = NULL_ADDR;
592 f2fs_set_data_blkaddr(dn);
594 if (__is_valid_data_blkaddr(blkaddr)) {
595 if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
596 DATA_GENERIC_ENHANCE))
598 if (compressed_cluster)
602 if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
603 clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
605 f2fs_invalidate_blocks(sbi, blkaddr);
607 if (!released || blkaddr != COMPRESS_ADDR)
611 if (compressed_cluster)
612 f2fs_i_compr_blocks_update(dn->inode, valid_blocks, false);
617 * once we invalidate a valid blkaddr in the range [ofs, ofs + count],
618 * we will invalidate all blkaddrs in the whole range.
620 fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
622 f2fs_update_extent_cache_range(dn, fofs, 0, len);
623 dec_valid_block_count(sbi, dn->inode, nr_free);
625 dn->ofs_in_node = ofs;
627 f2fs_update_time(sbi, REQ_TIME);
628 trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
629 dn->ofs_in_node, nr_free);
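/*
 * Note: each valid blkaddr in [ofs, ofs + count) is invalidated
 * individually above; for compressed inodes the loop additionally
 * counts the valid blocks per cluster so i_compr_blocks stays
 * consistent, and the extent cache is trimmed once for the whole range
 * at the end.
 */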
632 void f2fs_truncate_data_blocks(struct dnode_of_data *dn)
634 f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
637 static int truncate_partial_data_page(struct inode *inode, u64 from,
640 loff_t offset = from & (PAGE_SIZE - 1);
641 pgoff_t index = from >> PAGE_SHIFT;
642 struct address_space *mapping = inode->i_mapping;
645 if (!offset && !cache_only)
649 page = find_lock_page(mapping, index);
650 if (page && PageUptodate(page))
652 f2fs_put_page(page, 1);
656 page = f2fs_get_lock_data_page(inode, index, true);
658 return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
660 f2fs_wait_on_page_writeback(page, DATA, true, true);
661 zero_user(page, offset, PAGE_SIZE - offset);
663 /* An encrypted inode should have a key and truncate the last page. */
664 f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));
666 set_page_dirty(page);
667 f2fs_put_page(page, 1);
671 int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock)
673 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
674 struct dnode_of_data dn;
676 int count = 0, err = 0;
678 bool truncate_page = false;
680 trace_f2fs_truncate_blocks_enter(inode, from);
682 free_from = (pgoff_t)F2FS_BLK_ALIGN(from);
684 if (free_from >= sbi->max_file_blocks)
690 ipage = f2fs_get_node_page(sbi, inode->i_ino);
692 err = PTR_ERR(ipage);
696 if (f2fs_has_inline_data(inode)) {
697 f2fs_truncate_inline_inode(inode, ipage, from);
698 f2fs_put_page(ipage, 1);
699 truncate_page = true;
703 set_new_dnode(&dn, inode, ipage, NULL, 0);
704 err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
711 count = ADDRS_PER_PAGE(dn.node_page, inode);
713 count -= dn.ofs_in_node;
714 f2fs_bug_on(sbi, count < 0);
716 if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
717 f2fs_truncate_data_blocks_range(&dn, count);
723 err = f2fs_truncate_inode_blocks(inode, free_from);
728 /* lastly zero out the first data page */
730 err = truncate_partial_data_page(inode, from, truncate_page);
732 trace_f2fs_truncate_blocks_exit(inode, err);
736 int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
738 u64 free_from = from;
741 #ifdef CONFIG_F2FS_FS_COMPRESSION
743 * for compressed files, only cluster-size-aligned
744 * truncation is supported.
746 if (f2fs_compressed_file(inode))
747 free_from = round_up(from,
748 F2FS_I(inode)->i_cluster_size << PAGE_SHIFT);
751 err = f2fs_do_truncate_blocks(inode, free_from, lock);
755 #ifdef CONFIG_F2FS_FS_COMPRESSION
756 if (from != free_from)
757 err = f2fs_truncate_partial_cluster(inode, from, lock);
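/*
 * Example (illustrative): with a 16-block cluster and 4KB blocks,
 * truncating a compressed file to 70KB first rounds free_from up to
 * 128KB (the next cluster boundary) for f2fs_do_truncate_blocks(),
 * and the remaining [70KB, 128KB) tail of the partial cluster is then
 * handled by f2fs_truncate_partial_cluster().
 */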
763 int f2fs_truncate(struct inode *inode)
767 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
770 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
771 S_ISLNK(inode->i_mode)))
774 trace_f2fs_truncate(inode);
776 if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
777 f2fs_show_injection_info(F2FS_I_SB(inode), FAULT_TRUNCATE);
781 /* we should check inline_data size */
782 if (!f2fs_may_inline_data(inode)) {
783 err = f2fs_convert_inline_inode(inode);
788 err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
792 inode->i_mtime = inode->i_ctime = current_time(inode);
793 f2fs_mark_inode_dirty_sync(inode, false);
797 int f2fs_getattr(const struct path *path, struct kstat *stat,
798 u32 request_mask, unsigned int query_flags)
800 struct inode *inode = d_inode(path->dentry);
801 struct f2fs_inode_info *fi = F2FS_I(inode);
802 struct f2fs_inode *ri;
805 if (f2fs_has_extra_attr(inode) &&
806 f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
807 F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
808 stat->result_mask |= STATX_BTIME;
809 stat->btime.tv_sec = fi->i_crtime.tv_sec;
810 stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
814 if (flags & F2FS_COMPR_FL)
815 stat->attributes |= STATX_ATTR_COMPRESSED;
816 if (flags & F2FS_APPEND_FL)
817 stat->attributes |= STATX_ATTR_APPEND;
818 if (IS_ENCRYPTED(inode))
819 stat->attributes |= STATX_ATTR_ENCRYPTED;
820 if (flags & F2FS_IMMUTABLE_FL)
821 stat->attributes |= STATX_ATTR_IMMUTABLE;
822 if (flags & F2FS_NODUMP_FL)
823 stat->attributes |= STATX_ATTR_NODUMP;
824 if (IS_VERITY(inode))
825 stat->attributes |= STATX_ATTR_VERITY;
827 stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
829 STATX_ATTR_ENCRYPTED |
830 STATX_ATTR_IMMUTABLE |
834 generic_fillattr(inode, stat);
836 /* we need to show initial sectors used for inline_data/dentries */
837 if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
838 f2fs_has_inline_dentry(inode))
839 stat->blocks += (stat->size + 511) >> 9;
844 #ifdef CONFIG_F2FS_FS_POSIX_ACL
845 static void __setattr_copy(struct inode *inode, const struct iattr *attr)
847 unsigned int ia_valid = attr->ia_valid;
849 if (ia_valid & ATTR_UID)
850 inode->i_uid = attr->ia_uid;
851 if (ia_valid & ATTR_GID)
852 inode->i_gid = attr->ia_gid;
853 if (ia_valid & ATTR_ATIME)
854 inode->i_atime = attr->ia_atime;
855 if (ia_valid & ATTR_MTIME)
856 inode->i_mtime = attr->ia_mtime;
857 if (ia_valid & ATTR_CTIME)
858 inode->i_ctime = attr->ia_ctime;
859 if (ia_valid & ATTR_MODE) {
860 umode_t mode = attr->ia_mode;
862 if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
864 set_acl_inode(inode, mode);
868 #define __setattr_copy setattr_copy
871 int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
873 struct inode *inode = d_inode(dentry);
876 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
879 if ((attr->ia_valid & ATTR_SIZE) &&
880 !f2fs_is_compress_backend_ready(inode))
883 err = setattr_prepare(dentry, attr);
887 err = fscrypt_prepare_setattr(dentry, attr);
891 err = fsverity_prepare_setattr(dentry, attr);
895 if (is_quota_modification(inode, attr)) {
896 err = dquot_initialize(inode);
900 if ((attr->ia_valid & ATTR_UID &&
901 !uid_eq(attr->ia_uid, inode->i_uid)) ||
902 (attr->ia_valid & ATTR_GID &&
903 !gid_eq(attr->ia_gid, inode->i_gid))) {
904 f2fs_lock_op(F2FS_I_SB(inode));
905 err = dquot_transfer(inode, attr);
907 set_sbi_flag(F2FS_I_SB(inode),
908 SBI_QUOTA_NEED_REPAIR);
909 f2fs_unlock_op(F2FS_I_SB(inode));
913 * update uid/gid under lock_op(), so that dquot and inode can
914 * be updated atomically.
916 if (attr->ia_valid & ATTR_UID)
917 inode->i_uid = attr->ia_uid;
918 if (attr->ia_valid & ATTR_GID)
919 inode->i_gid = attr->ia_gid;
920 f2fs_mark_inode_dirty_sync(inode, true);
921 f2fs_unlock_op(F2FS_I_SB(inode));
924 if (attr->ia_valid & ATTR_SIZE) {
925 loff_t old_size = i_size_read(inode);
927 if (attr->ia_size > MAX_INLINE_DATA(inode)) {
929 * convert the inline inode before i_size_write() so that an
930 * inode with the inline flag never exceeds the inline_data size.
932 err = f2fs_convert_inline_inode(inode);
937 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
938 down_write(&F2FS_I(inode)->i_mmap_sem);
940 truncate_setsize(inode, attr->ia_size);
942 if (attr->ia_size <= old_size)
943 err = f2fs_truncate(inode);
945 * do not trim all blocks after i_size if target size is
946 * larger than i_size.
948 up_write(&F2FS_I(inode)->i_mmap_sem);
949 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
953 spin_lock(&F2FS_I(inode)->i_size_lock);
954 inode->i_mtime = inode->i_ctime = current_time(inode);
955 F2FS_I(inode)->last_disk_size = i_size_read(inode);
956 spin_unlock(&F2FS_I(inode)->i_size_lock);
959 __setattr_copy(inode, attr);
961 if (attr->ia_valid & ATTR_MODE) {
962 err = posix_acl_chmod(inode, f2fs_get_inode_mode(inode));
963 if (err || is_inode_flag_set(inode, FI_ACL_MODE)) {
964 inode->i_mode = F2FS_I(inode)->i_acl_mode;
965 clear_inode_flag(inode, FI_ACL_MODE);
969 /* file size may have changed here */
970 f2fs_mark_inode_dirty_sync(inode, true);
972 /* inode change will produce dirty node pages flushed by checkpoint */
973 f2fs_balance_fs(F2FS_I_SB(inode), true);
978 const struct inode_operations f2fs_file_inode_operations = {
979 .getattr = f2fs_getattr,
980 .setattr = f2fs_setattr,
981 .get_acl = f2fs_get_acl,
982 .set_acl = f2fs_set_acl,
983 .listxattr = f2fs_listxattr,
984 .fiemap = f2fs_fiemap,
987 static int fill_zero(struct inode *inode, pgoff_t index,
988 loff_t start, loff_t len)
990 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
996 f2fs_balance_fs(sbi, true);
999 page = f2fs_get_new_data_page(inode, NULL, index, false);
1000 f2fs_unlock_op(sbi);
1003 return PTR_ERR(page);
1005 f2fs_wait_on_page_writeback(page, DATA, true, true);
1006 zero_user(page, start, len);
1007 set_page_dirty(page);
1008 f2fs_put_page(page, 1);
1012 int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
1016 while (pg_start < pg_end) {
1017 struct dnode_of_data dn;
1018 pgoff_t end_offset, count;
1020 set_new_dnode(&dn, inode, NULL, NULL, 0);
1021 err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
1023 if (err == -ENOENT) {
1024 pg_start = f2fs_get_next_page_offset(&dn,
1031 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1032 count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);
1034 f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);
1036 f2fs_truncate_data_blocks_range(&dn, count);
1037 f2fs_put_dnode(&dn);
1044 static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
1046 pgoff_t pg_start, pg_end;
1047 loff_t off_start, off_end;
1050 ret = f2fs_convert_inline_inode(inode);
1054 pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
1055 pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
1057 off_start = offset & (PAGE_SIZE - 1);
1058 off_end = (offset + len) & (PAGE_SIZE - 1);
1060 if (pg_start == pg_end) {
1061 ret = fill_zero(inode, pg_start, off_start,
1062 off_end - off_start);
1067 ret = fill_zero(inode, pg_start++, off_start,
1068 PAGE_SIZE - off_start);
1073 ret = fill_zero(inode, pg_end, 0, off_end);
1078 if (pg_start < pg_end) {
1079 struct address_space *mapping = inode->i_mapping;
1080 loff_t blk_start, blk_end;
1081 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1083 f2fs_balance_fs(sbi, true);
1085 blk_start = (loff_t)pg_start << PAGE_SHIFT;
1086 blk_end = (loff_t)pg_end << PAGE_SHIFT;
1088 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1089 down_write(&F2FS_I(inode)->i_mmap_sem);
1091 truncate_inode_pages_range(mapping, blk_start,
1095 ret = f2fs_truncate_hole(inode, pg_start, pg_end);
1096 f2fs_unlock_op(sbi);
1098 up_write(&F2FS_I(inode)->i_mmap_sem);
1099 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
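/*
 * Note: punch_hole() above zeroes the partial pages at each end with
 * fill_zero() and, for the whole blocks in between, drops the page
 * cache and frees the blocks via f2fs_truncate_hole() while holding
 * i_gc_rwsem/i_mmap_sem, so readers subsequently see zeroes.
 */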
1106 static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
1107 int *do_replace, pgoff_t off, pgoff_t len)
1109 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1110 struct dnode_of_data dn;
1114 set_new_dnode(&dn, inode, NULL, NULL, 0);
1115 ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
1116 if (ret && ret != -ENOENT) {
1118 } else if (ret == -ENOENT) {
1119 if (dn.max_level == 0)
1121 done = min((pgoff_t)ADDRS_PER_BLOCK(inode) -
1122 dn.ofs_in_node, len);
1128 done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
1129 dn.ofs_in_node, len);
1130 for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
1131 *blkaddr = f2fs_data_blkaddr(&dn);
1133 if (__is_valid_data_blkaddr(*blkaddr) &&
1134 !f2fs_is_valid_blkaddr(sbi, *blkaddr,
1135 DATA_GENERIC_ENHANCE)) {
1136 f2fs_put_dnode(&dn);
1137 return -EFSCORRUPTED;
1140 if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {
1142 if (f2fs_lfs_mode(sbi)) {
1143 f2fs_put_dnode(&dn);
1147 /* do not invalidate this block address */
1148 f2fs_update_data_blkaddr(&dn, NULL_ADDR);
1152 f2fs_put_dnode(&dn);
1161 static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
1162 int *do_replace, pgoff_t off, int len)
1164 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1165 struct dnode_of_data dn;
1168 for (i = 0; i < len; i++, do_replace++, blkaddr++) {
1169 if (*do_replace == 0)
1172 set_new_dnode(&dn, inode, NULL, NULL, 0);
1173 ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
1175 dec_valid_block_count(sbi, inode, 1);
1176 f2fs_invalidate_blocks(sbi, *blkaddr);
1178 f2fs_update_data_blkaddr(&dn, *blkaddr);
1180 f2fs_put_dnode(&dn);
1185 static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
1186 block_t *blkaddr, int *do_replace,
1187 pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
1189 struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
1194 if (blkaddr[i] == NULL_ADDR && !full) {
1199 if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
1200 struct dnode_of_data dn;
1201 struct node_info ni;
1205 set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
1206 ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
1210 ret = f2fs_get_node_info(sbi, dn.nid, &ni);
1212 f2fs_put_dnode(&dn);
1216 ilen = min((pgoff_t)
1217 ADDRS_PER_PAGE(dn.node_page, dst_inode) -
1218 dn.ofs_in_node, len - i);
1220 dn.data_blkaddr = f2fs_data_blkaddr(&dn);
1221 f2fs_truncate_data_blocks_range(&dn, 1);
1223 if (do_replace[i]) {
1224 f2fs_i_blocks_write(src_inode,
1226 f2fs_i_blocks_write(dst_inode,
1228 f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
1229 blkaddr[i], ni.version, true, false);
1235 new_size = (loff_t)(dst + i) << PAGE_SHIFT;
1236 if (dst_inode->i_size < new_size)
1237 f2fs_i_size_write(dst_inode, new_size);
1238 } while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));
1240 f2fs_put_dnode(&dn);
1242 struct page *psrc, *pdst;
1244 psrc = f2fs_get_lock_data_page(src_inode,
1247 return PTR_ERR(psrc);
1248 pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i,
1251 f2fs_put_page(psrc, 1);
1252 return PTR_ERR(pdst);
1254 f2fs_copy_page(psrc, pdst);
1255 set_page_dirty(pdst);
1256 f2fs_put_page(pdst, 1);
1257 f2fs_put_page(psrc, 1);
1259 ret = f2fs_truncate_hole(src_inode,
1260 src + i, src + i + 1);
1269 static int __exchange_data_block(struct inode *src_inode,
1270 struct inode *dst_inode, pgoff_t src, pgoff_t dst,
1271 pgoff_t len, bool full)
1273 block_t *src_blkaddr;
1279 olen = min((pgoff_t)4 * ADDRS_PER_BLOCK(src_inode), len);
1281 src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
1282 array_size(olen, sizeof(block_t)),
1287 do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
1288 array_size(olen, sizeof(int)),
1291 kvfree(src_blkaddr);
1295 ret = __read_out_blkaddrs(src_inode, src_blkaddr,
1296 do_replace, src, olen);
1300 ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
1301 do_replace, src, dst, olen, full);
1309 kvfree(src_blkaddr);
1315 __roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen);
1316 kvfree(src_blkaddr);
1321 static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
1323 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1324 pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1325 pgoff_t start = offset >> PAGE_SHIFT;
1326 pgoff_t end = (offset + len) >> PAGE_SHIFT;
1329 f2fs_balance_fs(sbi, true);
1331 /* avoid gc operation during block exchange */
1332 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1333 down_write(&F2FS_I(inode)->i_mmap_sem);
1336 f2fs_drop_extent_tree(inode);
1337 truncate_pagecache(inode, offset);
1338 ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
1339 f2fs_unlock_op(sbi);
1341 up_write(&F2FS_I(inode)->i_mmap_sem);
1342 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1346 static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
1351 if (offset + len >= i_size_read(inode))
1354 /* collapse range should be aligned to the f2fs block size. */
1355 if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
1358 ret = f2fs_convert_inline_inode(inode);
1362 /* write out all dirty pages from offset */
1363 ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1367 ret = f2fs_do_collapse(inode, offset, len);
1371 /* write out all moved pages, if possible */
1372 down_write(&F2FS_I(inode)->i_mmap_sem);
1373 filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1374 truncate_pagecache(inode, offset);
1376 new_size = i_size_read(inode) - len;
1377 ret = f2fs_truncate_blocks(inode, new_size, true);
1378 up_write(&F2FS_I(inode)->i_mmap_sem);
1380 f2fs_i_size_write(inode, new_size);
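/*
 * Note: collapse works by exchanging blocks downward (the
 * __exchange_data_block(..., end, start, nrpages - end, true) call
 * moves everything after the hole down to 'start'), then truncating
 * the now-duplicated tail and shrinking i_size by len.
 */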
1384 static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
1387 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1388 pgoff_t index = start;
1389 unsigned int ofs_in_node = dn->ofs_in_node;
1393 for (; index < end; index++, dn->ofs_in_node++) {
1394 if (f2fs_data_blkaddr(dn) == NULL_ADDR)
1398 dn->ofs_in_node = ofs_in_node;
1399 ret = f2fs_reserve_new_blocks(dn, count);
1403 dn->ofs_in_node = ofs_in_node;
1404 for (index = start; index < end; index++, dn->ofs_in_node++) {
1405 dn->data_blkaddr = f2fs_data_blkaddr(dn);
1407 * f2fs_reserve_new_blocks() will not guarantee entire block allocation.
1410 if (dn->data_blkaddr == NULL_ADDR) {
1414 if (dn->data_blkaddr != NEW_ADDR) {
1415 f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
1416 dn->data_blkaddr = NEW_ADDR;
1417 f2fs_set_data_blkaddr(dn);
1421 f2fs_update_extent_cache_range(dn, start, 0, index - start);
1426 static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
1429 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1430 struct address_space *mapping = inode->i_mapping;
1431 pgoff_t index, pg_start, pg_end;
1432 loff_t new_size = i_size_read(inode);
1433 loff_t off_start, off_end;
1436 ret = inode_newsize_ok(inode, (len + offset));
1440 ret = f2fs_convert_inline_inode(inode);
1444 ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
1448 pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
1449 pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
1451 off_start = offset & (PAGE_SIZE - 1);
1452 off_end = (offset + len) & (PAGE_SIZE - 1);
1454 if (pg_start == pg_end) {
1455 ret = fill_zero(inode, pg_start, off_start,
1456 off_end - off_start);
1460 new_size = max_t(loff_t, new_size, offset + len);
1463 ret = fill_zero(inode, pg_start++, off_start,
1464 PAGE_SIZE - off_start);
1468 new_size = max_t(loff_t, new_size,
1469 (loff_t)pg_start << PAGE_SHIFT);
1472 for (index = pg_start; index < pg_end;) {
1473 struct dnode_of_data dn;
1474 unsigned int end_offset;
1477 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1478 down_write(&F2FS_I(inode)->i_mmap_sem);
1480 truncate_pagecache_range(inode,
1481 (loff_t)index << PAGE_SHIFT,
1482 ((loff_t)pg_end << PAGE_SHIFT) - 1);
1486 set_new_dnode(&dn, inode, NULL, NULL, 0);
1487 ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
1489 f2fs_unlock_op(sbi);
1490 up_write(&F2FS_I(inode)->i_mmap_sem);
1491 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1495 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1496 end = min(pg_end, end_offset - dn.ofs_in_node + index);
1498 ret = f2fs_do_zero_range(&dn, index, end);
1499 f2fs_put_dnode(&dn);
1501 f2fs_unlock_op(sbi);
1502 up_write(&F2FS_I(inode)->i_mmap_sem);
1503 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1505 f2fs_balance_fs(sbi, dn.node_changed);
1511 new_size = max_t(loff_t, new_size,
1512 (loff_t)index << PAGE_SHIFT);
1516 ret = fill_zero(inode, pg_end, 0, off_end);
1520 new_size = max_t(loff_t, new_size, offset + len);
1525 if (new_size > i_size_read(inode)) {
1526 if (mode & FALLOC_FL_KEEP_SIZE)
1527 file_set_keep_isize(inode);
1529 f2fs_i_size_write(inode, new_size);
1534 static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
1536 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1537 pgoff_t nr, pg_start, pg_end, delta, idx;
1541 new_size = i_size_read(inode) + len;
1542 ret = inode_newsize_ok(inode, new_size);
1546 if (offset >= i_size_read(inode))
1549 /* insert range should be aligned to the f2fs block size. */
1550 if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
1553 ret = f2fs_convert_inline_inode(inode);
1557 f2fs_balance_fs(sbi, true);
1559 down_write(&F2FS_I(inode)->i_mmap_sem);
1560 ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
1561 up_write(&F2FS_I(inode)->i_mmap_sem);
1565 /* write out all dirty pages from offset */
1566 ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1570 pg_start = offset >> PAGE_SHIFT;
1571 pg_end = (offset + len) >> PAGE_SHIFT;
1572 delta = pg_end - pg_start;
1573 idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1575 /* avoid gc operation during block exchange */
1576 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1577 down_write(&F2FS_I(inode)->i_mmap_sem);
1578 truncate_pagecache(inode, offset);
1580 while (!ret && idx > pg_start) {
1581 nr = idx - pg_start;
1587 f2fs_drop_extent_tree(inode);
1589 ret = __exchange_data_block(inode, inode, idx,
1590 idx + delta, nr, false);
1591 f2fs_unlock_op(sbi);
1593 up_write(&F2FS_I(inode)->i_mmap_sem);
1594 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1596 /* write out all moved pages, if possible */
1597 down_write(&F2FS_I(inode)->i_mmap_sem);
1598 filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1599 truncate_pagecache(inode, offset);
1600 up_write(&F2FS_I(inode)->i_mmap_sem);
1603 f2fs_i_size_write(inode, new_size);
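/*
 * Note: unlike collapse, insert shifts blocks upward, walking idx
 * backward from the end of file toward pg_start in the while loop
 * above, so a source block is never overwritten before it has been
 * moved to its new position.
 */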
1607 static int expand_inode_data(struct inode *inode, loff_t offset,
1608 loff_t len, int mode)
1610 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1611 struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
1612 .m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
1613 .m_may_create = true };
1615 loff_t new_size = i_size_read(inode);
1619 err = inode_newsize_ok(inode, (len + offset));
1623 err = f2fs_convert_inline_inode(inode);
1627 f2fs_balance_fs(sbi, true);
1629 pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
1630 off_end = (offset + len) & (PAGE_SIZE - 1);
1632 map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT;
1633 map.m_len = pg_end - map.m_lblk;
1640 if (f2fs_is_pinned_file(inode)) {
1641 block_t len = (map.m_len >> sbi->log_blocks_per_seg) <<
1642 sbi->log_blocks_per_seg;
1645 if (map.m_len % sbi->blocks_per_seg)
1646 len += sbi->blocks_per_seg;
1648 map.m_len = sbi->blocks_per_seg;
1650 if (has_not_enough_free_secs(sbi, 0,
1651 GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
1652 down_write(&sbi->gc_lock);
1653 err = f2fs_gc(sbi, true, false, NULL_SEGNO);
1654 if (err && err != -ENODATA && err != -EAGAIN)
1658 down_write(&sbi->pin_sem);
1659 map.m_seg_type = CURSEG_COLD_DATA_PINNED;
1662 f2fs_allocate_new_segment(sbi, CURSEG_COLD_DATA);
1663 f2fs_unlock_op(sbi);
1665 err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
1666 up_write(&sbi->pin_sem);
1670 map.m_lblk += map.m_len;
1676 err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
1685 last_off = map.m_lblk + map.m_len - 1;
1687 /* update new size to the failed position */
1688 new_size = (last_off == pg_end) ? offset + len :
1689 (loff_t)(last_off + 1) << PAGE_SHIFT;
1691 new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
1694 if (new_size > i_size_read(inode)) {
1695 if (mode & FALLOC_FL_KEEP_SIZE)
1696 file_set_keep_isize(inode);
1698 f2fs_i_size_write(inode, new_size);
1704 static long f2fs_fallocate(struct file *file, int mode,
1705 loff_t offset, loff_t len)
1707 struct inode *inode = file_inode(file);
1710 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
1712 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
1714 if (!f2fs_is_compress_backend_ready(inode))
1717 /* f2fs only supports ->fallocate for regular files */
1718 if (!S_ISREG(inode->i_mode))
1721 if (IS_ENCRYPTED(inode) &&
1722 (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
1725 if (f2fs_compressed_file(inode) &&
1726 (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
1727 FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE)))
1730 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
1731 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
1732 FALLOC_FL_INSERT_RANGE))
1737 if (mode & FALLOC_FL_PUNCH_HOLE) {
1738 if (offset >= inode->i_size)
1741 ret = punch_hole(inode, offset, len);
1742 } else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
1743 ret = f2fs_collapse_range(inode, offset, len);
1744 } else if (mode & FALLOC_FL_ZERO_RANGE) {
1745 ret = f2fs_zero_range(inode, offset, len, mode);
1746 } else if (mode & FALLOC_FL_INSERT_RANGE) {
1747 ret = f2fs_insert_range(inode, offset, len);
1749 ret = expand_inode_data(inode, offset, len, mode);
1753 inode->i_mtime = inode->i_ctime = current_time(inode);
1754 f2fs_mark_inode_dirty_sync(inode, false);
1755 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
1759 inode_unlock(inode);
1761 trace_f2fs_fallocate(inode, mode, offset, len, ret);
1765 static int f2fs_release_file(struct inode *inode, struct file *filp)
1768 * f2fs_release_file() is called on every close. So we should not
1769 * drop any in-memory pages on a close issued by another process.
1771 if (!(filp->f_mode & FMODE_WRITE) ||
1772 atomic_read(&inode->i_writecount) != 1)
1775 /* any remaining atomic pages should be discarded */
1776 if (f2fs_is_atomic_file(inode))
1777 f2fs_drop_inmem_pages(inode);
1778 if (f2fs_is_volatile_file(inode)) {
1779 set_inode_flag(inode, FI_DROP_CACHE);
1780 filemap_fdatawrite(inode->i_mapping);
1781 clear_inode_flag(inode, FI_DROP_CACHE);
1782 clear_inode_flag(inode, FI_VOLATILE_FILE);
1783 stat_dec_volatile_write(inode);
1788 static int f2fs_file_flush(struct file *file, fl_owner_t id)
1790 struct inode *inode = file_inode(file);
1793 * If the process doing a transaction crashes, we should roll it
1794 * back. Otherwise, other readers/writers can see a corrupted database
1795 * until all the writers close their files. Since this must be done
1796 * before dropping the file lock, it needs to happen in ->flush.
1798 if (f2fs_is_atomic_file(inode) &&
1799 F2FS_I(inode)->inmem_task == current)
1800 f2fs_drop_inmem_pages(inode);
1804 static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
1806 struct f2fs_inode_info *fi = F2FS_I(inode);
1807 u32 masked_flags = fi->i_flags & mask;
1809 f2fs_bug_on(F2FS_I_SB(inode), (iflags & ~mask));
1811 /* Is it a quota file? Do not allow the user to mess with it */
1812 if (IS_NOQUOTA(inode))
1815 if ((iflags ^ masked_flags) & F2FS_CASEFOLD_FL) {
1816 if (!f2fs_sb_has_casefold(F2FS_I_SB(inode)))
1818 if (!f2fs_empty_dir(inode))
1822 if (iflags & (F2FS_COMPR_FL | F2FS_NOCOMP_FL)) {
1823 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
1825 if ((iflags & F2FS_COMPR_FL) && (iflags & F2FS_NOCOMP_FL))
1829 if ((iflags ^ masked_flags) & F2FS_COMPR_FL) {
1830 if (masked_flags & F2FS_COMPR_FL) {
1831 if (f2fs_disable_compressed_file(inode))
1834 if (iflags & F2FS_NOCOMP_FL)
1836 if (iflags & F2FS_COMPR_FL) {
1837 if (!f2fs_may_compress(inode))
1840 set_compress_context(inode);
1843 if ((iflags ^ masked_flags) & F2FS_NOCOMP_FL) {
1844 if (masked_flags & F2FS_COMPR_FL)
1848 fi->i_flags = iflags | (fi->i_flags & ~mask);
1849 f2fs_bug_on(F2FS_I_SB(inode), (fi->i_flags & F2FS_COMPR_FL) &&
1850 (fi->i_flags & F2FS_NOCOMP_FL));
1852 if (fi->i_flags & F2FS_PROJINHERIT_FL)
1853 set_inode_flag(inode, FI_PROJ_INHERIT);
1855 clear_inode_flag(inode, FI_PROJ_INHERIT);
1857 inode->i_ctime = current_time(inode);
1858 f2fs_set_inode_flags(inode);
1859 f2fs_mark_inode_dirty_sync(inode, true);
1863 /* FS_IOC_GETFLAGS and FS_IOC_SETFLAGS support */
1866 * To make a new on-disk f2fs i_flag gettable via FS_IOC_GETFLAGS, add an entry
1867 * for it to f2fs_fsflags_map[], and add its FS_*_FL equivalent to
1868 * F2FS_GETTABLE_FS_FL. To also make it settable via FS_IOC_SETFLAGS, also add
1869 * its FS_*_FL equivalent to F2FS_SETTABLE_FS_FL.
1872 static const struct {
1875 } f2fs_fsflags_map[] = {
1876 { F2FS_COMPR_FL, FS_COMPR_FL },
1877 { F2FS_SYNC_FL, FS_SYNC_FL },
1878 { F2FS_IMMUTABLE_FL, FS_IMMUTABLE_FL },
1879 { F2FS_APPEND_FL, FS_APPEND_FL },
1880 { F2FS_NODUMP_FL, FS_NODUMP_FL },
1881 { F2FS_NOATIME_FL, FS_NOATIME_FL },
1882 { F2FS_NOCOMP_FL, FS_NOCOMP_FL },
1883 { F2FS_INDEX_FL, FS_INDEX_FL },
1884 { F2FS_DIRSYNC_FL, FS_DIRSYNC_FL },
1885 { F2FS_PROJINHERIT_FL, FS_PROJINHERIT_FL },
1886 { F2FS_CASEFOLD_FL, FS_CASEFOLD_FL },
1889 #define F2FS_GETTABLE_FS_FL ( \
1899 FS_PROJINHERIT_FL | \
1901 FS_INLINE_DATA_FL | \
1906 #define F2FS_SETTABLE_FS_FL ( \
1915 FS_PROJINHERIT_FL | \
1918 /* Convert f2fs on-disk i_flags to FS_IOC_{GET,SET}FLAGS flags */
1919 static inline u32 f2fs_iflags_to_fsflags(u32 iflags)
1924 for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
1925 if (iflags & f2fs_fsflags_map[i].iflag)
1926 fsflags |= f2fs_fsflags_map[i].fsflag;
1931 /* Convert FS_IOC_{GET,SET}FLAGS flags to f2fs on-disk i_flags */
1932 static inline u32 f2fs_fsflags_to_iflags(u32 fsflags)
1937 for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
1938 if (fsflags & f2fs_fsflags_map[i].fsflag)
1939 iflags |= f2fs_fsflags_map[i].iflag;
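/*
 * Example (userspace, illustrative only): these helpers back the
 * standard flags ioctls, so something like the following works on an
 * f2fs file (FS_IOC_{GET,SET}FLAGS and FS_*_FL come from <linux/fs.h>):
 *
 *	int attr;
 *	ioctl(fd, FS_IOC_GETFLAGS, &attr);
 *	attr |= FS_NODUMP_FL;
 *	ioctl(fd, FS_IOC_SETFLAGS, &attr);
 */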
1944 static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
1946 struct inode *inode = file_inode(filp);
1947 struct f2fs_inode_info *fi = F2FS_I(inode);
1948 u32 fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
1950 if (IS_ENCRYPTED(inode))
1951 fsflags |= FS_ENCRYPT_FL;
1952 if (IS_VERITY(inode))
1953 fsflags |= FS_VERITY_FL;
1954 if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
1955 fsflags |= FS_INLINE_DATA_FL;
1956 if (is_inode_flag_set(inode, FI_PIN_FILE))
1957 fsflags |= FS_NOCOW_FL;
1959 fsflags &= F2FS_GETTABLE_FS_FL;
1961 return put_user(fsflags, (int __user *)arg);
1964 static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
1966 struct inode *inode = file_inode(filp);
1967 struct f2fs_inode_info *fi = F2FS_I(inode);
1968 u32 fsflags, old_fsflags;
1972 if (!inode_owner_or_capable(inode))
1975 if (get_user(fsflags, (int __user *)arg))
1978 if (fsflags & ~F2FS_GETTABLE_FS_FL)
1980 fsflags &= F2FS_SETTABLE_FS_FL;
1982 iflags = f2fs_fsflags_to_iflags(fsflags);
1983 if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
1986 ret = mnt_want_write_file(filp);
1992 old_fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
1993 ret = vfs_ioc_setflags_prepare(inode, old_fsflags, fsflags);
1997 ret = f2fs_setflags_common(inode, iflags,
1998 f2fs_fsflags_to_iflags(F2FS_SETTABLE_FS_FL));
2000 inode_unlock(inode);
2001 mnt_drop_write_file(filp);
2005 static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
2007 struct inode *inode = file_inode(filp);
2009 return put_user(inode->i_generation, (int __user *)arg);
2012 static int f2fs_ioc_start_atomic_write(struct file *filp)
2014 struct inode *inode = file_inode(filp);
2015 struct f2fs_inode_info *fi = F2FS_I(inode);
2016 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2019 if (!inode_owner_or_capable(inode))
2022 if (!S_ISREG(inode->i_mode))
2025 if (filp->f_flags & O_DIRECT)
2028 ret = mnt_want_write_file(filp);
2034 f2fs_disable_compressed_file(inode);
2036 if (f2fs_is_atomic_file(inode)) {
2037 if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))
2042 ret = f2fs_convert_inline_inode(inode);
2046 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2049 * Should wait for end_io so that F2FS_WB_CP_DATA is counted
2050 * correctly by f2fs_is_atomic_file().
2052 if (get_dirty_pages(inode))
2053 f2fs_warn(F2FS_I_SB(inode), "Unexpected flush for atomic writes: ino=%lu, npages=%u",
2054 inode->i_ino, get_dirty_pages(inode));
2055 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
2057 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2061 spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
2062 if (list_empty(&fi->inmem_ilist))
2063 list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]);
2064 sbi->atomic_files++;
2065 spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
2067 /* add the inode to inmem_list first, then set atomic_file */
2068 set_inode_flag(inode, FI_ATOMIC_FILE);
2069 clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2070 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2072 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2073 F2FS_I(inode)->inmem_task = current;
2074 stat_update_max_atomic_write(inode);
2076 inode_unlock(inode);
2077 mnt_drop_write_file(filp);
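/*
 * Note: F2FS_IOC_START_ATOMIC_WRITE pairs with
 * F2FS_IOC_COMMIT_ATOMIC_WRITE below; writes issued between the two
 * are staged as in-memory pages and reach disk atomically at commit
 * (or are dropped on abort), which is what database workloads such as
 * SQLite rely on.
 */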
2081 static int f2fs_ioc_commit_atomic_write(struct file *filp)
2083 struct inode *inode = file_inode(filp);
2086 if (!inode_owner_or_capable(inode))
2089 ret = mnt_want_write_file(filp);
2093 f2fs_balance_fs(F2FS_I_SB(inode), true);
2097 if (f2fs_is_volatile_file(inode)) {
2102 if (f2fs_is_atomic_file(inode)) {
2103 ret = f2fs_commit_inmem_pages(inode);
2107 ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
2109 f2fs_drop_inmem_pages(inode);
2111 ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
2114 if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
2115 clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2118 inode_unlock(inode);
2119 mnt_drop_write_file(filp);
2123 static int f2fs_ioc_start_volatile_write(struct file *filp)
2125 struct inode *inode = file_inode(filp);
2128 if (!inode_owner_or_capable(inode))
2131 if (!S_ISREG(inode->i_mode))
2134 ret = mnt_want_write_file(filp);
2140 if (f2fs_is_volatile_file(inode))
2143 ret = f2fs_convert_inline_inode(inode);
2147 stat_inc_volatile_write(inode);
2148 stat_update_max_volatile_write(inode);
2150 set_inode_flag(inode, FI_VOLATILE_FILE);
2151 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2153 inode_unlock(inode);
2154 mnt_drop_write_file(filp);
2158 static int f2fs_ioc_release_volatile_write(struct file *filp)
2160 struct inode *inode = file_inode(filp);
2163 if (!inode_owner_or_capable(inode))
2166 ret = mnt_want_write_file(filp);
2172 if (!f2fs_is_volatile_file(inode))
2175 if (!f2fs_is_first_block_written(inode)) {
2176 ret = truncate_partial_data_page(inode, 0, true);
2180 ret = punch_hole(inode, 0, F2FS_BLKSIZE);
2182 inode_unlock(inode);
2183 mnt_drop_write_file(filp);
2187 static int f2fs_ioc_abort_volatile_write(struct file *filp)
2189 struct inode *inode = file_inode(filp);
2192 if (!inode_owner_or_capable(inode))
2195 ret = mnt_want_write_file(filp);
2201 if (f2fs_is_atomic_file(inode))
2202 f2fs_drop_inmem_pages(inode);
2203 if (f2fs_is_volatile_file(inode)) {
2204 clear_inode_flag(inode, FI_VOLATILE_FILE);
2205 stat_dec_volatile_write(inode);
2206 ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
2209 clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2211 inode_unlock(inode);
2213 mnt_drop_write_file(filp);
2214 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2218 static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
2220 struct inode *inode = file_inode(filp);
2221 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2222 struct super_block *sb = sbi->sb;
2226 if (!capable(CAP_SYS_ADMIN))
2229 if (get_user(in, (__u32 __user *)arg))
2232 if (in != F2FS_GOING_DOWN_FULLSYNC) {
2233 ret = mnt_want_write_file(filp);
2235 if (ret == -EROFS) {
2237 f2fs_stop_checkpoint(sbi, false);
2238 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2239 trace_f2fs_shutdown(sbi, in, ret);
2246 case F2FS_GOING_DOWN_FULLSYNC:
2247 sb = freeze_bdev(sb->s_bdev);
2253 f2fs_stop_checkpoint(sbi, false);
2254 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2255 thaw_bdev(sb->s_bdev, sb);
2258 case F2FS_GOING_DOWN_METASYNC:
2259 /* do checkpoint only */
2260 ret = f2fs_sync_fs(sb, 1);
2263 f2fs_stop_checkpoint(sbi, false);
2264 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2266 case F2FS_GOING_DOWN_NOSYNC:
2267 f2fs_stop_checkpoint(sbi, false);
2268 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2270 case F2FS_GOING_DOWN_METAFLUSH:
2271 f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
2272 f2fs_stop_checkpoint(sbi, false);
2273 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2275 case F2FS_GOING_DOWN_NEED_FSCK:
2276 set_sbi_flag(sbi, SBI_NEED_FSCK);
2277 set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
2278 set_sbi_flag(sbi, SBI_IS_DIRTY);
2279 /* do checkpoint only */
2280 ret = f2fs_sync_fs(sb, 1);
2287 f2fs_stop_gc_thread(sbi);
2288 f2fs_stop_discard_thread(sbi);
2290 f2fs_drop_discard_cmd(sbi);
2291 clear_opt(sbi, DISCARD);
2293 f2fs_update_time(sbi, REQ_TIME);
2295 if (in != F2FS_GOING_DOWN_FULLSYNC)
2296 mnt_drop_write_file(filp);
2298 trace_f2fs_shutdown(sbi, in, ret);
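/*
 * Example (userspace, illustrative only): requesting a
 * checkpoint-then-stop shutdown. F2FS_IOC_SHUTDOWN and the
 * F2FS_GOING_DOWN_* values are part of the f2fs uapi:
 *
 *	__u32 flags = F2FS_GOING_DOWN_METASYNC;
 *	ioctl(fd, F2FS_IOC_SHUTDOWN, &flags);
 */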
2303 static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
2305 struct inode *inode = file_inode(filp);
2306 struct super_block *sb = inode->i_sb;
2307 struct request_queue *q = bdev_get_queue(sb->s_bdev);
2308 struct fstrim_range range;
2311 if (!capable(CAP_SYS_ADMIN))
2314 if (!f2fs_hw_support_discard(F2FS_SB(sb)))
2317 if (copy_from_user(&range, (struct fstrim_range __user *)arg,
2321 ret = mnt_want_write_file(filp);
2325 range.minlen = max((unsigned int)range.minlen,
2326 q->limits.discard_granularity);
2327 ret = f2fs_trim_fs(F2FS_SB(sb), &range);
2328 mnt_drop_write_file(filp);
2332 if (copy_to_user((struct fstrim_range __user *)arg, &range,
2335 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2339 static bool uuid_is_nonzero(__u8 u[16])
2343 for (i = 0; i < 16; i++)
2349 static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
2351 struct inode *inode = file_inode(filp);
2353 if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode)))
2356 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2358 return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
2361 static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
2363 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2365 return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
2368 static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
2370 struct inode *inode = file_inode(filp);
2371 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2374 if (!f2fs_sb_has_encrypt(sbi))
2377 err = mnt_want_write_file(filp);
2381 down_write(&sbi->sb_lock);
2383 if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
2386 /* update superblock with uuid */
2387 generate_random_uuid(sbi->raw_super->encrypt_pw_salt);
2389 err = f2fs_commit_super(sbi, false);
2392 memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
2396 if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
2400 up_write(&sbi->sb_lock);
2401 mnt_drop_write_file(filp);
2405 static int f2fs_ioc_get_encryption_policy_ex(struct file *filp,
2408 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2411 return fscrypt_ioctl_get_policy_ex(filp, (void __user *)arg);
2414 static int f2fs_ioc_add_encryption_key(struct file *filp, unsigned long arg)
2416 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2419 return fscrypt_ioctl_add_key(filp, (void __user *)arg);
2422 static int f2fs_ioc_remove_encryption_key(struct file *filp, unsigned long arg)
2424 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2427 return fscrypt_ioctl_remove_key(filp, (void __user *)arg);
2430 static int f2fs_ioc_remove_encryption_key_all_users(struct file *filp,
2433 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2436 return fscrypt_ioctl_remove_key_all_users(filp, (void __user *)arg);
2439 static int f2fs_ioc_get_encryption_key_status(struct file *filp,
2442 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2445 return fscrypt_ioctl_get_key_status(filp, (void __user *)arg);
2448 static int f2fs_ioc_get_encryption_nonce(struct file *filp, unsigned long arg)
2450 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2453 return fscrypt_ioctl_get_nonce(filp, (void __user *)arg);
2456 static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
2458 struct inode *inode = file_inode(filp);
2459 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2463 if (!capable(CAP_SYS_ADMIN))
2466 if (get_user(sync, (__u32 __user *)arg))
2469 if (f2fs_readonly(sbi->sb))
2472 ret = mnt_want_write_file(filp);
2477 if (!down_write_trylock(&sbi->gc_lock)) {
2482 down_write(&sbi->gc_lock);
2485 ret = f2fs_gc(sbi, sync, true, NULL_SEGNO);
2487 mnt_drop_write_file(filp);
2491 static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
2493 struct inode *inode = file_inode(filp);
2494 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2495 struct f2fs_gc_range range;
2499 if (!capable(CAP_SYS_ADMIN))
2502 if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
2506 if (f2fs_readonly(sbi->sb))
2509 end = range.start + range.len;
2510 if (end < range.start || range.start < MAIN_BLKADDR(sbi) ||
2511 end >= MAX_BLKADDR(sbi))
2514 ret = mnt_want_write_file(filp);
2520 if (!down_write_trylock(&sbi->gc_lock)) {
2525 down_write(&sbi->gc_lock);
2528 ret = f2fs_gc(sbi, range.sync, true, GET_SEGNO(sbi, range.start));
2534 range.start += BLKS_PER_SEC(sbi);
2535 if (range.start <= end)
2538 mnt_drop_write_file(filp);
2542 static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
2544 struct inode *inode = file_inode(filp);
2545 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2548 if (!capable(CAP_SYS_ADMIN))
2551 if (f2fs_readonly(sbi->sb))
2554 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2555 f2fs_info(sbi, "Skipping Checkpoint. Checkpoints currently disabled.");
2559 ret = mnt_want_write_file(filp);
2563 ret = f2fs_sync_fs(sbi->sb, 1);
2565 mnt_drop_write_file(filp);
2569 static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
2571 struct f2fs_defragment *range)
2573 struct inode *inode = file_inode(filp);
2574 struct f2fs_map_blocks map = { .m_next_extent = NULL,
2575 .m_seg_type = NO_CHECK_TYPE,
2576 .m_may_create = false };
2577 struct extent_info ei = {0, 0, 0};
2578 pgoff_t pg_start, pg_end, next_pgofs;
2579 unsigned int blk_per_seg = sbi->blocks_per_seg;
2580 unsigned int total = 0, sec_num;
2581 block_t blk_end = 0;
2582 bool fragmented = false;
2585 /* if in-place-update policy is enabled, don't waste time here */
2586 if (f2fs_should_update_inplace(inode, NULL))
2589 pg_start = range->start >> PAGE_SHIFT;
2590 pg_end = (range->start + range->len) >> PAGE_SHIFT;
2592 f2fs_balance_fs(sbi, true);
2596 /* writeback all dirty pages in the range */
2597 err = filemap_write_and_wait_range(inode->i_mapping, range->start,
2598 range->start + range->len - 1);
2603 * look up mapping info in the extent cache; skip defragmenting if the
2604 * physical block addresses are contiguous.
2606 if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
2607 if (ei.fofs + ei.len >= pg_end)
2611 map.m_lblk = pg_start;
2612 map.m_next_pgofs = &next_pgofs;
2615 * look up mapping info in the dnode page cache; skip defragmenting if
2616 * all physical block addresses are contiguous, even if there are holes
2617 * in the logical blocks.
2619 while (map.m_lblk < pg_end) {
2620 map.m_len = pg_end - map.m_lblk;
2621 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
2625 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
2626 map.m_lblk = next_pgofs;
2630 if (blk_end && blk_end != map.m_pblk)
2633 /* record the total count of blocks that we're going to move */
2636 blk_end = map.m_pblk + map.m_len;
2638 map.m_lblk += map.m_len;
2646 sec_num = DIV_ROUND_UP(total, BLKS_PER_SEC(sbi));
2649 * make sure there are enough free sections for LFS allocation; this
2650 * avoids running defragmentation in SSR mode when free sections are allocated intensively.
2653 if (has_not_enough_free_secs(sbi, 0, sec_num)) {
2658 map.m_lblk = pg_start;
2659 map.m_len = pg_end - pg_start;
2662 while (map.m_lblk < pg_end) {
2667 map.m_len = pg_end - map.m_lblk;
2668 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
2672 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
2673 map.m_lblk = next_pgofs;
2677 set_inode_flag(inode, FI_DO_DEFRAG);
2680 while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
2683 page = f2fs_get_lock_data_page(inode, idx, true);
2685 err = PTR_ERR(page);
2689 set_page_dirty(page);
2690 f2fs_put_page(page, 1);
2699 if (map.m_lblk < pg_end && cnt < blk_per_seg)
2702 clear_inode_flag(inode, FI_DO_DEFRAG);
2704 err = filemap_fdatawrite(inode->i_mapping);
2709 clear_inode_flag(inode, FI_DO_DEFRAG);
2711 inode_unlock(inode);
2713 range->len = (u64)total << PAGE_SHIFT;
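/*
 * Note: defragmentation above does not move blocks directly; it loads
 * each fragmented extent's pages, marks them dirty under FI_DO_DEFRAG,
 * and lets writeback reallocate them contiguously via LFS-style
 * allocation (hence the free-section check beforehand).
 */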
2717 static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
2719 struct inode *inode = file_inode(filp);
2720 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2721 struct f2fs_defragment range;
2724 if (!capable(CAP_SYS_ADMIN))
2725 return -EPERM;
2727 if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
2728 return -EINVAL;
2730 if (f2fs_readonly(sbi->sb))
2731 return -EROFS;
2733 if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
2737 /* verify alignment of offset & size */
2738 if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
2741 if (unlikely((range.start + range.len) >> PAGE_SHIFT >
2742 sbi->max_file_blocks))
2745 err = mnt_want_write_file(filp);
2749 err = f2fs_defragment_range(sbi, filp, &range);
2750 mnt_drop_write_file(filp);
2752 f2fs_update_time(sbi, REQ_TIME);
2756 if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
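/*
 * f2fs_move_file_range() transplants block mappings between two regular
 * f2fs files on the same mount: both must be unencrypted, all offsets and
 * the length must be F2FS_BLKSIZE-aligned, and a forward-overlapping move
 * within a single file is rejected.
 */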
2763 static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
2764 struct file *file_out, loff_t pos_out, size_t len)
2766 struct inode *src = file_inode(file_in);
2767 struct inode *dst = file_inode(file_out);
2768 struct f2fs_sb_info *sbi = F2FS_I_SB(src);
2769 size_t olen = len, dst_max_i_size = 0;
2773 if (file_in->f_path.mnt != file_out->f_path.mnt ||
2774 src->i_sb != dst->i_sb)
2777 if (unlikely(f2fs_readonly(src->i_sb)))
2780 if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
2783 if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst))
2787 if (pos_in == pos_out)
2789 if (pos_out > pos_in && pos_out < pos_in + len)
2796 if (!inode_trylock(dst))
2801 if (pos_in + len > src->i_size || pos_in + len < pos_in)
2804 olen = len = src->i_size - pos_in;
2805 if (pos_in + len == src->i_size)
2806 len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
2812 dst_osize = dst->i_size;
2813 if (pos_out + olen > dst->i_size)
2814 dst_max_i_size = pos_out + olen;
2816 /* verify the end result is block aligned */
2817 if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
2818 !IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
2819 !IS_ALIGNED(pos_out, F2FS_BLKSIZE))
2822 ret = f2fs_convert_inline_inode(src);
2826 ret = f2fs_convert_inline_inode(dst);
2830 /* write out all dirty pages from offset */
2831 ret = filemap_write_and_wait_range(src->i_mapping,
2832 pos_in, pos_in + len);
2836 ret = filemap_write_and_wait_range(dst->i_mapping,
2837 pos_out, pos_out + len);
2841 f2fs_balance_fs(sbi, true);
2843 down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2846 if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
2851 ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
2852 pos_out >> F2FS_BLKSIZE_BITS,
2853 len >> F2FS_BLKSIZE_BITS, false);
2857 f2fs_i_size_write(dst, dst_max_i_size);
2858 else if (dst_osize != dst->i_size)
2859 f2fs_i_size_write(dst, dst_osize);
2861 f2fs_unlock_op(sbi);
2864 up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
2866 up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2875 static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
2877 struct f2fs_move_range range;
2881 if (!(filp->f_mode & FMODE_READ) ||
2882 !(filp->f_mode & FMODE_WRITE))
2885 if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
2889 dst = fdget(range.dst_fd);
2893 if (!(dst.file->f_mode & FMODE_WRITE)) {
2898 err = mnt_want_write_file(filp);
2902 err = f2fs_move_file_range(filp, range.pos_in, dst.file,
2903 range.pos_out, range.len);
2905 mnt_drop_write_file(filp);
2909 if (copy_to_user((struct f2fs_move_range __user *)arg,
2910 &range, sizeof(range)))
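/*
 * Hedged usage sketch (not part of this file) for F2FS_IOC_MOVE_RANGE,
 * handled above. in_fd must be open read/write and out_fd writable; both
 * fds are hypothetical.
 *
 *	struct f2fs_move_range mr = {
 *		.dst_fd = out_fd,
 *		.pos_in = 0,
 *		.pos_out = 0,
 *		.len = 1ULL << 20,	// move the first 1 MiB of in_fd
 *	};
 *	if (ioctl(in_fd, F2FS_IOC_MOVE_RANGE, &mr) < 0)
 *		perror("move_range");
 */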
2917 static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
2919 struct inode *inode = file_inode(filp);
2920 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2921 struct sit_info *sm = SIT_I(sbi);
2922 unsigned int start_segno = 0, end_segno = 0;
2923 unsigned int dev_start_segno = 0, dev_end_segno = 0;
2924 struct f2fs_flush_device range;
2927 if (!capable(CAP_SYS_ADMIN))
2928 return -EPERM;
2930 if (f2fs_readonly(sbi->sb))
2931 return -EROFS;
2933 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2934 return -EINVAL;
2936 if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
2940 if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num ||
2941 __is_large_section(sbi)) {
2942 f2fs_warn(sbi, "Can't flush %u in %d for segs_per_sec %u != 1",
2943 range.dev_num, sbi->s_ndevs, sbi->segs_per_sec);
2947 ret = mnt_want_write_file(filp);
2951 if (range.dev_num != 0)
2952 dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
2953 dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);
2955 start_segno = sm->last_victim[FLUSH_DEVICE];
2956 if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
2957 start_segno = dev_start_segno;
2958 end_segno = min(start_segno + range.segments, dev_end_segno);
2960 while (start_segno < end_segno) {
2961 if (!down_write_trylock(&sbi->gc_lock)) {
2965 sm->last_victim[GC_CB] = end_segno + 1;
2966 sm->last_victim[GC_GREEDY] = end_segno + 1;
2967 sm->last_victim[ALLOC_NEXT] = end_segno + 1;
2968 ret = f2fs_gc(sbi, true, true, start_segno);
2976 mnt_drop_write_file(filp);
2980 static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
2982 struct inode *inode = file_inode(filp);
2983 u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);
2985 /* Always advertise atomic-write support, which SQLite on Android relies on. */
2986 sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;
2988 return put_user(sb_feature, (u32 __user *)arg);
2992 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
2994 struct dquot *transfer_to[MAXQUOTAS] = {};
2995 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2996 struct super_block *sb = sbi->sb;
2999 transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
3000 if (!IS_ERR(transfer_to[PRJQUOTA])) {
3001 err = __dquot_transfer(inode, transfer_to);
3003 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
3004 dqput(transfer_to[PRJQUOTA]);
3009 static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
3011 struct inode *inode = file_inode(filp);
3012 struct f2fs_inode_info *fi = F2FS_I(inode);
3013 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3018 if (!f2fs_sb_has_project_quota(sbi)) {
3019 if (projid != F2FS_DEF_PROJID)
3025 if (!f2fs_has_extra_attr(inode))
3028 kprojid = make_kprojid(&init_user_ns, (projid_t)projid);
3030 if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
3034 /* Is it a quota file? Do not allow the user to mess with it */
3035 if (IS_NOQUOTA(inode))
3038 ipage = f2fs_get_node_page(sbi, inode->i_ino);
3040 return PTR_ERR(ipage);
3042 if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize,
3045 f2fs_put_page(ipage, 1);
3048 f2fs_put_page(ipage, 1);
3050 err = dquot_initialize(inode);
3055 err = f2fs_transfer_project_quota(inode, kprojid);
3059 F2FS_I(inode)->i_projid = kprojid;
3060 inode->i_ctime = current_time(inode);
3061 f2fs_mark_inode_dirty_sync(inode, true);
3063 f2fs_unlock_op(sbi);
3067 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
3072 static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
3074 if (projid != F2FS_DEF_PROJID)
3080 /* FS_IOC_FSGETXATTR and FS_IOC_FSSETXATTR support */
3083 * To make a new on-disk f2fs i_flag gettable via FS_IOC_FSGETXATTR and settable
3084 * via FS_IOC_FSSETXATTR, add an entry for it to f2fs_xflags_map[], and add its
3085 * FS_XFLAG_* equivalent to F2FS_SUPPORTED_XFLAGS.
3088 static const struct {
3089 u32 iflag;
3090 u32 xflag;
3091 } f2fs_xflags_map[] = {
3092 { F2FS_SYNC_FL, FS_XFLAG_SYNC },
3093 { F2FS_IMMUTABLE_FL, FS_XFLAG_IMMUTABLE },
3094 { F2FS_APPEND_FL, FS_XFLAG_APPEND },
3095 { F2FS_NODUMP_FL, FS_XFLAG_NODUMP },
3096 { F2FS_NOATIME_FL, FS_XFLAG_NOATIME },
3097 { F2FS_PROJINHERIT_FL, FS_XFLAG_PROJINHERIT },
3100 #define F2FS_SUPPORTED_XFLAGS ( \
3101 FS_XFLAG_SYNC | \
3102 FS_XFLAG_IMMUTABLE | \
3103 FS_XFLAG_APPEND | \
3104 FS_XFLAG_NODUMP | \
3105 FS_XFLAG_NOATIME | \
3106 FS_XFLAG_PROJINHERIT)
3108 /* Convert f2fs on-disk i_flags to FS_IOC_FS{GET,SET}XATTR flags */
3109 static inline u32 f2fs_iflags_to_xflags(u32 iflags)
3114 for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++)
3115 if (iflags & f2fs_xflags_map[i].iflag)
3116 xflags |= f2fs_xflags_map[i].xflag;
3121 /* Convert FS_IOC_FS{GET,SET}XATTR flags to f2fs on-disk i_flags */
3122 static inline u32 f2fs_xflags_to_iflags(u32 xflags)
3127 for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++)
3128 if (xflags & f2fs_xflags_map[i].xflag)
3129 iflags |= f2fs_xflags_map[i].iflag;
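/*
 * Example: an inode carrying F2FS_IMMUTABLE_FL | F2FS_NOATIME_FL is
 * reported as FS_XFLAG_IMMUTABLE | FS_XFLAG_NOATIME, and the conversion
 * round-trips losslessly because every f2fs_xflags_map[] entry is a 1:1
 * bit pairing.
 */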
3134 static void f2fs_fill_fsxattr(struct inode *inode, struct fsxattr *fa)
3136 struct f2fs_inode_info *fi = F2FS_I(inode);
3138 simple_fill_fsxattr(fa, f2fs_iflags_to_xflags(fi->i_flags));
3140 if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
3141 fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid);
3144 static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
3146 struct inode *inode = file_inode(filp);
3149 f2fs_fill_fsxattr(inode, &fa);
3151 if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa)))
3156 static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
3158 struct inode *inode = file_inode(filp);
3159 struct fsxattr fa, old_fa;
3163 if (copy_from_user(&fa, (struct fsxattr __user *)arg, sizeof(fa)))
3166 /* Make sure caller has proper permission */
3167 if (!inode_owner_or_capable(inode))
3170 if (fa.fsx_xflags & ~F2FS_SUPPORTED_XFLAGS)
3173 iflags = f2fs_xflags_to_iflags(fa.fsx_xflags);
3174 if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
3177 err = mnt_want_write_file(filp);
3183 f2fs_fill_fsxattr(inode, &old_fa);
3184 err = vfs_ioc_fssetxattr_check(inode, &old_fa, &fa);
3188 err = f2fs_setflags_common(inode, iflags,
3189 f2fs_xflags_to_iflags(F2FS_SUPPORTED_XFLAGS));
3193 err = f2fs_ioc_setproject(filp, fa.fsx_projid);
3195 inode_unlock(inode);
3196 mnt_drop_write_file(filp);
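/*
 * File pinning exempts a file's blocks from relocation by garbage
 * collection. Because a pinned file can trap invalid blocks in its
 * sections, i_gc_failures[GC_FAILURE_PIN] counts how often GC has had to
 * skip it; past gc_pin_file_threshold the pin is force-dropped below.
 */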
3200 int f2fs_pin_file_control(struct inode *inode, bool inc)
3202 struct f2fs_inode_info *fi = F2FS_I(inode);
3203 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3205 /* Use i_gc_failures for normal files as a risk signal. */
3207 f2fs_i_gc_failures_write(inode,
3208 fi->i_gc_failures[GC_FAILURE_PIN] + 1);
3210 if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
3211 f2fs_warn(sbi, "%s: Enable GC = ino %lx after %x GC trials",
3212 __func__, inode->i_ino,
3213 fi->i_gc_failures[GC_FAILURE_PIN]);
3214 clear_inode_flag(inode, FI_PIN_FILE);
3220 static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
3222 struct inode *inode = file_inode(filp);
3226 if (get_user(pin, (__u32 __user *)arg))
3227 return -EFAULT;
3229 if (!S_ISREG(inode->i_mode))
3230 return -EINVAL;
3232 if (f2fs_readonly(F2FS_I_SB(inode)->sb))
3233 return -EROFS;
3235 ret = mnt_want_write_file(filp);
3241 if (f2fs_should_update_outplace(inode, NULL)) {
3247 clear_inode_flag(inode, FI_PIN_FILE);
3248 f2fs_i_gc_failures_write(inode, 0);
3252 if (f2fs_pin_file_control(inode, false)) {
3257 ret = f2fs_convert_inline_inode(inode);
3261 if (f2fs_disable_compressed_file(inode)) {
3266 set_inode_flag(inode, FI_PIN_FILE);
3267 ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3269 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3271 inode_unlock(inode);
3272 mnt_drop_write_file(filp);
3276 static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
3278 struct inode *inode = file_inode(filp);
3281 if (is_inode_flag_set(inode, FI_PIN_FILE))
3282 pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3283 return put_user(pin, (u32 __user *)arg);
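/*
 * Hedged usage sketch (not part of this file) for the two pin-file ioctls
 * above: the __u32 argument is the pin flag on set and the GC-failure
 * count on get.
 *
 *	__u32 pin = 1;
 *	ioctl(fd, F2FS_IOC_SET_PIN_FILE, &pin);	// regular file, rw mount
 *	ioctl(fd, F2FS_IOC_GET_PIN_FILE, &pin);	// pin := failure count
 */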
3286 int f2fs_precache_extents(struct inode *inode)
3288 struct f2fs_inode_info *fi = F2FS_I(inode);
3289 struct f2fs_map_blocks map;
3290 pgoff_t m_next_extent;
3294 if (is_inode_flag_set(inode, FI_NO_EXTENT))
3298 map.m_next_pgofs = NULL;
3299 map.m_next_extent = &m_next_extent;
3300 map.m_seg_type = NO_CHECK_TYPE;
3301 map.m_may_create = false;
3302 end = F2FS_I_SB(inode)->max_file_blocks;
3304 while (map.m_lblk < end) {
3305 map.m_len = end - map.m_lblk;
3307 down_write(&fi->i_gc_rwsem[WRITE]);
3308 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
3309 up_write(&fi->i_gc_rwsem[WRITE]);
3313 map.m_lblk = m_next_extent;
3319 static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
3321 return f2fs_precache_extents(file_inode(filp));
3324 static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
3326 struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
3329 if (!capable(CAP_SYS_ADMIN))
3330 return -EPERM;
3332 if (f2fs_readonly(sbi->sb))
3333 return -EROFS;
3335 if (copy_from_user(&block_count, (void __user *)arg,
3336 sizeof(block_count)))
3339 return f2fs_resize_fs(sbi, block_count);
3342 static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg)
3344 struct inode *inode = file_inode(filp);
3346 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3348 if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) {
3349 f2fs_warn(F2FS_I_SB(inode),
3350 "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem.\n",
3355 return fsverity_ioctl_enable(filp, (const void __user *)arg);
3358 static int f2fs_ioc_measure_verity(struct file *filp, unsigned long arg)
3360 if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
3363 return fsverity_ioctl_measure(filp, (void __user *)arg);
3366 static int f2fs_ioc_getfslabel(struct file *filp, unsigned long arg)
3368 struct inode *inode = file_inode(filp);
3369 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3374 vbuf = f2fs_kzalloc(sbi, MAX_VOLUME_NAME, GFP_KERNEL);
3378 down_read(&sbi->sb_lock);
3379 count = utf16s_to_utf8s(sbi->raw_super->volume_name,
3380 ARRAY_SIZE(sbi->raw_super->volume_name),
3381 UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME);
3382 up_read(&sbi->sb_lock);
3384 if (copy_to_user((char __user *)arg, vbuf,
3385 min(FSLABEL_MAX, count)))
3392 static int f2fs_ioc_setfslabel(struct file *filp, unsigned long arg)
3394 struct inode *inode = file_inode(filp);
3395 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3399 if (!capable(CAP_SYS_ADMIN))
3400 return -EPERM;
3402 vbuf = strndup_user((const char __user *)arg, FSLABEL_MAX);
3404 return PTR_ERR(vbuf);
3406 err = mnt_want_write_file(filp);
3410 down_write(&sbi->sb_lock);
3412 memset(sbi->raw_super->volume_name, 0,
3413 sizeof(sbi->raw_super->volume_name));
3414 utf8s_to_utf16s(vbuf, strlen(vbuf), UTF16_LITTLE_ENDIAN,
3415 sbi->raw_super->volume_name,
3416 ARRAY_SIZE(sbi->raw_super->volume_name));
3418 err = f2fs_commit_super(sbi, false);
3420 up_write(&sbi->sb_lock);
3422 mnt_drop_write_file(filp);
3428 static int f2fs_get_compress_blocks(struct file *filp, unsigned long arg)
3430 struct inode *inode = file_inode(filp);
3433 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3436 if (!f2fs_compressed_file(inode))
3439 blocks = F2FS_I(inode)->i_compr_blocks;
3440 return put_user(blocks, (u64 __user *)arg);
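/*
 * release_compress_blocks() below returns each cluster's compression
 * saving (cluster_size - compr_blocks) to the free block pool and the
 * file is made immutable; reserve_compress_blocks() further below
 * re-charges the same count and lifts the immutable flag so the file is
 * writable again.
 */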
3443 static int release_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
3445 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3446 unsigned int released_blocks = 0;
3447 int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3451 for (i = 0; i < count; i++) {
3452 blkaddr = data_blkaddr(dn->inode, dn->node_page,
3453 dn->ofs_in_node + i);
3455 if (!__is_valid_data_blkaddr(blkaddr))
3457 if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
3458 DATA_GENERIC_ENHANCE)))
3459 return -EFSCORRUPTED;
3462 while (count) {
3463 int compr_blocks = 0;
3465 for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
3466 blkaddr = f2fs_data_blkaddr(dn);
3469 if (blkaddr == COMPRESS_ADDR)
3471 dn->ofs_in_node += cluster_size;
3475 if (__is_valid_data_blkaddr(blkaddr))
3478 if (blkaddr != NEW_ADDR)
3481 dn->data_blkaddr = NULL_ADDR;
3482 f2fs_set_data_blkaddr(dn);
3485 f2fs_i_compr_blocks_update(dn->inode, compr_blocks, false);
3486 dec_valid_block_count(sbi, dn->inode,
3487 cluster_size - compr_blocks);
3489 released_blocks += cluster_size - compr_blocks;
3491 count -= cluster_size;
3494 return released_blocks;
3497 static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
3499 struct inode *inode = file_inode(filp);
3500 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3501 pgoff_t page_idx = 0, last_idx;
3502 unsigned int released_blocks = 0;
3506 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3509 if (!f2fs_compressed_file(inode))
3512 if (f2fs_readonly(sbi->sb))
3515 ret = mnt_want_write_file(filp);
3519 f2fs_balance_fs(F2FS_I_SB(inode), true);
3523 writecount = atomic_read(&inode->i_writecount);
3524 if ((filp->f_mode & FMODE_WRITE && writecount != 1) ||
3525 (!(filp->f_mode & FMODE_WRITE) && writecount)) {
3526 ret = -EBUSY;
3527 goto out;
3529 if (IS_IMMUTABLE(inode)) {
3534 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
3538 F2FS_I(inode)->i_flags |= F2FS_IMMUTABLE_FL;
3539 f2fs_set_inode_flags(inode);
3540 inode->i_ctime = current_time(inode);
3541 f2fs_mark_inode_dirty_sync(inode, true);
3543 if (!F2FS_I(inode)->i_compr_blocks)
3546 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3547 down_write(&F2FS_I(inode)->i_mmap_sem);
3549 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3551 while (page_idx < last_idx) {
3552 struct dnode_of_data dn;
3553 pgoff_t end_offset, count;
3555 set_new_dnode(&dn, inode, NULL, NULL, 0);
3556 ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
3558 if (ret == -ENOENT) {
3559 page_idx = f2fs_get_next_page_offset(&dn,
3567 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3568 count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
3569 count = round_up(count, F2FS_I(inode)->i_cluster_size);
3571 ret = release_compress_blocks(&dn, count);
3573 f2fs_put_dnode(&dn);
3579 released_blocks += ret;
3582 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3583 up_write(&F2FS_I(inode)->i_mmap_sem);
3585 inode_unlock(inode);
3587 mnt_drop_write_file(filp);
3590 ret = put_user(released_blocks, (u64 __user *)arg);
3591 } else if (released_blocks && F2FS_I(inode)->i_compr_blocks) {
3592 set_sbi_flag(sbi, SBI_NEED_FSCK);
3593 f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
3594 "iblocks=%llu, released=%u, compr_blocks=%llu, "
3596 __func__, inode->i_ino, inode->i_blocks,
3598 F2FS_I(inode)->i_compr_blocks);
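/*
 * Hedged usage sketch (not part of this file) for the release/reserve
 * pair: the u64 result reports how many blocks changed hands.
 *
 *	__u64 blocks;
 *	ioctl(fd, F2FS_IOC_RELEASE_COMPRESS_BLOCKS, &blocks);	// now immutable
 *	// ... the released space may be used elsewhere ...
 *	ioctl(fd, F2FS_IOC_RESERVE_COMPRESS_BLOCKS, &blocks);	// writable again
 */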
3604 static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
3606 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3607 unsigned int reserved_blocks = 0;
3608 int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3612 for (i = 0; i < count; i++) {
3613 blkaddr = data_blkaddr(dn->inode, dn->node_page,
3614 dn->ofs_in_node + i);
3616 if (!__is_valid_data_blkaddr(blkaddr))
3618 if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
3619 DATA_GENERIC_ENHANCE)))
3620 return -EFSCORRUPTED;
3623 while (count) {
3624 int compr_blocks = 0;
3625 blkcnt_t reserved;
3626 int ret;
3628 for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
3629 blkaddr = f2fs_data_blkaddr(dn);
3632 if (blkaddr == COMPRESS_ADDR)
3634 dn->ofs_in_node += cluster_size;
3638 if (__is_valid_data_blkaddr(blkaddr)) {
3643 dn->data_blkaddr = NEW_ADDR;
3644 f2fs_set_data_blkaddr(dn);
3647 reserved = cluster_size - compr_blocks;
3648 ret = inc_valid_block_count(sbi, dn->inode, &reserved);
3652 if (reserved != cluster_size - compr_blocks)
3655 f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true);
3657 reserved_blocks += reserved;
3659 count -= cluster_size;
3662 return reserved_blocks;
3665 static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
3667 struct inode *inode = file_inode(filp);
3668 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3669 pgoff_t page_idx = 0, last_idx;
3670 unsigned int reserved_blocks = 0;
3673 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3676 if (!f2fs_compressed_file(inode))
3679 if (f2fs_readonly(sbi->sb))
3682 ret = mnt_want_write_file(filp);
3686 if (F2FS_I(inode)->i_compr_blocks)
3689 f2fs_balance_fs(F2FS_I_SB(inode), true);
3693 if (!IS_IMMUTABLE(inode)) {
3698 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3699 down_write(&F2FS_I(inode)->i_mmap_sem);
3701 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3703 while (page_idx < last_idx) {
3704 struct dnode_of_data dn;
3705 pgoff_t end_offset, count;
3707 set_new_dnode(&dn, inode, NULL, NULL, 0);
3708 ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
3710 if (ret == -ENOENT) {
3711 page_idx = f2fs_get_next_page_offset(&dn,
3719 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3720 count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
3721 count = round_up(count, F2FS_I(inode)->i_cluster_size);
3723 ret = reserve_compress_blocks(&dn, count);
3725 f2fs_put_dnode(&dn);
3731 reserved_blocks += ret;
3734 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3735 up_write(&F2FS_I(inode)->i_mmap_sem);
3738 F2FS_I(inode)->i_flags &= ~F2FS_IMMUTABLE_FL;
3739 f2fs_set_inode_flags(inode);
3740 inode->i_ctime = current_time(inode);
3741 f2fs_mark_inode_dirty_sync(inode, true);
3744 inode_unlock(inode);
3746 mnt_drop_write_file(filp);
3749 ret = put_user(reserved_blocks, (u64 __user *)arg);
3750 } else if (reserved_blocks && F2FS_I(inode)->i_compr_blocks) {
3751 set_sbi_flag(sbi, SBI_NEED_FSCK);
3752 f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
3753 "iblocks=%llu, reserved=%u, compr_blocks=%llu, "
3755 __func__, inode->i_ino, inode->i_blocks,
3757 F2FS_I(inode)->i_compr_blocks);
3763 static int f2fs_secure_erase(struct block_device *bdev, struct inode *inode,
3764 pgoff_t off, block_t block, block_t len, u32 flags)
3766 struct request_queue *q = bdev_get_queue(bdev);
3767 sector_t sector = SECTOR_FROM_BLOCK(block);
3768 sector_t nr_sects = SECTOR_FROM_BLOCK(len);
3774 if (flags & F2FS_TRIM_FILE_DISCARD)
3775 ret = blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOFS,
3776 blk_queue_secure_erase(q) ?
3777 BLKDEV_DISCARD_SECURE : 0);
3779 if (!ret && (flags & F2FS_TRIM_FILE_ZEROOUT)) {
3780 if (IS_ENCRYPTED(inode))
3781 ret = fscrypt_zeroout_range(inode, off, block, len);
3783 ret = blkdev_issue_zeroout(bdev, sector, nr_sects,
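/*
 * f2fs_sec_trim_file() below walks the file's mapped blocks and, per
 * range.flags, issues a (secure) discard and/or an explicit zeroout,
 * merging physically contiguous runs so each underlying block device
 * receives one request per run.
 */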
3790 static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
3792 struct inode *inode = file_inode(filp);
3793 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3794 struct address_space *mapping = inode->i_mapping;
3795 struct block_device *prev_bdev = NULL;
3796 struct f2fs_sectrim_range range;
3797 pgoff_t index, pg_end, prev_index = 0;
3798 block_t prev_block = 0, len = 0;
3800 bool to_end = false;
3803 if (!(filp->f_mode & FMODE_WRITE))
3806 if (copy_from_user(&range, (struct f2fs_sectrim_range __user *)arg,
3810 if (range.flags == 0 || (range.flags & ~F2FS_TRIM_FILE_MASK) ||
3811 !S_ISREG(inode->i_mode))
3814 if (((range.flags & F2FS_TRIM_FILE_DISCARD) &&
3815 !f2fs_hw_support_discard(sbi)) ||
3816 ((range.flags & F2FS_TRIM_FILE_ZEROOUT) &&
3817 IS_ENCRYPTED(inode) && f2fs_is_multi_device(sbi)))
3820 file_start_write(filp);
3823 if (f2fs_is_atomic_file(inode) || f2fs_compressed_file(inode) ||
3824 range.start >= inode->i_size) {
3832 if (inode->i_size - range.start > range.len) {
3833 end_addr = range.start + range.len;
3835 end_addr = range.len == (u64)-1 ?
3836 sbi->sb->s_maxbytes : inode->i_size;
3840 if (!IS_ALIGNED(range.start, F2FS_BLKSIZE) ||
3841 (!to_end && !IS_ALIGNED(end_addr, F2FS_BLKSIZE))) {
3846 index = F2FS_BYTES_TO_BLK(range.start);
3847 pg_end = DIV_ROUND_UP(end_addr, F2FS_BLKSIZE);
3849 ret = f2fs_convert_inline_inode(inode);
3853 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3854 down_write(&F2FS_I(inode)->i_mmap_sem);
3856 ret = filemap_write_and_wait_range(mapping, range.start,
3857 to_end ? LLONG_MAX : end_addr - 1);
3861 truncate_inode_pages_range(mapping, range.start,
3862 to_end ? -1 : end_addr - 1);
3864 while (index < pg_end) {
3865 struct dnode_of_data dn;
3866 pgoff_t end_offset, count;
3869 set_new_dnode(&dn, inode, NULL, NULL, 0);
3870 ret = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
3872 if (ret == -ENOENT) {
3873 index = f2fs_get_next_page_offset(&dn, index);
3879 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3880 count = min(end_offset - dn.ofs_in_node, pg_end - index);
3881 for (i = 0; i < count; i++, index++, dn.ofs_in_node++) {
3882 struct block_device *cur_bdev;
3883 block_t blkaddr = f2fs_data_blkaddr(&dn);
3885 if (!__is_valid_data_blkaddr(blkaddr))
3888 if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
3889 DATA_GENERIC_ENHANCE)) {
3890 ret = -EFSCORRUPTED;
3891 f2fs_put_dnode(&dn);
3895 cur_bdev = f2fs_target_device(sbi, blkaddr, NULL);
3896 if (f2fs_is_multi_device(sbi)) {
3897 int di = f2fs_target_device_index(sbi, blkaddr);
3899 blkaddr -= FDEV(di).start_blk;
3903 if (prev_bdev == cur_bdev &&
3904 index == prev_index + len &&
3905 blkaddr == prev_block + len) {
3908 ret = f2fs_secure_erase(prev_bdev,
3909 inode, prev_index, prev_block,
3912 f2fs_put_dnode(&dn);
3921 prev_bdev = cur_bdev;
3923 prev_block = blkaddr;
3928 f2fs_put_dnode(&dn);
3930 if (fatal_signal_pending(current)) {
3938 ret = f2fs_secure_erase(prev_bdev, inode, prev_index,
3939 prev_block, len, range.flags);
3941 up_write(&F2FS_I(inode)->i_mmap_sem);
3942 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3944 inode_unlock(inode);
3945 file_end_write(filp);
3950 long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
3952 if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
3953 return -EIO;
3954 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp))))
3955 return -ENOSPC;
3957 switch (cmd) {
3958 case FS_IOC_GETFLAGS:
3959 return f2fs_ioc_getflags(filp, arg);
3960 case FS_IOC_SETFLAGS:
3961 return f2fs_ioc_setflags(filp, arg);
3962 case FS_IOC_GETVERSION:
3963 return f2fs_ioc_getversion(filp, arg);
3964 case F2FS_IOC_START_ATOMIC_WRITE:
3965 return f2fs_ioc_start_atomic_write(filp);
3966 case F2FS_IOC_COMMIT_ATOMIC_WRITE:
3967 return f2fs_ioc_commit_atomic_write(filp);
3968 case F2FS_IOC_START_VOLATILE_WRITE:
3969 return f2fs_ioc_start_volatile_write(filp);
3970 case F2FS_IOC_RELEASE_VOLATILE_WRITE:
3971 return f2fs_ioc_release_volatile_write(filp);
3972 case F2FS_IOC_ABORT_VOLATILE_WRITE:
3973 return f2fs_ioc_abort_volatile_write(filp);
3974 case F2FS_IOC_SHUTDOWN:
3975 return f2fs_ioc_shutdown(filp, arg);
3976 case FITRIM:
3977 return f2fs_ioc_fitrim(filp, arg);
3978 case FS_IOC_SET_ENCRYPTION_POLICY:
3979 return f2fs_ioc_set_encryption_policy(filp, arg);
3980 case FS_IOC_GET_ENCRYPTION_POLICY:
3981 return f2fs_ioc_get_encryption_policy(filp, arg);
3982 case FS_IOC_GET_ENCRYPTION_PWSALT:
3983 return f2fs_ioc_get_encryption_pwsalt(filp, arg);
3984 case FS_IOC_GET_ENCRYPTION_POLICY_EX:
3985 return f2fs_ioc_get_encryption_policy_ex(filp, arg);
3986 case FS_IOC_ADD_ENCRYPTION_KEY:
3987 return f2fs_ioc_add_encryption_key(filp, arg);
3988 case FS_IOC_REMOVE_ENCRYPTION_KEY:
3989 return f2fs_ioc_remove_encryption_key(filp, arg);
3990 case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
3991 return f2fs_ioc_remove_encryption_key_all_users(filp, arg);
3992 case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
3993 return f2fs_ioc_get_encryption_key_status(filp, arg);
3994 case FS_IOC_GET_ENCRYPTION_NONCE:
3995 return f2fs_ioc_get_encryption_nonce(filp, arg);
3996 case F2FS_IOC_GARBAGE_COLLECT:
3997 return f2fs_ioc_gc(filp, arg);
3998 case F2FS_IOC_GARBAGE_COLLECT_RANGE:
3999 return f2fs_ioc_gc_range(filp, arg);
4000 case F2FS_IOC_WRITE_CHECKPOINT:
4001 return f2fs_ioc_write_checkpoint(filp, arg);
4002 case F2FS_IOC_DEFRAGMENT:
4003 return f2fs_ioc_defragment(filp, arg);
4004 case F2FS_IOC_MOVE_RANGE:
4005 return f2fs_ioc_move_range(filp, arg);
4006 case F2FS_IOC_FLUSH_DEVICE:
4007 return f2fs_ioc_flush_device(filp, arg);
4008 case F2FS_IOC_GET_FEATURES:
4009 return f2fs_ioc_get_features(filp, arg);
4010 case FS_IOC_FSGETXATTR:
4011 return f2fs_ioc_fsgetxattr(filp, arg);
4012 case FS_IOC_FSSETXATTR:
4013 return f2fs_ioc_fssetxattr(filp, arg);
4014 case F2FS_IOC_GET_PIN_FILE:
4015 return f2fs_ioc_get_pin_file(filp, arg);
4016 case F2FS_IOC_SET_PIN_FILE:
4017 return f2fs_ioc_set_pin_file(filp, arg);
4018 case F2FS_IOC_PRECACHE_EXTENTS:
4019 return f2fs_ioc_precache_extents(filp, arg);
4020 case F2FS_IOC_RESIZE_FS:
4021 return f2fs_ioc_resize_fs(filp, arg);
4022 case FS_IOC_ENABLE_VERITY:
4023 return f2fs_ioc_enable_verity(filp, arg);
4024 case FS_IOC_MEASURE_VERITY:
4025 return f2fs_ioc_measure_verity(filp, arg);
4026 case FS_IOC_GETFSLABEL:
4027 return f2fs_ioc_getfslabel(filp, arg);
4028 case FS_IOC_SETFSLABEL:
4029 return f2fs_ioc_setfslabel(filp, arg);
4030 case F2FS_IOC_GET_COMPRESS_BLOCKS:
4031 return f2fs_get_compress_blocks(filp, arg);
4032 case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
4033 return f2fs_release_compress_blocks(filp, arg);
4034 case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
4035 return f2fs_reserve_compress_blocks(filp, arg);
4036 case F2FS_IOC_SEC_TRIM_FILE:
4037 return f2fs_sec_trim_file(filp, arg);
4038 default:
4039 return -ENOTTY;
4040 }
4043 static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
4045 struct file *file = iocb->ki_filp;
4046 struct inode *inode = file_inode(file);
4049 if (!f2fs_is_compress_backend_ready(inode))
4052 ret = generic_file_read_iter(iocb, iter);
4055 f2fs_update_iostat(F2FS_I_SB(inode), APP_READ_IO, ret);
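/*
 * Write path below: under IOCB_NOWAIT neither the inode lock nor a
 * non-overwrite allocation may block; otherwise blocks may be
 * preallocated up front (unless FI_NO_PREALLOC applies) so that
 * __generic_file_write_iter() cannot fail midway, and preallocation left
 * beyond the final i_size is truncated afterwards.
 */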
4060 static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
4062 struct file *file = iocb->ki_filp;
4063 struct inode *inode = file_inode(file);
4066 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
4071 if (!f2fs_is_compress_backend_ready(inode)) {
4076 if (iocb->ki_flags & IOCB_NOWAIT) {
4077 if (!inode_trylock(inode)) {
4085 ret = generic_write_checks(iocb, from);
4087 bool preallocated = false;
4088 size_t target_size = 0;
4091 if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
4092 set_inode_flag(inode, FI_NO_PREALLOC);
4094 if ((iocb->ki_flags & IOCB_NOWAIT)) {
4095 if (!f2fs_overwrite_io(inode, iocb->ki_pos,
4096 iov_iter_count(from)) ||
4097 f2fs_has_inline_data(inode) ||
4098 f2fs_force_buffered_io(inode, iocb, from)) {
4099 clear_inode_flag(inode, FI_NO_PREALLOC);
4100 inode_unlock(inode);
4107 if (is_inode_flag_set(inode, FI_NO_PREALLOC))
4110 if (iocb->ki_flags & IOCB_DIRECT) {
4112 * Convert inline data for Direct I/O before entering
4115 err = f2fs_convert_inline_inode(inode);
4119 * If f2fs_force_buffered_io() is true, we have to allocate
4120 * blocks all the time, since f2fs_direct_IO will fall
4121 * back to buffered IO.
4123 if (!f2fs_force_buffered_io(inode, iocb, from) &&
4124 allow_outplace_dio(inode, iocb, from))
4127 preallocated = true;
4128 target_size = iocb->ki_pos + iov_iter_count(from);
4130 err = f2fs_preallocate_blocks(iocb, from);
4133 clear_inode_flag(inode, FI_NO_PREALLOC);
4134 inode_unlock(inode);
4139 ret = __generic_file_write_iter(iocb, from);
4140 clear_inode_flag(inode, FI_NO_PREALLOC);
4142 /* if we couldn't write data, we should deallocate blocks. */
4143 if (preallocated && i_size_read(inode) < target_size)
4144 f2fs_truncate(inode);
4147 f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
4149 inode_unlock(inode);
4151 trace_f2fs_file_write_iter(inode, iocb->ki_pos,
4152 iov_iter_count(from), ret);
4154 ret = generic_write_sync(iocb, ret);
4158 #ifdef CONFIG_COMPAT
4159 long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
4161 switch (cmd) {
4162 case FS_IOC32_GETFLAGS:
4163 cmd = FS_IOC_GETFLAGS;
4164 break;
4165 case FS_IOC32_SETFLAGS:
4166 cmd = FS_IOC_SETFLAGS;
4167 break;
4168 case FS_IOC32_GETVERSION:
4169 cmd = FS_IOC_GETVERSION;
4170 break;
4171 case F2FS_IOC_START_ATOMIC_WRITE:
4172 case F2FS_IOC_COMMIT_ATOMIC_WRITE:
4173 case F2FS_IOC_START_VOLATILE_WRITE:
4174 case F2FS_IOC_RELEASE_VOLATILE_WRITE:
4175 case F2FS_IOC_ABORT_VOLATILE_WRITE:
4176 case F2FS_IOC_SHUTDOWN:
4177 case FITRIM:
4178 case FS_IOC_SET_ENCRYPTION_POLICY:
4179 case FS_IOC_GET_ENCRYPTION_PWSALT:
4180 case FS_IOC_GET_ENCRYPTION_POLICY:
4181 case FS_IOC_GET_ENCRYPTION_POLICY_EX:
4182 case FS_IOC_ADD_ENCRYPTION_KEY:
4183 case FS_IOC_REMOVE_ENCRYPTION_KEY:
4184 case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
4185 case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
4186 case FS_IOC_GET_ENCRYPTION_NONCE:
4187 case F2FS_IOC_GARBAGE_COLLECT:
4188 case F2FS_IOC_GARBAGE_COLLECT_RANGE:
4189 case F2FS_IOC_WRITE_CHECKPOINT:
4190 case F2FS_IOC_DEFRAGMENT:
4191 case F2FS_IOC_MOVE_RANGE:
4192 case F2FS_IOC_FLUSH_DEVICE:
4193 case F2FS_IOC_GET_FEATURES:
4194 case FS_IOC_FSGETXATTR:
4195 case FS_IOC_FSSETXATTR:
4196 case F2FS_IOC_GET_PIN_FILE:
4197 case F2FS_IOC_SET_PIN_FILE:
4198 case F2FS_IOC_PRECACHE_EXTENTS:
4199 case F2FS_IOC_RESIZE_FS:
4200 case FS_IOC_ENABLE_VERITY:
4201 case FS_IOC_MEASURE_VERITY:
4202 case FS_IOC_GETFSLABEL:
4203 case FS_IOC_SETFSLABEL:
4204 case F2FS_IOC_GET_COMPRESS_BLOCKS:
4205 case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
4206 case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
4207 case F2FS_IOC_SEC_TRIM_FILE:
4208 break;
4209 default:
4210 return -ENOIOCTLCMD;
4211 }
4212 return f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
4216 const struct file_operations f2fs_file_operations = {
4217 .llseek = f2fs_llseek,
4218 .read_iter = f2fs_file_read_iter,
4219 .write_iter = f2fs_file_write_iter,
4220 .open = f2fs_file_open,
4221 .release = f2fs_release_file,
4222 .mmap = f2fs_file_mmap,
4223 .flush = f2fs_file_flush,
4224 .fsync = f2fs_sync_file,
4225 .fallocate = f2fs_fallocate,
4226 .unlocked_ioctl = f2fs_ioctl,
4227 #ifdef CONFIG_COMPAT
4228 .compat_ioctl = f2fs_compat_ioctl,
4229 #endif
4230 .splice_read = generic_file_splice_read,
4231 .splice_write = iter_file_splice_write,
4232 };