// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/file.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/uuid.h>
#include <linux/file.h>
#include <linux/nls.h>
#include <linux/sched/signal.h>
#include <linux/fileattr.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"
#include "gc.h"
#include <trace/events/f2fs.h>
#include <uapi/linux/f2fs.h>

static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	vm_fault_t ret;

	down_read(&F2FS_I(inode)->i_mmap_sem);
	ret = filemap_fault(vmf);
	up_read(&F2FS_I(inode)->i_mmap_sem);

	if (!ret)
		f2fs_update_iostat(F2FS_I_SB(inode), APP_MAPPED_READ_IO,
							F2FS_BLKSIZE);

	trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret);

	return ret;
}

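/*
 * Note: i_mmap_sem orders page faults against truncation and hole punching.
 * The fault paths here take it for read, while f2fs_setattr(), punch_hole()
 * and the range-manipulation ioctls later in this file take it for write
 * before shrinking or rearranging the mapping.
 */
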
static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	bool need_alloc = true;
	int err = 0;

	if (unlikely(IS_IMMUTABLE(inode)))
		return VM_FAULT_SIGBUS;

	if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
		return VM_FAULT_SIGBUS;

	if (unlikely(f2fs_cp_error(sbi))) {
		err = -EIO;
		goto err;
	}

	if (!f2fs_is_checkpoint_ready(sbi)) {
		err = -ENOSPC;
		goto err;
	}

	err = f2fs_convert_inline_inode(inode);
	if (err)
		goto err;

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (f2fs_compressed_file(inode)) {
		int ret = f2fs_is_compressed_cluster(inode, page->index);

		if (ret < 0) {
			err = ret;
			goto err;
		} else if (ret) {
			need_alloc = false;
		}
	}
#endif
	/* this should be done outside of any locked page */
	if (need_alloc)
		f2fs_balance_fs(sbi, true);

	sb_start_pagefault(inode->i_sb);

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	file_update_time(vmf->vma->vm_file);
	down_read(&F2FS_I(inode)->i_mmap_sem);
	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {
		unlock_page(page);
		err = -EFAULT;
		goto out_sem;
	}

	if (need_alloc) {
		/* block allocation */
		f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_block(&dn, page->index);
		f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
	}

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (!need_alloc) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
		f2fs_put_dnode(&dn);
	}
#endif
	if (err) {
		unlock_page(page);
		goto out_sem;
	}

	f2fs_wait_on_page_writeback(page, DATA, false, true);

	/* wait for GCed page writeback via META_MAPPING */
	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto out_sem;

	/* page is wholly or partially inside EOF */
	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
						i_size_read(inode)) {
		loff_t offset;

		offset = i_size_read(inode) & ~PAGE_MASK;
		zero_user_segment(page, offset, PAGE_SIZE);
	}
	set_page_dirty(page);
	if (!PageUptodate(page))
		SetPageUptodate(page);

	f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);
	f2fs_update_time(sbi, REQ_TIME);

	trace_f2fs_vm_page_mkwrite(page, DATA);
out_sem:
	up_read(&F2FS_I(inode)->i_mmap_sem);

	sb_end_pagefault(inode->i_sb);
err:
	return block_page_mkwrite_return(err);
}

static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= f2fs_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
};

static int get_parent_ino(struct inode *inode, nid_t *pino)
{
	struct dentry *dentry;

	/*
	 * Make sure to get the non-deleted alias. The alias associated with
	 * the open file descriptor being fsync()'ed may be deleted already.
	 */
	dentry = d_find_alias(inode);
	if (!dentry)
		return 0;

	*pino = parent_ino(dentry);
	dput(dentry);
	return 1;
}

static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	enum cp_reason_type cp_reason = CP_NO_NEEDED;

	if (!S_ISREG(inode->i_mode))
		cp_reason = CP_NON_REGULAR;
	else if (f2fs_compressed_file(inode))
		cp_reason = CP_COMPRESSED;
	else if (inode->i_nlink != 1)
		cp_reason = CP_HARDLINK;
	else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
		cp_reason = CP_SB_NEED_CP;
	else if (file_wrong_pino(inode))
		cp_reason = CP_WRONG_PINO;
	else if (!f2fs_space_for_roll_forward(sbi))
		cp_reason = CP_NO_SPC_ROLL;
	else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		cp_reason = CP_NODE_NEED_CP;
	else if (test_opt(sbi, FASTBOOT))
		cp_reason = CP_FASTBOOT_MODE;
	else if (F2FS_OPTION(sbi).active_logs == 2)
		cp_reason = CP_SPEC_LOG_NUM;
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
		f2fs_need_dentry_mark(sbi, inode->i_ino) &&
		f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
							TRANS_DIR_INO))
		cp_reason = CP_RECOVER_DIR;

	return cp_reason;
}

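/*
 * Note: a non-zero cp_reason makes f2fs_do_sync_file() below fall back
 * to a full checkpoint via f2fs_sync_fs(); with CP_NO_NEEDED, fsync only
 * writes the inode's node chain and relies on roll-forward recovery.
 */
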
static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
	bool ret = false;
	/* But we need to avoid that there are some inode updates */
	if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino))
		ret = true;
	f2fs_put_page(i, 0);
	return ret;
}

static void try_to_fix_pino(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t pino;

	down_write(&fi->i_sem);
	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
			get_parent_ino(inode, &pino)) {
		f2fs_i_pino_write(inode, pino);
		file_got_pino(inode);
	}
	up_write(&fi->i_sem);
}

static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
						int datasync, bool atomic)
{
	struct inode *inode = file->f_mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	int ret = 0;
	enum cp_reason_type cp_reason = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};
	unsigned int seq_id = 0;

	if (unlikely(f2fs_readonly(inode->i_sb) ||
				is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return 0;

	trace_f2fs_sync_file_enter(inode);

	if (S_ISDIR(inode->i_mode))
		goto go_write;

	/* if fdatasync is triggered, let's do in-place-update */
	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
		set_inode_flag(inode, FI_NEED_IPU);
	ret = file_write_and_wait_range(file, start, end);
	clear_inode_flag(inode, FI_NEED_IPU);

	if (ret) {
		trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
		return ret;
	}

	/* if the inode is dirty, let's recover all the time */
	if (!f2fs_skip_inode_update(inode, datasync)) {
		f2fs_write_inode(inode, NULL);
		goto go_write;
	}

	/*
	 * if there is no written data, don't waste time to write recovery info.
	 */
	if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
			!f2fs_exist_written_data(sbi, ino, APPEND_INO)) {

		/* it may call write_inode just prior to fsync */
		if (need_inode_page_update(sbi, ino))
			goto go_write;

		if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
				f2fs_exist_written_data(sbi, ino, UPDATE_INO))
			goto flush_out;
		goto out;
	}
go_write:
	/*
	 * Both fdatasync() and fsync() can be recovered from a sudden
	 * power-off.
	 */
	down_read(&F2FS_I(inode)->i_sem);
	cp_reason = need_do_checkpoint(inode);
	up_read(&F2FS_I(inode)->i_sem);

	if (cp_reason) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);

		/*
		 * We've secured consistency through sync_fs. The following
		 * pino will be used only for fsynced inodes after checkpoint.
		 */
		try_to_fix_pino(inode);
		clear_inode_flag(inode, FI_APPEND_WRITE);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		goto out;
	}
sync_nodes:
	atomic_inc(&sbi->wb_sync_req[NODE]);
	ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
	atomic_dec(&sbi->wb_sync_req[NODE]);
	if (ret)
		goto out;

	/* if cp_error was enabled, we should avoid infinite loop */
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto out;
	}

	if (f2fs_need_inode_block_update(sbi, ino)) {
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_write_inode(inode, NULL);
		goto sync_nodes;
	}

	/*
	 * If it's an atomic_write, it's fine to keep write ordering, so we
	 * don't need to wait for node write completion here: the node chain
	 * serializes node blocks. If one of the node writes is reordered,
	 * we see a simply broken chain, which stops roll-forward recovery.
	 * That means we recover either all or none of the node blocks given
	 * the fsync mark.
	 */
	if (!atomic) {
		ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
		if (ret)
			goto out;
	}

	/* once recovery info is written, no need to track this */
	f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
	clear_inode_flag(inode, FI_APPEND_WRITE);
flush_out:
	if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
		ret = f2fs_issue_flush(sbi, inode->i_ino);
	if (!ret) {
		f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		f2fs_remove_ino_entry(sbi, ino, FLUSH_INO);
	}
	f2fs_update_time(sbi, REQ_TIME);
out:
	trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
	return ret;
}

int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
		return -EIO;

	return f2fs_do_sync_file(file, start, end, datasync, false);
}

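/*
 * Note: the atomic=true variant of f2fs_do_sync_file() is used by the
 * atomic-write commit ioctl later in this file; regular fsync(2) always
 * passes atomic=false so that node writeback is waited on and a flush
 * command is issued to the device.
 */
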
static bool __found_offset(struct address_space *mapping, block_t blkaddr,
					pgoff_t index, int whence)
{
	switch (whence) {
	case SEEK_DATA:
		if (__is_valid_data_blkaddr(blkaddr))
			return true;
		if (blkaddr == NEW_ADDR &&
		    xa_get_mark(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY))
			return true;
		break;
	case SEEK_HOLE:
		if (blkaddr == NULL_ADDR)
			return true;
		break;
	}
	return false;
}

static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
	pgoff_t pgofs, end_offset;
	loff_t data_ofs = offset;
	loff_t isize;
	int err = 0;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize)
		goto fail;

	/* handle inline data case */
	if (f2fs_has_inline_data(inode)) {
		if (whence == SEEK_HOLE) {
			data_ofs = isize;
			goto found;
		} else if (whence == SEEK_DATA) {
			data_ofs = offset;
			goto found;
		}
	}

	pgofs = (pgoff_t)(offset >> PAGE_SHIFT);

	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
		if (err && err != -ENOENT) {
			goto fail;
		} else if (err == -ENOENT) {
			/* direct node does not exist */
			if (whence == SEEK_DATA) {
				pgofs = f2fs_get_next_page_offset(&dn, pgofs);
				continue;
			} else {
				goto found;
			}
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

		/* find data/hole in dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
				data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
			block_t blkaddr;

			blkaddr = f2fs_data_blkaddr(&dn);

			if (__is_valid_data_blkaddr(blkaddr) &&
				!f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
					blkaddr, DATA_GENERIC_ENHANCE)) {
				f2fs_put_dnode(&dn);
				goto fail;
			}

			if (__found_offset(file->f_mapping, blkaddr,
							pgofs, whence)) {
				f2fs_put_dnode(&dn);
				goto found;
			}
		}
		f2fs_put_dnode(&dn);
	}

	if (whence == SEEK_DATA)
		goto fail;
found:
	if (whence == SEEK_HOLE && data_ofs > isize)
		data_ofs = isize;
	inode_unlock(inode);
	return vfs_setpos(file, data_ofs, maxbytes);
fail:
	inode_unlock(inode);
	return -ENXIO;
}

static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;

	if (f2fs_compressed_file(inode))
		maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset < 0)
			return -ENXIO;
		return f2fs_seek_block(file, offset, whence);
	}

	return -EINVAL;
}

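/*
 * Note: SEEK_DATA/SEEK_HOLE are resolved by f2fs_seek_block() above, which
 * walks direct node blocks and classifies each block address through
 * __found_offset(): a valid address or a dirty NEW_ADDR page counts as
 * data, NULL_ADDR counts as a hole.
 */
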
static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;
	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	file_accessed(file);
	vma->vm_ops = &f2fs_file_vm_ops;
	set_inode_flag(inode, FI_MMAP_FILE);
	return 0;
}

static int f2fs_file_open(struct inode *inode, struct file *filp)
{
	int err = fscrypt_file_open(inode, filp);

	if (err)
		return err;

	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	err = fsverity_file_open(inode, filp);
	if (err)
		return err;

	filp->f_mode |= FMODE_NOWAIT;

	return dquot_file_open(inode, filp);
}

void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_node *raw_node;
	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
	__le32 *addr;
	int base = 0;
	bool compressed_cluster = false;
	int cluster_index = 0, valid_blocks = 0;
	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
	bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);

	raw_node = F2FS_NODE(dn->node_page);
	addr = blkaddr_in_node(raw_node) + base + ofs;

	/* Assumption: truncation starts with a cluster */
	for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
		block_t blkaddr = le32_to_cpu(*addr);

		if (f2fs_compressed_file(dn->inode) &&
					!(cluster_index & (cluster_size - 1))) {
			if (compressed_cluster)
				f2fs_i_compr_blocks_update(dn->inode,
							valid_blocks, false);
			compressed_cluster = (blkaddr == COMPRESS_ADDR);
			valid_blocks = 0;
		}

		if (blkaddr == NULL_ADDR)
			continue;

		dn->data_blkaddr = NULL_ADDR;
		f2fs_set_data_blkaddr(dn);

		if (__is_valid_data_blkaddr(blkaddr)) {
			if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
						DATA_GENERIC_ENHANCE))
				continue;
			if (compressed_cluster)
				valid_blocks++;
		}

		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
			clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);

		f2fs_invalidate_blocks(sbi, blkaddr);

		if (!released || blkaddr != COMPRESS_ADDR)
			nr_free++;
	}

	if (compressed_cluster)
		f2fs_i_compr_blocks_update(dn->inode, valid_blocks, false);

	if (nr_free) {
		pgoff_t fofs;
		/*
		 * once we invalidate valid blkaddr in range [ofs, ofs + count],
		 * we will invalidate all blkaddr in the whole range.
		 */
		fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
							dn->inode) + ofs;
		f2fs_update_extent_cache_range(dn, fofs, 0, len);
		dec_valid_block_count(sbi, dn->inode, nr_free);
	}
	dn->ofs_in_node = ofs;

	f2fs_update_time(sbi, REQ_TIME);
	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					 dn->ofs_in_node, nr_free);
}

void f2fs_truncate_data_blocks(struct dnode_of_data *dn)
{
	f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
}

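/*
 * Note: ADDRS_PER_BLOCK() is the number of data block addresses held by
 * one direct node block, so the wrapper above drops every block referenced
 * from the node page that dn currently points at.
 */
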
static int truncate_partial_data_page(struct inode *inode, u64 from,
								bool cache_only)
{
	loff_t offset = from & (PAGE_SIZE - 1);
	pgoff_t index = from >> PAGE_SHIFT;
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	if (!offset && !cache_only)
		return 0;

	if (cache_only) {
		page = find_lock_page(mapping, index);
		if (page && PageUptodate(page))
			goto truncate_out;
		f2fs_put_page(page, 1);
		return 0;
	}

	page = f2fs_get_lock_data_page(inode, index, true);
	if (IS_ERR(page))
		return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
truncate_out:
	f2fs_wait_on_page_writeback(page, DATA, true, true);
	zero_user(page, offset, PAGE_SIZE - offset);

	/* An encrypted inode should have a key and truncate the last page. */
	f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));
	if (!cache_only)
		set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}

int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	pgoff_t free_from;
	int count = 0, err = 0;
	struct page *ipage;
	bool truncate_page = false;

	trace_f2fs_truncate_blocks_enter(inode, from);

	free_from = (pgoff_t)F2FS_BLK_ALIGN(from);

	if (free_from >= max_file_blocks(inode))
		goto free_partial;

	if (lock)
		f2fs_lock_op(sbi);

	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		f2fs_truncate_inline_inode(inode, ipage, from);
		f2fs_put_page(ipage, 1);
		truncate_page = true;
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
	if (err) {
		if (err == -ENOENT)
			goto free_next;
		goto out;
	}

	count = ADDRS_PER_PAGE(dn.node_page, inode);

	count -= dn.ofs_in_node;
	f2fs_bug_on(sbi, count < 0);

	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		f2fs_truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

	f2fs_put_dnode(&dn);
free_next:
	err = f2fs_truncate_inode_blocks(inode, free_from);
out:
	if (lock)
		f2fs_unlock_op(sbi);
free_partial:
	/* lastly zero out the first data page */
	if (!err)
		err = truncate_partial_data_page(inode, from, truncate_page);

	trace_f2fs_truncate_blocks_exit(inode, err);
	return err;
}

int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	u64 free_from = from;
	int err;

#ifdef CONFIG_F2FS_FS_COMPRESSION
	/*
	 * for compressed files, only cluster-size-aligned
	 * truncation is supported.
	 */
	if (f2fs_compressed_file(inode))
		free_from = round_up(from,
				F2FS_I(inode)->i_cluster_size << PAGE_SHIFT);
#endif

	err = f2fs_do_truncate_blocks(inode, free_from, lock);
	if (err)
		return err;

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (from != free_from) {
		err = f2fs_truncate_partial_cluster(inode, from, lock);
		if (err)
			return err;
	}
#endif

	return 0;
}

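/*
 * Note: for compressed inodes truncation is two-phase: blocks are freed
 * at cluster granularity first (free_from is rounded up above), and
 * f2fs_truncate_partial_cluster() then handles the tail cluster that
 * straddles the new EOF.
 */
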
int f2fs_truncate(struct inode *inode)
{
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
		return 0;

	trace_f2fs_truncate(inode);

	if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
		f2fs_show_injection_info(F2FS_I_SB(inode), FAULT_TRUNCATE);
		return -EIO;
	}

	err = dquot_initialize(inode);
	if (err)
		return err;

	/* we should check inline_data size */
	if (!f2fs_may_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
	if (err)
		return err;

	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return 0;
}

int f2fs_getattr(struct user_namespace *mnt_userns, const struct path *path,
		 struct kstat *stat, u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_inode *ri;
	unsigned int flags;

	if (f2fs_has_extra_attr(inode) &&
			f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
		stat->result_mask |= STATX_BTIME;
		stat->btime.tv_sec = fi->i_crtime.tv_sec;
		stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
	}

	flags = fi->i_flags;
	if (flags & F2FS_COMPR_FL)
		stat->attributes |= STATX_ATTR_COMPRESSED;
	if (flags & F2FS_APPEND_FL)
		stat->attributes |= STATX_ATTR_APPEND;
	if (IS_ENCRYPTED(inode))
		stat->attributes |= STATX_ATTR_ENCRYPTED;
	if (flags & F2FS_IMMUTABLE_FL)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (flags & F2FS_NODUMP_FL)
		stat->attributes |= STATX_ATTR_NODUMP;
	if (IS_VERITY(inode))
		stat->attributes |= STATX_ATTR_VERITY;

	stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
				  STATX_ATTR_APPEND |
				  STATX_ATTR_ENCRYPTED |
				  STATX_ATTR_IMMUTABLE |
				  STATX_ATTR_NODUMP |
				  STATX_ATTR_VERITY);

	generic_fillattr(&init_user_ns, inode, stat);

	/* we need to show initial sectors used for inline_data/dentries */
	if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
					f2fs_has_inline_dentry(inode))
		stat->blocks += (stat->size + 511) >> 9;

	return 0;
}

#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct user_namespace *mnt_userns,
			   struct inode *inode, const struct iattr *attr)
{
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = attr->ia_atime;
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = attr->ia_mtime;
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = attr->ia_ctime;
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;
		kgid_t kgid = i_gid_into_mnt(mnt_userns, inode);

		if (!in_group_p(kgid) && !capable_wrt_inode_uidgid(mnt_userns, inode, CAP_FSETID))
			mode &= ~S_ISGID;
		set_acl_inode(inode, mode);
	}
}
#else
#define __setattr_copy setattr_copy
#endif

int f2fs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
		 struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if (unlikely(IS_IMMUTABLE(inode)))
		return -EPERM;

	if (unlikely(IS_APPEND(inode) &&
			(attr->ia_valid & (ATTR_MODE | ATTR_UID |
				  ATTR_GID | ATTR_TIMES_SET))))
		return -EPERM;

	if ((attr->ia_valid & ATTR_SIZE) &&
		!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	err = setattr_prepare(&init_user_ns, dentry, attr);
	if (err)
		return err;

	err = fscrypt_prepare_setattr(dentry, attr);
	if (err)
		return err;

	err = fsverity_prepare_setattr(dentry, attr);
	if (err)
		return err;

	if (is_quota_modification(inode, attr)) {
		err = dquot_initialize(inode);
		if (err)
			return err;
	}
	if ((attr->ia_valid & ATTR_UID &&
		!uid_eq(attr->ia_uid, inode->i_uid)) ||
		(attr->ia_valid & ATTR_GID &&
		!gid_eq(attr->ia_gid, inode->i_gid))) {
		f2fs_lock_op(F2FS_I_SB(inode));
		err = dquot_transfer(inode, attr);
		if (err) {
			set_sbi_flag(F2FS_I_SB(inode),
					SBI_QUOTA_NEED_REPAIR);
			f2fs_unlock_op(F2FS_I_SB(inode));
			return err;
		}
		/*
		 * update uid/gid under lock_op(), so that dquot and inode can
		 * be updated atomically.
		 */
		if (attr->ia_valid & ATTR_UID)
			inode->i_uid = attr->ia_uid;
		if (attr->ia_valid & ATTR_GID)
			inode->i_gid = attr->ia_gid;
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_unlock_op(F2FS_I_SB(inode));
	}

	if (attr->ia_valid & ATTR_SIZE) {
		loff_t old_size = i_size_read(inode);

		if (attr->ia_size > MAX_INLINE_DATA(inode)) {
			/*
			 * should convert inline inode before i_size_write to
			 * keep smaller than inline_data size with inline flag.
			 */
			err = f2fs_convert_inline_inode(inode);
			if (err)
				return err;
		}

		down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		down_write(&F2FS_I(inode)->i_mmap_sem);

		truncate_setsize(inode, attr->ia_size);

		if (attr->ia_size <= old_size)
			err = f2fs_truncate(inode);
		/*
		 * do not trim all blocks after i_size if target size is
		 * larger than i_size.
		 */
		up_write(&F2FS_I(inode)->i_mmap_sem);
		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		if (err)
			return err;

		spin_lock(&F2FS_I(inode)->i_size_lock);
		inode->i_mtime = inode->i_ctime = current_time(inode);
		F2FS_I(inode)->last_disk_size = i_size_read(inode);
		spin_unlock(&F2FS_I(inode)->i_size_lock);
	}

	__setattr_copy(&init_user_ns, inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(&init_user_ns, inode, f2fs_get_inode_mode(inode));

		if (is_inode_flag_set(inode, FI_ACL_MODE)) {
			if (!err)
				inode->i_mode = F2FS_I(inode)->i_acl_mode;
			clear_inode_flag(inode, FI_ACL_MODE);
		}
	}

	/* file size may have changed here */
	f2fs_mark_inode_dirty_sync(inode, true);

	/* inode change will produce dirty node pages flushed by checkpoint */
	f2fs_balance_fs(F2FS_I_SB(inode), true);

	return err;
}

const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
	.set_acl	= f2fs_set_acl,
	.listxattr	= f2fs_listxattr,
	.fiemap		= f2fs_fiemap,
	.fileattr_get	= f2fs_fileattr_get,
	.fileattr_set	= f2fs_fileattr_set,
};

static int fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;

	if (!len)
		return 0;

	f2fs_balance_fs(sbi, true);

	f2fs_lock_op(sbi);
	page = f2fs_get_new_data_page(inode, NULL, index, false);
	f2fs_unlock_op(sbi);

	if (IS_ERR(page))
		return PTR_ERR(page);

	f2fs_wait_on_page_writeback(page, DATA, true, true);
	zero_user(page, start, len);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}

int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
	int err;

	while (pg_start < pg_end) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
		if (err) {
			if (err == -ENOENT) {
				pg_start = f2fs_get_next_page_offset(&dn,
								pg_start);
				continue;
			}
			return err;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);

		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);

		f2fs_truncate_data_blocks_range(&dn, count);
		f2fs_put_dnode(&dn);

		pg_start += count;
	}
	return 0;
}

static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
	int ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;
		}
		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				return ret;
		}

		if (pg_start < pg_end) {
			struct address_space *mapping = inode->i_mapping;
			loff_t blk_start, blk_end;
			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

			f2fs_balance_fs(sbi, true);

			blk_start = (loff_t)pg_start << PAGE_SHIFT;
			blk_end = (loff_t)pg_end << PAGE_SHIFT;

			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			down_write(&F2FS_I(inode)->i_mmap_sem);

			truncate_inode_pages_range(mapping, blk_start,
					blk_end - 1);

			f2fs_lock_op(sbi);
			ret = f2fs_truncate_hole(inode, pg_start, pg_end);
			f2fs_unlock_op(sbi);

			up_write(&F2FS_I(inode)->i_mmap_sem);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		}
	}

	return ret;
}

static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, pgoff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, done, i;

next_dnode:
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
	if (ret && ret != -ENOENT) {
		return ret;
	} else if (ret == -ENOENT) {
		if (dn.max_level == 0)
			return -ENOENT;
		done = min((pgoff_t)ADDRS_PER_BLOCK(inode) -
						dn.ofs_in_node, len);
		blkaddr += done;
		do_replace += done;
		goto next;
	}

	done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
							dn.ofs_in_node, len);
	for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
		*blkaddr = f2fs_data_blkaddr(&dn);

		if (__is_valid_data_blkaddr(*blkaddr) &&
			!f2fs_is_valid_blkaddr(sbi, *blkaddr,
					DATA_GENERIC_ENHANCE)) {
			f2fs_put_dnode(&dn);
			return -EFSCORRUPTED;
		}

		if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {

			if (f2fs_lfs_mode(sbi)) {
				f2fs_put_dnode(&dn);
				return -EOPNOTSUPP;
			}

			/* do not invalidate this block address */
			f2fs_update_data_blkaddr(&dn, NULL_ADDR);
			*do_replace = 1;
		}
	}
	f2fs_put_dnode(&dn);
next:
	len -= done;
	off += done;
	if (len)
		goto next_dnode;
	return 0;
}

static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, int len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, i;

	for (i = 0; i < len; i++, do_replace++, blkaddr++) {
		if (*do_replace == 0)
			continue;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
		if (ret) {
			dec_valid_block_count(sbi, inode, 1);
			f2fs_invalidate_blocks(sbi, *blkaddr);
		} else {
			f2fs_update_data_blkaddr(&dn, *blkaddr);
		}
		f2fs_put_dnode(&dn);
	}
	return 0;
}

static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
			block_t *blkaddr, int *do_replace,
			pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
	pgoff_t i = 0;
	int ret;

	while (i < len) {
		if (blkaddr[i] == NULL_ADDR && !full) {
			i++;
			continue;
		}

		if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
			struct dnode_of_data dn;
			struct node_info ni;
			size_t new_size;
			pgoff_t ilen;

			set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
			ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
			if (ret)
				return ret;

			ret = f2fs_get_node_info(sbi, dn.nid, &ni);
			if (ret) {
				f2fs_put_dnode(&dn);
				return ret;
			}

			ilen = min((pgoff_t)
				ADDRS_PER_PAGE(dn.node_page, dst_inode) -
						dn.ofs_in_node, len - i);
			do {
				dn.data_blkaddr = f2fs_data_blkaddr(&dn);
				f2fs_truncate_data_blocks_range(&dn, 1);

				if (do_replace[i]) {
					f2fs_i_blocks_write(src_inode,
							1, false, false);
					f2fs_i_blocks_write(dst_inode,
							1, true, false);
					f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
					blkaddr[i], ni.version, true, false);

					do_replace[i] = 0;
				}
				dn.ofs_in_node++;
				i++;
				new_size = (loff_t)(dst + i) << PAGE_SHIFT;
				if (dst_inode->i_size < new_size)
					f2fs_i_size_write(dst_inode, new_size);
			} while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));

			f2fs_put_dnode(&dn);
		} else {
			struct page *psrc, *pdst;

			psrc = f2fs_get_lock_data_page(src_inode,
							src + i, true);
			if (IS_ERR(psrc))
				return PTR_ERR(psrc);
			pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i,
								true);
			if (IS_ERR(pdst)) {
				f2fs_put_page(psrc, 1);
				return PTR_ERR(pdst);
			}
			f2fs_copy_page(psrc, pdst);
			set_page_dirty(pdst);
			f2fs_put_page(pdst, 1);
			f2fs_put_page(psrc, 1);

			ret = f2fs_truncate_hole(src_inode,
						src + i, src + i + 1);
			if (ret)
				return ret;
			i++;
		}
	}
	return 0;
}

static int __exchange_data_block(struct inode *src_inode,
			struct inode *dst_inode, pgoff_t src, pgoff_t dst,
			pgoff_t len, bool full)
{
	block_t *src_blkaddr;
	int *do_replace;
	pgoff_t olen;
	int ret;

	while (len) {
		olen = min((pgoff_t)4 * ADDRS_PER_BLOCK(src_inode), len);

		src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
					array_size(olen, sizeof(block_t)),
					GFP_NOFS);
		if (!src_blkaddr)
			return -ENOMEM;

		do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
					array_size(olen, sizeof(int)),
					GFP_NOFS);
		if (!do_replace) {
			kvfree(src_blkaddr);
			return -ENOMEM;
		}

		ret = __read_out_blkaddrs(src_inode, src_blkaddr,
					do_replace, src, olen);
		if (ret)
			goto roll_back;

		ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
					do_replace, src, dst, olen, full);
		if (ret)
			goto roll_back;

		src += olen;
		dst += olen;
		len -= olen;

		kvfree(src_blkaddr);
		kvfree(do_replace);
	}
	return 0;

roll_back:
	__roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen);
	kvfree(src_blkaddr);
	kvfree(do_replace);
	return ret;
}

static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
	pgoff_t start = offset >> PAGE_SHIFT;
	pgoff_t end = (offset + len) >> PAGE_SHIFT;
	int ret;

	f2fs_balance_fs(sbi, true);

	/* avoid gc operation during block exchange */
	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);

	f2fs_lock_op(sbi);
	f2fs_drop_extent_tree(inode);
	truncate_pagecache(inode, offset);
	ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
	f2fs_unlock_op(sbi);

	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	return ret;
}

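/*
 * Note: collapse is implemented as a left shift. Every block from 'end'
 * onward is exchanged down to 'start' via __exchange_data_block(), and
 * f2fs_collapse_range() then truncates the now-duplicated tail blocks.
 */
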
static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
	loff_t new_size;
	int ret;

	if (offset + len >= i_size_read(inode))
		return -EINVAL;

	/* collapse range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	ret = f2fs_do_collapse(inode, offset, len);
	if (ret)
		return ret;

	/* write out all moved pages, if possible */
	down_write(&F2FS_I(inode)->i_mmap_sem);
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	new_size = i_size_read(inode) - len;
	ret = f2fs_truncate_blocks(inode, new_size, true);
	up_write(&F2FS_I(inode)->i_mmap_sem);
	if (!ret)
		f2fs_i_size_write(inode, new_size);
	return ret;
}

static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
								pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	pgoff_t index = start;
	unsigned int ofs_in_node = dn->ofs_in_node;
	blkcnt_t count = 0;
	int ret;

	for (; index < end; index++, dn->ofs_in_node++) {
		if (f2fs_data_blkaddr(dn) == NULL_ADDR)
			count++;
	}

	dn->ofs_in_node = ofs_in_node;
	ret = f2fs_reserve_new_blocks(dn, count);
	if (ret)
		return ret;

	dn->ofs_in_node = ofs_in_node;
	for (index = start; index < end; index++, dn->ofs_in_node++) {
		dn->data_blkaddr = f2fs_data_blkaddr(dn);
		/*
		 * f2fs_reserve_new_blocks will not guarantee entire block
		 * allocation.
		 */
		if (dn->data_blkaddr == NULL_ADDR) {
			ret = -ENOSPC;
			break;
		}
		if (dn->data_blkaddr != NEW_ADDR) {
			f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
			dn->data_blkaddr = NEW_ADDR;
			f2fs_set_data_blkaddr(dn);
		}
	}

	f2fs_update_extent_cache_range(dn, start, 0, index - start);

	return ret;
}

static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
								int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	int ret = 0;

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;

		new_size = max_t(loff_t, new_size, offset + len);
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;

			new_size = max_t(loff_t, new_size,
					(loff_t)pg_start << PAGE_SHIFT);
		}

		for (index = pg_start; index < pg_end;) {
			struct dnode_of_data dn;
			unsigned int end_offset;
			pgoff_t end;

			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			down_write(&F2FS_I(inode)->i_mmap_sem);

			truncate_pagecache_range(inode,
				(loff_t)index << PAGE_SHIFT,
				((loff_t)pg_end << PAGE_SHIFT) - 1);

			f2fs_lock_op(sbi);

			set_new_dnode(&dn, inode, NULL, NULL, 0);
			ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
			if (ret) {
				f2fs_unlock_op(sbi);
				up_write(&F2FS_I(inode)->i_mmap_sem);
				up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
				goto out;
			}

			end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
			end = min(pg_end, end_offset - dn.ofs_in_node + index);

			ret = f2fs_do_zero_range(&dn, index, end);
			f2fs_put_dnode(&dn);

			f2fs_unlock_op(sbi);
			up_write(&F2FS_I(inode)->i_mmap_sem);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

			f2fs_balance_fs(sbi, dn.node_changed);

			if (ret)
				goto out;

			index = end;
			new_size = max_t(loff_t, new_size,
					(loff_t)index << PAGE_SHIFT);
		}

		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				goto out;

			new_size = max_t(loff_t, new_size, offset + len);
		}
	}

out:
	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);
	}
	return ret;
}

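/*
 * Note: the zero-range loop above processes one dnode block per iteration
 * (end is clamped to the dnode's address count), so f2fs_lock_op() and
 * i_mmap_sem are only held for a bounded number of block reservations
 * at a time.
 */
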
static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nr, pg_start, pg_end, delta, idx;
	loff_t new_size;
	int ret = 0;

	new_size = i_size_read(inode) + len;
	ret = inode_newsize_ok(inode, new_size);
	if (ret)
		return ret;

	if (offset >= i_size_read(inode))
		return -EINVAL;

	/* insert range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	f2fs_balance_fs(sbi, true);

	down_write(&F2FS_I(inode)->i_mmap_sem);
	ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
	up_write(&F2FS_I(inode)->i_mmap_sem);
	if (ret)
		return ret;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	pg_start = offset >> PAGE_SHIFT;
	pg_end = (offset + len) >> PAGE_SHIFT;
	delta = pg_end - pg_start;
	idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	/* avoid gc operation during block exchange */
	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);
	truncate_pagecache(inode, offset);

	while (!ret && idx > pg_start) {
		nr = idx - pg_start;
		if (nr > delta)
			nr = delta;
		idx -= nr;

		f2fs_lock_op(sbi);
		f2fs_drop_extent_tree(inode);

		ret = __exchange_data_block(inode, inode, idx,
					idx + delta, nr, false);
		f2fs_unlock_op(sbi);
	}
	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	/* write out all moved pages, if possible */
	down_write(&F2FS_I(inode)->i_mmap_sem);
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);
	up_write(&F2FS_I(inode)->i_mmap_sem);

	if (!ret)
		f2fs_i_size_write(inode, new_size);
	return ret;
}

static int expand_inode_data(struct inode *inode, loff_t offset,
					loff_t len, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
			.m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
			.m_may_create = true };
	pgoff_t pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_end;
	block_t expanded = 0;
	int err;

	err = inode_newsize_ok(inode, (len + offset));
	if (err)
		return err;

	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	f2fs_balance_fs(sbi, true);

	pg_start = ((unsigned long long)offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
	off_end = (offset + len) & (PAGE_SIZE - 1);

	map.m_lblk = pg_start;
	map.m_len = pg_end - pg_start;
	if (off_end)
		map.m_len++;

	if (!map.m_len)
		return 0;

	if (f2fs_is_pinned_file(inode)) {
		block_t sec_blks = BLKS_PER_SEC(sbi);
		block_t sec_len = roundup(map.m_len, sec_blks);

		map.m_len = sec_blks;
next_alloc:
		if (has_not_enough_free_secs(sbi, 0,
			GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
			down_write(&sbi->gc_lock);
			err = f2fs_gc(sbi, true, false, false, NULL_SEGNO);
			if (err && err != -ENODATA && err != -EAGAIN)
				goto out_err;
		}

		down_write(&sbi->pin_sem);

		f2fs_lock_op(sbi);
		f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
		f2fs_unlock_op(sbi);

		map.m_seg_type = CURSEG_COLD_DATA_PINNED;
		err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);

		up_write(&sbi->pin_sem);

		expanded += map.m_len;
		sec_len -= map.m_len;
		map.m_lblk += map.m_len;
		if (!err && sec_len)
			goto next_alloc;

		map.m_len = expanded;
	} else {
		err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
		expanded = map.m_len;
	}
out_err:
	if (err) {
		pgoff_t last_off;

		if (!expanded)
			return err;

		last_off = pg_start + expanded - 1;

		/* update new size to the failed position */
		new_size = (last_off == pg_end) ? offset + len :
					(loff_t)(last_off + 1) << PAGE_SHIFT;
	} else {
		new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
	}

	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);
	}

	return err;
}

static long f2fs_fallocate(struct file *file, int mode,
				loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	long ret = 0;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;
	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
		return -ENOSPC;
	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	/* f2fs only supports ->fallocate for regular files */
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (IS_ENCRYPTED(inode) &&
		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
		return -EOPNOTSUPP;

	if (f2fs_compressed_file(inode) &&
		(mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
			FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE)))
		return -EOPNOTSUPP;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
			FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
			FALLOC_FL_INSERT_RANGE))
		return -EOPNOTSUPP;

	inode_lock(inode);

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset >= inode->i_size)
			goto out;

		ret = punch_hole(inode, offset, len);
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		ret = f2fs_collapse_range(inode, offset, len);
	} else if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = f2fs_zero_range(inode, offset, len, mode);
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		ret = f2fs_insert_range(inode, offset, len);
	} else {
		ret = expand_inode_data(inode, offset, len, mode);
	}

	if (!ret) {
		inode->i_mtime = inode->i_ctime = current_time(inode);
		f2fs_mark_inode_dirty_sync(inode, false);
		f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	}

out:
	inode_unlock(inode);

	trace_f2fs_fallocate(inode, mode, offset, len, ret);
	return ret;
}

static int f2fs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * f2fs_release_file() is called on every close. We should not drop
	 * any in-memory pages because of a close issued by another process.
	 */
	if (!(filp->f_mode & FMODE_WRITE) ||
			atomic_read(&inode->i_writecount) != 1)
		return 0;

	/* any remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		set_inode_flag(inode, FI_DROP_CACHE);
		filemap_fdatawrite(inode->i_mapping);
		clear_inode_flag(inode, FI_DROP_CACHE);
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
	}
	return 0;
}

static int f2fs_file_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);

	/*
	 * If the process doing a transaction crashed, we should roll back.
	 * Otherwise, other readers/writers could see a corrupted database
	 * until all writers close their files. Since this has to happen
	 * before dropping the file lock, it needs to be done in ->flush.
	 */
	if (f2fs_is_atomic_file(inode) &&
			F2FS_I(inode)->inmem_task == current)
		f2fs_drop_inmem_pages(inode);
	return 0;
}

static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	u32 masked_flags = fi->i_flags & mask;

	/* mask can be shrunk by flags_valid selector */
	iflags &= mask;

	/* Is it a quota file? Do not allow the user to mess with it */
	if (IS_NOQUOTA(inode))
		return -EPERM;

	if ((iflags ^ masked_flags) & F2FS_CASEFOLD_FL) {
		if (!f2fs_sb_has_casefold(F2FS_I_SB(inode)))
			return -EOPNOTSUPP;
		if (!f2fs_empty_dir(inode))
			return -ENOTEMPTY;
	}

	if (iflags & (F2FS_COMPR_FL | F2FS_NOCOMP_FL)) {
		if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
			return -EOPNOTSUPP;
		if ((iflags & F2FS_COMPR_FL) && (iflags & F2FS_NOCOMP_FL))
			return -EINVAL;
	}

	if ((iflags ^ masked_flags) & F2FS_COMPR_FL) {
		if (masked_flags & F2FS_COMPR_FL) {
			if (!f2fs_disable_compressed_file(inode))
				return -EINVAL;
		}
		if (iflags & F2FS_NOCOMP_FL)
			return -EINVAL;
		if (iflags & F2FS_COMPR_FL) {
			if (!f2fs_may_compress(inode))
				return -EINVAL;
			if (S_ISREG(inode->i_mode) && inode->i_size)
				return -EINVAL;

			set_compress_context(inode);
		}
	}
	if ((iflags ^ masked_flags) & F2FS_NOCOMP_FL) {
		if (masked_flags & F2FS_COMPR_FL)
			return -EINVAL;
	}

	fi->i_flags = iflags | (fi->i_flags & ~mask);
	f2fs_bug_on(F2FS_I_SB(inode), (fi->i_flags & F2FS_COMPR_FL) &&
					(fi->i_flags & F2FS_NOCOMP_FL));

	if (fi->i_flags & F2FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);
	else
		clear_inode_flag(inode, FI_PROJ_INHERIT);

	inode->i_ctime = current_time(inode);
	f2fs_set_inode_flags(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
	return 0;
}

/* FS_IOC_[GS]ETFLAGS and FS_IOC_FS[GS]ETXATTR support */

/*
 * To make a new on-disk f2fs i_flag gettable via FS_IOC_GETFLAGS, add an entry
 * for it to f2fs_fsflags_map[], and add its FS_*_FL equivalent to
 * F2FS_GETTABLE_FS_FL. To also make it settable via FS_IOC_SETFLAGS, also add
 * its FS_*_FL equivalent to F2FS_SETTABLE_FS_FL.
 *
 * Translating flags to fsx_flags value used by FS_IOC_FSGETXATTR and
 * FS_IOC_FSSETXATTR is done by the VFS.
 */

static const struct {
	u32 iflag;
	u32 fsflag;
} f2fs_fsflags_map[] = {
	{ F2FS_COMPR_FL,	FS_COMPR_FL },
	{ F2FS_SYNC_FL,		FS_SYNC_FL },
	{ F2FS_IMMUTABLE_FL,	FS_IMMUTABLE_FL },
	{ F2FS_APPEND_FL,	FS_APPEND_FL },
	{ F2FS_NODUMP_FL,	FS_NODUMP_FL },
	{ F2FS_NOATIME_FL,	FS_NOATIME_FL },
	{ F2FS_NOCOMP_FL,	FS_NOCOMP_FL },
	{ F2FS_INDEX_FL,	FS_INDEX_FL },
	{ F2FS_DIRSYNC_FL,	FS_DIRSYNC_FL },
	{ F2FS_PROJINHERIT_FL,	FS_PROJINHERIT_FL },
	{ F2FS_CASEFOLD_FL,	FS_CASEFOLD_FL },
};

#define F2FS_GETTABLE_FS_FL (		\
		FS_COMPR_FL |		\
		FS_SYNC_FL |		\
		FS_IMMUTABLE_FL |	\
		FS_APPEND_FL |		\
		FS_NODUMP_FL |		\
		FS_NOATIME_FL |		\
		FS_NOCOMP_FL |		\
		FS_INDEX_FL |		\
		FS_DIRSYNC_FL |		\
		FS_PROJINHERIT_FL |	\
		FS_ENCRYPT_FL |		\
		FS_INLINE_DATA_FL |	\
		FS_NOCOW_FL |		\
		FS_VERITY_FL |		\
		FS_CASEFOLD_FL)

#define F2FS_SETTABLE_FS_FL (		\
		FS_COMPR_FL |		\
		FS_SYNC_FL |		\
		FS_IMMUTABLE_FL |	\
		FS_APPEND_FL |		\
		FS_NODUMP_FL |		\
		FS_NOATIME_FL |		\
		FS_NOCOMP_FL |		\
		FS_DIRSYNC_FL |		\
		FS_PROJINHERIT_FL |	\
		FS_CASEFOLD_FL)

/* Convert f2fs on-disk i_flags to FS_IOC_{GET,SET}FLAGS flags */
static inline u32 f2fs_iflags_to_fsflags(u32 iflags)
{
	u32 fsflags = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
		if (iflags & f2fs_fsflags_map[i].iflag)
			fsflags |= f2fs_fsflags_map[i].fsflag;

	return fsflags;
}

/* Convert FS_IOC_{GET,SET}FLAGS flags to f2fs on-disk i_flags */
static inline u32 f2fs_fsflags_to_iflags(u32 fsflags)
{
	u32 iflags = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
		if (fsflags & f2fs_fsflags_map[i].fsflag)
			iflags |= f2fs_fsflags_map[i].iflag;

	return iflags;
}

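/*
 * Illustrative userspace usage of the generic flag interface that the
 * tables above translate for (a sketch, not part of this file):
 *
 *	int flags;
 *
 *	if (ioctl(fd, FS_IOC_GETFLAGS, &flags) == 0) {
 *		flags |= FS_NODUMP_FL;
 *		ioctl(fd, FS_IOC_SETFLAGS, &flags);
 *	}
 *
 * Only bits in F2FS_GETTABLE_FS_FL are reported, and only bits in
 * F2FS_SETTABLE_FS_FL may be changed.
 */
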
static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	return put_user(inode->i_generation, (int __user *)arg);
}

static int f2fs_ioc_start_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	if (!inode_owner_or_capable(&init_user_ns, inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (filp->f_flags & O_DIRECT)
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	f2fs_disable_compressed_file(inode);

	if (f2fs_is_atomic_file(inode)) {
		if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))
			ret = -EINVAL;
		goto out;
	}

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	/*
	 * Should wait end_io to count F2FS_WB_CP_DATA correctly by
	 * f2fs_is_atomic_file.
	 */
	if (get_dirty_pages(inode))
		f2fs_warn(F2FS_I_SB(inode), "Unexpected flush for atomic writes: ino=%lu, npages=%u",
			  inode->i_ino, get_dirty_pages(inode));
	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (ret) {
		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		goto out;
	}

	spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
	if (list_empty(&fi->inmem_ilist))
		list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]);
	sbi->atomic_files++;
	spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);

	/* add inode to inmem_list first and then set atomic_file */
	set_inode_flag(inode, FI_ATOMIC_FILE);
	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	F2FS_I(inode)->inmem_task = current;
	stat_update_max_atomic_write(inode);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

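/*
 * Note: between F2FS_IOC_START_ATOMIC_WRITE and the commit ioctl below,
 * writes to this file are staged as in-memory pages (FI_ATOMIC_FILE) and
 * only reach disk when f2fs_commit_inmem_pages() runs at commit time;
 * aborting instead drops the staged pages.
 */
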
static int f2fs_ioc_commit_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(&init_user_ns, inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	f2fs_balance_fs(F2FS_I_SB(inode), true);

	inode_lock(inode);

	if (f2fs_is_volatile_file(inode)) {
		ret = -EINVAL;
		goto err_out;
	}

	if (f2fs_is_atomic_file(inode)) {
		ret = f2fs_commit_inmem_pages(inode);
		if (ret)
			goto err_out;

		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
		if (!ret)
			f2fs_drop_inmem_pages(inode);
	} else {
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
	}
err_out:
	if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
		clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
		ret = -EINVAL;
	}
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_start_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(&init_user_ns, inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_volatile_file(inode))
		goto out;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	stat_inc_volatile_write(inode);
	stat_update_max_volatile_write(inode);

	set_inode_flag(inode, FI_VOLATILE_FILE);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_release_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(&init_user_ns, inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (!f2fs_is_volatile_file(inode))
		goto out;

	if (!f2fs_is_first_block_written(inode)) {
		ret = truncate_partial_data_page(inode, 0, true);
		goto out;
	}

	ret = punch_hole(inode, 0, F2FS_BLKSIZE);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_abort_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(&init_user_ns, inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
	}

	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);

	inode_unlock(inode);

	mnt_drop_write_file(filp);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return ret;
}

static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	__u32 in;
	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(in, (__u32 __user *)arg))
		return -EFAULT;

	if (in != F2FS_GOING_DOWN_FULLSYNC) {
		ret = mnt_want_write_file(filp);
		if (ret) {
			if (ret == -EROFS) {
				ret = 0;
				f2fs_stop_checkpoint(sbi, false);
				set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
				trace_f2fs_shutdown(sbi, in, ret);
			}
			return ret;
		}
	}

	switch (in) {
	case F2FS_GOING_DOWN_FULLSYNC:
		ret = freeze_bdev(sb->s_bdev);
		if (ret)
			goto out;
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		thaw_bdev(sb->s_bdev);
		break;
	case F2FS_GOING_DOWN_METASYNC:
		/* do checkpoint only */
		ret = f2fs_sync_fs(sb, 1);
		if (ret)
			goto out;
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		break;
	case F2FS_GOING_DOWN_NOSYNC:
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		break;
	case F2FS_GOING_DOWN_METAFLUSH:
		f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		break;
	case F2FS_GOING_DOWN_NEED_FSCK:
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
		set_sbi_flag(sbi, SBI_IS_DIRTY);
		/* do checkpoint only */
		ret = f2fs_sync_fs(sb, 1);
		goto out;
	default:
		ret = -EINVAL;
		goto out;
	}

	f2fs_stop_gc_thread(sbi);
	f2fs_stop_discard_thread(sbi);

	f2fs_drop_discard_cmd(sbi);
	clear_opt(sbi, DISCARD);

	f2fs_update_time(sbi, REQ_TIME);
out:
	if (in != F2FS_GOING_DOWN_FULLSYNC)
		mnt_drop_write_file(filp);

	trace_f2fs_shutdown(sbi, in, ret);

	return ret;
}

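/*
 * Note: the shutdown modes above differ in how much state they preserve.
 * FULLSYNC freezes the block device first, METASYNC and NEED_FSCK run a
 * checkpoint before stopping, METAFLUSH only writes dirty meta pages, and
 * NOSYNC stops checkpointing immediately without writing anything back.
 */
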
static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	struct request_queue *q = bdev_get_queue(sb->s_bdev);
	struct fstrim_range range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!f2fs_hw_support_discard(F2FS_SB(sb)))
		return -EOPNOTSUPP;

	if (copy_from_user(&range, (struct fstrim_range __user *)arg,
				sizeof(range)))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	range.minlen = max((unsigned int)range.minlen,
			   q->limits.discard_granularity);
	ret = f2fs_trim_fs(F2FS_SB(sb), &range);
	mnt_drop_write_file(filp);
	if (ret < 0)
		return ret;

	if (copy_to_user((struct fstrim_range __user *)arg, &range,
				sizeof(range)))
		return -EFAULT;

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return 0;
}

static bool uuid_is_nonzero(__u8 u[16])
{
	int i;

	for (i = 0; i < 16; i++)
		if (u[i])
			return true;
	return false;
}

static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode)))
		return -EOPNOTSUPP;

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
}

static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;
	return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
}

static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err;

	if (!f2fs_sb_has_encrypt(sbi))
		return -EOPNOTSUPP;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	down_write(&sbi->sb_lock);

	if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
		goto got_it;

	/* update superblock with uuid */
	generate_random_uuid(sbi->raw_super->encrypt_pw_salt);

	err = f2fs_commit_super(sbi, false);
	if (err) {
		/* undo new data */
		memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
		goto out_err;
	}
got_it:
	if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
									16))
		err = -EFAULT;
out_err:
	up_write(&sbi->sb_lock);
	mnt_drop_write_file(filp);
	return err;
}

static int f2fs_ioc_get_encryption_policy_ex(struct file *filp,
					     unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_get_policy_ex(filp, (void __user *)arg);
}

static int f2fs_ioc_add_encryption_key(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_add_key(filp, (void __user *)arg);
}

static int f2fs_ioc_remove_encryption_key(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_remove_key(filp, (void __user *)arg);
}

static int f2fs_ioc_remove_encryption_key_all_users(struct file *filp,
						    unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_remove_key_all_users(filp, (void __user *)arg);
}

static int f2fs_ioc_get_encryption_key_status(struct file *filp,
					      unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_get_key_status(filp, (void __user *)arg);
}

static int f2fs_ioc_get_encryption_nonce(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_get_nonce(filp, (void __user *)arg);
}

static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	__u32 sync;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(sync, (__u32 __user *)arg))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (!sync) {
		if (!down_write_trylock(&sbi->gc_lock)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		down_write(&sbi->gc_lock);
	}

	ret = f2fs_gc(sbi, sync, true, false, NULL_SEGNO);
out:
	mnt_drop_write_file(filp);
	return ret;
}

static int __f2fs_ioc_gc_range(struct file *filp, struct f2fs_gc_range *range)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
	u64 end;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	end = range->start + range->len;
	if (end < range->start || range->start < MAIN_BLKADDR(sbi) ||
					end >= MAX_BLKADDR(sbi))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

do_more:
	if (!range->sync) {
		if (!down_write_trylock(&sbi->gc_lock)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		down_write(&sbi->gc_lock);
	}

	ret = f2fs_gc(sbi, range->sync, true, false,
				GET_SEGNO(sbi, range->start));
	if (ret) {
		if (ret == -EBUSY)
			ret = -EAGAIN;
		goto out;
	}
	range->start += BLKS_PER_SEC(sbi);
	if (range->start <= end)
		goto do_more;
out:
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
{
	struct f2fs_gc_range range;

	if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
							sizeof(range)))
		return -EFAULT;
	return __f2fs_ioc_gc_range(filp, &range);
}

static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		f2fs_info(sbi, "Skipping Checkpoint. Checkpoints currently disabled.");
		return -EINVAL;
	}

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	ret = f2fs_sync_fs(sbi->sb, 1);

	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
					struct file *filp,
					struct f2fs_defragment *range)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_map_blocks map = { .m_next_extent = NULL,
					.m_seg_type = NO_CHECK_TYPE,
					.m_may_create = false };
	struct extent_info ei = {0, 0, 0};
	pgoff_t pg_start, pg_end, next_pgofs;
	unsigned int blk_per_seg = sbi->blocks_per_seg;
	unsigned int total = 0, sec_num;
	block_t blk_end = 0;
	bool fragmented = false;
	int err;

	/* if in-place-update policy is enabled, don't waste time here */
	if (f2fs_should_update_inplace(inode, NULL))
		return -EINVAL;

	pg_start = range->start >> PAGE_SHIFT;
	pg_end = (range->start + range->len) >> PAGE_SHIFT;

	f2fs_balance_fs(sbi, true);

	inode_lock(inode);

	/* writeback all dirty pages in the range */
	err = filemap_write_and_wait_range(inode->i_mapping, range->start,
						range->start + range->len - 1);
	if (err)
		goto out;

	/*
	 * lookup mapping info in extent cache, skip defragmenting if physical
	 * block addresses are continuous.
	 */
	if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
		if (ei.fofs + ei.len >= pg_end)
			goto out;
	}

	map.m_lblk = pg_start;
	map.m_next_pgofs = &next_pgofs;

	/*
	 * lookup mapping info in dnode page cache, skip defragmenting if all
	 * physical block addresses are continuous even if there are hole(s)
	 * in logical blocks.
	 */
	while (map.m_lblk < pg_end) {
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk = next_pgofs;
			continue;
		}

		if (blk_end && blk_end != map.m_pblk)
			fragmented = true;

		/* record total count of blocks that we're going to move */
		total += map.m_len;

		blk_end = map.m_pblk + map.m_len;

		map.m_lblk += map.m_len;
	}

	if (!fragmented) {
		total = 0;
		goto out;
	}

	sec_num = DIV_ROUND_UP(total, BLKS_PER_SEC(sbi));

	/*
	 * make sure there are enough free sections for LFS allocation; this
	 * avoids running defragment in SSR mode when free sections are being
	 * allocated intensively.
	 */
	if (has_not_enough_free_secs(sbi, 0, sec_num)) {
		err = -EAGAIN;
		goto out;
	}

	map.m_lblk = pg_start;
	map.m_len = pg_end - pg_start;
	total = 0;

	while (map.m_lblk < pg_end) {
		pgoff_t idx;
		int cnt = 0;

do_map:
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto clear_out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk = next_pgofs;
			goto check;
		}

		set_inode_flag(inode, FI_DO_DEFRAG);

		idx = map.m_lblk;
		while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
			struct page *page;

			page = f2fs_get_lock_data_page(inode, idx, true);
			if (IS_ERR(page)) {
				err = PTR_ERR(page);
				goto clear_out;
			}

			set_page_dirty(page);
			f2fs_put_page(page, 1);

			idx++;
			cnt++;
			total++;
		}

		map.m_lblk = idx;
check:
		if (map.m_lblk < pg_end && cnt < blk_per_seg)
			goto do_map;

		clear_inode_flag(inode, FI_DO_DEFRAG);

		err = filemap_fdatawrite(inode->i_mapping);
		if (err)
			goto out;
	}
clear_out:
	clear_inode_flag(inode, FI_DO_DEFRAG);
out:
	inode_unlock(inode);
	if (!err)
		range->len = (u64)total << PAGE_SHIFT;
	return err;
}

static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_defragment range;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
		return -EINVAL;
	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
							sizeof(range)))
		return -EFAULT;

	/* verify alignment of offset & size */
	if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	if (unlikely((range.start + range.len) >> PAGE_SHIFT >
				max_file_blocks(inode)))
		return -EINVAL;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	err = f2fs_defragment_range(sbi, filp, &range);
	mnt_drop_write_file(filp);

	f2fs_update_time(sbi, REQ_TIME);
	if (err < 0)
		return err;

	if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
							sizeof(range)))
		return -EFAULT;

	return 0;
}
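
/*
 * Exchange block-aligned data between two regular files on the same
 * f2fs instance. Both inodes are locked (with trylock on the
 * destination to avoid ABBA deadlock), dirty pages are flushed, and
 * __exchange_data_block() swaps the block pointers under f2fs_lock_op().
 */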
static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
			struct file *file_out, loff_t pos_out, size_t len)
{
	struct inode *src = file_inode(file_in);
	struct inode *dst = file_inode(file_out);
	struct f2fs_sb_info *sbi = F2FS_I_SB(src);
	size_t olen = len, dst_max_i_size = 0;
	size_t dst_osize;
	int ret;

	if (file_in->f_path.mnt != file_out->f_path.mnt ||
				src->i_sb != dst->i_sb)
		return -EXDEV;
	if (unlikely(f2fs_readonly(src->i_sb)))
		return -EROFS;
	if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
		return -EINVAL;
	if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst))
		return -EOPNOTSUPP;
	if (pos_out < 0 || pos_in < 0)
		return -EINVAL;

	if (src == dst) {
		if (pos_in == pos_out)
			return 0;
		if (pos_out > pos_in && pos_out < pos_in + len)
			return -EINVAL;
	}

	inode_lock(src);
	if (src != dst) {
		ret = -EBUSY;
		if (!inode_trylock(dst))
			goto out;
	}

	ret = -EINVAL;
	if (pos_in + len > src->i_size || pos_in + len < pos_in)
		goto out_unlock;
	if (len == 0)
		olen = len = src->i_size - pos_in;
	if (pos_in + len == src->i_size)
		len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
	if (len == 0) {
		ret = 0;
		goto out_unlock;
	}

	dst_osize = dst->i_size;
	if (pos_out + olen > dst->i_size)
		dst_max_i_size = pos_out + olen;

	/* verify the end result is block aligned */
	if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_out, F2FS_BLKSIZE))
		goto out_unlock;

	ret = f2fs_convert_inline_inode(src);
	if (ret)
		goto out_unlock;

	ret = f2fs_convert_inline_inode(dst);
	if (ret)
		goto out_unlock;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(src->i_mapping,
					pos_in, pos_in + len);
	if (ret)
		goto out_unlock;

	ret = filemap_write_and_wait_range(dst->i_mapping,
					pos_out, pos_out + len);
	if (ret)
		goto out_unlock;

	f2fs_balance_fs(sbi, true);

	down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
	if (src != dst) {
		ret = -EBUSY;
		if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
			goto out_src;
	}

	f2fs_lock_op(sbi);
	ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
				pos_out >> F2FS_BLKSIZE_BITS,
				len >> F2FS_BLKSIZE_BITS, false);
	if (!ret) {
		if (dst_max_i_size)
			f2fs_i_size_write(dst, dst_max_i_size);
		else if (dst_osize != dst->i_size)
			f2fs_i_size_write(dst, dst_osize);
	}
	f2fs_unlock_op(sbi);

	if (src != dst)
		up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
out_src:
	up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
out_unlock:
	if (src != dst)
		inode_unlock(dst);
out:
	inode_unlock(src);
	return ret;
}
static int __f2fs_ioc_move_range(struct file *filp,
				struct f2fs_move_range *range)
{
	struct fd dst;
	int err;

	if (!(filp->f_mode & FMODE_READ) ||
			!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	dst = fdget(range->dst_fd);
	if (!dst.file)
		return -EBADF;

	if (!(dst.file->f_mode & FMODE_WRITE)) {
		err = -EBADF;
		goto err_out;
	}

	err = mnt_want_write_file(filp);
	if (err)
		goto err_out;

	err = f2fs_move_file_range(filp, range->pos_in, dst.file,
					range->pos_out, range->len);
	mnt_drop_write_file(filp);
err_out:
	fdput(dst);
	return err;
}
static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
{
	struct f2fs_move_range range;

	if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
							sizeof(range)))
		return -EFAULT;

	return __f2fs_ioc_move_range(filp, &range);
}
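
/*
 * F2FS_IOC_FLUSH_DEVICE: migrate all valid blocks out of one device of
 * a multi-device filesystem by driving foreground GC over that device's
 * segment range. Only meaningful when segs_per_sec == 1, as the error
 * message below notes.
 */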
static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct sit_info *sm = SIT_I(sbi);
	unsigned int start_segno = 0, end_segno = 0;
	unsigned int dev_start_segno = 0, dev_end_segno = 0;
	struct f2fs_flush_device range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return -EINVAL;

	if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
							sizeof(range)))
		return -EFAULT;

	if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num ||
			__is_large_section(sbi)) {
		f2fs_warn(sbi, "Can't flush %u in %d for segs_per_sec %u != 1",
			  range.dev_num, sbi->s_ndevs, sbi->segs_per_sec);
		return -EINVAL;
	}

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (range.dev_num != 0)
		dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
	dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);

	start_segno = sm->last_victim[FLUSH_DEVICE];
	if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
		start_segno = dev_start_segno;
	end_segno = min(start_segno + range.segments, dev_end_segno);

	while (start_segno < end_segno) {
		if (!down_write_trylock(&sbi->gc_lock)) {
			ret = -EBUSY;
			goto out;
		}
		sm->last_victim[GC_CB] = end_segno + 1;
		sm->last_victim[GC_GREEDY] = end_segno + 1;
		sm->last_victim[ALLOC_NEXT] = end_segno + 1;
		ret = f2fs_gc(sbi, true, true, true, start_segno);
		if (ret == -EAGAIN)
			ret = 0;
		else if (ret < 0)
			break;
		start_segno++;
	}
out:
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);

	/* Must validate to set it with SQLite behavior in Android. */
	sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;

	return put_user(sb_feature, (u32 __user *)arg);
}
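
/*
 * Project quota handling: moving an inode to a new project ID has to
 * transfer its usage to the new project's dquot; if that transfer
 * fails, the quota file may be inconsistent, hence
 * SBI_QUOTA_NEED_REPAIR.
 */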
#ifdef CONFIG_QUOTA
int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
{
	struct dquot *transfer_to[MAXQUOTAS] = {};
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	int err = 0;

	transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
	if (!IS_ERR(transfer_to[PRJQUOTA])) {
		err = __dquot_transfer(inode, transfer_to);
		if (err)
			set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
		dqput(transfer_to[PRJQUOTA]);
	}
	return err;
}
static int f2fs_ioc_setproject(struct inode *inode, __u32 projid)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *ipage;
	kprojid_t kprojid;
	int err;

	if (!f2fs_sb_has_project_quota(sbi)) {
		if (projid != F2FS_DEF_PROJID)
			return -EOPNOTSUPP;
		else
			return 0;
	}

	if (!f2fs_has_extra_attr(inode))
		return -EOPNOTSUPP;

	kprojid = make_kprojid(&init_user_ns, (projid_t)projid);

	if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
		return 0;

	err = -EPERM;
	/* Is it quota file? Do not allow user to mess with it */
	if (IS_NOQUOTA(inode))
		return err;

	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize,
								i_projid)) {
		err = -EOVERFLOW;
		f2fs_put_page(ipage, 1);
		return err;
	}
	f2fs_put_page(ipage, 1);

	err = dquot_initialize(inode);
	if (err)
		return err;

	f2fs_lock_op(sbi);
	err = f2fs_transfer_project_quota(inode, kprojid);
	if (err)
		goto out_unlock;

	F2FS_I(inode)->i_projid = kprojid;
	inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
out_unlock:
	f2fs_unlock_op(sbi);
	return err;
}
#else
int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
{
	return 0;
}

static int f2fs_ioc_setproject(struct inode *inode, __u32 projid)
{
	if (projid != F2FS_DEF_PROJID)
		return -EOPNOTSUPP;
	return 0;
}
#endif
int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
{
	struct inode *inode = d_inode(dentry);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	u32 fsflags = f2fs_iflags_to_fsflags(fi->i_flags);

	if (IS_ENCRYPTED(inode))
		fsflags |= FS_ENCRYPT_FL;
	if (IS_VERITY(inode))
		fsflags |= FS_VERITY_FL;
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
		fsflags |= FS_INLINE_DATA_FL;
	if (is_inode_flag_set(inode, FI_PIN_FILE))
		fsflags |= FS_NOCOW_FL;

	fileattr_fill_flags(fa, fsflags & F2FS_GETTABLE_FS_FL);

	if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
		fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid);

	return 0;
}
int f2fs_fileattr_set(struct user_namespace *mnt_userns,
		      struct dentry *dentry, struct fileattr *fa)
{
	struct inode *inode = d_inode(dentry);
	u32 fsflags = fa->flags, mask = F2FS_SETTABLE_FS_FL;
	u32 iflags;
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;
	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
		return -ENOSPC;
	if (fsflags & ~F2FS_GETTABLE_FS_FL)
		return -EOPNOTSUPP;
	fsflags &= F2FS_SETTABLE_FS_FL;
	if (!fa->flags_valid)
		mask &= FS_COMMON_FL;

	iflags = f2fs_fsflags_to_iflags(fsflags);
	if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
		return -EOPNOTSUPP;

	err = f2fs_setflags_common(inode, iflags, f2fs_fsflags_to_iflags(mask));
	if (!err)
		err = f2fs_ioc_setproject(inode, fa->fsx_projid);

	return err;
}
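
/*
 * A pinned file must keep its on-disk block addresses stable, so GC is
 * not allowed to move its blocks. Each failed GC attempt on a pinned
 * file is counted, and once the count exceeds gc_pin_file_threshold the
 * pin is dropped so that GC can make progress again.
 */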
int f2fs_pin_file_control(struct inode *inode, bool inc)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	/* Use i_gc_failures for normal file as a risk signal. */
	if (inc)
		f2fs_i_gc_failures_write(inode,
				fi->i_gc_failures[GC_FAILURE_PIN] + 1);

	if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
		f2fs_warn(sbi, "%s: Enable GC = ino %lx after %x GC trials",
			  __func__, inode->i_ino,
			  fi->i_gc_failures[GC_FAILURE_PIN]);
		clear_inode_flag(inode, FI_PIN_FILE);
		return -EAGAIN;
	}
	return 0;
}
static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	__u32 pin;
	int ret = 0;

	if (get_user(pin, (__u32 __user *)arg))
		return -EFAULT;
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	if (f2fs_readonly(F2FS_I_SB(inode)->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_should_update_outplace(inode, NULL)) {
		ret = -EINVAL;
		goto out;
	}

	if (!pin) {
		clear_inode_flag(inode, FI_PIN_FILE);
		f2fs_i_gc_failures_write(inode, 0);
		goto done;
	}

	if (f2fs_pin_file_control(inode, false)) {
		ret = -EAGAIN;
		goto out;
	}

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	if (!f2fs_disable_compressed_file(inode)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	set_inode_flag(inode, FI_PIN_FILE);
	ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
done:
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	__u32 pin = 0;

	if (is_inode_flag_set(inode, FI_PIN_FILE))
		pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
	return put_user(pin, (u32 __user *)arg);
}
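
/*
 * Walk the whole file with F2FS_GET_BLOCK_PRECACHE to populate the
 * extent cache ahead of time; m_next_extent lets the loop jump directly
 * to the next uncached extent instead of scanning block by block.
 */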
int f2fs_precache_extents(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_map_blocks map;
	pgoff_t m_next_extent;
	loff_t end;
	int err;

	if (is_inode_flag_set(inode, FI_NO_EXTENT))
		return -EOPNOTSUPP;

	map.m_lblk = 0;
	map.m_next_pgofs = NULL;
	map.m_next_extent = &m_next_extent;
	map.m_seg_type = NO_CHECK_TYPE;
	map.m_may_create = false;
	end = max_file_blocks(inode);

	while (map.m_lblk < end) {
		map.m_len = end - map.m_lblk;

		down_write(&fi->i_gc_rwsem[WRITE]);
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
		up_write(&fi->i_gc_rwsem[WRITE]);
		if (err)
			return err;

		map.m_lblk = m_next_extent;
	}

	return 0;
}
static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
{
	return f2fs_precache_extents(file_inode(filp));
}
static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
	__u64 block_count;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (copy_from_user(&block_count, (void __user *)arg,
			   sizeof(block_count)))
		return -EFAULT;

	return f2fs_resize_fs(sbi, block_count);
}
static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) {
		f2fs_warn(F2FS_I_SB(inode),
			  "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem",
			  inode->i_ino);
		return -EOPNOTSUPP;
	}

	return fsverity_ioctl_enable(filp, (const void __user *)arg);
}
static int f2fs_ioc_measure_verity(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fsverity_ioctl_measure(filp, (void __user *)arg);
}
static int f2fs_ioc_read_verity_metadata(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fsverity_ioctl_read_metadata(filp, (const void __user *)arg);
}
static int f2fs_ioc_getfslabel(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	char *vbuf;
	int count;
	int err = 0;

	vbuf = f2fs_kzalloc(sbi, MAX_VOLUME_NAME, GFP_KERNEL);
	if (!vbuf)
		return -ENOMEM;

	down_read(&sbi->sb_lock);
	count = utf16s_to_utf8s(sbi->raw_super->volume_name,
			ARRAY_SIZE(sbi->raw_super->volume_name),
			UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME);
	up_read(&sbi->sb_lock);

	if (copy_to_user((char __user *)arg, vbuf,
				min(FSLABEL_MAX, count)))
		err = -EFAULT;

	kfree(vbuf);
	return err;
}
static int f2fs_ioc_setfslabel(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	char *vbuf;
	int err = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	vbuf = strndup_user((const char __user *)arg, FSLABEL_MAX);
	if (IS_ERR(vbuf))
		return PTR_ERR(vbuf);

	err = mnt_want_write_file(filp);
	if (err)
		goto out;

	down_write(&sbi->sb_lock);

	memset(sbi->raw_super->volume_name, 0,
			sizeof(sbi->raw_super->volume_name));
	utf8s_to_utf16s(vbuf, strlen(vbuf), UTF16_LITTLE_ENDIAN,
			sbi->raw_super->volume_name,
			ARRAY_SIZE(sbi->raw_super->volume_name));
	err = f2fs_commit_super(sbi, false);

	up_write(&sbi->sb_lock);

	mnt_drop_write_file(filp);
out:
	kfree(vbuf);
	return err;
}
static int f2fs_get_compress_blocks(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	__u64 blocks;

	if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
		return -EOPNOTSUPP;

	if (!f2fs_compressed_file(inode))
		return -EINVAL;

	blocks = atomic_read(&F2FS_I(inode)->i_compr_blocks);
	return put_user(blocks, (u64 __user *)arg);
}
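
/*
 * Release the blocks saved by compression within @count blocks. The
 * first address of each cluster is COMPRESS_ADDR; the remaining slots
 * either hold valid (compressed) blocks, which are kept, or NEW_ADDR
 * placeholders, which are turned back into NULL_ADDR holes so the saved
 * space is returned to the free block count.
 */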
static int release_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	unsigned int released_blocks = 0;
	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
	block_t blkaddr;
	int i;

	for (i = 0; i < count; i++) {
		blkaddr = data_blkaddr(dn->inode, dn->node_page,
						dn->ofs_in_node + i);

		if (!__is_valid_data_blkaddr(blkaddr))
			continue;
		if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
					DATA_GENERIC_ENHANCE)))
			return -EFSCORRUPTED;
	}

	while (count) {
		int compr_blocks = 0;

		for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
			blkaddr = f2fs_data_blkaddr(dn);

			if (i == 0) {
				if (blkaddr == COMPRESS_ADDR)
					continue;
				dn->ofs_in_node += cluster_size;
				goto next;
			}

			if (__is_valid_data_blkaddr(blkaddr))
				compr_blocks++;

			if (blkaddr != NEW_ADDR)
				continue;

			dn->data_blkaddr = NULL_ADDR;
			f2fs_set_data_blkaddr(dn);
		}

		f2fs_i_compr_blocks_update(dn->inode, compr_blocks, false);
		dec_valid_block_count(sbi, dn->inode,
					cluster_size - compr_blocks);

		released_blocks += cluster_size - compr_blocks;
next:
		count -= cluster_size;
	}

	return released_blocks;
}
static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t page_idx = 0, last_idx;
	unsigned int released_blocks = 0;
	int ret;
	int writecount;

	if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
		return -EOPNOTSUPP;

	if (!f2fs_compressed_file(inode))
		return -EINVAL;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	f2fs_balance_fs(F2FS_I_SB(inode), true);

	inode_lock(inode);

	writecount = atomic_read(&inode->i_writecount);
	if ((filp->f_mode & FMODE_WRITE && writecount != 1) ||
			(!(filp->f_mode & FMODE_WRITE) && writecount)) {
		ret = -EBUSY;
		goto out;
	}

	if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
		ret = -EINVAL;
		goto out;
	}

	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (ret)
		goto out;

	set_inode_flag(inode, FI_COMPRESS_RELEASED);
	inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, true);

	if (!atomic_read(&F2FS_I(inode)->i_compr_blocks))
		goto out;

	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);

	last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	while (page_idx < last_idx) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
		if (ret) {
			if (ret == -ENOENT) {
				page_idx = f2fs_get_next_page_offset(&dn,
								page_idx);
				ret = 0;
				continue;
			}
			break;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
		count = round_up(count, F2FS_I(inode)->i_cluster_size);

		ret = release_compress_blocks(&dn, count);

		f2fs_put_dnode(&dn);

		if (ret < 0)
			break;

		page_idx += count;
		released_blocks += ret;
	}

	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	up_write(&F2FS_I(inode)->i_mmap_sem);
out:
	inode_unlock(inode);

	mnt_drop_write_file(filp);

	if (ret >= 0) {
		ret = put_user(released_blocks, (u64 __user *)arg);
	} else if (released_blocks &&
			atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
			"iblocks=%llu, released=%u, compr_blocks=%u, "
			"run fsck to fix.",
			__func__, inode->i_ino, inode->i_blocks,
			released_blocks,
			atomic_read(&F2FS_I(inode)->i_compr_blocks));
	}

	return ret;
}
static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	unsigned int reserved_blocks = 0;
	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
	block_t blkaddr;
	int i;

	for (i = 0; i < count; i++) {
		blkaddr = data_blkaddr(dn->inode, dn->node_page,
						dn->ofs_in_node + i);

		if (!__is_valid_data_blkaddr(blkaddr))
			continue;
		if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
					DATA_GENERIC_ENHANCE)))
			return -EFSCORRUPTED;
	}

	while (count) {
		int compr_blocks = 0;
		blkcnt_t reserved;
		int ret;

		for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
			blkaddr = f2fs_data_blkaddr(dn);

			if (i == 0) {
				if (blkaddr == COMPRESS_ADDR)
					continue;
				dn->ofs_in_node += cluster_size;
				goto next;
			}

			if (__is_valid_data_blkaddr(blkaddr)) {
				compr_blocks++;
				continue;
			}

			dn->data_blkaddr = NEW_ADDR;
			f2fs_set_data_blkaddr(dn);
		}

		reserved = cluster_size - compr_blocks;
		ret = inc_valid_block_count(sbi, dn->inode, &reserved);
		if (ret)
			return ret;

		if (reserved != cluster_size - compr_blocks)
			return -ENOSPC;

		f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true);

		reserved_blocks += reserved;
next:
		count -= cluster_size;
	}

	return reserved_blocks;
}
static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t page_idx = 0, last_idx;
	unsigned int reserved_blocks = 0;
	int ret;

	if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
		return -EOPNOTSUPP;

	if (!f2fs_compressed_file(inode))
		return -EINVAL;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (atomic_read(&F2FS_I(inode)->i_compr_blocks))
		goto out;

	f2fs_balance_fs(F2FS_I_SB(inode), true);

	inode_lock(inode);

	if (!is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
		ret = -EINVAL;
		goto unlock_inode;
	}

	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);

	last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	while (page_idx < last_idx) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
		if (ret) {
			if (ret == -ENOENT) {
				page_idx = f2fs_get_next_page_offset(&dn,
								page_idx);
				ret = 0;
				continue;
			}
			break;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
		count = round_up(count, F2FS_I(inode)->i_cluster_size);

		ret = reserve_compress_blocks(&dn, count);

		f2fs_put_dnode(&dn);

		if (ret < 0)
			break;

		page_idx += count;
		reserved_blocks += ret;
	}

	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	up_write(&F2FS_I(inode)->i_mmap_sem);

	if (ret >= 0) {
		clear_inode_flag(inode, FI_COMPRESS_RELEASED);
		inode->i_ctime = current_time(inode);
		f2fs_mark_inode_dirty_sync(inode, true);
	}
unlock_inode:
	inode_unlock(inode);
out:
	mnt_drop_write_file(filp);

	if (ret >= 0) {
		ret = put_user(reserved_blocks, (u64 __user *)arg);
	} else if (reserved_blocks &&
			atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: partial blocks were reserved i_ino=%lx "
			"iblocks=%llu, reserved=%u, compr_blocks=%u, "
			"run fsck to fix.",
			__func__, inode->i_ino, inode->i_blocks,
			reserved_blocks,
			atomic_read(&F2FS_I(inode)->i_compr_blocks));
	}

	return ret;
}
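
/*
 * Erase one physically contiguous run of blocks on @bdev, either by a
 * (secure) discard, by zeroing out, or both, depending on @flags. For
 * encrypted inodes the range is zeroed through fscrypt_zeroout_range()
 * so the on-disk contents are overwritten with encrypted zeroes rather
 * than raw ones.
 */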
static int f2fs_secure_erase(struct block_device *bdev, struct inode *inode,
		pgoff_t off, block_t block, block_t len, u32 flags)
{
	struct request_queue *q = bdev_get_queue(bdev);
	sector_t sector = SECTOR_FROM_BLOCK(block);
	sector_t nr_sects = SECTOR_FROM_BLOCK(len);
	int ret = 0;

	if (!q)
		return -ENXIO;

	if (flags & F2FS_TRIM_FILE_DISCARD)
		ret = blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOFS,
						blk_queue_secure_erase(q) ?
						BLKDEV_DISCARD_SECURE : 0);

	if (!ret && (flags & F2FS_TRIM_FILE_ZEROOUT)) {
		if (IS_ENCRYPTED(inode))
			ret = fscrypt_zeroout_range(inode, off, block, len);
		else
			ret = blkdev_issue_zeroout(bdev, sector, nr_sects,
					GFP_NOFS, 0);
	}

	return ret;
}
static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	struct block_device *prev_bdev = NULL;
	struct f2fs_sectrim_range range;
	pgoff_t index, pg_end, prev_index = 0;
	block_t prev_block = 0, len = 0;
	loff_t end_addr;
	bool to_end = false;
	int ret = 0;

	if (!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&range, (struct f2fs_sectrim_range __user *)arg,
				sizeof(range)))
		return -EFAULT;

	if (range.flags == 0 || (range.flags & ~F2FS_TRIM_FILE_MASK) ||
			!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (((range.flags & F2FS_TRIM_FILE_DISCARD) &&
			!f2fs_hw_support_discard(sbi)) ||
			((range.flags & F2FS_TRIM_FILE_ZEROOUT) &&
			 IS_ENCRYPTED(inode) && f2fs_is_multi_device(sbi)))
		return -EOPNOTSUPP;

	file_start_write(filp);
	inode_lock(inode);

	if (f2fs_is_atomic_file(inode) || f2fs_compressed_file(inode) ||
			range.start >= inode->i_size) {
		ret = -EINVAL;
		goto err;
	}

	if (range.len == 0)
		goto err;

	if (inode->i_size - range.start > range.len) {
		end_addr = range.start + range.len;
	} else {
		end_addr = range.len == (u64)-1 ?
			sbi->sb->s_maxbytes : inode->i_size;
		to_end = true;
	}

	if (!IS_ALIGNED(range.start, F2FS_BLKSIZE) ||
			(!to_end && !IS_ALIGNED(end_addr, F2FS_BLKSIZE))) {
		ret = -EINVAL;
		goto err;
	}

	index = F2FS_BYTES_TO_BLK(range.start);
	pg_end = DIV_ROUND_UP(end_addr, F2FS_BLKSIZE);

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto err;

	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);

	ret = filemap_write_and_wait_range(mapping, range.start,
			to_end ? LLONG_MAX : end_addr - 1);
	if (ret)
		goto out;

	truncate_inode_pages_range(mapping, range.start,
			to_end ? -1 : end_addr - 1);

	while (index < pg_end) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;
		int i;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
		if (ret) {
			if (ret == -ENOENT) {
				index = f2fs_get_next_page_offset(&dn, index);
				continue;
			}
			goto out;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, pg_end - index);
		for (i = 0; i < count; i++, index++, dn.ofs_in_node++) {
			struct block_device *cur_bdev;
			block_t blkaddr = f2fs_data_blkaddr(&dn);

			if (!__is_valid_data_blkaddr(blkaddr))
				continue;

			if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
						DATA_GENERIC_ENHANCE)) {
				ret = -EFSCORRUPTED;
				f2fs_put_dnode(&dn);
				goto out;
			}

			cur_bdev = f2fs_target_device(sbi, blkaddr, NULL);
			if (f2fs_is_multi_device(sbi)) {
				int di = f2fs_target_device_index(sbi,
								blkaddr);

				blkaddr -= FDEV(di).start_blk;
			}

			if (len) {
				if (prev_bdev == cur_bdev &&
						index == prev_index + len &&
						blkaddr == prev_block + len) {
					len++;
				} else {
					ret = f2fs_secure_erase(prev_bdev,
						inode, prev_index, prev_block,
						len, range.flags);
					if (ret) {
						f2fs_put_dnode(&dn);
						goto out;
					}

					len = 0;
				}
			}

			if (!len) {
				prev_bdev = cur_bdev;
				prev_index = index;
				prev_block = blkaddr;
				len = 1;
			}
		}

		f2fs_put_dnode(&dn);

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}
		cond_resched();
	}

	if (len)
		ret = f2fs_secure_erase(prev_bdev, inode, prev_index,
				prev_block, len, range.flags);
out:
	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
err:
	inode_unlock(inode);
	file_end_write(filp);

	return ret;
}
static int f2fs_ioc_get_compress_option(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_comp_option option;

	if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
		return -EOPNOTSUPP;

	inode_lock_shared(inode);

	if (!f2fs_compressed_file(inode)) {
		inode_unlock_shared(inode);
		return -ENODATA;
	}

	option.algorithm = F2FS_I(inode)->i_compress_algorithm;
	option.log_cluster_size = F2FS_I(inode)->i_log_cluster_size;

	inode_unlock_shared(inode);

	if (copy_to_user((struct f2fs_comp_option __user *)arg, &option,
				sizeof(option)))
		return -EFAULT;

	return 0;
}
static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_comp_option option;
	int ret = 0;

	if (!f2fs_sb_has_compression(sbi))
		return -EOPNOTSUPP;

	if (!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&option, (struct f2fs_comp_option __user *)arg,
				sizeof(option)))
		return -EFAULT;

	if (!f2fs_compressed_file(inode) ||
			option.log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
			option.log_cluster_size > MAX_COMPRESS_LOG_SIZE ||
			option.algorithm >= COMPRESS_MAX)
		return -EINVAL;

	file_start_write(filp);
	inode_lock(inode);

	if (f2fs_is_mmap_file(inode) || get_dirty_pages(inode)) {
		ret = -EBUSY;
		goto out;
	}

	if (inode->i_size != 0) {
		ret = -EFBIG;
		goto out;
	}

	F2FS_I(inode)->i_compress_algorithm = option.algorithm;
	F2FS_I(inode)->i_log_cluster_size = option.log_cluster_size;
	F2FS_I(inode)->i_cluster_size = 1 << option.log_cluster_size;
	f2fs_mark_inode_dirty_sync(inode, true);

	if (!f2fs_is_compress_backend_ready(inode))
		f2fs_warn(sbi, "compression algorithm is successfully set, "
			"but current kernel doesn't support this algorithm.");
out:
	inode_unlock(inode);
	file_end_write(filp);

	return ret;
}
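
/*
 * Read @len pages into the page cache (via unbounded readahead) and
 * immediately redirty them. Writing them back then re-runs the
 * compression path, which is how the decompress/compress ioctls below
 * rewrite a file's clusters in place.
 */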
static int redirty_blocks(struct inode *inode, pgoff_t page_idx, int len)
{
	DEFINE_READAHEAD(ractl, NULL, NULL, inode->i_mapping, page_idx);
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	pgoff_t redirty_idx = page_idx;
	int i, page_len = 0, ret = 0;

	page_cache_ra_unbounded(&ractl, len, 0);

	for (i = 0; i < len; i++, page_idx++) {
		page = read_cache_page(mapping, page_idx, NULL, NULL);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			break;
		}
		page_len++;
	}

	for (i = 0; i < page_len; i++, redirty_idx++) {
		page = find_lock_page(mapping, redirty_idx);
		if (!page) {
			ret = -ENOMEM;
			break;
		}
		set_page_dirty(page);
		f2fs_put_page(page, 1);
		f2fs_put_page(page, 0);
	}

	return ret;
}
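
/*
 * F2FS_IOC_DECOMPRESS_FILE: with compress_mode=user, rewrite every
 * cluster of a compressed file as raw data. Clusters are redirtied one
 * at a time and flushed whenever a segment's worth of pages is dirty,
 * to bound memory pressure.
 */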
static int f2fs_ioc_decompress_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	pgoff_t page_idx = 0, last_idx;
	unsigned int blk_per_seg = sbi->blocks_per_seg;
	int cluster_size = F2FS_I(inode)->i_cluster_size;
	int count, ret;

	if (!f2fs_sb_has_compression(sbi) ||
			F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
		return -EOPNOTSUPP;

	if (!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (!f2fs_compressed_file(inode))
		return -EINVAL;

	f2fs_balance_fs(F2FS_I_SB(inode), true);

	file_start_write(filp);
	inode_lock(inode);

	if (!f2fs_is_compress_backend_ready(inode)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (f2fs_is_mmap_file(inode)) {
		ret = -EBUSY;
		goto out;
	}

	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (ret)
		goto out;

	if (!atomic_read(&fi->i_compr_blocks))
		goto out;

	last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	count = last_idx - page_idx;
	while (count) {
		int len = min(cluster_size, count);

		ret = redirty_blocks(inode, page_idx, len);
		if (ret < 0)
			break;

		if (get_dirty_pages(inode) >= blk_per_seg)
			filemap_fdatawrite(inode->i_mapping);

		count -= len;
		page_idx += len;
	}

	if (!ret)
		ret = filemap_write_and_wait_range(inode->i_mapping, 0,
							LLONG_MAX);

	if (ret)
		f2fs_warn(sbi, "%s: The file might be partially decompressed (errno=%d). Please delete the file.",
			  __func__, ret);
out:
	inode_unlock(inode);
	file_end_write(filp);

	return ret;
}
static int f2fs_ioc_compress_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t page_idx = 0, last_idx;
	unsigned int blk_per_seg = sbi->blocks_per_seg;
	int cluster_size = F2FS_I(inode)->i_cluster_size;
	int count, ret;

	if (!f2fs_sb_has_compression(sbi) ||
			F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
		return -EOPNOTSUPP;

	if (!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (!f2fs_compressed_file(inode))
		return -EINVAL;

	f2fs_balance_fs(F2FS_I_SB(inode), true);

	file_start_write(filp);
	inode_lock(inode);

	if (!f2fs_is_compress_backend_ready(inode)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (f2fs_is_mmap_file(inode)) {
		ret = -EBUSY;
		goto out;
	}

	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (ret)
		goto out;

	set_inode_flag(inode, FI_ENABLE_COMPRESS);

	last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	count = last_idx - page_idx;
	while (count) {
		int len = min(cluster_size, count);

		ret = redirty_blocks(inode, page_idx, len);
		if (ret < 0)
			break;

		if (get_dirty_pages(inode) >= blk_per_seg)
			filemap_fdatawrite(inode->i_mapping);

		count -= len;
		page_idx += len;
	}

	if (!ret)
		ret = filemap_write_and_wait_range(inode->i_mapping, 0,
							LLONG_MAX);

	clear_inode_flag(inode, FI_ENABLE_COMPRESS);

	if (ret)
		f2fs_warn(sbi, "%s: The file might be partially compressed (errno=%d). Please delete the file.",
			  __func__, ret);
out:
	inode_unlock(inode);
	file_end_write(filp);

	return ret;
}
static long __f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case FS_IOC_GETVERSION:
		return f2fs_ioc_getversion(filp, arg);
	case F2FS_IOC_START_ATOMIC_WRITE:
		return f2fs_ioc_start_atomic_write(filp);
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
		return f2fs_ioc_commit_atomic_write(filp);
	case F2FS_IOC_START_VOLATILE_WRITE:
		return f2fs_ioc_start_volatile_write(filp);
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
		return f2fs_ioc_release_volatile_write(filp);
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
		return f2fs_ioc_abort_volatile_write(filp);
	case F2FS_IOC_SHUTDOWN:
		return f2fs_ioc_shutdown(filp, arg);
	case FITRIM:
		return f2fs_ioc_fitrim(filp, arg);
	case FS_IOC_SET_ENCRYPTION_POLICY:
		return f2fs_ioc_set_encryption_policy(filp, arg);
	case FS_IOC_GET_ENCRYPTION_POLICY:
		return f2fs_ioc_get_encryption_policy(filp, arg);
	case FS_IOC_GET_ENCRYPTION_PWSALT:
		return f2fs_ioc_get_encryption_pwsalt(filp, arg);
	case FS_IOC_GET_ENCRYPTION_POLICY_EX:
		return f2fs_ioc_get_encryption_policy_ex(filp, arg);
	case FS_IOC_ADD_ENCRYPTION_KEY:
		return f2fs_ioc_add_encryption_key(filp, arg);
	case FS_IOC_REMOVE_ENCRYPTION_KEY:
		return f2fs_ioc_remove_encryption_key(filp, arg);
	case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
		return f2fs_ioc_remove_encryption_key_all_users(filp, arg);
	case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
		return f2fs_ioc_get_encryption_key_status(filp, arg);
	case FS_IOC_GET_ENCRYPTION_NONCE:
		return f2fs_ioc_get_encryption_nonce(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT:
		return f2fs_ioc_gc(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
		return f2fs_ioc_gc_range(filp, arg);
	case F2FS_IOC_WRITE_CHECKPOINT:
		return f2fs_ioc_write_checkpoint(filp, arg);
	case F2FS_IOC_DEFRAGMENT:
		return f2fs_ioc_defragment(filp, arg);
	case F2FS_IOC_MOVE_RANGE:
		return f2fs_ioc_move_range(filp, arg);
	case F2FS_IOC_FLUSH_DEVICE:
		return f2fs_ioc_flush_device(filp, arg);
	case F2FS_IOC_GET_FEATURES:
		return f2fs_ioc_get_features(filp, arg);
	case F2FS_IOC_GET_PIN_FILE:
		return f2fs_ioc_get_pin_file(filp, arg);
	case F2FS_IOC_SET_PIN_FILE:
		return f2fs_ioc_set_pin_file(filp, arg);
	case F2FS_IOC_PRECACHE_EXTENTS:
		return f2fs_ioc_precache_extents(filp, arg);
	case F2FS_IOC_RESIZE_FS:
		return f2fs_ioc_resize_fs(filp, arg);
	case FS_IOC_ENABLE_VERITY:
		return f2fs_ioc_enable_verity(filp, arg);
	case FS_IOC_MEASURE_VERITY:
		return f2fs_ioc_measure_verity(filp, arg);
	case FS_IOC_READ_VERITY_METADATA:
		return f2fs_ioc_read_verity_metadata(filp, arg);
	case FS_IOC_GETFSLABEL:
		return f2fs_ioc_getfslabel(filp, arg);
	case FS_IOC_SETFSLABEL:
		return f2fs_ioc_setfslabel(filp, arg);
	case F2FS_IOC_GET_COMPRESS_BLOCKS:
		return f2fs_get_compress_blocks(filp, arg);
	case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
		return f2fs_release_compress_blocks(filp, arg);
	case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
		return f2fs_reserve_compress_blocks(filp, arg);
	case F2FS_IOC_SEC_TRIM_FILE:
		return f2fs_sec_trim_file(filp, arg);
	case F2FS_IOC_GET_COMPRESS_OPTION:
		return f2fs_ioc_get_compress_option(filp, arg);
	case F2FS_IOC_SET_COMPRESS_OPTION:
		return f2fs_ioc_set_compress_option(filp, arg);
	case F2FS_IOC_DECOMPRESS_FILE:
		return f2fs_ioc_decompress_file(filp, arg);
	case F2FS_IOC_COMPRESS_FILE:
		return f2fs_ioc_compress_file(filp, arg);
	default:
		return -ENOTTY;
	}
}
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
		return -EIO;
	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp))))
		return -ENOSPC;

	return __f2fs_ioctl(filp, cmd, arg);
}
static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	int ret;

	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	ret = generic_file_read_iter(iocb, iter);
	if (ret > 0)
		f2fs_update_iostat(F2FS_I_SB(inode), APP_READ_IO, ret);

	return ret;
}
static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	ssize_t ret;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
		ret = -EIO;
		goto out;
	}

	if (!f2fs_is_compress_backend_ready(inode)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock(inode)) {
			ret = -EAGAIN;
			goto out;
		}
	} else {
		inode_lock(inode);
	}

	if (unlikely(IS_IMMUTABLE(inode))) {
		ret = -EPERM;
		goto unlock;
	}

	if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
		ret = -EPERM;
		goto unlock;
	}

	ret = generic_write_checks(iocb, from);
	if (ret > 0) {
		bool preallocated = false;
		size_t target_size = 0;
		int err;

		if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
			set_inode_flag(inode, FI_NO_PREALLOC);

		if ((iocb->ki_flags & IOCB_NOWAIT)) {
			if (!f2fs_overwrite_io(inode, iocb->ki_pos,
						iov_iter_count(from)) ||
				f2fs_has_inline_data(inode) ||
				f2fs_force_buffered_io(inode, iocb, from)) {
				clear_inode_flag(inode, FI_NO_PREALLOC);
				inode_unlock(inode);
				ret = -EAGAIN;
				goto out;
			}
			goto write;
		}

		if (is_inode_flag_set(inode, FI_NO_PREALLOC))
			goto write;

		if (iocb->ki_flags & IOCB_DIRECT) {
			/*
			 * Convert inline data for Direct I/O before entering
			 * f2fs_direct_IO().
			 */
			err = f2fs_convert_inline_inode(inode);
			if (err)
				goto out_err;
			/*
			 * If f2fs_force_buffered_io() is true, we have to
			 * allocate blocks all the time, since f2fs_direct_IO
			 * will fall back to buffered IO.
			 */
			if (!f2fs_force_buffered_io(inode, iocb, from) &&
					allow_outplace_dio(inode, iocb, from))
				goto write;
		}
		preallocated = true;
		target_size = iocb->ki_pos + iov_iter_count(from);

		err = f2fs_preallocate_blocks(iocb, from);
		if (err) {
out_err:
			clear_inode_flag(inode, FI_NO_PREALLOC);
			inode_unlock(inode);
			ret = err;
			goto out;
		}
write:
		ret = __generic_file_write_iter(iocb, from);
		clear_inode_flag(inode, FI_NO_PREALLOC);

		/* if we couldn't write data, we should deallocate blocks. */
		if (preallocated && i_size_read(inode) < target_size) {
			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			down_write(&F2FS_I(inode)->i_mmap_sem);
			f2fs_truncate(inode);
			up_write(&F2FS_I(inode)->i_mmap_sem);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		}

		if (ret > 0)
			f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
	}
unlock:
	inode_unlock(inode);
out:
	trace_f2fs_file_write_iter(inode, iocb->ki_pos,
					iov_iter_count(from), ret);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
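
/*
 * 32-bit compat handling: f2fs_gc_range and f2fs_move_range embed u64
 * fields whose alignment differs between 32-bit and 64-bit ABIs, so
 * their compat layouts need explicit field-by-field translation; every
 * other ioctl is compat-clean and is forwarded as-is.
 */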
#ifdef CONFIG_COMPAT
struct compat_f2fs_gc_range {
	u32 sync;
	compat_u64 start;
	compat_u64 len;
};
#define F2FS_IOC32_GARBAGE_COLLECT_RANGE	_IOW(F2FS_IOCTL_MAGIC, 11,\
						struct compat_f2fs_gc_range)

static int f2fs_compat_ioc_gc_range(struct file *file, unsigned long arg)
{
	struct compat_f2fs_gc_range __user *urange;
	struct f2fs_gc_range range;
	int err;

	urange = compat_ptr(arg);
	err = get_user(range.sync, &urange->sync);
	err |= get_user(range.start, &urange->start);
	err |= get_user(range.len, &urange->len);
	if (err)
		return -EFAULT;

	return __f2fs_ioc_gc_range(file, &range);
}

struct compat_f2fs_move_range {
	u32 dst_fd;
	compat_u64 pos_in;
	compat_u64 pos_out;
	compat_u64 len;
};
#define F2FS_IOC32_MOVE_RANGE		_IOWR(F2FS_IOCTL_MAGIC, 9,	\
					struct compat_f2fs_move_range)

static int f2fs_compat_ioc_move_range(struct file *file, unsigned long arg)
{
	struct compat_f2fs_move_range __user *urange;
	struct f2fs_move_range range;
	int err;

	urange = compat_ptr(arg);
	err = get_user(range.dst_fd, &urange->dst_fd);
	err |= get_user(range.pos_in, &urange->pos_in);
	err |= get_user(range.pos_out, &urange->pos_out);
	err |= get_user(range.len, &urange->len);
	if (err)
		return -EFAULT;

	return __f2fs_ioc_move_range(file, &range);
}
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
		return -EIO;
	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(file))))
		return -ENOSPC;

	switch (cmd) {
	case FS_IOC32_GETVERSION:
		cmd = FS_IOC_GETVERSION;
		break;
	case F2FS_IOC32_GARBAGE_COLLECT_RANGE:
		return f2fs_compat_ioc_gc_range(file, arg);
	case F2FS_IOC32_MOVE_RANGE:
		return f2fs_compat_ioc_move_range(file, arg);
	case F2FS_IOC_START_ATOMIC_WRITE:
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
	case F2FS_IOC_START_VOLATILE_WRITE:
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
	case F2FS_IOC_SHUTDOWN:
	case FITRIM:
	case FS_IOC_SET_ENCRYPTION_POLICY:
	case FS_IOC_GET_ENCRYPTION_PWSALT:
	case FS_IOC_GET_ENCRYPTION_POLICY:
	case FS_IOC_GET_ENCRYPTION_POLICY_EX:
	case FS_IOC_ADD_ENCRYPTION_KEY:
	case FS_IOC_REMOVE_ENCRYPTION_KEY:
	case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
	case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
	case FS_IOC_GET_ENCRYPTION_NONCE:
	case F2FS_IOC_GARBAGE_COLLECT:
	case F2FS_IOC_WRITE_CHECKPOINT:
	case F2FS_IOC_DEFRAGMENT:
	case F2FS_IOC_FLUSH_DEVICE:
	case F2FS_IOC_GET_FEATURES:
	case F2FS_IOC_GET_PIN_FILE:
	case F2FS_IOC_SET_PIN_FILE:
	case F2FS_IOC_PRECACHE_EXTENTS:
	case F2FS_IOC_RESIZE_FS:
	case FS_IOC_ENABLE_VERITY:
	case FS_IOC_MEASURE_VERITY:
	case FS_IOC_READ_VERITY_METADATA:
	case FS_IOC_GETFSLABEL:
	case FS_IOC_SETFSLABEL:
	case F2FS_IOC_GET_COMPRESS_BLOCKS:
	case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
	case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
	case F2FS_IOC_SEC_TRIM_FILE:
	case F2FS_IOC_GET_COMPRESS_OPTION:
	case F2FS_IOC_SET_COMPRESS_OPTION:
	case F2FS_IOC_DECOMPRESS_FILE:
	case F2FS_IOC_COMPRESS_FILE:
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return __f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif
const struct file_operations f2fs_file_operations = {
	.llseek		= f2fs_llseek,
	.read_iter	= f2fs_file_read_iter,
	.write_iter	= f2fs_file_write_iter,
	.open		= f2fs_file_open,
	.release	= f2fs_release_file,
	.mmap		= f2fs_file_mmap,
	.flush		= f2fs_file_flush,
	.fsync		= f2fs_sync_file,
	.fallocate	= f2fs_fallocate,
	.unlocked_ioctl	= f2fs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= f2fs_compat_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
};