1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6 * http://www.samsung.com/
9 #include <linux/f2fs_fs.h>
10 #include <linux/stat.h>
11 #include <linux/buffer_head.h>
12 #include <linux/writeback.h>
13 #include <linux/blkdev.h>
14 #include <linux/falloc.h>
15 #include <linux/types.h>
16 #include <linux/compat.h>
17 #include <linux/uaccess.h>
18 #include <linux/mount.h>
19 #include <linux/pagevec.h>
20 #include <linux/uio.h>
21 #include <linux/uuid.h>
22 #include <linux/file.h>
23 #include <linux/nls.h>
32 #include <trace/events/f2fs.h>
34 static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
36 struct inode *inode = file_inode(vmf->vma->vm_file);
39 down_read(&F2FS_I(inode)->i_mmap_sem);
40 ret = filemap_fault(vmf);
41 up_read(&F2FS_I(inode)->i_mmap_sem);
44 f2fs_update_iostat(F2FS_I_SB(inode), APP_MAPPED_READ_IO,
47 trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret);
52 static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
54 struct page *page = vmf->page;
55 struct inode *inode = file_inode(vmf->vma->vm_file);
56 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
57 struct dnode_of_data dn;
58 bool need_alloc = true;
61 if (unlikely(f2fs_cp_error(sbi))) {
66 if (!f2fs_is_checkpoint_ready(sbi)) {
71 #ifdef CONFIG_F2FS_FS_COMPRESSION
72 if (f2fs_compressed_file(inode)) {
73 int ret = f2fs_is_compressed_cluster(inode, page->index);
79 if (ret < F2FS_I(inode)->i_cluster_size) {
87 /* should do out of any locked page */
89 f2fs_balance_fs(sbi, true);
91 sb_start_pagefault(inode->i_sb);
93 f2fs_bug_on(sbi, f2fs_has_inline_data(inode));
95 file_update_time(vmf->vma->vm_file);
96 down_read(&F2FS_I(inode)->i_mmap_sem);
98 if (unlikely(page->mapping != inode->i_mapping ||
99 page_offset(page) > i_size_read(inode) ||
100 !PageUptodate(page))) {
107 /* block allocation */
108 __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
109 set_new_dnode(&dn, inode, NULL, NULL, 0);
110 err = f2fs_get_block(&dn, page->index);
112 __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
115 #ifdef CONFIG_F2FS_FS_COMPRESSION
117 set_new_dnode(&dn, inode, NULL, NULL, 0);
118 err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
127 f2fs_wait_on_page_writeback(page, DATA, false, true);
129 /* wait for GCed page writeback via META_MAPPING */
130 f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
133 * check to see if the page is mapped already (no holes)
135 if (PageMappedToDisk(page))
138 /* page is wholly or partially inside EOF */
139 if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
140 i_size_read(inode)) {
143 offset = i_size_read(inode) & ~PAGE_MASK;
144 zero_user_segment(page, offset, PAGE_SIZE);
146 set_page_dirty(page);
147 if (!PageUptodate(page))
148 SetPageUptodate(page);
150 f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);
151 f2fs_update_time(sbi, REQ_TIME);
153 trace_f2fs_vm_page_mkwrite(page, DATA);
155 up_read(&F2FS_I(inode)->i_mmap_sem);
157 sb_end_pagefault(inode->i_sb);
159 return block_page_mkwrite_return(err);
162 static const struct vm_operations_struct f2fs_file_vm_ops = {
163 .fault = f2fs_filemap_fault,
164 .map_pages = filemap_map_pages,
165 .page_mkwrite = f2fs_vm_page_mkwrite,
168 static int get_parent_ino(struct inode *inode, nid_t *pino)
170 struct dentry *dentry;
173 * Make sure to get the non-deleted alias. The alias associated with
174 * the open file descriptor being fsync()'ed may be deleted already.
176 dentry = d_find_alias(inode);
180 *pino = parent_ino(dentry);
185 static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
187 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
188 enum cp_reason_type cp_reason = CP_NO_NEEDED;
190 if (!S_ISREG(inode->i_mode))
191 cp_reason = CP_NON_REGULAR;
192 else if (f2fs_compressed_file(inode))
193 cp_reason = CP_COMPRESSED;
194 else if (inode->i_nlink != 1)
195 cp_reason = CP_HARDLINK;
196 else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
197 cp_reason = CP_SB_NEED_CP;
198 else if (file_wrong_pino(inode))
199 cp_reason = CP_WRONG_PINO;
200 else if (!f2fs_space_for_roll_forward(sbi))
201 cp_reason = CP_NO_SPC_ROLL;
202 else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
203 cp_reason = CP_NODE_NEED_CP;
204 else if (test_opt(sbi, FASTBOOT))
205 cp_reason = CP_FASTBOOT_MODE;
206 else if (F2FS_OPTION(sbi).active_logs == 2)
207 cp_reason = CP_SPEC_LOG_NUM;
208 else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
209 f2fs_need_dentry_mark(sbi, inode->i_ino) &&
210 f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
212 cp_reason = CP_RECOVER_DIR;
217 static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
219 struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
221 /* But we need to avoid that there are some inode updates */
222 if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino))
228 static void try_to_fix_pino(struct inode *inode)
230 struct f2fs_inode_info *fi = F2FS_I(inode);
233 down_write(&fi->i_sem);
234 if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
235 get_parent_ino(inode, &pino)) {
236 f2fs_i_pino_write(inode, pino);
237 file_got_pino(inode);
239 up_write(&fi->i_sem);
242 static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
243 int datasync, bool atomic)
245 struct inode *inode = file->f_mapping->host;
246 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
247 nid_t ino = inode->i_ino;
249 enum cp_reason_type cp_reason = 0;
250 struct writeback_control wbc = {
251 .sync_mode = WB_SYNC_ALL,
252 .nr_to_write = LONG_MAX,
255 unsigned int seq_id = 0;
257 if (unlikely(f2fs_readonly(inode->i_sb) ||
258 is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
261 trace_f2fs_sync_file_enter(inode);
263 if (S_ISDIR(inode->i_mode))
266 /* if fdatasync is triggered, let's do in-place-update */
267 if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
268 set_inode_flag(inode, FI_NEED_IPU);
269 ret = file_write_and_wait_range(file, start, end);
270 clear_inode_flag(inode, FI_NEED_IPU);
273 trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
277 /* if the inode is dirty, let's recover all the time */
278 if (!f2fs_skip_inode_update(inode, datasync)) {
279 f2fs_write_inode(inode, NULL);
284 * if there is no written data, don't waste time to write recovery info.
286 if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
287 !f2fs_exist_written_data(sbi, ino, APPEND_INO)) {
289 /* it may call write_inode just prior to fsync */
290 if (need_inode_page_update(sbi, ino))
293 if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
294 f2fs_exist_written_data(sbi, ino, UPDATE_INO))
300 * Both of fdatasync() and fsync() are able to be recovered from
303 down_read(&F2FS_I(inode)->i_sem);
304 cp_reason = need_do_checkpoint(inode);
305 up_read(&F2FS_I(inode)->i_sem);
308 /* all the dirty node pages should be flushed for POR */
309 ret = f2fs_sync_fs(inode->i_sb, 1);
312 * We've secured consistency through sync_fs. Following pino
313 * will be used only for fsynced inodes after checkpoint.
315 try_to_fix_pino(inode);
316 clear_inode_flag(inode, FI_APPEND_WRITE);
317 clear_inode_flag(inode, FI_UPDATE_WRITE);
321 atomic_inc(&sbi->wb_sync_req[NODE]);
322 ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
323 atomic_dec(&sbi->wb_sync_req[NODE]);
327 /* if cp_error was enabled, we should avoid infinite loop */
328 if (unlikely(f2fs_cp_error(sbi))) {
333 if (f2fs_need_inode_block_update(sbi, ino)) {
334 f2fs_mark_inode_dirty_sync(inode, true);
335 f2fs_write_inode(inode, NULL);
340 * If it's atomic_write, it's just fine to keep write ordering. So
341 * here we don't need to wait for node write completion, since we use
342 * node chain which serializes node blocks. If one of node writes are
343 * reordered, we can see simply broken chain, resulting in stopping
344 * roll-forward recovery. It means we'll recover all or none node blocks
348 ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
353 /* once recovery info is written, don't need to tack this */
354 f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
355 clear_inode_flag(inode, FI_APPEND_WRITE);
357 if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
358 ret = f2fs_issue_flush(sbi, inode->i_ino);
360 f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
361 clear_inode_flag(inode, FI_UPDATE_WRITE);
362 f2fs_remove_ino_entry(sbi, ino, FLUSH_INO);
364 f2fs_update_time(sbi, REQ_TIME);
366 trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
367 f2fs_trace_ios(NULL, 1);
371 int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
373 if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
375 return f2fs_do_sync_file(file, start, end, datasync, false);
378 static pgoff_t __get_first_dirty_index(struct address_space *mapping,
379 pgoff_t pgofs, int whence)
384 if (whence != SEEK_DATA)
387 /* find first dirty page index */
388 nr_pages = find_get_pages_tag(mapping, &pgofs, PAGECACHE_TAG_DIRTY,
397 static bool __found_offset(struct f2fs_sb_info *sbi, block_t blkaddr,
398 pgoff_t dirty, pgoff_t pgofs, int whence)
402 if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
403 __is_valid_data_blkaddr(blkaddr))
407 if (blkaddr == NULL_ADDR)
414 static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
416 struct inode *inode = file->f_mapping->host;
417 loff_t maxbytes = inode->i_sb->s_maxbytes;
418 struct dnode_of_data dn;
419 pgoff_t pgofs, end_offset, dirty;
420 loff_t data_ofs = offset;
426 isize = i_size_read(inode);
430 /* handle inline data case */
431 if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
432 if (whence == SEEK_HOLE)
437 pgofs = (pgoff_t)(offset >> PAGE_SHIFT);
439 dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);
441 for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
442 set_new_dnode(&dn, inode, NULL, NULL, 0);
443 err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
444 if (err && err != -ENOENT) {
446 } else if (err == -ENOENT) {
447 /* direct node does not exists */
448 if (whence == SEEK_DATA) {
449 pgofs = f2fs_get_next_page_offset(&dn, pgofs);
456 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
458 /* find data/hole in dnode block */
459 for (; dn.ofs_in_node < end_offset;
460 dn.ofs_in_node++, pgofs++,
461 data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
464 blkaddr = f2fs_data_blkaddr(&dn);
466 if (__is_valid_data_blkaddr(blkaddr) &&
467 !f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
468 blkaddr, DATA_GENERIC_ENHANCE)) {
473 if (__found_offset(F2FS_I_SB(inode), blkaddr, dirty,
482 if (whence == SEEK_DATA)
485 if (whence == SEEK_HOLE && data_ofs > isize)
488 return vfs_setpos(file, data_ofs, maxbytes);
494 static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
496 struct inode *inode = file->f_mapping->host;
497 loff_t maxbytes = inode->i_sb->s_maxbytes;
503 return generic_file_llseek_size(file, offset, whence,
504 maxbytes, i_size_read(inode));
509 return f2fs_seek_block(file, offset, whence);
515 static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
517 struct inode *inode = file_inode(file);
520 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
523 if (!f2fs_is_compress_backend_ready(inode))
526 /* we don't need to use inline_data strictly */
527 err = f2fs_convert_inline_inode(inode);
532 vma->vm_ops = &f2fs_file_vm_ops;
533 set_inode_flag(inode, FI_MMAP_FILE);
537 static int f2fs_file_open(struct inode *inode, struct file *filp)
539 int err = fscrypt_file_open(inode, filp);
544 if (!f2fs_is_compress_backend_ready(inode))
547 err = fsverity_file_open(inode, filp);
551 filp->f_mode |= FMODE_NOWAIT;
553 return dquot_file_open(inode, filp);
556 void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
558 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
559 struct f2fs_node *raw_node;
560 int nr_free = 0, ofs = dn->ofs_in_node, len = count;
563 bool compressed_cluster = false;
564 int cluster_index = 0, valid_blocks = 0;
565 int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
566 bool released = !F2FS_I(dn->inode)->i_compr_blocks;
568 if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
569 base = get_extra_isize(dn->inode);
571 raw_node = F2FS_NODE(dn->node_page);
572 addr = blkaddr_in_node(raw_node) + base + ofs;
574 /* Assumption: truncateion starts with cluster */
575 for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
576 block_t blkaddr = le32_to_cpu(*addr);
578 if (f2fs_compressed_file(dn->inode) &&
579 !(cluster_index & (cluster_size - 1))) {
580 if (compressed_cluster)
581 f2fs_i_compr_blocks_update(dn->inode,
582 valid_blocks, false);
583 compressed_cluster = (blkaddr == COMPRESS_ADDR);
587 if (blkaddr == NULL_ADDR)
590 dn->data_blkaddr = NULL_ADDR;
591 f2fs_set_data_blkaddr(dn);
593 if (__is_valid_data_blkaddr(blkaddr)) {
594 if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
595 DATA_GENERIC_ENHANCE))
597 if (compressed_cluster)
601 if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
602 clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
604 f2fs_invalidate_blocks(sbi, blkaddr);
606 if (!released || blkaddr != COMPRESS_ADDR)
610 if (compressed_cluster)
611 f2fs_i_compr_blocks_update(dn->inode, valid_blocks, false);
616 * once we invalidate valid blkaddr in range [ofs, ofs + count],
617 * we will invalidate all blkaddr in the whole range.
619 fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
621 f2fs_update_extent_cache_range(dn, fofs, 0, len);
622 dec_valid_block_count(sbi, dn->inode, nr_free);
624 dn->ofs_in_node = ofs;
626 f2fs_update_time(sbi, REQ_TIME);
627 trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
628 dn->ofs_in_node, nr_free);
631 void f2fs_truncate_data_blocks(struct dnode_of_data *dn)
633 f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
636 static int truncate_partial_data_page(struct inode *inode, u64 from,
639 loff_t offset = from & (PAGE_SIZE - 1);
640 pgoff_t index = from >> PAGE_SHIFT;
641 struct address_space *mapping = inode->i_mapping;
644 if (!offset && !cache_only)
648 page = find_lock_page(mapping, index);
649 if (page && PageUptodate(page))
651 f2fs_put_page(page, 1);
655 page = f2fs_get_lock_data_page(inode, index, true);
657 return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
659 f2fs_wait_on_page_writeback(page, DATA, true, true);
660 zero_user(page, offset, PAGE_SIZE - offset);
662 /* An encrypted inode should have a key and truncate the last page. */
663 f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));
665 set_page_dirty(page);
666 f2fs_put_page(page, 1);
670 int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock)
672 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
673 struct dnode_of_data dn;
675 int count = 0, err = 0;
677 bool truncate_page = false;
679 trace_f2fs_truncate_blocks_enter(inode, from);
681 free_from = (pgoff_t)F2FS_BLK_ALIGN(from);
683 if (free_from >= sbi->max_file_blocks)
689 ipage = f2fs_get_node_page(sbi, inode->i_ino);
691 err = PTR_ERR(ipage);
695 if (f2fs_has_inline_data(inode)) {
696 f2fs_truncate_inline_inode(inode, ipage, from);
697 f2fs_put_page(ipage, 1);
698 truncate_page = true;
702 set_new_dnode(&dn, inode, ipage, NULL, 0);
703 err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
710 count = ADDRS_PER_PAGE(dn.node_page, inode);
712 count -= dn.ofs_in_node;
713 f2fs_bug_on(sbi, count < 0);
715 if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
716 f2fs_truncate_data_blocks_range(&dn, count);
722 err = f2fs_truncate_inode_blocks(inode, free_from);
727 /* lastly zero out the first data page */
729 err = truncate_partial_data_page(inode, from, truncate_page);
731 trace_f2fs_truncate_blocks_exit(inode, err);
735 int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
737 u64 free_from = from;
740 #ifdef CONFIG_F2FS_FS_COMPRESSION
742 * for compressed file, only support cluster size
743 * aligned truncation.
745 if (f2fs_compressed_file(inode))
746 free_from = round_up(from,
747 F2FS_I(inode)->i_cluster_size << PAGE_SHIFT);
750 err = f2fs_do_truncate_blocks(inode, free_from, lock);
754 #ifdef CONFIG_F2FS_FS_COMPRESSION
755 if (from != free_from)
756 err = f2fs_truncate_partial_cluster(inode, from, lock);
762 int f2fs_truncate(struct inode *inode)
766 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
769 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
770 S_ISLNK(inode->i_mode)))
773 trace_f2fs_truncate(inode);
775 if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
776 f2fs_show_injection_info(F2FS_I_SB(inode), FAULT_TRUNCATE);
780 /* we should check inline_data size */
781 if (!f2fs_may_inline_data(inode)) {
782 err = f2fs_convert_inline_inode(inode);
787 err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
791 inode->i_mtime = inode->i_ctime = current_time(inode);
792 f2fs_mark_inode_dirty_sync(inode, false);
796 int f2fs_getattr(const struct path *path, struct kstat *stat,
797 u32 request_mask, unsigned int query_flags)
799 struct inode *inode = d_inode(path->dentry);
800 struct f2fs_inode_info *fi = F2FS_I(inode);
801 struct f2fs_inode *ri;
804 if (f2fs_has_extra_attr(inode) &&
805 f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
806 F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
807 stat->result_mask |= STATX_BTIME;
808 stat->btime.tv_sec = fi->i_crtime.tv_sec;
809 stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
813 if (flags & F2FS_COMPR_FL)
814 stat->attributes |= STATX_ATTR_COMPRESSED;
815 if (flags & F2FS_APPEND_FL)
816 stat->attributes |= STATX_ATTR_APPEND;
817 if (IS_ENCRYPTED(inode))
818 stat->attributes |= STATX_ATTR_ENCRYPTED;
819 if (flags & F2FS_IMMUTABLE_FL)
820 stat->attributes |= STATX_ATTR_IMMUTABLE;
821 if (flags & F2FS_NODUMP_FL)
822 stat->attributes |= STATX_ATTR_NODUMP;
823 if (IS_VERITY(inode))
824 stat->attributes |= STATX_ATTR_VERITY;
826 stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
828 STATX_ATTR_ENCRYPTED |
829 STATX_ATTR_IMMUTABLE |
833 generic_fillattr(inode, stat);
835 /* we need to show initial sectors used for inline_data/dentries */
836 if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
837 f2fs_has_inline_dentry(inode))
838 stat->blocks += (stat->size + 511) >> 9;
843 #ifdef CONFIG_F2FS_FS_POSIX_ACL
844 static void __setattr_copy(struct inode *inode, const struct iattr *attr)
846 unsigned int ia_valid = attr->ia_valid;
848 if (ia_valid & ATTR_UID)
849 inode->i_uid = attr->ia_uid;
850 if (ia_valid & ATTR_GID)
851 inode->i_gid = attr->ia_gid;
852 if (ia_valid & ATTR_ATIME)
853 inode->i_atime = attr->ia_atime;
854 if (ia_valid & ATTR_MTIME)
855 inode->i_mtime = attr->ia_mtime;
856 if (ia_valid & ATTR_CTIME)
857 inode->i_ctime = attr->ia_ctime;
858 if (ia_valid & ATTR_MODE) {
859 umode_t mode = attr->ia_mode;
861 if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
863 set_acl_inode(inode, mode);
867 #define __setattr_copy setattr_copy
870 int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
872 struct inode *inode = d_inode(dentry);
875 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
878 if ((attr->ia_valid & ATTR_SIZE) &&
879 !f2fs_is_compress_backend_ready(inode))
882 err = setattr_prepare(dentry, attr);
886 err = fscrypt_prepare_setattr(dentry, attr);
890 err = fsverity_prepare_setattr(dentry, attr);
894 if (is_quota_modification(inode, attr)) {
895 err = dquot_initialize(inode);
899 if ((attr->ia_valid & ATTR_UID &&
900 !uid_eq(attr->ia_uid, inode->i_uid)) ||
901 (attr->ia_valid & ATTR_GID &&
902 !gid_eq(attr->ia_gid, inode->i_gid))) {
903 f2fs_lock_op(F2FS_I_SB(inode));
904 err = dquot_transfer(inode, attr);
906 set_sbi_flag(F2FS_I_SB(inode),
907 SBI_QUOTA_NEED_REPAIR);
908 f2fs_unlock_op(F2FS_I_SB(inode));
912 * update uid/gid under lock_op(), so that dquot and inode can
913 * be updated atomically.
915 if (attr->ia_valid & ATTR_UID)
916 inode->i_uid = attr->ia_uid;
917 if (attr->ia_valid & ATTR_GID)
918 inode->i_gid = attr->ia_gid;
919 f2fs_mark_inode_dirty_sync(inode, true);
920 f2fs_unlock_op(F2FS_I_SB(inode));
923 if (attr->ia_valid & ATTR_SIZE) {
924 loff_t old_size = i_size_read(inode);
926 if (attr->ia_size > MAX_INLINE_DATA(inode)) {
928 * should convert inline inode before i_size_write to
929 * keep smaller than inline_data size with inline flag.
931 err = f2fs_convert_inline_inode(inode);
936 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
937 down_write(&F2FS_I(inode)->i_mmap_sem);
939 truncate_setsize(inode, attr->ia_size);
941 if (attr->ia_size <= old_size)
942 err = f2fs_truncate(inode);
944 * do not trim all blocks after i_size if target size is
945 * larger than i_size.
947 up_write(&F2FS_I(inode)->i_mmap_sem);
948 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
952 spin_lock(&F2FS_I(inode)->i_size_lock);
953 inode->i_mtime = inode->i_ctime = current_time(inode);
954 F2FS_I(inode)->last_disk_size = i_size_read(inode);
955 spin_unlock(&F2FS_I(inode)->i_size_lock);
958 __setattr_copy(inode, attr);
960 if (attr->ia_valid & ATTR_MODE) {
961 err = posix_acl_chmod(inode, f2fs_get_inode_mode(inode));
962 if (err || is_inode_flag_set(inode, FI_ACL_MODE)) {
963 inode->i_mode = F2FS_I(inode)->i_acl_mode;
964 clear_inode_flag(inode, FI_ACL_MODE);
968 /* file size may changed here */
969 f2fs_mark_inode_dirty_sync(inode, true);
971 /* inode change will produce dirty node pages flushed by checkpoint */
972 f2fs_balance_fs(F2FS_I_SB(inode), true);
977 const struct inode_operations f2fs_file_inode_operations = {
978 .getattr = f2fs_getattr,
979 .setattr = f2fs_setattr,
980 .get_acl = f2fs_get_acl,
981 .set_acl = f2fs_set_acl,
982 .listxattr = f2fs_listxattr,
983 .fiemap = f2fs_fiemap,
986 static int fill_zero(struct inode *inode, pgoff_t index,
987 loff_t start, loff_t len)
989 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
995 f2fs_balance_fs(sbi, true);
998 page = f2fs_get_new_data_page(inode, NULL, index, false);
1002 return PTR_ERR(page);
1004 f2fs_wait_on_page_writeback(page, DATA, true, true);
1005 zero_user(page, start, len);
1006 set_page_dirty(page);
1007 f2fs_put_page(page, 1);
1011 int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
1015 while (pg_start < pg_end) {
1016 struct dnode_of_data dn;
1017 pgoff_t end_offset, count;
1019 set_new_dnode(&dn, inode, NULL, NULL, 0);
1020 err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
1022 if (err == -ENOENT) {
1023 pg_start = f2fs_get_next_page_offset(&dn,
1030 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1031 count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);
1033 f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);
1035 f2fs_truncate_data_blocks_range(&dn, count);
1036 f2fs_put_dnode(&dn);
1043 static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
1045 pgoff_t pg_start, pg_end;
1046 loff_t off_start, off_end;
1049 ret = f2fs_convert_inline_inode(inode);
1053 pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
1054 pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
1056 off_start = offset & (PAGE_SIZE - 1);
1057 off_end = (offset + len) & (PAGE_SIZE - 1);
1059 if (pg_start == pg_end) {
1060 ret = fill_zero(inode, pg_start, off_start,
1061 off_end - off_start);
1066 ret = fill_zero(inode, pg_start++, off_start,
1067 PAGE_SIZE - off_start);
1072 ret = fill_zero(inode, pg_end, 0, off_end);
1077 if (pg_start < pg_end) {
1078 struct address_space *mapping = inode->i_mapping;
1079 loff_t blk_start, blk_end;
1080 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1082 f2fs_balance_fs(sbi, true);
1084 blk_start = (loff_t)pg_start << PAGE_SHIFT;
1085 blk_end = (loff_t)pg_end << PAGE_SHIFT;
1087 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1088 down_write(&F2FS_I(inode)->i_mmap_sem);
1090 truncate_inode_pages_range(mapping, blk_start,
1094 ret = f2fs_truncate_hole(inode, pg_start, pg_end);
1095 f2fs_unlock_op(sbi);
1097 up_write(&F2FS_I(inode)->i_mmap_sem);
1098 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1105 static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
1106 int *do_replace, pgoff_t off, pgoff_t len)
1108 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1109 struct dnode_of_data dn;
1113 set_new_dnode(&dn, inode, NULL, NULL, 0);
1114 ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
1115 if (ret && ret != -ENOENT) {
1117 } else if (ret == -ENOENT) {
1118 if (dn.max_level == 0)
1120 done = min((pgoff_t)ADDRS_PER_BLOCK(inode) -
1121 dn.ofs_in_node, len);
1127 done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
1128 dn.ofs_in_node, len);
1129 for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
1130 *blkaddr = f2fs_data_blkaddr(&dn);
1132 if (__is_valid_data_blkaddr(*blkaddr) &&
1133 !f2fs_is_valid_blkaddr(sbi, *blkaddr,
1134 DATA_GENERIC_ENHANCE)) {
1135 f2fs_put_dnode(&dn);
1136 return -EFSCORRUPTED;
1139 if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {
1141 if (f2fs_lfs_mode(sbi)) {
1142 f2fs_put_dnode(&dn);
1146 /* do not invalidate this block address */
1147 f2fs_update_data_blkaddr(&dn, NULL_ADDR);
1151 f2fs_put_dnode(&dn);
1160 static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
1161 int *do_replace, pgoff_t off, int len)
1163 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1164 struct dnode_of_data dn;
1167 for (i = 0; i < len; i++, do_replace++, blkaddr++) {
1168 if (*do_replace == 0)
1171 set_new_dnode(&dn, inode, NULL, NULL, 0);
1172 ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
1174 dec_valid_block_count(sbi, inode, 1);
1175 f2fs_invalidate_blocks(sbi, *blkaddr);
1177 f2fs_update_data_blkaddr(&dn, *blkaddr);
1179 f2fs_put_dnode(&dn);
1184 static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
1185 block_t *blkaddr, int *do_replace,
1186 pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
1188 struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
1193 if (blkaddr[i] == NULL_ADDR && !full) {
1198 if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
1199 struct dnode_of_data dn;
1200 struct node_info ni;
1204 set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
1205 ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
1209 ret = f2fs_get_node_info(sbi, dn.nid, &ni);
1211 f2fs_put_dnode(&dn);
1215 ilen = min((pgoff_t)
1216 ADDRS_PER_PAGE(dn.node_page, dst_inode) -
1217 dn.ofs_in_node, len - i);
1219 dn.data_blkaddr = f2fs_data_blkaddr(&dn);
1220 f2fs_truncate_data_blocks_range(&dn, 1);
1222 if (do_replace[i]) {
1223 f2fs_i_blocks_write(src_inode,
1225 f2fs_i_blocks_write(dst_inode,
1227 f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
1228 blkaddr[i], ni.version, true, false);
1234 new_size = (loff_t)(dst + i) << PAGE_SHIFT;
1235 if (dst_inode->i_size < new_size)
1236 f2fs_i_size_write(dst_inode, new_size);
1237 } while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));
1239 f2fs_put_dnode(&dn);
1241 struct page *psrc, *pdst;
1243 psrc = f2fs_get_lock_data_page(src_inode,
1246 return PTR_ERR(psrc);
1247 pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i,
1250 f2fs_put_page(psrc, 1);
1251 return PTR_ERR(pdst);
1253 f2fs_copy_page(psrc, pdst);
1254 set_page_dirty(pdst);
1255 f2fs_put_page(pdst, 1);
1256 f2fs_put_page(psrc, 1);
1258 ret = f2fs_truncate_hole(src_inode,
1259 src + i, src + i + 1);
1268 static int __exchange_data_block(struct inode *src_inode,
1269 struct inode *dst_inode, pgoff_t src, pgoff_t dst,
1270 pgoff_t len, bool full)
1272 block_t *src_blkaddr;
1278 olen = min((pgoff_t)4 * ADDRS_PER_BLOCK(src_inode), len);
1280 src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
1281 array_size(olen, sizeof(block_t)),
1286 do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
1287 array_size(olen, sizeof(int)),
1290 kvfree(src_blkaddr);
1294 ret = __read_out_blkaddrs(src_inode, src_blkaddr,
1295 do_replace, src, olen);
1299 ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
1300 do_replace, src, dst, olen, full);
1308 kvfree(src_blkaddr);
1314 __roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen);
1315 kvfree(src_blkaddr);
1320 static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
1322 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1323 pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1324 pgoff_t start = offset >> PAGE_SHIFT;
1325 pgoff_t end = (offset + len) >> PAGE_SHIFT;
1328 f2fs_balance_fs(sbi, true);
1330 /* avoid gc operation during block exchange */
1331 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1332 down_write(&F2FS_I(inode)->i_mmap_sem);
1335 f2fs_drop_extent_tree(inode);
1336 truncate_pagecache(inode, offset);
1337 ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
1338 f2fs_unlock_op(sbi);
1340 up_write(&F2FS_I(inode)->i_mmap_sem);
1341 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1345 static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
1350 if (offset + len >= i_size_read(inode))
1353 /* collapse range should be aligned to block size of f2fs. */
1354 if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
1357 ret = f2fs_convert_inline_inode(inode);
1361 /* write out all dirty pages from offset */
1362 ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1366 ret = f2fs_do_collapse(inode, offset, len);
1370 /* write out all moved pages, if possible */
1371 down_write(&F2FS_I(inode)->i_mmap_sem);
1372 filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1373 truncate_pagecache(inode, offset);
1375 new_size = i_size_read(inode) - len;
1376 truncate_pagecache(inode, new_size);
1378 ret = f2fs_truncate_blocks(inode, new_size, true);
1379 up_write(&F2FS_I(inode)->i_mmap_sem);
1381 f2fs_i_size_write(inode, new_size);
1385 static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
1388 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1389 pgoff_t index = start;
1390 unsigned int ofs_in_node = dn->ofs_in_node;
1394 for (; index < end; index++, dn->ofs_in_node++) {
1395 if (f2fs_data_blkaddr(dn) == NULL_ADDR)
1399 dn->ofs_in_node = ofs_in_node;
1400 ret = f2fs_reserve_new_blocks(dn, count);
1404 dn->ofs_in_node = ofs_in_node;
1405 for (index = start; index < end; index++, dn->ofs_in_node++) {
1406 dn->data_blkaddr = f2fs_data_blkaddr(dn);
1408 * f2fs_reserve_new_blocks will not guarantee entire block
1411 if (dn->data_blkaddr == NULL_ADDR) {
1415 if (dn->data_blkaddr != NEW_ADDR) {
1416 f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
1417 dn->data_blkaddr = NEW_ADDR;
1418 f2fs_set_data_blkaddr(dn);
1422 f2fs_update_extent_cache_range(dn, start, 0, index - start);
1427 static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
1430 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1431 struct address_space *mapping = inode->i_mapping;
1432 pgoff_t index, pg_start, pg_end;
1433 loff_t new_size = i_size_read(inode);
1434 loff_t off_start, off_end;
1437 ret = inode_newsize_ok(inode, (len + offset));
1441 ret = f2fs_convert_inline_inode(inode);
1445 ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
1449 pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
1450 pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
1452 off_start = offset & (PAGE_SIZE - 1);
1453 off_end = (offset + len) & (PAGE_SIZE - 1);
1455 if (pg_start == pg_end) {
1456 ret = fill_zero(inode, pg_start, off_start,
1457 off_end - off_start);
1461 new_size = max_t(loff_t, new_size, offset + len);
1464 ret = fill_zero(inode, pg_start++, off_start,
1465 PAGE_SIZE - off_start);
1469 new_size = max_t(loff_t, new_size,
1470 (loff_t)pg_start << PAGE_SHIFT);
1473 for (index = pg_start; index < pg_end;) {
1474 struct dnode_of_data dn;
1475 unsigned int end_offset;
1478 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1479 down_write(&F2FS_I(inode)->i_mmap_sem);
1481 truncate_pagecache_range(inode,
1482 (loff_t)index << PAGE_SHIFT,
1483 ((loff_t)pg_end << PAGE_SHIFT) - 1);
1487 set_new_dnode(&dn, inode, NULL, NULL, 0);
1488 ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
1490 f2fs_unlock_op(sbi);
1491 up_write(&F2FS_I(inode)->i_mmap_sem);
1492 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1496 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1497 end = min(pg_end, end_offset - dn.ofs_in_node + index);
1499 ret = f2fs_do_zero_range(&dn, index, end);
1500 f2fs_put_dnode(&dn);
1502 f2fs_unlock_op(sbi);
1503 up_write(&F2FS_I(inode)->i_mmap_sem);
1504 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1506 f2fs_balance_fs(sbi, dn.node_changed);
1512 new_size = max_t(loff_t, new_size,
1513 (loff_t)index << PAGE_SHIFT);
1517 ret = fill_zero(inode, pg_end, 0, off_end);
1521 new_size = max_t(loff_t, new_size, offset + len);
1526 if (new_size > i_size_read(inode)) {
1527 if (mode & FALLOC_FL_KEEP_SIZE)
1528 file_set_keep_isize(inode);
1530 f2fs_i_size_write(inode, new_size);
1535 static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
1537 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1538 pgoff_t nr, pg_start, pg_end, delta, idx;
1542 new_size = i_size_read(inode) + len;
1543 ret = inode_newsize_ok(inode, new_size);
1547 if (offset >= i_size_read(inode))
1550 /* insert range should be aligned to block size of f2fs. */
1551 if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
1554 ret = f2fs_convert_inline_inode(inode);
1558 f2fs_balance_fs(sbi, true);
1560 down_write(&F2FS_I(inode)->i_mmap_sem);
1561 ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
1562 up_write(&F2FS_I(inode)->i_mmap_sem);
1566 /* write out all dirty pages from offset */
1567 ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1571 pg_start = offset >> PAGE_SHIFT;
1572 pg_end = (offset + len) >> PAGE_SHIFT;
1573 delta = pg_end - pg_start;
1574 idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1576 /* avoid gc operation during block exchange */
1577 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1578 down_write(&F2FS_I(inode)->i_mmap_sem);
1579 truncate_pagecache(inode, offset);
1581 while (!ret && idx > pg_start) {
1582 nr = idx - pg_start;
1588 f2fs_drop_extent_tree(inode);
1590 ret = __exchange_data_block(inode, inode, idx,
1591 idx + delta, nr, false);
1592 f2fs_unlock_op(sbi);
1594 up_write(&F2FS_I(inode)->i_mmap_sem);
1595 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1597 /* write out all moved pages, if possible */
1598 down_write(&F2FS_I(inode)->i_mmap_sem);
1599 filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1600 truncate_pagecache(inode, offset);
1601 up_write(&F2FS_I(inode)->i_mmap_sem);
1604 f2fs_i_size_write(inode, new_size);
1608 static int expand_inode_data(struct inode *inode, loff_t offset,
1609 loff_t len, int mode)
1611 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1612 struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
1613 .m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
1614 .m_may_create = true };
1616 loff_t new_size = i_size_read(inode);
1620 err = inode_newsize_ok(inode, (len + offset));
1624 err = f2fs_convert_inline_inode(inode);
1628 f2fs_balance_fs(sbi, true);
1630 pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
1631 off_end = (offset + len) & (PAGE_SIZE - 1);
1633 map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT;
1634 map.m_len = pg_end - map.m_lblk;
1641 if (f2fs_is_pinned_file(inode)) {
1642 block_t len = (map.m_len >> sbi->log_blocks_per_seg) <<
1643 sbi->log_blocks_per_seg;
1646 if (map.m_len % sbi->blocks_per_seg)
1647 len += sbi->blocks_per_seg;
1649 map.m_len = sbi->blocks_per_seg;
1651 if (has_not_enough_free_secs(sbi, 0,
1652 GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
1653 down_write(&sbi->gc_lock);
1654 err = f2fs_gc(sbi, true, false, NULL_SEGNO);
1655 if (err && err != -ENODATA && err != -EAGAIN)
1659 down_write(&sbi->pin_sem);
1660 map.m_seg_type = CURSEG_COLD_DATA_PINNED;
1663 f2fs_allocate_new_segments(sbi, CURSEG_COLD_DATA);
1664 f2fs_unlock_op(sbi);
1666 err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
1667 up_write(&sbi->pin_sem);
1671 map.m_lblk += map.m_len;
1677 err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
1686 last_off = map.m_lblk + map.m_len - 1;
1688 /* update new size to the failed position */
1689 new_size = (last_off == pg_end) ? offset + len :
1690 (loff_t)(last_off + 1) << PAGE_SHIFT;
1692 new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
1695 if (new_size > i_size_read(inode)) {
1696 if (mode & FALLOC_FL_KEEP_SIZE)
1697 file_set_keep_isize(inode);
1699 f2fs_i_size_write(inode, new_size);
1705 static long f2fs_fallocate(struct file *file, int mode,
1706 loff_t offset, loff_t len)
1708 struct inode *inode = file_inode(file);
1711 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
1713 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
1715 if (!f2fs_is_compress_backend_ready(inode))
1718 /* f2fs only support ->fallocate for regular file */
1719 if (!S_ISREG(inode->i_mode))
1722 if (IS_ENCRYPTED(inode) &&
1723 (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
1726 if (f2fs_compressed_file(inode) &&
1727 (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
1728 FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE)))
1731 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
1732 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
1733 FALLOC_FL_INSERT_RANGE))
1738 if (mode & FALLOC_FL_PUNCH_HOLE) {
1739 if (offset >= inode->i_size)
1742 ret = punch_hole(inode, offset, len);
1743 } else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
1744 ret = f2fs_collapse_range(inode, offset, len);
1745 } else if (mode & FALLOC_FL_ZERO_RANGE) {
1746 ret = f2fs_zero_range(inode, offset, len, mode);
1747 } else if (mode & FALLOC_FL_INSERT_RANGE) {
1748 ret = f2fs_insert_range(inode, offset, len);
1750 ret = expand_inode_data(inode, offset, len, mode);
1754 inode->i_mtime = inode->i_ctime = current_time(inode);
1755 f2fs_mark_inode_dirty_sync(inode, false);
1756 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
1760 inode_unlock(inode);
1762 trace_f2fs_fallocate(inode, mode, offset, len, ret);
1766 static int f2fs_release_file(struct inode *inode, struct file *filp)
1769 * f2fs_relase_file is called at every close calls. So we should
1770 * not drop any inmemory pages by close called by other process.
1772 if (!(filp->f_mode & FMODE_WRITE) ||
1773 atomic_read(&inode->i_writecount) != 1)
1776 /* some remained atomic pages should discarded */
1777 if (f2fs_is_atomic_file(inode))
1778 f2fs_drop_inmem_pages(inode);
1779 if (f2fs_is_volatile_file(inode)) {
1780 set_inode_flag(inode, FI_DROP_CACHE);
1781 filemap_fdatawrite(inode->i_mapping);
1782 clear_inode_flag(inode, FI_DROP_CACHE);
1783 clear_inode_flag(inode, FI_VOLATILE_FILE);
1784 stat_dec_volatile_write(inode);
1789 static int f2fs_file_flush(struct file *file, fl_owner_t id)
1791 struct inode *inode = file_inode(file);
1794 * If the process doing a transaction is crashed, we should do
1795 * roll-back. Otherwise, other reader/write can see corrupted database
1796 * until all the writers close its file. Since this should be done
1797 * before dropping file lock, it needs to do in ->flush.
1799 if (f2fs_is_atomic_file(inode) &&
1800 F2FS_I(inode)->inmem_task == current)
1801 f2fs_drop_inmem_pages(inode);
1805 static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
1807 struct f2fs_inode_info *fi = F2FS_I(inode);
1808 u32 masked_flags = fi->i_flags & mask;
1810 f2fs_bug_on(F2FS_I_SB(inode), (iflags & ~mask));
1812 /* Is it quota file? Do not allow user to mess with it */
1813 if (IS_NOQUOTA(inode))
1816 if ((iflags ^ masked_flags) & F2FS_CASEFOLD_FL) {
1817 if (!f2fs_sb_has_casefold(F2FS_I_SB(inode)))
1819 if (!f2fs_empty_dir(inode))
1823 if (iflags & (F2FS_COMPR_FL | F2FS_NOCOMP_FL)) {
1824 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
1826 if ((iflags & F2FS_COMPR_FL) && (iflags & F2FS_NOCOMP_FL))
1830 if ((iflags ^ masked_flags) & F2FS_COMPR_FL) {
1831 if (masked_flags & F2FS_COMPR_FL) {
1832 if (f2fs_disable_compressed_file(inode))
1835 if (iflags & F2FS_NOCOMP_FL)
1837 if (iflags & F2FS_COMPR_FL) {
1838 if (!f2fs_may_compress(inode))
1841 set_compress_context(inode);
1844 if ((iflags ^ masked_flags) & F2FS_NOCOMP_FL) {
1845 if (masked_flags & F2FS_COMPR_FL)
1849 fi->i_flags = iflags | (fi->i_flags & ~mask);
1850 f2fs_bug_on(F2FS_I_SB(inode), (fi->i_flags & F2FS_COMPR_FL) &&
1851 (fi->i_flags & F2FS_NOCOMP_FL));
1853 if (fi->i_flags & F2FS_PROJINHERIT_FL)
1854 set_inode_flag(inode, FI_PROJ_INHERIT);
1856 clear_inode_flag(inode, FI_PROJ_INHERIT);
1858 inode->i_ctime = current_time(inode);
1859 f2fs_set_inode_flags(inode);
1860 f2fs_mark_inode_dirty_sync(inode, true);
1864 /* FS_IOC_GETFLAGS and FS_IOC_SETFLAGS support */
1867 * To make a new on-disk f2fs i_flag gettable via FS_IOC_GETFLAGS, add an entry
1868 * for it to f2fs_fsflags_map[], and add its FS_*_FL equivalent to
1869 * F2FS_GETTABLE_FS_FL. To also make it settable via FS_IOC_SETFLAGS, also add
1870 * its FS_*_FL equivalent to F2FS_SETTABLE_FS_FL.
1873 static const struct {
1876 } f2fs_fsflags_map[] = {
1877 { F2FS_COMPR_FL, FS_COMPR_FL },
1878 { F2FS_SYNC_FL, FS_SYNC_FL },
1879 { F2FS_IMMUTABLE_FL, FS_IMMUTABLE_FL },
1880 { F2FS_APPEND_FL, FS_APPEND_FL },
1881 { F2FS_NODUMP_FL, FS_NODUMP_FL },
1882 { F2FS_NOATIME_FL, FS_NOATIME_FL },
1883 { F2FS_NOCOMP_FL, FS_NOCOMP_FL },
1884 { F2FS_INDEX_FL, FS_INDEX_FL },
1885 { F2FS_DIRSYNC_FL, FS_DIRSYNC_FL },
1886 { F2FS_PROJINHERIT_FL, FS_PROJINHERIT_FL },
1887 { F2FS_CASEFOLD_FL, FS_CASEFOLD_FL },
1890 #define F2FS_GETTABLE_FS_FL ( \
1900 FS_PROJINHERIT_FL | \
1902 FS_INLINE_DATA_FL | \
1907 #define F2FS_SETTABLE_FS_FL ( \
1916 FS_PROJINHERIT_FL | \
1919 /* Convert f2fs on-disk i_flags to FS_IOC_{GET,SET}FLAGS flags */
1920 static inline u32 f2fs_iflags_to_fsflags(u32 iflags)
1925 for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
1926 if (iflags & f2fs_fsflags_map[i].iflag)
1927 fsflags |= f2fs_fsflags_map[i].fsflag;
1932 /* Convert FS_IOC_{GET,SET}FLAGS flags to f2fs on-disk i_flags */
1933 static inline u32 f2fs_fsflags_to_iflags(u32 fsflags)
1938 for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
1939 if (fsflags & f2fs_fsflags_map[i].fsflag)
1940 iflags |= f2fs_fsflags_map[i].iflag;
1945 static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
1947 struct inode *inode = file_inode(filp);
1948 struct f2fs_inode_info *fi = F2FS_I(inode);
1949 u32 fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
1951 if (IS_ENCRYPTED(inode))
1952 fsflags |= FS_ENCRYPT_FL;
1953 if (IS_VERITY(inode))
1954 fsflags |= FS_VERITY_FL;
1955 if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
1956 fsflags |= FS_INLINE_DATA_FL;
1957 if (is_inode_flag_set(inode, FI_PIN_FILE))
1958 fsflags |= FS_NOCOW_FL;
1960 fsflags &= F2FS_GETTABLE_FS_FL;
1962 return put_user(fsflags, (int __user *)arg);
1965 static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
1967 struct inode *inode = file_inode(filp);
1968 struct f2fs_inode_info *fi = F2FS_I(inode);
1969 u32 fsflags, old_fsflags;
1973 if (!inode_owner_or_capable(inode))
1976 if (get_user(fsflags, (int __user *)arg))
1979 if (fsflags & ~F2FS_GETTABLE_FS_FL)
1981 fsflags &= F2FS_SETTABLE_FS_FL;
1983 iflags = f2fs_fsflags_to_iflags(fsflags);
1984 if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
1987 ret = mnt_want_write_file(filp);
1993 old_fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
1994 ret = vfs_ioc_setflags_prepare(inode, old_fsflags, fsflags);
1998 ret = f2fs_setflags_common(inode, iflags,
1999 f2fs_fsflags_to_iflags(F2FS_SETTABLE_FS_FL));
2001 inode_unlock(inode);
2002 mnt_drop_write_file(filp);
2006 static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
2008 struct inode *inode = file_inode(filp);
2010 return put_user(inode->i_generation, (int __user *)arg);
2013 static int f2fs_ioc_start_atomic_write(struct file *filp)
2015 struct inode *inode = file_inode(filp);
2016 struct f2fs_inode_info *fi = F2FS_I(inode);
2017 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2020 if (!inode_owner_or_capable(inode))
2023 if (!S_ISREG(inode->i_mode))
2026 if (filp->f_flags & O_DIRECT)
2029 ret = mnt_want_write_file(filp);
2035 f2fs_disable_compressed_file(inode);
2037 if (f2fs_is_atomic_file(inode)) {
2038 if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))
2043 ret = f2fs_convert_inline_inode(inode);
2047 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2050 * Should wait end_io to count F2FS_WB_CP_DATA correctly by
2051 * f2fs_is_atomic_file.
2053 if (get_dirty_pages(inode))
2054 f2fs_warn(F2FS_I_SB(inode), "Unexpected flush for atomic writes: ino=%lu, npages=%u",
2055 inode->i_ino, get_dirty_pages(inode));
2056 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
2058 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2062 spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
2063 if (list_empty(&fi->inmem_ilist))
2064 list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]);
2065 sbi->atomic_files++;
2066 spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
2068 /* add inode in inmem_list first and set atomic_file */
2069 set_inode_flag(inode, FI_ATOMIC_FILE);
2070 clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2071 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2073 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2074 F2FS_I(inode)->inmem_task = current;
2075 stat_update_max_atomic_write(inode);
2077 inode_unlock(inode);
2078 mnt_drop_write_file(filp);
2082 static int f2fs_ioc_commit_atomic_write(struct file *filp)
2084 struct inode *inode = file_inode(filp);
2087 if (!inode_owner_or_capable(inode))
2090 ret = mnt_want_write_file(filp);
2094 f2fs_balance_fs(F2FS_I_SB(inode), true);
2098 if (f2fs_is_volatile_file(inode)) {
2103 if (f2fs_is_atomic_file(inode)) {
2104 ret = f2fs_commit_inmem_pages(inode);
2108 ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
2110 f2fs_drop_inmem_pages(inode);
2112 ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
2115 if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
2116 clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2119 inode_unlock(inode);
2120 mnt_drop_write_file(filp);
2124 static int f2fs_ioc_start_volatile_write(struct file *filp)
2126 struct inode *inode = file_inode(filp);
2129 if (!inode_owner_or_capable(inode))
2132 if (!S_ISREG(inode->i_mode))
2135 ret = mnt_want_write_file(filp);
2141 if (f2fs_is_volatile_file(inode))
2144 ret = f2fs_convert_inline_inode(inode);
2148 stat_inc_volatile_write(inode);
2149 stat_update_max_volatile_write(inode);
2151 set_inode_flag(inode, FI_VOLATILE_FILE);
2152 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2154 inode_unlock(inode);
2155 mnt_drop_write_file(filp);
2159 static int f2fs_ioc_release_volatile_write(struct file *filp)
2161 struct inode *inode = file_inode(filp);
2164 if (!inode_owner_or_capable(inode))
2167 ret = mnt_want_write_file(filp);
2173 if (!f2fs_is_volatile_file(inode))
2176 if (!f2fs_is_first_block_written(inode)) {
2177 ret = truncate_partial_data_page(inode, 0, true);
2181 ret = punch_hole(inode, 0, F2FS_BLKSIZE);
2183 inode_unlock(inode);
2184 mnt_drop_write_file(filp);
2188 static int f2fs_ioc_abort_volatile_write(struct file *filp)
2190 struct inode *inode = file_inode(filp);
2193 if (!inode_owner_or_capable(inode))
2196 ret = mnt_want_write_file(filp);
2202 if (f2fs_is_atomic_file(inode))
2203 f2fs_drop_inmem_pages(inode);
2204 if (f2fs_is_volatile_file(inode)) {
2205 clear_inode_flag(inode, FI_VOLATILE_FILE);
2206 stat_dec_volatile_write(inode);
2207 ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
2210 clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2212 inode_unlock(inode);
2214 mnt_drop_write_file(filp);
2215 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2219 static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
2221 struct inode *inode = file_inode(filp);
2222 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2223 struct super_block *sb = sbi->sb;
2227 if (!capable(CAP_SYS_ADMIN))
2230 if (get_user(in, (__u32 __user *)arg))
2233 if (in != F2FS_GOING_DOWN_FULLSYNC) {
2234 ret = mnt_want_write_file(filp);
2236 if (ret == -EROFS) {
2238 f2fs_stop_checkpoint(sbi, false);
2239 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2240 trace_f2fs_shutdown(sbi, in, ret);
2247 case F2FS_GOING_DOWN_FULLSYNC:
2248 sb = freeze_bdev(sb->s_bdev);
2254 f2fs_stop_checkpoint(sbi, false);
2255 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2256 thaw_bdev(sb->s_bdev, sb);
2259 case F2FS_GOING_DOWN_METASYNC:
2260 /* do checkpoint only */
2261 ret = f2fs_sync_fs(sb, 1);
2264 f2fs_stop_checkpoint(sbi, false);
2265 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2267 case F2FS_GOING_DOWN_NOSYNC:
2268 f2fs_stop_checkpoint(sbi, false);
2269 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2271 case F2FS_GOING_DOWN_METAFLUSH:
2272 f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
2273 f2fs_stop_checkpoint(sbi, false);
2274 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2276 case F2FS_GOING_DOWN_NEED_FSCK:
2277 set_sbi_flag(sbi, SBI_NEED_FSCK);
2278 set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
2279 set_sbi_flag(sbi, SBI_IS_DIRTY);
2280 /* do checkpoint only */
2281 ret = f2fs_sync_fs(sb, 1);
2288 f2fs_stop_gc_thread(sbi);
2289 f2fs_stop_discard_thread(sbi);
2291 f2fs_drop_discard_cmd(sbi);
2292 clear_opt(sbi, DISCARD);
2294 f2fs_update_time(sbi, REQ_TIME);
2296 if (in != F2FS_GOING_DOWN_FULLSYNC)
2297 mnt_drop_write_file(filp);
2299 trace_f2fs_shutdown(sbi, in, ret);
2304 static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
2306 struct inode *inode = file_inode(filp);
2307 struct super_block *sb = inode->i_sb;
2308 struct request_queue *q = bdev_get_queue(sb->s_bdev);
2309 struct fstrim_range range;
2312 if (!capable(CAP_SYS_ADMIN))
2315 if (!f2fs_hw_support_discard(F2FS_SB(sb)))
2318 if (copy_from_user(&range, (struct fstrim_range __user *)arg,
2322 ret = mnt_want_write_file(filp);
2326 range.minlen = max((unsigned int)range.minlen,
2327 q->limits.discard_granularity);
2328 ret = f2fs_trim_fs(F2FS_SB(sb), &range);
2329 mnt_drop_write_file(filp);
2333 if (copy_to_user((struct fstrim_range __user *)arg, &range,
2336 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2340 static bool uuid_is_nonzero(__u8 u[16])
2344 for (i = 0; i < 16; i++)
2350 static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
2352 struct inode *inode = file_inode(filp);
2354 if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode)))
2357 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2359 return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
2362 static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
2364 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2366 return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
2369 static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
2371 struct inode *inode = file_inode(filp);
2372 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2375 if (!f2fs_sb_has_encrypt(sbi))
2378 err = mnt_want_write_file(filp);
2382 down_write(&sbi->sb_lock);
2384 if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
2387 /* update superblock with uuid */
2388 generate_random_uuid(sbi->raw_super->encrypt_pw_salt);
2390 err = f2fs_commit_super(sbi, false);
2393 memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
2397 if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
2401 up_write(&sbi->sb_lock);
2402 mnt_drop_write_file(filp);
2406 static int f2fs_ioc_get_encryption_policy_ex(struct file *filp,
2409 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2412 return fscrypt_ioctl_get_policy_ex(filp, (void __user *)arg);
2415 static int f2fs_ioc_add_encryption_key(struct file *filp, unsigned long arg)
2417 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2420 return fscrypt_ioctl_add_key(filp, (void __user *)arg);
2423 static int f2fs_ioc_remove_encryption_key(struct file *filp, unsigned long arg)
2425 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2428 return fscrypt_ioctl_remove_key(filp, (void __user *)arg);
2431 static int f2fs_ioc_remove_encryption_key_all_users(struct file *filp,
2434 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2437 return fscrypt_ioctl_remove_key_all_users(filp, (void __user *)arg);
2440 static int f2fs_ioc_get_encryption_key_status(struct file *filp,
2443 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2446 return fscrypt_ioctl_get_key_status(filp, (void __user *)arg);
2449 static int f2fs_ioc_get_encryption_nonce(struct file *filp, unsigned long arg)
2451 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2454 return fscrypt_ioctl_get_nonce(filp, (void __user *)arg);
2457 static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
2459 struct inode *inode = file_inode(filp);
2460 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2464 if (!capable(CAP_SYS_ADMIN))
2467 if (get_user(sync, (__u32 __user *)arg))
2470 if (f2fs_readonly(sbi->sb))
2473 ret = mnt_want_write_file(filp);
2478 if (!down_write_trylock(&sbi->gc_lock)) {
2483 down_write(&sbi->gc_lock);
2486 ret = f2fs_gc(sbi, sync, true, NULL_SEGNO);
2488 mnt_drop_write_file(filp);
2492 static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
2494 struct inode *inode = file_inode(filp);
2495 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2496 struct f2fs_gc_range range;
2500 if (!capable(CAP_SYS_ADMIN))
2503 if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
2507 if (f2fs_readonly(sbi->sb))
2510 end = range.start + range.len;
2511 if (end < range.start || range.start < MAIN_BLKADDR(sbi) ||
2512 end >= MAX_BLKADDR(sbi))
2515 ret = mnt_want_write_file(filp);
2521 if (!down_write_trylock(&sbi->gc_lock)) {
2526 down_write(&sbi->gc_lock);
2529 ret = f2fs_gc(sbi, range.sync, true, GET_SEGNO(sbi, range.start));
2530 range.start += BLKS_PER_SEC(sbi);
2531 if (range.start <= end)
2534 mnt_drop_write_file(filp);
2538 static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
2540 struct inode *inode = file_inode(filp);
2541 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2544 if (!capable(CAP_SYS_ADMIN))
2547 if (f2fs_readonly(sbi->sb))
2550 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2551 f2fs_info(sbi, "Skipping Checkpoint. Checkpoints currently disabled.");
2555 ret = mnt_want_write_file(filp);
2559 ret = f2fs_sync_fs(sbi->sb, 1);
2561 mnt_drop_write_file(filp);
2565 static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
2567 struct f2fs_defragment *range)
2569 struct inode *inode = file_inode(filp);
2570 struct f2fs_map_blocks map = { .m_next_extent = NULL,
2571 .m_seg_type = NO_CHECK_TYPE ,
2572 .m_may_create = false };
2573 struct extent_info ei = {0, 0, 0};
2574 pgoff_t pg_start, pg_end, next_pgofs;
2575 unsigned int blk_per_seg = sbi->blocks_per_seg;
2576 unsigned int total = 0, sec_num;
2577 block_t blk_end = 0;
2578 bool fragmented = false;
2581 /* if in-place-update policy is enabled, don't waste time here */
2582 if (f2fs_should_update_inplace(inode, NULL))
2585 pg_start = range->start >> PAGE_SHIFT;
2586 pg_end = (range->start + range->len) >> PAGE_SHIFT;
2588 f2fs_balance_fs(sbi, true);
2592 /* writeback all dirty pages in the range */
2593 err = filemap_write_and_wait_range(inode->i_mapping, range->start,
2594 range->start + range->len - 1);
2599 * lookup mapping info in extent cache, skip defragmenting if physical
2600 * block addresses are continuous.
2602 if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
2603 if (ei.fofs + ei.len >= pg_end)
2607 map.m_lblk = pg_start;
2608 map.m_next_pgofs = &next_pgofs;
2611 * lookup mapping info in dnode page cache, skip defragmenting if all
2612 * physical block addresses are continuous even if there are hole(s)
2613 * in logical blocks.
2615 while (map.m_lblk < pg_end) {
2616 map.m_len = pg_end - map.m_lblk;
2617 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
2621 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
2622 map.m_lblk = next_pgofs;
2626 if (blk_end && blk_end != map.m_pblk)
2629 /* record total count of block that we're going to move */
2632 blk_end = map.m_pblk + map.m_len;
2634 map.m_lblk += map.m_len;
2642 sec_num = DIV_ROUND_UP(total, BLKS_PER_SEC(sbi));
2645 * make sure there are enough free section for LFS allocation, this can
2646 * avoid defragment running in SSR mode when free section are allocated
2649 if (has_not_enough_free_secs(sbi, 0, sec_num)) {
2654 map.m_lblk = pg_start;
2655 map.m_len = pg_end - pg_start;
2658 while (map.m_lblk < pg_end) {
2663 map.m_len = pg_end - map.m_lblk;
2664 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
2668 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
2669 map.m_lblk = next_pgofs;
2673 set_inode_flag(inode, FI_DO_DEFRAG);
2676 while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
2679 page = f2fs_get_lock_data_page(inode, idx, true);
2681 err = PTR_ERR(page);
2685 set_page_dirty(page);
2686 f2fs_put_page(page, 1);
2695 if (map.m_lblk < pg_end && cnt < blk_per_seg)
2698 clear_inode_flag(inode, FI_DO_DEFRAG);
2700 err = filemap_fdatawrite(inode->i_mapping);
2705 clear_inode_flag(inode, FI_DO_DEFRAG);
2707 inode_unlock(inode);
2709 range->len = (u64)total << PAGE_SHIFT;
2713 static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
2715 struct inode *inode = file_inode(filp);
2716 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2717 struct f2fs_defragment range;
2720 if (!capable(CAP_SYS_ADMIN))
2723 if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
2726 if (f2fs_readonly(sbi->sb))
2729 if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
2733 /* verify alignment of offset & size */
2734 if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
2737 if (unlikely((range.start + range.len) >> PAGE_SHIFT >
2738 sbi->max_file_blocks))
2741 err = mnt_want_write_file(filp);
2745 err = f2fs_defragment_range(sbi, filp, &range);
2746 mnt_drop_write_file(filp);
2748 f2fs_update_time(sbi, REQ_TIME);
2752 if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
2759 static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
2760 struct file *file_out, loff_t pos_out, size_t len)
2762 struct inode *src = file_inode(file_in);
2763 struct inode *dst = file_inode(file_out);
2764 struct f2fs_sb_info *sbi = F2FS_I_SB(src);
2765 size_t olen = len, dst_max_i_size = 0;
2769 if (file_in->f_path.mnt != file_out->f_path.mnt ||
2770 src->i_sb != dst->i_sb)
2773 if (unlikely(f2fs_readonly(src->i_sb)))
2776 if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
2779 if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst))
2783 if (pos_in == pos_out)
2785 if (pos_out > pos_in && pos_out < pos_in + len)
2792 if (!inode_trylock(dst))
2797 if (pos_in + len > src->i_size || pos_in + len < pos_in)
2800 olen = len = src->i_size - pos_in;
2801 if (pos_in + len == src->i_size)
2802 len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
2808 dst_osize = dst->i_size;
2809 if (pos_out + olen > dst->i_size)
2810 dst_max_i_size = pos_out + olen;
2812 /* verify the end result is block aligned */
2813 if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
2814 !IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
2815 !IS_ALIGNED(pos_out, F2FS_BLKSIZE))
2818 ret = f2fs_convert_inline_inode(src);
2822 ret = f2fs_convert_inline_inode(dst);
2826 /* write out all dirty pages from offset */
2827 ret = filemap_write_and_wait_range(src->i_mapping,
2828 pos_in, pos_in + len);
2832 ret = filemap_write_and_wait_range(dst->i_mapping,
2833 pos_out, pos_out + len);
2837 f2fs_balance_fs(sbi, true);
2839 down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2842 if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
2847 ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
2848 pos_out >> F2FS_BLKSIZE_BITS,
2849 len >> F2FS_BLKSIZE_BITS, false);
2853 f2fs_i_size_write(dst, dst_max_i_size);
2854 else if (dst_osize != dst->i_size)
2855 f2fs_i_size_write(dst, dst_osize);
2857 f2fs_unlock_op(sbi);
2860 up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
2862 up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2871 static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
2873 struct f2fs_move_range range;
2877 if (!(filp->f_mode & FMODE_READ) ||
2878 !(filp->f_mode & FMODE_WRITE))
2881 if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
2885 dst = fdget(range.dst_fd);
2889 if (!(dst.file->f_mode & FMODE_WRITE)) {
2894 err = mnt_want_write_file(filp);
2898 err = f2fs_move_file_range(filp, range.pos_in, dst.file,
2899 range.pos_out, range.len);
2901 mnt_drop_write_file(filp);
2905 if (copy_to_user((struct f2fs_move_range __user *)arg,
2906 &range, sizeof(range)))
2913 static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
2915 struct inode *inode = file_inode(filp);
2916 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2917 struct sit_info *sm = SIT_I(sbi);
2918 unsigned int start_segno = 0, end_segno = 0;
2919 unsigned int dev_start_segno = 0, dev_end_segno = 0;
2920 struct f2fs_flush_device range;
2923 if (!capable(CAP_SYS_ADMIN))
2926 if (f2fs_readonly(sbi->sb))
2929 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2932 if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
2936 if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num ||
2937 __is_large_section(sbi)) {
2938 f2fs_warn(sbi, "Can't flush %u in %d for segs_per_sec %u != 1",
2939 range.dev_num, sbi->s_ndevs, sbi->segs_per_sec);
2943 ret = mnt_want_write_file(filp);
2947 if (range.dev_num != 0)
2948 dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
2949 dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);
2951 start_segno = sm->last_victim[FLUSH_DEVICE];
2952 if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
2953 start_segno = dev_start_segno;
2954 end_segno = min(start_segno + range.segments, dev_end_segno);
2956 while (start_segno < end_segno) {
2957 if (!down_write_trylock(&sbi->gc_lock)) {
2961 sm->last_victim[GC_CB] = end_segno + 1;
2962 sm->last_victim[GC_GREEDY] = end_segno + 1;
2963 sm->last_victim[ALLOC_NEXT] = end_segno + 1;
2964 ret = f2fs_gc(sbi, true, true, start_segno);
2972 mnt_drop_write_file(filp);
2976 static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
2978 struct inode *inode = file_inode(filp);
2979 u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);
2981 /* Must validate to set it with SQLite behavior in Android. */
2982 sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;
2984 return put_user(sb_feature, (u32 __user *)arg);
2988 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
2990 struct dquot *transfer_to[MAXQUOTAS] = {};
2991 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2992 struct super_block *sb = sbi->sb;
2995 transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
2996 if (!IS_ERR(transfer_to[PRJQUOTA])) {
2997 err = __dquot_transfer(inode, transfer_to);
2999 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
3000 dqput(transfer_to[PRJQUOTA]);
3005 static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
3007 struct inode *inode = file_inode(filp);
3008 struct f2fs_inode_info *fi = F2FS_I(inode);
3009 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3014 if (!f2fs_sb_has_project_quota(sbi)) {
3015 if (projid != F2FS_DEF_PROJID)
3021 if (!f2fs_has_extra_attr(inode))
3024 kprojid = make_kprojid(&init_user_ns, (projid_t)projid);
3026 if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
3030 /* Is it quota file? Do not allow user to mess with it */
3031 if (IS_NOQUOTA(inode))
3034 ipage = f2fs_get_node_page(sbi, inode->i_ino);
3036 return PTR_ERR(ipage);
3038 if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize,
3041 f2fs_put_page(ipage, 1);
3044 f2fs_put_page(ipage, 1);
3046 err = dquot_initialize(inode);
3051 err = f2fs_transfer_project_quota(inode, kprojid);
3055 F2FS_I(inode)->i_projid = kprojid;
3056 inode->i_ctime = current_time(inode);
3057 f2fs_mark_inode_dirty_sync(inode, true);
3059 f2fs_unlock_op(sbi);
3063 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
3068 static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
3070 if (projid != F2FS_DEF_PROJID)
3076 /* FS_IOC_FSGETXATTR and FS_IOC_FSSETXATTR support */
3079 * To make a new on-disk f2fs i_flag gettable via FS_IOC_FSGETXATTR and settable
3080 * via FS_IOC_FSSETXATTR, add an entry for it to f2fs_xflags_map[], and add its
3081 * FS_XFLAG_* equivalent to F2FS_SUPPORTED_XFLAGS.
3084 static const struct {
3087 } f2fs_xflags_map[] = {
3088 { F2FS_SYNC_FL, FS_XFLAG_SYNC },
3089 { F2FS_IMMUTABLE_FL, FS_XFLAG_IMMUTABLE },
3090 { F2FS_APPEND_FL, FS_XFLAG_APPEND },
3091 { F2FS_NODUMP_FL, FS_XFLAG_NODUMP },
3092 { F2FS_NOATIME_FL, FS_XFLAG_NOATIME },
3093 { F2FS_PROJINHERIT_FL, FS_XFLAG_PROJINHERIT },
3096 #define F2FS_SUPPORTED_XFLAGS ( \
3098 FS_XFLAG_IMMUTABLE | \
3101 FS_XFLAG_NOATIME | \
3102 FS_XFLAG_PROJINHERIT)
3104 /* Convert f2fs on-disk i_flags to FS_IOC_FS{GET,SET}XATTR flags */
3105 static inline u32 f2fs_iflags_to_xflags(u32 iflags)
3110 for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++)
3111 if (iflags & f2fs_xflags_map[i].iflag)
3112 xflags |= f2fs_xflags_map[i].xflag;
3117 /* Convert FS_IOC_FS{GET,SET}XATTR flags to f2fs on-disk i_flags */
3118 static inline u32 f2fs_xflags_to_iflags(u32 xflags)
3123 for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++)
3124 if (xflags & f2fs_xflags_map[i].xflag)
3125 iflags |= f2fs_xflags_map[i].iflag;
3130 static void f2fs_fill_fsxattr(struct inode *inode, struct fsxattr *fa)
3132 struct f2fs_inode_info *fi = F2FS_I(inode);
3134 simple_fill_fsxattr(fa, f2fs_iflags_to_xflags(fi->i_flags));
3136 if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
3137 fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid);
3140 static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
3142 struct inode *inode = file_inode(filp);
3145 f2fs_fill_fsxattr(inode, &fa);
3147 if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa)))
3152 static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
3154 struct inode *inode = file_inode(filp);
3155 struct fsxattr fa, old_fa;
3159 if (copy_from_user(&fa, (struct fsxattr __user *)arg, sizeof(fa)))
3162 /* Make sure caller has proper permission */
3163 if (!inode_owner_or_capable(inode))
3166 if (fa.fsx_xflags & ~F2FS_SUPPORTED_XFLAGS)
3169 iflags = f2fs_xflags_to_iflags(fa.fsx_xflags);
3170 if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
3173 err = mnt_want_write_file(filp);
3179 f2fs_fill_fsxattr(inode, &old_fa);
3180 err = vfs_ioc_fssetxattr_check(inode, &old_fa, &fa);
3184 err = f2fs_setflags_common(inode, iflags,
3185 f2fs_xflags_to_iflags(F2FS_SUPPORTED_XFLAGS));
3189 err = f2fs_ioc_setproject(filp, fa.fsx_projid);
3191 inode_unlock(inode);
3192 mnt_drop_write_file(filp);
3196 int f2fs_pin_file_control(struct inode *inode, bool inc)
3198 struct f2fs_inode_info *fi = F2FS_I(inode);
3199 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3201 /* Use i_gc_failures for normal file as a risk signal. */
3203 f2fs_i_gc_failures_write(inode,
3204 fi->i_gc_failures[GC_FAILURE_PIN] + 1);
3206 if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
3207 f2fs_warn(sbi, "%s: Enable GC = ino %lx after %x GC trials",
3208 __func__, inode->i_ino,
3209 fi->i_gc_failures[GC_FAILURE_PIN]);
3210 clear_inode_flag(inode, FI_PIN_FILE);
3216 static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
3218 struct inode *inode = file_inode(filp);
3222 if (get_user(pin, (__u32 __user *)arg))
3225 if (!S_ISREG(inode->i_mode))
3228 if (f2fs_readonly(F2FS_I_SB(inode)->sb))
3231 ret = mnt_want_write_file(filp);
3237 if (f2fs_should_update_outplace(inode, NULL)) {
3243 clear_inode_flag(inode, FI_PIN_FILE);
3244 f2fs_i_gc_failures_write(inode, 0);
3248 if (f2fs_pin_file_control(inode, false)) {
3253 ret = f2fs_convert_inline_inode(inode);
3257 if (f2fs_disable_compressed_file(inode)) {
3262 set_inode_flag(inode, FI_PIN_FILE);
3263 ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3265 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3267 inode_unlock(inode);
3268 mnt_drop_write_file(filp);
3272 static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
3274 struct inode *inode = file_inode(filp);
3277 if (is_inode_flag_set(inode, FI_PIN_FILE))
3278 pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3279 return put_user(pin, (u32 __user *)arg);
3282 int f2fs_precache_extents(struct inode *inode)
3284 struct f2fs_inode_info *fi = F2FS_I(inode);
3285 struct f2fs_map_blocks map;
3286 pgoff_t m_next_extent;
3290 if (is_inode_flag_set(inode, FI_NO_EXTENT))
3294 map.m_next_pgofs = NULL;
3295 map.m_next_extent = &m_next_extent;
3296 map.m_seg_type = NO_CHECK_TYPE;
3297 map.m_may_create = false;
3298 end = F2FS_I_SB(inode)->max_file_blocks;
3300 while (map.m_lblk < end) {
3301 map.m_len = end - map.m_lblk;
3303 down_write(&fi->i_gc_rwsem[WRITE]);
3304 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
3305 up_write(&fi->i_gc_rwsem[WRITE]);
3309 map.m_lblk = m_next_extent;
3315 static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
3317 return f2fs_precache_extents(file_inode(filp));
3320 static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
3322 struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
3325 if (!capable(CAP_SYS_ADMIN))
3328 if (f2fs_readonly(sbi->sb))
3331 if (copy_from_user(&block_count, (void __user *)arg,
3332 sizeof(block_count)))
3335 return f2fs_resize_fs(sbi, block_count);
3338 static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg)
3340 struct inode *inode = file_inode(filp);
3342 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3344 if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) {
3345 f2fs_warn(F2FS_I_SB(inode),
3346 "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem.\n",
3351 return fsverity_ioctl_enable(filp, (const void __user *)arg);
3354 static int f2fs_ioc_measure_verity(struct file *filp, unsigned long arg)
3356 if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
3359 return fsverity_ioctl_measure(filp, (void __user *)arg);
3362 static int f2fs_get_volume_name(struct file *filp, unsigned long arg)
3364 struct inode *inode = file_inode(filp);
3365 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3370 vbuf = f2fs_kzalloc(sbi, MAX_VOLUME_NAME, GFP_KERNEL);
3374 down_read(&sbi->sb_lock);
3375 count = utf16s_to_utf8s(sbi->raw_super->volume_name,
3376 ARRAY_SIZE(sbi->raw_super->volume_name),
3377 UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME);
3378 up_read(&sbi->sb_lock);
3380 if (copy_to_user((char __user *)arg, vbuf,
3381 min(FSLABEL_MAX, count)))
3388 static int f2fs_set_volume_name(struct file *filp, unsigned long arg)
3390 struct inode *inode = file_inode(filp);
3391 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3395 if (!capable(CAP_SYS_ADMIN))
3398 vbuf = strndup_user((const char __user *)arg, FSLABEL_MAX);
3400 return PTR_ERR(vbuf);
3402 err = mnt_want_write_file(filp);
3406 down_write(&sbi->sb_lock);
3408 memset(sbi->raw_super->volume_name, 0,
3409 sizeof(sbi->raw_super->volume_name));
3410 utf8s_to_utf16s(vbuf, strlen(vbuf), UTF16_LITTLE_ENDIAN,
3411 sbi->raw_super->volume_name,
3412 ARRAY_SIZE(sbi->raw_super->volume_name));
3414 err = f2fs_commit_super(sbi, false);
3416 up_write(&sbi->sb_lock);
3418 mnt_drop_write_file(filp);
3424 static int f2fs_get_compress_blocks(struct file *filp, unsigned long arg)
3426 struct inode *inode = file_inode(filp);
3429 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3432 if (!f2fs_compressed_file(inode))
3435 blocks = F2FS_I(inode)->i_compr_blocks;
3436 return put_user(blocks, (u64 __user *)arg);
3439 static int release_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
3441 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3442 unsigned int released_blocks = 0;
3443 int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3447 for (i = 0; i < count; i++) {
3448 blkaddr = data_blkaddr(dn->inode, dn->node_page,
3449 dn->ofs_in_node + i);
3451 if (!__is_valid_data_blkaddr(blkaddr))
3453 if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
3454 DATA_GENERIC_ENHANCE)))
3455 return -EFSCORRUPTED;
3459 int compr_blocks = 0;
3461 for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
3462 blkaddr = f2fs_data_blkaddr(dn);
3465 if (blkaddr == COMPRESS_ADDR)
3467 dn->ofs_in_node += cluster_size;
3471 if (__is_valid_data_blkaddr(blkaddr))
3474 if (blkaddr != NEW_ADDR)
3477 dn->data_blkaddr = NULL_ADDR;
3478 f2fs_set_data_blkaddr(dn);
3481 f2fs_i_compr_blocks_update(dn->inode, compr_blocks, false);
3482 dec_valid_block_count(sbi, dn->inode,
3483 cluster_size - compr_blocks);
3485 released_blocks += cluster_size - compr_blocks;
3487 count -= cluster_size;
3490 return released_blocks;
3493 static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
3495 struct inode *inode = file_inode(filp);
3496 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3497 pgoff_t page_idx = 0, last_idx;
3498 unsigned int released_blocks = 0;
3502 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3505 if (!f2fs_compressed_file(inode))
3508 if (f2fs_readonly(sbi->sb))
3511 ret = mnt_want_write_file(filp);
3515 f2fs_balance_fs(F2FS_I_SB(inode), true);
3519 writecount = atomic_read(&inode->i_writecount);
3520 if ((filp->f_mode & FMODE_WRITE && writecount != 1) || writecount) {
3525 if (IS_IMMUTABLE(inode)) {
3530 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
3534 if (!F2FS_I(inode)->i_compr_blocks)
3537 F2FS_I(inode)->i_flags |= F2FS_IMMUTABLE_FL;
3538 f2fs_set_inode_flags(inode);
3539 inode->i_ctime = current_time(inode);
3540 f2fs_mark_inode_dirty_sync(inode, true);
3542 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3543 down_write(&F2FS_I(inode)->i_mmap_sem);
3545 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3547 while (page_idx < last_idx) {
3548 struct dnode_of_data dn;
3549 pgoff_t end_offset, count;
3551 set_new_dnode(&dn, inode, NULL, NULL, 0);
3552 ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
3554 if (ret == -ENOENT) {
3555 page_idx = f2fs_get_next_page_offset(&dn,
3563 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3564 count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
3565 count = round_up(count, F2FS_I(inode)->i_cluster_size);
3567 ret = release_compress_blocks(&dn, count);
3569 f2fs_put_dnode(&dn);
3575 released_blocks += ret;
3578 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3579 up_write(&F2FS_I(inode)->i_mmap_sem);
3581 inode_unlock(inode);
3583 mnt_drop_write_file(filp);
3586 ret = put_user(released_blocks, (u64 __user *)arg);
3587 } else if (released_blocks && F2FS_I(inode)->i_compr_blocks) {
3588 set_sbi_flag(sbi, SBI_NEED_FSCK);
3589 f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
3590 "iblocks=%llu, released=%u, compr_blocks=%llu, "
3592 __func__, inode->i_ino, inode->i_blocks,
3594 F2FS_I(inode)->i_compr_blocks);
3600 static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
3602 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3603 unsigned int reserved_blocks = 0;
3604 int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3608 for (i = 0; i < count; i++) {
3609 blkaddr = data_blkaddr(dn->inode, dn->node_page,
3610 dn->ofs_in_node + i);
3612 if (!__is_valid_data_blkaddr(blkaddr))
3614 if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
3615 DATA_GENERIC_ENHANCE)))
3616 return -EFSCORRUPTED;
3620 int compr_blocks = 0;
3624 for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
3625 blkaddr = f2fs_data_blkaddr(dn);
3628 if (blkaddr == COMPRESS_ADDR)
3630 dn->ofs_in_node += cluster_size;
3634 if (__is_valid_data_blkaddr(blkaddr)) {
3639 dn->data_blkaddr = NEW_ADDR;
3640 f2fs_set_data_blkaddr(dn);
3643 reserved = cluster_size - compr_blocks;
3644 ret = inc_valid_block_count(sbi, dn->inode, &reserved);
3648 if (reserved != cluster_size - compr_blocks)
3651 f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true);
3653 reserved_blocks += reserved;
3655 count -= cluster_size;
3658 return reserved_blocks;
3661 static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
3663 struct inode *inode = file_inode(filp);
3664 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3665 pgoff_t page_idx = 0, last_idx;
3666 unsigned int reserved_blocks = 0;
3669 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3672 if (!f2fs_compressed_file(inode))
3675 if (f2fs_readonly(sbi->sb))
3678 ret = mnt_want_write_file(filp);
3682 if (F2FS_I(inode)->i_compr_blocks)
3685 f2fs_balance_fs(F2FS_I_SB(inode), true);
3689 if (!IS_IMMUTABLE(inode)) {
3694 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3695 down_write(&F2FS_I(inode)->i_mmap_sem);
3697 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3699 while (page_idx < last_idx) {
3700 struct dnode_of_data dn;
3701 pgoff_t end_offset, count;
3703 set_new_dnode(&dn, inode, NULL, NULL, 0);
3704 ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
3706 if (ret == -ENOENT) {
3707 page_idx = f2fs_get_next_page_offset(&dn,
3715 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3716 count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
3717 count = round_up(count, F2FS_I(inode)->i_cluster_size);
3719 ret = reserve_compress_blocks(&dn, count);
3721 f2fs_put_dnode(&dn);
3727 reserved_blocks += ret;
3730 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3731 up_write(&F2FS_I(inode)->i_mmap_sem);
3734 F2FS_I(inode)->i_flags &= ~F2FS_IMMUTABLE_FL;
3735 f2fs_set_inode_flags(inode);
3736 inode->i_ctime = current_time(inode);
3737 f2fs_mark_inode_dirty_sync(inode, true);
3740 inode_unlock(inode);
3742 mnt_drop_write_file(filp);
3745 ret = put_user(reserved_blocks, (u64 __user *)arg);
3746 } else if (reserved_blocks && F2FS_I(inode)->i_compr_blocks) {
3747 set_sbi_flag(sbi, SBI_NEED_FSCK);
3748 f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
3749 "iblocks=%llu, reserved=%u, compr_blocks=%llu, "
3751 __func__, inode->i_ino, inode->i_blocks,
3753 F2FS_I(inode)->i_compr_blocks);
3759 long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
3761 if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
3763 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp))))
3767 case F2FS_IOC_GETFLAGS:
3768 return f2fs_ioc_getflags(filp, arg);
3769 case F2FS_IOC_SETFLAGS:
3770 return f2fs_ioc_setflags(filp, arg);
3771 case F2FS_IOC_GETVERSION:
3772 return f2fs_ioc_getversion(filp, arg);
3773 case F2FS_IOC_START_ATOMIC_WRITE:
3774 return f2fs_ioc_start_atomic_write(filp);
3775 case F2FS_IOC_COMMIT_ATOMIC_WRITE:
3776 return f2fs_ioc_commit_atomic_write(filp);
3777 case F2FS_IOC_START_VOLATILE_WRITE:
3778 return f2fs_ioc_start_volatile_write(filp);
3779 case F2FS_IOC_RELEASE_VOLATILE_WRITE:
3780 return f2fs_ioc_release_volatile_write(filp);
3781 case F2FS_IOC_ABORT_VOLATILE_WRITE:
3782 return f2fs_ioc_abort_volatile_write(filp);
3783 case F2FS_IOC_SHUTDOWN:
3784 return f2fs_ioc_shutdown(filp, arg);
3786 return f2fs_ioc_fitrim(filp, arg);
3787 case F2FS_IOC_SET_ENCRYPTION_POLICY:
3788 return f2fs_ioc_set_encryption_policy(filp, arg);
3789 case F2FS_IOC_GET_ENCRYPTION_POLICY:
3790 return f2fs_ioc_get_encryption_policy(filp, arg);
3791 case F2FS_IOC_GET_ENCRYPTION_PWSALT:
3792 return f2fs_ioc_get_encryption_pwsalt(filp, arg);
3793 case FS_IOC_GET_ENCRYPTION_POLICY_EX:
3794 return f2fs_ioc_get_encryption_policy_ex(filp, arg);
3795 case FS_IOC_ADD_ENCRYPTION_KEY:
3796 return f2fs_ioc_add_encryption_key(filp, arg);
3797 case FS_IOC_REMOVE_ENCRYPTION_KEY:
3798 return f2fs_ioc_remove_encryption_key(filp, arg);
3799 case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
3800 return f2fs_ioc_remove_encryption_key_all_users(filp, arg);
3801 case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
3802 return f2fs_ioc_get_encryption_key_status(filp, arg);
3803 case FS_IOC_GET_ENCRYPTION_NONCE:
3804 return f2fs_ioc_get_encryption_nonce(filp, arg);
3805 case F2FS_IOC_GARBAGE_COLLECT:
3806 return f2fs_ioc_gc(filp, arg);
3807 case F2FS_IOC_GARBAGE_COLLECT_RANGE:
3808 return f2fs_ioc_gc_range(filp, arg);
3809 case F2FS_IOC_WRITE_CHECKPOINT:
3810 return f2fs_ioc_write_checkpoint(filp, arg);
3811 case F2FS_IOC_DEFRAGMENT:
3812 return f2fs_ioc_defragment(filp, arg);
3813 case F2FS_IOC_MOVE_RANGE:
3814 return f2fs_ioc_move_range(filp, arg);
3815 case F2FS_IOC_FLUSH_DEVICE:
3816 return f2fs_ioc_flush_device(filp, arg);
3817 case F2FS_IOC_GET_FEATURES:
3818 return f2fs_ioc_get_features(filp, arg);
3819 case F2FS_IOC_FSGETXATTR:
3820 return f2fs_ioc_fsgetxattr(filp, arg);
3821 case F2FS_IOC_FSSETXATTR:
3822 return f2fs_ioc_fssetxattr(filp, arg);
3823 case F2FS_IOC_GET_PIN_FILE:
3824 return f2fs_ioc_get_pin_file(filp, arg);
3825 case F2FS_IOC_SET_PIN_FILE:
3826 return f2fs_ioc_set_pin_file(filp, arg);
3827 case F2FS_IOC_PRECACHE_EXTENTS:
3828 return f2fs_ioc_precache_extents(filp, arg);
3829 case F2FS_IOC_RESIZE_FS:
3830 return f2fs_ioc_resize_fs(filp, arg);
3831 case FS_IOC_ENABLE_VERITY:
3832 return f2fs_ioc_enable_verity(filp, arg);
3833 case FS_IOC_MEASURE_VERITY:
3834 return f2fs_ioc_measure_verity(filp, arg);
3835 case F2FS_IOC_GET_VOLUME_NAME:
3836 return f2fs_get_volume_name(filp, arg);
3837 case F2FS_IOC_SET_VOLUME_NAME:
3838 return f2fs_set_volume_name(filp, arg);
3839 case F2FS_IOC_GET_COMPRESS_BLOCKS:
3840 return f2fs_get_compress_blocks(filp, arg);
3841 case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
3842 return f2fs_release_compress_blocks(filp, arg);
3843 case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
3844 return f2fs_reserve_compress_blocks(filp, arg);
3850 static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
3852 struct file *file = iocb->ki_filp;
3853 struct inode *inode = file_inode(file);
3856 if (!f2fs_is_compress_backend_ready(inode))
3859 ret = generic_file_read_iter(iocb, iter);
3862 f2fs_update_iostat(F2FS_I_SB(inode), APP_READ_IO, ret);
3867 static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
3869 struct file *file = iocb->ki_filp;
3870 struct inode *inode = file_inode(file);
3873 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
3878 if (!f2fs_is_compress_backend_ready(inode)) {
3883 if (iocb->ki_flags & IOCB_NOWAIT) {
3884 if (!inode_trylock(inode)) {
3892 ret = generic_write_checks(iocb, from);
3894 bool preallocated = false;
3895 size_t target_size = 0;
3898 if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
3899 set_inode_flag(inode, FI_NO_PREALLOC);
3901 if ((iocb->ki_flags & IOCB_NOWAIT)) {
3902 if (!f2fs_overwrite_io(inode, iocb->ki_pos,
3903 iov_iter_count(from)) ||
3904 f2fs_has_inline_data(inode) ||
3905 f2fs_force_buffered_io(inode, iocb, from)) {
3906 clear_inode_flag(inode, FI_NO_PREALLOC);
3907 inode_unlock(inode);
3914 if (is_inode_flag_set(inode, FI_NO_PREALLOC))
3917 if (iocb->ki_flags & IOCB_DIRECT) {
3919 * Convert inline data for Direct I/O before entering
3922 err = f2fs_convert_inline_inode(inode);
3926 * If force_buffere_io() is true, we have to allocate
3927 * blocks all the time, since f2fs_direct_IO will fall
3928 * back to buffered IO.
3930 if (!f2fs_force_buffered_io(inode, iocb, from) &&
3931 allow_outplace_dio(inode, iocb, from))
3934 preallocated = true;
3935 target_size = iocb->ki_pos + iov_iter_count(from);
3937 err = f2fs_preallocate_blocks(iocb, from);
3940 clear_inode_flag(inode, FI_NO_PREALLOC);
3941 inode_unlock(inode);
3946 ret = __generic_file_write_iter(iocb, from);
3947 clear_inode_flag(inode, FI_NO_PREALLOC);
3949 /* if we couldn't write data, we should deallocate blocks. */
3950 if (preallocated && i_size_read(inode) < target_size)
3951 f2fs_truncate(inode);
3954 f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
3956 inode_unlock(inode);
3958 trace_f2fs_file_write_iter(inode, iocb->ki_pos,
3959 iov_iter_count(from), ret);
3961 ret = generic_write_sync(iocb, ret);
3965 #ifdef CONFIG_COMPAT
3966 long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3969 case F2FS_IOC32_GETFLAGS:
3970 cmd = F2FS_IOC_GETFLAGS;
3972 case F2FS_IOC32_SETFLAGS:
3973 cmd = F2FS_IOC_SETFLAGS;
3975 case F2FS_IOC32_GETVERSION:
3976 cmd = F2FS_IOC_GETVERSION;
3978 case F2FS_IOC_START_ATOMIC_WRITE:
3979 case F2FS_IOC_COMMIT_ATOMIC_WRITE:
3980 case F2FS_IOC_START_VOLATILE_WRITE:
3981 case F2FS_IOC_RELEASE_VOLATILE_WRITE:
3982 case F2FS_IOC_ABORT_VOLATILE_WRITE:
3983 case F2FS_IOC_SHUTDOWN:
3985 case F2FS_IOC_SET_ENCRYPTION_POLICY:
3986 case F2FS_IOC_GET_ENCRYPTION_PWSALT:
3987 case F2FS_IOC_GET_ENCRYPTION_POLICY:
3988 case FS_IOC_GET_ENCRYPTION_POLICY_EX:
3989 case FS_IOC_ADD_ENCRYPTION_KEY:
3990 case FS_IOC_REMOVE_ENCRYPTION_KEY:
3991 case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
3992 case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
3993 case FS_IOC_GET_ENCRYPTION_NONCE:
3994 case F2FS_IOC_GARBAGE_COLLECT:
3995 case F2FS_IOC_GARBAGE_COLLECT_RANGE:
3996 case F2FS_IOC_WRITE_CHECKPOINT:
3997 case F2FS_IOC_DEFRAGMENT:
3998 case F2FS_IOC_MOVE_RANGE:
3999 case F2FS_IOC_FLUSH_DEVICE:
4000 case F2FS_IOC_GET_FEATURES:
4001 case F2FS_IOC_FSGETXATTR:
4002 case F2FS_IOC_FSSETXATTR:
4003 case F2FS_IOC_GET_PIN_FILE:
4004 case F2FS_IOC_SET_PIN_FILE:
4005 case F2FS_IOC_PRECACHE_EXTENTS:
4006 case F2FS_IOC_RESIZE_FS:
4007 case FS_IOC_ENABLE_VERITY:
4008 case FS_IOC_MEASURE_VERITY:
4009 case F2FS_IOC_GET_VOLUME_NAME:
4010 case F2FS_IOC_SET_VOLUME_NAME:
4011 case F2FS_IOC_GET_COMPRESS_BLOCKS:
4012 case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
4013 case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
4016 return -ENOIOCTLCMD;
4018 return f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
4022 const struct file_operations f2fs_file_operations = {
4023 .llseek = f2fs_llseek,
4024 .read_iter = f2fs_file_read_iter,
4025 .write_iter = f2fs_file_write_iter,
4026 .open = f2fs_file_open,
4027 .release = f2fs_release_file,
4028 .mmap = f2fs_file_mmap,
4029 .flush = f2fs_file_flush,
4030 .fsync = f2fs_sync_file,
4031 .fallocate = f2fs_fallocate,
4032 .unlocked_ioctl = f2fs_ioctl,
4033 #ifdef CONFIG_COMPAT
4034 .compat_ioctl = f2fs_compat_ioctl,
4036 .splice_read = generic_file_splice_read,
4037 .splice_write = iter_file_splice_write,