1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6 * http://www.samsung.com/
9 #include <linux/f2fs_fs.h>
10 #include <linux/stat.h>
11 #include <linux/buffer_head.h>
12 #include <linux/writeback.h>
13 #include <linux/blkdev.h>
14 #include <linux/falloc.h>
15 #include <linux/types.h>
16 #include <linux/compat.h>
17 #include <linux/uaccess.h>
18 #include <linux/mount.h>
19 #include <linux/pagevec.h>
20 #include <linux/uio.h>
21 #include <linux/uuid.h>
22 #include <linux/file.h>
23 #include <linux/nls.h>
24 #include <linux/sched/signal.h>
32 #include <trace/events/f2fs.h>
33 #include <uapi/linux/f2fs.h>
35 static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
37 struct inode *inode = file_inode(vmf->vma->vm_file);
40 down_read(&F2FS_I(inode)->i_mmap_sem);
41 ret = filemap_fault(vmf);
42 up_read(&F2FS_I(inode)->i_mmap_sem);
45 f2fs_update_iostat(F2FS_I_SB(inode), APP_MAPPED_READ_IO,
48 trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret);
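/*
 * Write-fault handler: convert any inline data, allocate the backing
 * block if needed, wait for in-flight writeback, and zero the part of
 * the page beyond EOF before dirtying it.
 */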
53 static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
55 struct page *page = vmf->page;
56 struct inode *inode = file_inode(vmf->vma->vm_file);
57 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
58 struct dnode_of_data dn;
59 bool need_alloc = true;
62 if (unlikely(IS_IMMUTABLE(inode)))
63 return VM_FAULT_SIGBUS;
65 if (unlikely(f2fs_cp_error(sbi))) {
70 if (!f2fs_is_checkpoint_ready(sbi)) {
75 err = f2fs_convert_inline_inode(inode);
79 #ifdef CONFIG_F2FS_FS_COMPRESSION
80 if (f2fs_compressed_file(inode)) {
81 int ret = f2fs_is_compressed_cluster(inode, page->index);
87 if (ret < F2FS_I(inode)->i_cluster_size) {
95 /* this should be done outside of any locked page */
97 f2fs_balance_fs(sbi, true);
99 sb_start_pagefault(inode->i_sb);
101 f2fs_bug_on(sbi, f2fs_has_inline_data(inode));
103 file_update_time(vmf->vma->vm_file);
104 down_read(&F2FS_I(inode)->i_mmap_sem);
106 if (unlikely(page->mapping != inode->i_mapping ||
107 page_offset(page) > i_size_read(inode) ||
108 !PageUptodate(page))) {
115 /* block allocation */
116 f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
117 set_new_dnode(&dn, inode, NULL, NULL, 0);
118 err = f2fs_get_block(&dn, page->index);
120 f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
123 #ifdef CONFIG_F2FS_FS_COMPRESSION
125 set_new_dnode(&dn, inode, NULL, NULL, 0);
126 err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
135 f2fs_wait_on_page_writeback(page, DATA, false, true);
137 /* wait for GCed page writeback via META_MAPPING */
138 f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
141 * check to see if the page is mapped already (no holes)
143 if (PageMappedToDisk(page))
146 /* page is wholly or partially inside EOF */
147 if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
148 i_size_read(inode)) {
151 offset = i_size_read(inode) & ~PAGE_MASK;
152 zero_user_segment(page, offset, PAGE_SIZE);
154 set_page_dirty(page);
155 if (!PageUptodate(page))
156 SetPageUptodate(page);
158 f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);
159 f2fs_update_time(sbi, REQ_TIME);
161 trace_f2fs_vm_page_mkwrite(page, DATA);
163 up_read(&F2FS_I(inode)->i_mmap_sem);
165 sb_end_pagefault(inode->i_sb);
167 return block_page_mkwrite_return(err);
170 static const struct vm_operations_struct f2fs_file_vm_ops = {
171 .fault = f2fs_filemap_fault,
172 .map_pages = filemap_map_pages,
173 .page_mkwrite = f2fs_vm_page_mkwrite,
176 static int get_parent_ino(struct inode *inode, nid_t *pino)
178 struct dentry *dentry;
181 * Make sure to get the non-deleted alias. The alias associated with
182 * the open file descriptor being fsync()'ed may be deleted already.
184 dentry = d_find_alias(inode);
188 *pino = parent_ino(dentry);
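/*
 * Decide whether this fsync can rely on roll-forward recovery alone or
 * must trigger a full checkpoint; any reason other than CP_NO_NEEDED
 * forces the checkpoint path in f2fs_do_sync_file().
 */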
193 static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
195 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
196 enum cp_reason_type cp_reason = CP_NO_NEEDED;
198 if (!S_ISREG(inode->i_mode))
199 cp_reason = CP_NON_REGULAR;
200 else if (f2fs_compressed_file(inode))
201 cp_reason = CP_COMPRESSED;
202 else if (inode->i_nlink != 1)
203 cp_reason = CP_HARDLINK;
204 else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
205 cp_reason = CP_SB_NEED_CP;
206 else if (file_wrong_pino(inode))
207 cp_reason = CP_WRONG_PINO;
208 else if (!f2fs_space_for_roll_forward(sbi))
209 cp_reason = CP_NO_SPC_ROLL;
210 else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
211 cp_reason = CP_NODE_NEED_CP;
212 else if (test_opt(sbi, FASTBOOT))
213 cp_reason = CP_FASTBOOT_MODE;
214 else if (F2FS_OPTION(sbi).active_logs == 2)
215 cp_reason = CP_SPEC_LOG_NUM;
216 else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
217 f2fs_need_dentry_mark(sbi, inode->i_ino) &&
218 f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
220 cp_reason = CP_RECOVER_DIR;
225 static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
227 struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
229 /* but we still need to detect any pending inode updates */
230 if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino))
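/*
 * Re-record the parent inode number once consistency has been secured,
 * so that later fsyncs of this inode can rely on roll-forward recovery.
 */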
236 static void try_to_fix_pino(struct inode *inode)
238 struct f2fs_inode_info *fi = F2FS_I(inode);
241 down_write(&fi->i_sem);
242 if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
243 get_parent_ino(inode, &pino)) {
244 f2fs_i_pino_write(inode, pino);
245 file_got_pino(inode);
247 up_write(&fi->i_sem);
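/*
 * Core fsync path: write back the data pages, then either issue a full
 * checkpoint (when need_do_checkpoint() says recovery alone is unsafe)
 * or write the fsync node chain plus APPEND/UPDATE recovery info for
 * roll-forward recovery.
 */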
250 static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
251 int datasync, bool atomic)
253 struct inode *inode = file->f_mapping->host;
254 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
255 nid_t ino = inode->i_ino;
257 enum cp_reason_type cp_reason = 0;
258 struct writeback_control wbc = {
259 .sync_mode = WB_SYNC_ALL,
260 .nr_to_write = LONG_MAX,
263 unsigned int seq_id = 0;
265 if (unlikely(f2fs_readonly(inode->i_sb) ||
266 is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
269 trace_f2fs_sync_file_enter(inode);
271 if (S_ISDIR(inode->i_mode))
274 /* if fdatasync is triggered, let's do in-place-update */
275 if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
276 set_inode_flag(inode, FI_NEED_IPU);
277 ret = file_write_and_wait_range(file, start, end);
278 clear_inode_flag(inode, FI_NEED_IPU);
281 trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
285 /* if the inode is dirty, let's recover all the time */
286 if (!f2fs_skip_inode_update(inode, datasync)) {
287 f2fs_write_inode(inode, NULL);
292 * if there is no written data, don't waste time writing recovery info.
294 if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
295 !f2fs_exist_written_data(sbi, ino, APPEND_INO)) {
297 /* it may call write_inode just prior to fsync */
298 if (need_inode_page_update(sbi, ino))
301 if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
302 f2fs_exist_written_data(sbi, ino, UPDATE_INO))
308 * Both fdatasync() and fsync() can be recovered from a sudden power-off.
311 down_read(&F2FS_I(inode)->i_sem);
312 cp_reason = need_do_checkpoint(inode);
313 up_read(&F2FS_I(inode)->i_sem);
316 /* all the dirty node pages should be flushed for POR */
317 ret = f2fs_sync_fs(inode->i_sb, 1);
320 * We've secured consistency through sync_fs. The following pino
321 * will be used only for fsynced inodes after the checkpoint.
323 try_to_fix_pino(inode);
324 clear_inode_flag(inode, FI_APPEND_WRITE);
325 clear_inode_flag(inode, FI_UPDATE_WRITE);
329 atomic_inc(&sbi->wb_sync_req[NODE]);
330 ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
331 atomic_dec(&sbi->wb_sync_req[NODE]);
335 /* if cp_error was enabled, we should avoid an infinite loop */
336 if (unlikely(f2fs_cp_error(sbi))) {
341 if (f2fs_need_inode_block_update(sbi, ino)) {
342 f2fs_mark_inode_dirty_sync(inode, true);
343 f2fs_write_inode(inode, NULL);
348 * If it's an atomic write, it's fine to keep the write ordering. So
349 * here we don't need to wait for node write completion, since we use
350 * a node chain which serializes node blocks. If one of the node writes is
351 * reordered, we see a broken chain, which stops roll-forward recovery.
352 * It means we recover either all or none of the node blocks in the chain.
356 ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
361 /* once recovery info is written, we don't need to track this */
362 f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
363 clear_inode_flag(inode, FI_APPEND_WRITE);
365 if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
366 ret = f2fs_issue_flush(sbi, inode->i_ino);
368 f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
369 clear_inode_flag(inode, FI_UPDATE_WRITE);
370 f2fs_remove_ino_entry(sbi, ino, FLUSH_INO);
372 f2fs_update_time(sbi, REQ_TIME);
374 trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
378 int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
380 if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
382 return f2fs_do_sync_file(file, start, end, datasync, false);
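/*
 * For SEEK_DATA/SEEK_HOLE: a valid block address counts as data, as does
 * a preallocated (NEW_ADDR) block whose page is dirty in the page cache;
 * NULL_ADDR counts as a hole.
 */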
385 static bool __found_offset(struct address_space *mapping, block_t blkaddr,
386 pgoff_t index, int whence)
390 if (__is_valid_data_blkaddr(blkaddr))
392 if (blkaddr == NEW_ADDR &&
393 xa_get_mark(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY))
397 if (blkaddr == NULL_ADDR)
404 static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
406 struct inode *inode = file->f_mapping->host;
407 loff_t maxbytes = inode->i_sb->s_maxbytes;
408 struct dnode_of_data dn;
409 pgoff_t pgofs, end_offset;
410 loff_t data_ofs = offset;
416 isize = i_size_read(inode);
420 /* handle inline data case */
421 if (f2fs_has_inline_data(inode)) {
422 if (whence == SEEK_HOLE) {
425 } else if (whence == SEEK_DATA) {
431 pgofs = (pgoff_t)(offset >> PAGE_SHIFT);
433 for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
434 set_new_dnode(&dn, inode, NULL, NULL, 0);
435 err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
436 if (err && err != -ENOENT) {
438 } else if (err == -ENOENT) {
439 /* direct node does not exist */
440 if (whence == SEEK_DATA) {
441 pgofs = f2fs_get_next_page_offset(&dn, pgofs);
448 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
450 /* find data/hole in dnode block */
451 for (; dn.ofs_in_node < end_offset;
452 dn.ofs_in_node++, pgofs++,
453 data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
456 blkaddr = f2fs_data_blkaddr(&dn);
458 if (__is_valid_data_blkaddr(blkaddr) &&
459 !f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
460 blkaddr, DATA_GENERIC_ENHANCE)) {
465 if (__found_offset(file->f_mapping, blkaddr,
474 if (whence == SEEK_DATA)
477 if (whence == SEEK_HOLE && data_ofs > isize)
480 return vfs_setpos(file, data_ofs, maxbytes);
486 static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
488 struct inode *inode = file->f_mapping->host;
489 loff_t maxbytes = inode->i_sb->s_maxbytes;
491 if (f2fs_compressed_file(inode))
492 maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;
498 return generic_file_llseek_size(file, offset, whence,
499 maxbytes, i_size_read(inode));
504 return f2fs_seek_block(file, offset, whence);
510 static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
512 struct inode *inode = file_inode(file);
514 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
517 if (!f2fs_is_compress_backend_ready(inode))
521 vma->vm_ops = &f2fs_file_vm_ops;
522 set_inode_flag(inode, FI_MMAP_FILE);
526 static int f2fs_file_open(struct inode *inode, struct file *filp)
528 int err = fscrypt_file_open(inode, filp);
533 if (!f2fs_is_compress_backend_ready(inode))
536 err = fsverity_file_open(inode, filp);
540 filp->f_mode |= FMODE_NOWAIT;
542 return dquot_file_open(inode, filp);
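/*
 * Free 'count' block addresses starting at dn->ofs_in_node: invalidate
 * each valid block, keep compressed-cluster accounting up to date, and
 * drop the range from the extent cache.
 */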
545 void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
547 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
548 struct f2fs_node *raw_node;
549 int nr_free = 0, ofs = dn->ofs_in_node, len = count;
552 bool compressed_cluster = false;
553 int cluster_index = 0, valid_blocks = 0;
554 int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
555 bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);
557 if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
558 base = get_extra_isize(dn->inode);
560 raw_node = F2FS_NODE(dn->node_page);
561 addr = blkaddr_in_node(raw_node) + base + ofs;
563 /* Assumption: truncation starts at a cluster boundary */
564 for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
565 block_t blkaddr = le32_to_cpu(*addr);
567 if (f2fs_compressed_file(dn->inode) &&
568 !(cluster_index & (cluster_size - 1))) {
569 if (compressed_cluster)
570 f2fs_i_compr_blocks_update(dn->inode,
571 valid_blocks, false);
572 compressed_cluster = (blkaddr == COMPRESS_ADDR);
576 if (blkaddr == NULL_ADDR)
579 dn->data_blkaddr = NULL_ADDR;
580 f2fs_set_data_blkaddr(dn);
582 if (__is_valid_data_blkaddr(blkaddr)) {
583 if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
584 DATA_GENERIC_ENHANCE))
586 if (compressed_cluster)
590 if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
591 clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
593 f2fs_invalidate_blocks(sbi, blkaddr);
595 if (!released || blkaddr != COMPRESS_ADDR)
599 if (compressed_cluster)
600 f2fs_i_compr_blocks_update(dn->inode, valid_blocks, false);
605 * once we invalidate a valid blkaddr in range [ofs, ofs + count],
606 * we will invalidate all blkaddrs in the whole range.
608 fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
610 f2fs_update_extent_cache_range(dn, fofs, 0, len);
611 dec_valid_block_count(sbi, dn->inode, nr_free);
613 dn->ofs_in_node = ofs;
615 f2fs_update_time(sbi, REQ_TIME);
616 trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
617 dn->ofs_in_node, nr_free);
620 void f2fs_truncate_data_blocks(struct dnode_of_data *dn)
622 f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
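/*
 * Zero the tail of the page straddling the new EOF. With cache_only,
 * only an already up-to-date cached page is touched; otherwise the page
 * is read in, zeroed from 'offset', and dirtied.
 */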
625 static int truncate_partial_data_page(struct inode *inode, u64 from,
628 loff_t offset = from & (PAGE_SIZE - 1);
629 pgoff_t index = from >> PAGE_SHIFT;
630 struct address_space *mapping = inode->i_mapping;
633 if (!offset && !cache_only)
637 page = find_lock_page(mapping, index);
638 if (page && PageUptodate(page))
640 f2fs_put_page(page, 1);
644 page = f2fs_get_lock_data_page(inode, index, true);
646 return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
648 f2fs_wait_on_page_writeback(page, DATA, true, true);
649 zero_user(page, offset, PAGE_SIZE - offset);
651 /* An encrypted inode should have a key and truncate the last page. */
652 f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));
654 set_page_dirty(page);
655 f2fs_put_page(page, 1);
659 int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock)
661 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
662 struct dnode_of_data dn;
664 int count = 0, err = 0;
666 bool truncate_page = false;
668 trace_f2fs_truncate_blocks_enter(inode, from);
670 free_from = (pgoff_t)F2FS_BLK_ALIGN(from);
672 if (free_from >= max_file_blocks(inode))
678 ipage = f2fs_get_node_page(sbi, inode->i_ino);
680 err = PTR_ERR(ipage);
684 if (f2fs_has_inline_data(inode)) {
685 f2fs_truncate_inline_inode(inode, ipage, from);
686 f2fs_put_page(ipage, 1);
687 truncate_page = true;
691 set_new_dnode(&dn, inode, ipage, NULL, 0);
692 err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
699 count = ADDRS_PER_PAGE(dn.node_page, inode);
701 count -= dn.ofs_in_node;
702 f2fs_bug_on(sbi, count < 0);
704 if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
705 f2fs_truncate_data_blocks_range(&dn, count);
711 err = f2fs_truncate_inode_blocks(inode, free_from);
716 /* lastly zero out the first data page */
718 err = truncate_partial_data_page(inode, from, truncate_page);
720 trace_f2fs_truncate_blocks_exit(inode, err);
724 int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
726 u64 free_from = from;
729 #ifdef CONFIG_F2FS_FS_COMPRESSION
731 * for compressed files, only cluster-size-aligned
732 * truncation is supported.
734 if (f2fs_compressed_file(inode))
735 free_from = round_up(from,
736 F2FS_I(inode)->i_cluster_size << PAGE_SHIFT);
739 err = f2fs_do_truncate_blocks(inode, free_from, lock);
743 #ifdef CONFIG_F2FS_FS_COMPRESSION
744 if (from != free_from) {
745 err = f2fs_truncate_partial_cluster(inode, from, lock);
754 int f2fs_truncate(struct inode *inode)
758 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
761 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
762 S_ISLNK(inode->i_mode)))
765 trace_f2fs_truncate(inode);
767 if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
768 f2fs_show_injection_info(F2FS_I_SB(inode), FAULT_TRUNCATE);
772 err = dquot_initialize(inode);
776 /* we should check inline_data size */
777 if (!f2fs_may_inline_data(inode)) {
778 err = f2fs_convert_inline_inode(inode);
783 err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
787 inode->i_mtime = inode->i_ctime = current_time(inode);
788 f2fs_mark_inode_dirty_sync(inode, false);
792 int f2fs_getattr(const struct path *path, struct kstat *stat,
793 u32 request_mask, unsigned int query_flags)
795 struct inode *inode = d_inode(path->dentry);
796 struct f2fs_inode_info *fi = F2FS_I(inode);
797 struct f2fs_inode *ri;
800 if (f2fs_has_extra_attr(inode) &&
801 f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
802 F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
803 stat->result_mask |= STATX_BTIME;
804 stat->btime.tv_sec = fi->i_crtime.tv_sec;
805 stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
809 if (flags & F2FS_COMPR_FL)
810 stat->attributes |= STATX_ATTR_COMPRESSED;
811 if (flags & F2FS_APPEND_FL)
812 stat->attributes |= STATX_ATTR_APPEND;
813 if (IS_ENCRYPTED(inode))
814 stat->attributes |= STATX_ATTR_ENCRYPTED;
815 if (flags & F2FS_IMMUTABLE_FL)
816 stat->attributes |= STATX_ATTR_IMMUTABLE;
817 if (flags & F2FS_NODUMP_FL)
818 stat->attributes |= STATX_ATTR_NODUMP;
819 if (IS_VERITY(inode))
820 stat->attributes |= STATX_ATTR_VERITY;
822 stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
824 STATX_ATTR_ENCRYPTED |
825 STATX_ATTR_IMMUTABLE |
829 generic_fillattr(inode, stat);
831 /* we need to show initial sectors used for inline_data/dentries */
832 if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
833 f2fs_has_inline_dentry(inode))
834 stat->blocks += (stat->size + 511) >> 9;
839 #ifdef CONFIG_F2FS_FS_POSIX_ACL
840 static void __setattr_copy(struct inode *inode, const struct iattr *attr)
842 unsigned int ia_valid = attr->ia_valid;
844 if (ia_valid & ATTR_UID)
845 inode->i_uid = attr->ia_uid;
846 if (ia_valid & ATTR_GID)
847 inode->i_gid = attr->ia_gid;
848 if (ia_valid & ATTR_ATIME)
849 inode->i_atime = attr->ia_atime;
850 if (ia_valid & ATTR_MTIME)
851 inode->i_mtime = attr->ia_mtime;
852 if (ia_valid & ATTR_CTIME)
853 inode->i_ctime = attr->ia_ctime;
854 if (ia_valid & ATTR_MODE) {
855 umode_t mode = attr->ia_mode;
857 if (!in_group_p(inode->i_gid) &&
858 !capable_wrt_inode_uidgid(inode, CAP_FSETID))
860 set_acl_inode(inode, mode);
864 #define __setattr_copy setattr_copy
867 int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
869 struct inode *inode = d_inode(dentry);
872 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
875 if (unlikely(IS_IMMUTABLE(inode)))
878 if (unlikely(IS_APPEND(inode) &&
879 (attr->ia_valid & (ATTR_MODE | ATTR_UID |
880 ATTR_GID | ATTR_TIMES_SET))))
883 if ((attr->ia_valid & ATTR_SIZE) &&
884 !f2fs_is_compress_backend_ready(inode))
887 err = setattr_prepare(dentry, attr);
891 err = fscrypt_prepare_setattr(dentry, attr);
895 err = fsverity_prepare_setattr(dentry, attr);
899 if (is_quota_modification(inode, attr)) {
900 err = dquot_initialize(inode);
904 if ((attr->ia_valid & ATTR_UID &&
905 !uid_eq(attr->ia_uid, inode->i_uid)) ||
906 (attr->ia_valid & ATTR_GID &&
907 !gid_eq(attr->ia_gid, inode->i_gid))) {
908 f2fs_lock_op(F2FS_I_SB(inode));
909 err = dquot_transfer(inode, attr);
911 set_sbi_flag(F2FS_I_SB(inode),
912 SBI_QUOTA_NEED_REPAIR);
913 f2fs_unlock_op(F2FS_I_SB(inode));
917 * update uid/gid under lock_op(), so that dquot and inode can
918 * be updated atomically.
920 if (attr->ia_valid & ATTR_UID)
921 inode->i_uid = attr->ia_uid;
922 if (attr->ia_valid & ATTR_GID)
923 inode->i_gid = attr->ia_gid;
924 f2fs_mark_inode_dirty_sync(inode, true);
925 f2fs_unlock_op(F2FS_I_SB(inode));
928 if (attr->ia_valid & ATTR_SIZE) {
929 loff_t old_size = i_size_read(inode);
931 if (attr->ia_size > MAX_INLINE_DATA(inode)) {
933 * convert the inline inode before i_size_write() so the size
934 * stays within the inline_data limit while the inline flag is set.
936 err = f2fs_convert_inline_inode(inode);
941 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
942 down_write(&F2FS_I(inode)->i_mmap_sem);
944 truncate_setsize(inode, attr->ia_size);
946 if (attr->ia_size <= old_size)
947 err = f2fs_truncate(inode);
949 * do not trim all blocks after i_size if target size is
950 * larger than i_size.
952 up_write(&F2FS_I(inode)->i_mmap_sem);
953 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
957 spin_lock(&F2FS_I(inode)->i_size_lock);
958 inode->i_mtime = inode->i_ctime = current_time(inode);
959 F2FS_I(inode)->last_disk_size = i_size_read(inode);
960 spin_unlock(&F2FS_I(inode)->i_size_lock);
963 __setattr_copy(inode, attr);
965 if (attr->ia_valid & ATTR_MODE) {
966 err = posix_acl_chmod(inode, f2fs_get_inode_mode(inode));
968 if (is_inode_flag_set(inode, FI_ACL_MODE)) {
970 inode->i_mode = F2FS_I(inode)->i_acl_mode;
971 clear_inode_flag(inode, FI_ACL_MODE);
975 /* file size may have changed here */
976 f2fs_mark_inode_dirty_sync(inode, true);
978 /* inode change will produce dirty node pages flushed by checkpoint */
979 f2fs_balance_fs(F2FS_I_SB(inode), true);
984 const struct inode_operations f2fs_file_inode_operations = {
985 .getattr = f2fs_getattr,
986 .setattr = f2fs_setattr,
987 .get_acl = f2fs_get_acl,
988 .set_acl = f2fs_set_acl,
989 .listxattr = f2fs_listxattr,
990 .fiemap = f2fs_fiemap,
993 static int fill_zero(struct inode *inode, pgoff_t index,
994 loff_t start, loff_t len)
996 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1002 f2fs_balance_fs(sbi, true);
1005 page = f2fs_get_new_data_page(inode, NULL, index, false);
1006 f2fs_unlock_op(sbi);
1009 return PTR_ERR(page);
1011 f2fs_wait_on_page_writeback(page, DATA, true, true);
1012 zero_user(page, start, len);
1013 set_page_dirty(page);
1014 f2fs_put_page(page, 1);
1018 int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
1022 while (pg_start < pg_end) {
1023 struct dnode_of_data dn;
1024 pgoff_t end_offset, count;
1026 set_new_dnode(&dn, inode, NULL, NULL, 0);
1027 err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
1029 if (err == -ENOENT) {
1030 pg_start = f2fs_get_next_page_offset(&dn,
1037 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1038 count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);
1040 f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);
1042 f2fs_truncate_data_blocks_range(&dn, count);
1043 f2fs_put_dnode(&dn);
1050 static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
1052 pgoff_t pg_start, pg_end;
1053 loff_t off_start, off_end;
1056 ret = f2fs_convert_inline_inode(inode);
1060 pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
1061 pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
1063 off_start = offset & (PAGE_SIZE - 1);
1064 off_end = (offset + len) & (PAGE_SIZE - 1);
1066 if (pg_start == pg_end) {
1067 ret = fill_zero(inode, pg_start, off_start,
1068 off_end - off_start);
1073 ret = fill_zero(inode, pg_start++, off_start,
1074 PAGE_SIZE - off_start);
1079 ret = fill_zero(inode, pg_end, 0, off_end);
1084 if (pg_start < pg_end) {
1085 struct address_space *mapping = inode->i_mapping;
1086 loff_t blk_start, blk_end;
1087 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1089 f2fs_balance_fs(sbi, true);
1091 blk_start = (loff_t)pg_start << PAGE_SHIFT;
1092 blk_end = (loff_t)pg_end << PAGE_SHIFT;
1094 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1095 down_write(&F2FS_I(inode)->i_mmap_sem);
1097 truncate_inode_pages_range(mapping, blk_start,
1101 ret = f2fs_truncate_hole(inode, pg_start, pg_end);
1102 f2fs_unlock_op(sbi);
1104 up_write(&F2FS_I(inode)->i_mmap_sem);
1105 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
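/*
 * Block-exchange helpers: __read_out_blkaddrs() records the source block
 * addresses (detaching the ones to be moved), __clone_blkaddrs() replaces
 * or copies them into the destination, and __roll_back_blkaddrs() restores
 * the source addresses on failure.
 */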
1112 static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
1113 int *do_replace, pgoff_t off, pgoff_t len)
1115 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1116 struct dnode_of_data dn;
1120 set_new_dnode(&dn, inode, NULL, NULL, 0);
1121 ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
1122 if (ret && ret != -ENOENT) {
1124 } else if (ret == -ENOENT) {
1125 if (dn.max_level == 0)
1127 done = min((pgoff_t)ADDRS_PER_BLOCK(inode) -
1128 dn.ofs_in_node, len);
1134 done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
1135 dn.ofs_in_node, len);
1136 for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
1137 *blkaddr = f2fs_data_blkaddr(&dn);
1139 if (__is_valid_data_blkaddr(*blkaddr) &&
1140 !f2fs_is_valid_blkaddr(sbi, *blkaddr,
1141 DATA_GENERIC_ENHANCE)) {
1142 f2fs_put_dnode(&dn);
1143 return -EFSCORRUPTED;
1146 if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {
1148 if (f2fs_lfs_mode(sbi)) {
1149 f2fs_put_dnode(&dn);
1153 /* do not invalidate this block address */
1154 f2fs_update_data_blkaddr(&dn, NULL_ADDR);
1158 f2fs_put_dnode(&dn);
1167 static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
1168 int *do_replace, pgoff_t off, int len)
1170 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1171 struct dnode_of_data dn;
1174 for (i = 0; i < len; i++, do_replace++, blkaddr++) {
1175 if (*do_replace == 0)
1178 set_new_dnode(&dn, inode, NULL, NULL, 0);
1179 ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
1181 dec_valid_block_count(sbi, inode, 1);
1182 f2fs_invalidate_blocks(sbi, *blkaddr);
1184 f2fs_update_data_blkaddr(&dn, *blkaddr);
1186 f2fs_put_dnode(&dn);
1191 static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
1192 block_t *blkaddr, int *do_replace,
1193 pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
1195 struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
1200 if (blkaddr[i] == NULL_ADDR && !full) {
1205 if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
1206 struct dnode_of_data dn;
1207 struct node_info ni;
1211 set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
1212 ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
1216 ret = f2fs_get_node_info(sbi, dn.nid, &ni);
1218 f2fs_put_dnode(&dn);
1222 ilen = min((pgoff_t)
1223 ADDRS_PER_PAGE(dn.node_page, dst_inode) -
1224 dn.ofs_in_node, len - i);
1226 dn.data_blkaddr = f2fs_data_blkaddr(&dn);
1227 f2fs_truncate_data_blocks_range(&dn, 1);
1229 if (do_replace[i]) {
1230 f2fs_i_blocks_write(src_inode,
1232 f2fs_i_blocks_write(dst_inode,
1234 f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
1235 blkaddr[i], ni.version, true, false);
1241 new_size = (loff_t)(dst + i) << PAGE_SHIFT;
1242 if (dst_inode->i_size < new_size)
1243 f2fs_i_size_write(dst_inode, new_size);
1244 } while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));
1246 f2fs_put_dnode(&dn);
1248 struct page *psrc, *pdst;
1250 psrc = f2fs_get_lock_data_page(src_inode,
1253 return PTR_ERR(psrc);
1254 pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i,
1257 f2fs_put_page(psrc, 1);
1258 return PTR_ERR(pdst);
1260 f2fs_copy_page(psrc, pdst);
1261 set_page_dirty(pdst);
1262 f2fs_put_page(pdst, 1);
1263 f2fs_put_page(psrc, 1);
1265 ret = f2fs_truncate_hole(src_inode,
1266 src + i, src + i + 1);
1275 static int __exchange_data_block(struct inode *src_inode,
1276 struct inode *dst_inode, pgoff_t src, pgoff_t dst,
1277 pgoff_t len, bool full)
1279 block_t *src_blkaddr;
1285 olen = min((pgoff_t)4 * ADDRS_PER_BLOCK(src_inode), len);
1287 src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
1288 array_size(olen, sizeof(block_t)),
1293 do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
1294 array_size(olen, sizeof(int)),
1297 kvfree(src_blkaddr);
1301 ret = __read_out_blkaddrs(src_inode, src_blkaddr,
1302 do_replace, src, olen);
1306 ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
1307 do_replace, src, dst, olen, full);
1315 kvfree(src_blkaddr);
1321 __roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen);
1322 kvfree(src_blkaddr);
1327 static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
1329 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1330 pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1331 pgoff_t start = offset >> PAGE_SHIFT;
1332 pgoff_t end = (offset + len) >> PAGE_SHIFT;
1335 f2fs_balance_fs(sbi, true);
1337 /* avoid gc operation during block exchange */
1338 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1339 down_write(&F2FS_I(inode)->i_mmap_sem);
1342 f2fs_drop_extent_tree(inode);
1343 truncate_pagecache(inode, offset);
1344 ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
1345 f2fs_unlock_op(sbi);
1347 up_write(&F2FS_I(inode)->i_mmap_sem);
1348 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1352 static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
1357 if (offset + len >= i_size_read(inode))
1360 /* collapse range should be aligned to block size of f2fs. */
1361 if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
1364 ret = f2fs_convert_inline_inode(inode);
1368 /* write out all dirty pages from offset */
1369 ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1373 ret = f2fs_do_collapse(inode, offset, len);
1377 /* write out all moved pages, if possible */
1378 down_write(&F2FS_I(inode)->i_mmap_sem);
1379 filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1380 truncate_pagecache(inode, offset);
1382 new_size = i_size_read(inode) - len;
1383 ret = f2fs_truncate_blocks(inode, new_size, true);
1384 up_write(&F2FS_I(inode)->i_mmap_sem);
1386 f2fs_i_size_write(inode, new_size);
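/*
 * For FALLOC_FL_ZERO_RANGE: reserve new blocks across the range and
 * invalidate any existing ones, leaving preallocated (NEW_ADDR) blocks
 * that read back as zeros.
 */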
1390 static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
1393 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1394 pgoff_t index = start;
1395 unsigned int ofs_in_node = dn->ofs_in_node;
1399 for (; index < end; index++, dn->ofs_in_node++) {
1400 if (f2fs_data_blkaddr(dn) == NULL_ADDR)
1404 dn->ofs_in_node = ofs_in_node;
1405 ret = f2fs_reserve_new_blocks(dn, count);
1409 dn->ofs_in_node = ofs_in_node;
1410 for (index = start; index < end; index++, dn->ofs_in_node++) {
1411 dn->data_blkaddr = f2fs_data_blkaddr(dn);
1413 * f2fs_reserve_new_blocks will not guarantee entire block allocation.
1416 if (dn->data_blkaddr == NULL_ADDR) {
1420 if (dn->data_blkaddr != NEW_ADDR) {
1421 f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
1422 dn->data_blkaddr = NEW_ADDR;
1423 f2fs_set_data_blkaddr(dn);
1427 f2fs_update_extent_cache_range(dn, start, 0, index - start);
1432 static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
1435 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1436 struct address_space *mapping = inode->i_mapping;
1437 pgoff_t index, pg_start, pg_end;
1438 loff_t new_size = i_size_read(inode);
1439 loff_t off_start, off_end;
1442 ret = inode_newsize_ok(inode, (len + offset));
1446 ret = f2fs_convert_inline_inode(inode);
1450 ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
1454 pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
1455 pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
1457 off_start = offset & (PAGE_SIZE - 1);
1458 off_end = (offset + len) & (PAGE_SIZE - 1);
1460 if (pg_start == pg_end) {
1461 ret = fill_zero(inode, pg_start, off_start,
1462 off_end - off_start);
1466 new_size = max_t(loff_t, new_size, offset + len);
1469 ret = fill_zero(inode, pg_start++, off_start,
1470 PAGE_SIZE - off_start);
1474 new_size = max_t(loff_t, new_size,
1475 (loff_t)pg_start << PAGE_SHIFT);
1478 for (index = pg_start; index < pg_end;) {
1479 struct dnode_of_data dn;
1480 unsigned int end_offset;
1483 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1484 down_write(&F2FS_I(inode)->i_mmap_sem);
1486 truncate_pagecache_range(inode,
1487 (loff_t)index << PAGE_SHIFT,
1488 ((loff_t)pg_end << PAGE_SHIFT) - 1);
1492 set_new_dnode(&dn, inode, NULL, NULL, 0);
1493 ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
1495 f2fs_unlock_op(sbi);
1496 up_write(&F2FS_I(inode)->i_mmap_sem);
1497 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1501 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1502 end = min(pg_end, end_offset - dn.ofs_in_node + index);
1504 ret = f2fs_do_zero_range(&dn, index, end);
1505 f2fs_put_dnode(&dn);
1507 f2fs_unlock_op(sbi);
1508 up_write(&F2FS_I(inode)->i_mmap_sem);
1509 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1511 f2fs_balance_fs(sbi, dn.node_changed);
1517 new_size = max_t(loff_t, new_size,
1518 (loff_t)index << PAGE_SHIFT);
1522 ret = fill_zero(inode, pg_end, 0, off_end);
1526 new_size = max_t(loff_t, new_size, offset + len);
1531 if (new_size > i_size_read(inode)) {
1532 if (mode & FALLOC_FL_KEEP_SIZE)
1533 file_set_keep_isize(inode);
1535 f2fs_i_size_write(inode, new_size);
1540 static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
1542 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1543 pgoff_t nr, pg_start, pg_end, delta, idx;
1547 new_size = i_size_read(inode) + len;
1548 ret = inode_newsize_ok(inode, new_size);
1552 if (offset >= i_size_read(inode))
1555 /* insert range should be aligned to block size of f2fs. */
1556 if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
1559 ret = f2fs_convert_inline_inode(inode);
1563 f2fs_balance_fs(sbi, true);
1565 down_write(&F2FS_I(inode)->i_mmap_sem);
1566 ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
1567 up_write(&F2FS_I(inode)->i_mmap_sem);
1571 /* write out all dirty pages from offset */
1572 ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1576 pg_start = offset >> PAGE_SHIFT;
1577 pg_end = (offset + len) >> PAGE_SHIFT;
1578 delta = pg_end - pg_start;
1579 idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1581 /* avoid gc operation during block exchange */
1582 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1583 down_write(&F2FS_I(inode)->i_mmap_sem);
1584 truncate_pagecache(inode, offset);
1586 while (!ret && idx > pg_start) {
1587 nr = idx - pg_start;
1593 f2fs_drop_extent_tree(inode);
1595 ret = __exchange_data_block(inode, inode, idx,
1596 idx + delta, nr, false);
1597 f2fs_unlock_op(sbi);
1599 up_write(&F2FS_I(inode)->i_mmap_sem);
1600 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1602 /* write out all moved pages, if possible */
1603 down_write(&F2FS_I(inode)->i_mmap_sem);
1604 filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1605 truncate_pagecache(inode, offset);
1606 up_write(&F2FS_I(inode)->i_mmap_sem);
1609 f2fs_i_size_write(inode, new_size);
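/*
 * Preallocate blocks for fallocate(). Pinned files are expanded a
 * segment at a time from the dedicated cold-data-pinned log, running
 * foreground GC first if free sections are scarce.
 */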
1613 static int expand_inode_data(struct inode *inode, loff_t offset,
1614 loff_t len, int mode)
1616 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1617 struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
1618 .m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
1619 .m_may_create = true };
1621 loff_t new_size = i_size_read(inode);
1625 err = inode_newsize_ok(inode, (len + offset));
1629 err = f2fs_convert_inline_inode(inode);
1633 f2fs_balance_fs(sbi, true);
1635 pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
1636 off_end = (offset + len) & (PAGE_SIZE - 1);
1638 map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT;
1639 map.m_len = pg_end - map.m_lblk;
1646 if (f2fs_is_pinned_file(inode)) {
1647 block_t len = (map.m_len >> sbi->log_blocks_per_seg) <<
1648 sbi->log_blocks_per_seg;
1651 if (map.m_len % sbi->blocks_per_seg)
1652 len += sbi->blocks_per_seg;
1654 map.m_len = sbi->blocks_per_seg;
1656 if (has_not_enough_free_secs(sbi, 0,
1657 GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
1658 down_write(&sbi->gc_lock);
1659 err = f2fs_gc(sbi, true, false, NULL_SEGNO);
1660 if (err && err != -ENODATA && err != -EAGAIN)
1664 down_write(&sbi->pin_sem);
1667 f2fs_allocate_new_segment(sbi, CURSEG_COLD_DATA_PINNED);
1668 f2fs_unlock_op(sbi);
1670 map.m_seg_type = CURSEG_COLD_DATA_PINNED;
1671 err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
1673 up_write(&sbi->pin_sem);
1677 map.m_lblk += map.m_len;
1683 err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
1692 last_off = map.m_lblk + map.m_len - 1;
1694 /* update new size to the failed position */
1695 new_size = (last_off == pg_end) ? offset + len :
1696 (loff_t)(last_off + 1) << PAGE_SHIFT;
1698 new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
1701 if (new_size > i_size_read(inode)) {
1702 if (mode & FALLOC_FL_KEEP_SIZE)
1703 file_set_keep_isize(inode);
1705 f2fs_i_size_write(inode, new_size);
1711 static long f2fs_fallocate(struct file *file, int mode,
1712 loff_t offset, loff_t len)
1714 struct inode *inode = file_inode(file);
1717 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
1719 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
1721 if (!f2fs_is_compress_backend_ready(inode))
1724 /* f2fs only supports ->fallocate for regular files */
1725 if (!S_ISREG(inode->i_mode))
1728 if (IS_ENCRYPTED(inode) &&
1729 (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
1732 if (f2fs_compressed_file(inode) &&
1733 (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
1734 FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE)))
1737 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
1738 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
1739 FALLOC_FL_INSERT_RANGE))
1744 if (mode & FALLOC_FL_PUNCH_HOLE) {
1745 if (offset >= inode->i_size)
1748 ret = punch_hole(inode, offset, len);
1749 } else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
1750 ret = f2fs_collapse_range(inode, offset, len);
1751 } else if (mode & FALLOC_FL_ZERO_RANGE) {
1752 ret = f2fs_zero_range(inode, offset, len, mode);
1753 } else if (mode & FALLOC_FL_INSERT_RANGE) {
1754 ret = f2fs_insert_range(inode, offset, len);
1756 ret = expand_inode_data(inode, offset, len, mode);
1760 inode->i_mtime = inode->i_ctime = current_time(inode);
1761 f2fs_mark_inode_dirty_sync(inode, false);
1762 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
1766 inode_unlock(inode);
1768 trace_f2fs_fallocate(inode, mode, offset, len, ret);
1772 static int f2fs_release_file(struct inode *inode, struct file *filp)
1775 * f2fs_release_file is called on every close. So we should
1776 * not drop any in-memory pages on a close issued by another process.
1778 if (!(filp->f_mode & FMODE_WRITE) ||
1779 atomic_read(&inode->i_writecount) != 1)
1782 /* any remaining atomic pages should be discarded */
1783 if (f2fs_is_atomic_file(inode))
1784 f2fs_drop_inmem_pages(inode);
1785 if (f2fs_is_volatile_file(inode)) {
1786 set_inode_flag(inode, FI_DROP_CACHE);
1787 filemap_fdatawrite(inode->i_mapping);
1788 clear_inode_flag(inode, FI_DROP_CACHE);
1789 clear_inode_flag(inode, FI_VOLATILE_FILE);
1790 stat_dec_volatile_write(inode);
1795 static int f2fs_file_flush(struct file *file, fl_owner_t id)
1797 struct inode *inode = file_inode(file);
1800 * If the process doing a transaction crashes, we should roll
1801 * back. Otherwise, other readers/writers can see a corrupted database
1802 * until all the writers close their files. Since this should be done
1803 * before dropping the file lock, it needs to happen in ->flush.
1805 if (f2fs_is_atomic_file(inode) &&
1806 F2FS_I(inode)->inmem_task == current)
1807 f2fs_drop_inmem_pages(inode);
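/*
 * Apply a masked i_flags update after validating flag combinations:
 * casefold requires feature support and an empty directory, and the
 * compression flags must be supported, non-conflicting, and only
 * toggled on files without data.
 */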
1811 static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
1813 struct f2fs_inode_info *fi = F2FS_I(inode);
1814 u32 masked_flags = fi->i_flags & mask;
1816 f2fs_bug_on(F2FS_I_SB(inode), (iflags & ~mask));
1818 /* Is it a quota file? Do not allow the user to mess with it */
1819 if (IS_NOQUOTA(inode))
1822 if ((iflags ^ masked_flags) & F2FS_CASEFOLD_FL) {
1823 if (!f2fs_sb_has_casefold(F2FS_I_SB(inode)))
1825 if (!f2fs_empty_dir(inode))
1829 if (iflags & (F2FS_COMPR_FL | F2FS_NOCOMP_FL)) {
1830 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
1832 if ((iflags & F2FS_COMPR_FL) && (iflags & F2FS_NOCOMP_FL))
1836 if ((iflags ^ masked_flags) & F2FS_COMPR_FL) {
1837 if (masked_flags & F2FS_COMPR_FL) {
1838 if (!f2fs_disable_compressed_file(inode))
1841 if (iflags & F2FS_NOCOMP_FL)
1843 if (iflags & F2FS_COMPR_FL) {
1844 if (!f2fs_may_compress(inode))
1846 if (S_ISREG(inode->i_mode) && inode->i_size)
1849 set_compress_context(inode);
1852 if ((iflags ^ masked_flags) & F2FS_NOCOMP_FL) {
1853 if (masked_flags & F2FS_COMPR_FL)
1857 fi->i_flags = iflags | (fi->i_flags & ~mask);
1858 f2fs_bug_on(F2FS_I_SB(inode), (fi->i_flags & F2FS_COMPR_FL) &&
1859 (fi->i_flags & F2FS_NOCOMP_FL));
1861 if (fi->i_flags & F2FS_PROJINHERIT_FL)
1862 set_inode_flag(inode, FI_PROJ_INHERIT);
1864 clear_inode_flag(inode, FI_PROJ_INHERIT);
1866 inode->i_ctime = current_time(inode);
1867 f2fs_set_inode_flags(inode);
1868 f2fs_mark_inode_dirty_sync(inode, true);
1872 /* FS_IOC_GETFLAGS and FS_IOC_SETFLAGS support */
1875 * To make a new on-disk f2fs i_flag gettable via FS_IOC_GETFLAGS, add an entry
1876 * for it to f2fs_fsflags_map[], and add its FS_*_FL equivalent to
1877 * F2FS_GETTABLE_FS_FL. To also make it settable via FS_IOC_SETFLAGS, also add
1878 * its FS_*_FL equivalent to F2FS_SETTABLE_FS_FL.
1881 static const struct {
1884 } f2fs_fsflags_map[] = {
1885 { F2FS_COMPR_FL, FS_COMPR_FL },
1886 { F2FS_SYNC_FL, FS_SYNC_FL },
1887 { F2FS_IMMUTABLE_FL, FS_IMMUTABLE_FL },
1888 { F2FS_APPEND_FL, FS_APPEND_FL },
1889 { F2FS_NODUMP_FL, FS_NODUMP_FL },
1890 { F2FS_NOATIME_FL, FS_NOATIME_FL },
1891 { F2FS_NOCOMP_FL, FS_NOCOMP_FL },
1892 { F2FS_INDEX_FL, FS_INDEX_FL },
1893 { F2FS_DIRSYNC_FL, FS_DIRSYNC_FL },
1894 { F2FS_PROJINHERIT_FL, FS_PROJINHERIT_FL },
1895 { F2FS_CASEFOLD_FL, FS_CASEFOLD_FL },
1898 #define F2FS_GETTABLE_FS_FL ( \
1908 FS_PROJINHERIT_FL | \
1910 FS_INLINE_DATA_FL | \
1915 #define F2FS_SETTABLE_FS_FL ( \
1924 FS_PROJINHERIT_FL | \
1927 /* Convert f2fs on-disk i_flags to FS_IOC_{GET,SET}FLAGS flags */
1928 static inline u32 f2fs_iflags_to_fsflags(u32 iflags)
1933 for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
1934 if (iflags & f2fs_fsflags_map[i].iflag)
1935 fsflags |= f2fs_fsflags_map[i].fsflag;
1940 /* Convert FS_IOC_{GET,SET}FLAGS flags to f2fs on-disk i_flags */
1941 static inline u32 f2fs_fsflags_to_iflags(u32 fsflags)
1946 for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
1947 if (fsflags & f2fs_fsflags_map[i].fsflag)
1948 iflags |= f2fs_fsflags_map[i].iflag;
1953 static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
1955 struct inode *inode = file_inode(filp);
1956 struct f2fs_inode_info *fi = F2FS_I(inode);
1957 u32 fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
1959 if (IS_ENCRYPTED(inode))
1960 fsflags |= FS_ENCRYPT_FL;
1961 if (IS_VERITY(inode))
1962 fsflags |= FS_VERITY_FL;
1963 if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
1964 fsflags |= FS_INLINE_DATA_FL;
1965 if (is_inode_flag_set(inode, FI_PIN_FILE))
1966 fsflags |= FS_NOCOW_FL;
1968 fsflags &= F2FS_GETTABLE_FS_FL;
1970 return put_user(fsflags, (int __user *)arg);
1973 static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
1975 struct inode *inode = file_inode(filp);
1976 struct f2fs_inode_info *fi = F2FS_I(inode);
1977 u32 fsflags, old_fsflags;
1981 if (!inode_owner_or_capable(inode))
1984 if (get_user(fsflags, (int __user *)arg))
1987 if (fsflags & ~F2FS_GETTABLE_FS_FL)
1989 fsflags &= F2FS_SETTABLE_FS_FL;
1991 iflags = f2fs_fsflags_to_iflags(fsflags);
1992 if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
1995 ret = mnt_want_write_file(filp);
2001 old_fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
2002 ret = vfs_ioc_setflags_prepare(inode, old_fsflags, fsflags);
2006 ret = f2fs_setflags_common(inode, iflags,
2007 f2fs_fsflags_to_iflags(F2FS_SETTABLE_FS_FL));
2009 inode_unlock(inode);
2010 mnt_drop_write_file(filp);
2014 static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
2016 struct inode *inode = file_inode(filp);
2018 return put_user(inode->i_generation, (int __user *)arg);
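/*
 * Begin an atomic-write transaction: convert inline data, flush the
 * existing dirty pages under i_gc_rwsem, register the inode on the
 * atomic-file list, and tag it FI_ATOMIC_FILE so subsequent writes are
 * staged as in-memory pages until commit.
 */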
2021 static int f2fs_ioc_start_atomic_write(struct file *filp)
2023 struct inode *inode = file_inode(filp);
2024 struct f2fs_inode_info *fi = F2FS_I(inode);
2025 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2028 if (!inode_owner_or_capable(inode))
2031 if (!S_ISREG(inode->i_mode))
2034 if (filp->f_flags & O_DIRECT)
2037 ret = mnt_want_write_file(filp);
2043 f2fs_disable_compressed_file(inode);
2045 if (f2fs_is_atomic_file(inode)) {
2046 if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))
2051 ret = f2fs_convert_inline_inode(inode);
2055 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2058 * Should wait for end_io so that F2FS_WB_CP_DATA is counted
2059 * correctly by f2fs_is_atomic_file.
2061 if (get_dirty_pages(inode))
2062 f2fs_warn(F2FS_I_SB(inode), "Unexpected flush for atomic writes: ino=%lu, npages=%u",
2063 inode->i_ino, get_dirty_pages(inode));
2064 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
2066 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2070 spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
2071 if (list_empty(&fi->inmem_ilist))
2072 list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]);
2073 sbi->atomic_files++;
2074 spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
2076 /* add the inode to inmem_list first, then set atomic_file */
2077 set_inode_flag(inode, FI_ATOMIC_FILE);
2078 clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2079 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2081 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2082 F2FS_I(inode)->inmem_task = current;
2083 stat_update_max_atomic_write(inode);
2085 inode_unlock(inode);
2086 mnt_drop_write_file(filp);
2090 static int f2fs_ioc_commit_atomic_write(struct file *filp)
2092 struct inode *inode = file_inode(filp);
2095 if (!inode_owner_or_capable(inode))
2098 ret = mnt_want_write_file(filp);
2102 f2fs_balance_fs(F2FS_I_SB(inode), true);
2106 if (f2fs_is_volatile_file(inode)) {
2111 if (f2fs_is_atomic_file(inode)) {
2112 ret = f2fs_commit_inmem_pages(inode);
2116 ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
2118 f2fs_drop_inmem_pages(inode);
2120 ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
2123 if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
2124 clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2127 inode_unlock(inode);
2128 mnt_drop_write_file(filp);
2132 static int f2fs_ioc_start_volatile_write(struct file *filp)
2134 struct inode *inode = file_inode(filp);
2137 if (!inode_owner_or_capable(inode))
2140 if (!S_ISREG(inode->i_mode))
2143 ret = mnt_want_write_file(filp);
2149 if (f2fs_is_volatile_file(inode))
2152 ret = f2fs_convert_inline_inode(inode);
2156 stat_inc_volatile_write(inode);
2157 stat_update_max_volatile_write(inode);
2159 set_inode_flag(inode, FI_VOLATILE_FILE);
2160 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2162 inode_unlock(inode);
2163 mnt_drop_write_file(filp);
2167 static int f2fs_ioc_release_volatile_write(struct file *filp)
2169 struct inode *inode = file_inode(filp);
2172 if (!inode_owner_or_capable(inode))
2175 ret = mnt_want_write_file(filp);
2181 if (!f2fs_is_volatile_file(inode))
2184 if (!f2fs_is_first_block_written(inode)) {
2185 ret = truncate_partial_data_page(inode, 0, true);
2189 ret = punch_hole(inode, 0, F2FS_BLKSIZE);
2191 inode_unlock(inode);
2192 mnt_drop_write_file(filp);
2196 static int f2fs_ioc_abort_volatile_write(struct file *filp)
2198 struct inode *inode = file_inode(filp);
2201 if (!inode_owner_or_capable(inode))
2204 ret = mnt_want_write_file(filp);
2210 if (f2fs_is_atomic_file(inode))
2211 f2fs_drop_inmem_pages(inode);
2212 if (f2fs_is_volatile_file(inode)) {
2213 clear_inode_flag(inode, FI_VOLATILE_FILE);
2214 stat_dec_volatile_write(inode);
2215 ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
2218 clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2220 inode_unlock(inode);
2222 mnt_drop_write_file(filp);
2223 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
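/*
 * F2FS_IOC_SHUTDOWN: each F2FS_GOING_DOWN_* mode stops checkpointing
 * after a different amount of prior syncing, from a full bdev freeze
 * (FULLSYNC) down to none (NOSYNC); the GC and discard threads are
 * stopped afterwards.
 */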
2227 static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
2229 struct inode *inode = file_inode(filp);
2230 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2231 struct super_block *sb = sbi->sb;
2235 if (!capable(CAP_SYS_ADMIN))
2238 if (get_user(in, (__u32 __user *)arg))
2241 if (in != F2FS_GOING_DOWN_FULLSYNC) {
2242 ret = mnt_want_write_file(filp);
2244 if (ret == -EROFS) {
2246 f2fs_stop_checkpoint(sbi, false);
2247 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2248 trace_f2fs_shutdown(sbi, in, ret);
2255 case F2FS_GOING_DOWN_FULLSYNC:
2256 ret = freeze_bdev(sb->s_bdev);
2259 f2fs_stop_checkpoint(sbi, false);
2260 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2261 thaw_bdev(sb->s_bdev);
2263 case F2FS_GOING_DOWN_METASYNC:
2264 /* do checkpoint only */
2265 ret = f2fs_sync_fs(sb, 1);
2268 f2fs_stop_checkpoint(sbi, false);
2269 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2271 case F2FS_GOING_DOWN_NOSYNC:
2272 f2fs_stop_checkpoint(sbi, false);
2273 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2275 case F2FS_GOING_DOWN_METAFLUSH:
2276 f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
2277 f2fs_stop_checkpoint(sbi, false);
2278 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2280 case F2FS_GOING_DOWN_NEED_FSCK:
2281 set_sbi_flag(sbi, SBI_NEED_FSCK);
2282 set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
2283 set_sbi_flag(sbi, SBI_IS_DIRTY);
2284 /* do checkpoint only */
2285 ret = f2fs_sync_fs(sb, 1);
2292 f2fs_stop_gc_thread(sbi);
2293 f2fs_stop_discard_thread(sbi);
2295 f2fs_drop_discard_cmd(sbi);
2296 clear_opt(sbi, DISCARD);
2298 f2fs_update_time(sbi, REQ_TIME);
2300 if (in != F2FS_GOING_DOWN_FULLSYNC)
2301 mnt_drop_write_file(filp);
2303 trace_f2fs_shutdown(sbi, in, ret);
2308 static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
2310 struct inode *inode = file_inode(filp);
2311 struct super_block *sb = inode->i_sb;
2312 struct request_queue *q = bdev_get_queue(sb->s_bdev);
2313 struct fstrim_range range;
2316 if (!capable(CAP_SYS_ADMIN))
2319 if (!f2fs_hw_support_discard(F2FS_SB(sb)))
2322 if (copy_from_user(&range, (struct fstrim_range __user *)arg,
2326 ret = mnt_want_write_file(filp);
2330 range.minlen = max((unsigned int)range.minlen,
2331 q->limits.discard_granularity);
2332 ret = f2fs_trim_fs(F2FS_SB(sb), &range);
2333 mnt_drop_write_file(filp);
2337 if (copy_to_user((struct fstrim_range __user *)arg, &range,
2340 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2344 static bool uuid_is_nonzero(__u8 u[16])
2348 for (i = 0; i < 16; i++)
2354 static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
2356 struct inode *inode = file_inode(filp);
2358 if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode)))
2361 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2363 return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
2366 static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
2368 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2370 return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
2373 static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
2375 struct inode *inode = file_inode(filp);
2376 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2379 if (!f2fs_sb_has_encrypt(sbi))
2382 err = mnt_want_write_file(filp);
2386 down_write(&sbi->sb_lock);
2388 if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
2391 /* update superblock with uuid */
2392 generate_random_uuid(sbi->raw_super->encrypt_pw_salt);
2394 err = f2fs_commit_super(sbi, false);
2397 memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
2401 if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
2405 up_write(&sbi->sb_lock);
2406 mnt_drop_write_file(filp);
2410 static int f2fs_ioc_get_encryption_policy_ex(struct file *filp,
2413 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2416 return fscrypt_ioctl_get_policy_ex(filp, (void __user *)arg);
2419 static int f2fs_ioc_add_encryption_key(struct file *filp, unsigned long arg)
2421 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2424 return fscrypt_ioctl_add_key(filp, (void __user *)arg);
2427 static int f2fs_ioc_remove_encryption_key(struct file *filp, unsigned long arg)
2429 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2432 return fscrypt_ioctl_remove_key(filp, (void __user *)arg);
2435 static int f2fs_ioc_remove_encryption_key_all_users(struct file *filp,
2438 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2441 return fscrypt_ioctl_remove_key_all_users(filp, (void __user *)arg);
2444 static int f2fs_ioc_get_encryption_key_status(struct file *filp,
2447 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2450 return fscrypt_ioctl_get_key_status(filp, (void __user *)arg);
2453 static int f2fs_ioc_get_encryption_nonce(struct file *filp, unsigned long arg)
2455 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2458 return fscrypt_ioctl_get_nonce(filp, (void __user *)arg);
2461 static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
2463 struct inode *inode = file_inode(filp);
2464 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2468 if (!capable(CAP_SYS_ADMIN))
2471 if (get_user(sync, (__u32 __user *)arg))
2474 if (f2fs_readonly(sbi->sb))
2477 ret = mnt_want_write_file(filp);
2482 if (!down_write_trylock(&sbi->gc_lock)) {
2487 down_write(&sbi->gc_lock);
2490 ret = f2fs_gc(sbi, sync, true, NULL_SEGNO);
2492 mnt_drop_write_file(filp);
2496 static int __f2fs_ioc_gc_range(struct file *filp, struct f2fs_gc_range *range)
2498 struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
2502 if (!capable(CAP_SYS_ADMIN))
2504 if (f2fs_readonly(sbi->sb))
2507 end = range->start + range->len;
2508 if (end < range->start || range->start < MAIN_BLKADDR(sbi) ||
2509 end >= MAX_BLKADDR(sbi))
2512 ret = mnt_want_write_file(filp);
2518 if (!down_write_trylock(&sbi->gc_lock)) {
2523 down_write(&sbi->gc_lock);
2526 ret = f2fs_gc(sbi, range->sync, true, GET_SEGNO(sbi, range->start));
2532 range->start += BLKS_PER_SEC(sbi);
2533 if (range->start <= end)
2536 mnt_drop_write_file(filp);
2540 static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
2542 struct f2fs_gc_range range;
2544 if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
2547 return __f2fs_ioc_gc_range(filp, &range);
2550 static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
2552 struct inode *inode = file_inode(filp);
2553 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2556 if (!capable(CAP_SYS_ADMIN))
2559 if (f2fs_readonly(sbi->sb))
2562 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2563 f2fs_info(sbi, "Skipping Checkpoint. Checkpoints currently disabled.");
2567 ret = mnt_want_write_file(filp);
2571 ret = f2fs_sync_fs(sbi->sb, 1);
2573 mnt_drop_write_file(filp);
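/*
 * Defragment in two passes: first walk the mapping to check whether the
 * physical blocks are actually fragmented, then redirty the data pages
 * (up to one segment at a time) and write them back to relocate the
 * blocks.
 */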
2577 static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
2579 struct f2fs_defragment *range)
2581 struct inode *inode = file_inode(filp);
2582 struct f2fs_map_blocks map = { .m_next_extent = NULL,
2583 .m_seg_type = NO_CHECK_TYPE,
2584 .m_may_create = false };
2585 struct extent_info ei = {0, 0, 0};
2586 pgoff_t pg_start, pg_end, next_pgofs;
2587 unsigned int blk_per_seg = sbi->blocks_per_seg;
2588 unsigned int total = 0, sec_num;
2589 block_t blk_end = 0;
2590 bool fragmented = false;
2593 /* if in-place-update policy is enabled, don't waste time here */
2594 if (f2fs_should_update_inplace(inode, NULL))
2597 pg_start = range->start >> PAGE_SHIFT;
2598 pg_end = (range->start + range->len) >> PAGE_SHIFT;
2600 f2fs_balance_fs(sbi, true);
2604 /* writeback all dirty pages in the range */
2605 err = filemap_write_and_wait_range(inode->i_mapping, range->start,
2606 range->start + range->len - 1);
2611 * look up mapping info in the extent cache, and skip defragmenting if the
2612 * physical block addresses are contiguous.
2614 if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
2615 if (ei.fofs + ei.len >= pg_end)
2619 map.m_lblk = pg_start;
2620 map.m_next_pgofs = &next_pgofs;
2623 * look up mapping info in the dnode page cache, and skip defragmenting if
2624 * all physical block addresses are contiguous, even if there are holes
2625 * in the logical blocks.
2627 while (map.m_lblk < pg_end) {
2628 map.m_len = pg_end - map.m_lblk;
2629 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
2633 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
2634 map.m_lblk = next_pgofs;
2638 if (blk_end && blk_end != map.m_pblk)
2641 /* record the total count of blocks that we're going to move */
2644 blk_end = map.m_pblk + map.m_len;
2646 map.m_lblk += map.m_len;
2654 sec_num = DIV_ROUND_UP(total, BLKS_PER_SEC(sbi));
2657 * make sure there are enough free sections for LFS allocation; this avoids
2658 * defragmenting in SSR mode when free sections are being allocated intensively.
2661 if (has_not_enough_free_secs(sbi, 0, sec_num)) {
2666 map.m_lblk = pg_start;
2667 map.m_len = pg_end - pg_start;
2670 while (map.m_lblk < pg_end) {
2675 map.m_len = pg_end - map.m_lblk;
2676 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
2680 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
2681 map.m_lblk = next_pgofs;
2685 set_inode_flag(inode, FI_DO_DEFRAG);
2688 while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
2691 page = f2fs_get_lock_data_page(inode, idx, true);
2693 err = PTR_ERR(page);
2697 set_page_dirty(page);
2698 f2fs_put_page(page, 1);
2707 if (map.m_lblk < pg_end && cnt < blk_per_seg)
2710 clear_inode_flag(inode, FI_DO_DEFRAG);
2712 err = filemap_fdatawrite(inode->i_mapping);
2717 clear_inode_flag(inode, FI_DO_DEFRAG);
2719 inode_unlock(inode);
2721 range->len = (u64)total << PAGE_SHIFT;
2725 static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
2727 struct inode *inode = file_inode(filp);
2728 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2729 struct f2fs_defragment range;
2732 if (!capable(CAP_SYS_ADMIN))
2735 if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
2738 if (f2fs_readonly(sbi->sb))
2741 if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
2745 /* verify alignment of offset & size */
2746 if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
2749 if (unlikely((range.start + range.len) >> PAGE_SHIFT >
2750 max_file_blocks(inode)))
2753 err = mnt_want_write_file(filp);
2757 err = f2fs_defragment_range(sbi, filp, &range);
2758 mnt_drop_write_file(filp);
2760 f2fs_update_time(sbi, REQ_TIME);
2764 if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
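
/*
 * Illustrative userspace sketch (not part of the kernel) for
 * F2FS_IOC_DEFRAGMENT. struct f2fs_defragment comes from <linux/f2fs.h>;
 * start/len must be F2FS_BLKSIZE-aligned per the checks above, and the
 * kernel writes the number of bytes it queued for moving back into .len.
 * The helper name is an assumption.
 */
#if 0	/* userspace usage sketch */
#include <sys/ioctl.h>
#include <linux/f2fs.h>

static long long defrag_example(int fd, __u64 start, __u64 len)
{
	struct f2fs_defragment range = { .start = start, .len = len };

	if (ioctl(fd, F2FS_IOC_DEFRAGMENT, &range) < 0)
		return -1;
	return (long long)range.len;	/* bytes moved, block-granular */
}
#endif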

static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
			struct file *file_out, loff_t pos_out, size_t len)
{
	struct inode *src = file_inode(file_in);
	struct inode *dst = file_inode(file_out);
	struct f2fs_sb_info *sbi = F2FS_I_SB(src);
	size_t olen = len, dst_max_i_size = 0;
	size_t dst_osize;
	int ret;

	if (file_in->f_path.mnt != file_out->f_path.mnt ||
				src->i_sb != dst->i_sb)
		return -EXDEV;

	if (unlikely(f2fs_readonly(src->i_sb)))
		return -EROFS;

	if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
		return -EINVAL;

	if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst))
		return -EOPNOTSUPP;

	if (pos_out < 0 || pos_in < 0)
		return -EINVAL;

	if (src == dst) {
		if (pos_in == pos_out)
			return 0;
		if (pos_out > pos_in && pos_out < pos_in + len)
			return -EINVAL;
	}

	inode_lock(src);
	if (src != dst) {
		ret = -EBUSY;
		if (!inode_trylock(dst))
			goto out;
	}

	ret = -EINVAL;
	if (pos_in + len > src->i_size || pos_in + len < pos_in)
		goto out_unlock;
	if (len == 0)
		olen = len = src->i_size - pos_in;
	if (pos_in + len == src->i_size)
		len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;

	dst_osize = dst->i_size;
	if (pos_out + olen > dst->i_size)
		dst_max_i_size = pos_out + olen;

	/* verify the end result is block aligned */
	if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_out, F2FS_BLKSIZE))
		goto out_unlock;

	ret = f2fs_convert_inline_inode(src);
	if (ret)
		goto out_unlock;

	ret = f2fs_convert_inline_inode(dst);
	if (ret)
		goto out_unlock;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(src->i_mapping,
					pos_in, pos_in + len);
	if (ret)
		goto out_unlock;

	ret = filemap_write_and_wait_range(dst->i_mapping,
					pos_out, pos_out + len);
	if (ret)
		goto out_unlock;

	f2fs_balance_fs(sbi, true);

	down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
	if (src != dst) {
		ret = -EBUSY;
		if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
			goto out_src;
	}

	f2fs_lock_op(sbi);
	ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
				pos_out >> F2FS_BLKSIZE_BITS,
				len >> F2FS_BLKSIZE_BITS, false);
	if (!ret) {
		if (dst_max_i_size)
			f2fs_i_size_write(dst, dst_max_i_size);
		else if (dst_osize != dst->i_size)
			f2fs_i_size_write(dst, dst_osize);
	}
	f2fs_unlock_op(sbi);

	if (src != dst)
		up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
out_src:
	up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
out_unlock:
	if (src != dst)
		inode_unlock(dst);
out:
	inode_unlock(src);
	return ret;
}

static int __f2fs_ioc_move_range(struct file *filp,
				struct f2fs_move_range *range)
{
	struct fd dst;
	int err;

	if (!(filp->f_mode & FMODE_READ) ||
			!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	dst = fdget(range->dst_fd);
	if (!dst.file)
		return -EBADF;

	if (!(dst.file->f_mode & FMODE_WRITE)) {
		err = -EBADF;
		goto err_out;
	}

	err = mnt_want_write_file(filp);
	if (err)
		goto err_out;

	err = f2fs_move_file_range(filp, range->pos_in, dst.file,
					range->pos_out, range->len);

	mnt_drop_write_file(filp);
err_out:
	fdput(dst);
	return err;
}

static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
{
	struct f2fs_move_range range;

	if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
							sizeof(range)))
		return -EFAULT;
	return __f2fs_ioc_move_range(filp, &range);
}
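
/*
 * Illustrative userspace sketch (not part of the kernel) of
 * F2FS_IOC_MOVE_RANGE, which exchanges block-aligned ranges between two
 * fds on the same f2fs mount; see the alignment and locking rules in
 * f2fs_move_file_range() above. The helper name is an assumption.
 */
#if 0	/* userspace usage sketch */
#include <sys/ioctl.h>
#include <linux/f2fs.h>

static int move_range_example(int src_fd, int dst_fd,
				__u64 pos_in, __u64 pos_out, __u64 len)
{
	struct f2fs_move_range range = {
		.dst_fd = (__u32)dst_fd,	/* must be open for write */
		.pos_in = pos_in,
		.pos_out = pos_out,
		.len = len,		/* 0 means "to EOF of the source" */
	};

	/* src_fd must be open read+write, per __f2fs_ioc_move_range() */
	return ioctl(src_fd, F2FS_IOC_MOVE_RANGE, &range);
}
#endif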

static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct sit_info *sm = SIT_I(sbi);
	unsigned int start_segno = 0, end_segno = 0;
	unsigned int dev_start_segno = 0, dev_end_segno = 0;
	struct f2fs_flush_device range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return -EINVAL;

	if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
							sizeof(range)))
		return -EFAULT;

	if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num ||
			__is_large_section(sbi)) {
		f2fs_warn(sbi, "Can't flush %u in %d for segs_per_sec %u != 1",
			  range.dev_num, sbi->s_ndevs, sbi->segs_per_sec);
		return -EINVAL;
	}

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (range.dev_num != 0)
		dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
	dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);

	start_segno = sm->last_victim[FLUSH_DEVICE];
	if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
		start_segno = dev_start_segno;
	end_segno = min(start_segno + range.segments, dev_end_segno);

	while (start_segno < end_segno) {
		if (!down_write_trylock(&sbi->gc_lock)) {
			ret = -EBUSY;
			goto out;
		}
		sm->last_victim[GC_CB] = end_segno + 1;
		sm->last_victim[GC_GREEDY] = end_segno + 1;
		sm->last_victim[ALLOC_NEXT] = end_segno + 1;
		ret = f2fs_gc(sbi, true, true, start_segno);
		if (ret == -EAGAIN)
			ret = 0;
		else if (ret < 0)
			break;
		start_segno++;
	}
out:
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);

	/* Always advertise atomic-write support; SQLite on Android probes it. */
	sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;

	return put_user(sb_feature, (u32 __user *)arg);
}
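
/*
 * Illustrative userspace sketch (not part of the kernel) of
 * F2FS_IOC_GET_FEATURES. The returned bitmask uses feature bits from the
 * kernel-internal fs/f2fs/f2fs.h (not exported to uapi), so this example
 * only prints the raw mask rather than decoding it.
 */
#if 0	/* userspace usage sketch */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/f2fs.h>

static int get_features_example(int fd)
{
	__u32 features = 0;

	if (ioctl(fd, F2FS_IOC_GET_FEATURES, &features) < 0)
		return -1;
	printf("f2fs feature mask: %#x\n", features);
	return 0;
}
#endif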

#ifdef CONFIG_QUOTA
int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
{
	struct dquot *transfer_to[MAXQUOTAS] = {};
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	int err = 0;

	transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
	if (!IS_ERR(transfer_to[PRJQUOTA])) {
		err = __dquot_transfer(inode, transfer_to);
		if (err)
			set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
		dqput(transfer_to[PRJQUOTA]);
	}
	return err;
}

static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *ipage;
	kprojid_t kprojid;
	int err;

	if (!f2fs_sb_has_project_quota(sbi)) {
		if (projid != F2FS_DEF_PROJID)
			return -EOPNOTSUPP;
		return 0;
	}

	if (!f2fs_has_extra_attr(inode))
		return -EOPNOTSUPP;

	kprojid = make_kprojid(&init_user_ns, (projid_t)projid);

	if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
		return 0;

	err = -EPERM;
	/* Is it quota file? Do not allow user to mess with it */
	if (IS_NOQUOTA(inode))
		return err;

	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize,
								i_projid)) {
		err = -EOVERFLOW;
		f2fs_put_page(ipage, 1);
		return err;
	}
	f2fs_put_page(ipage, 1);

	err = dquot_initialize(inode);
	if (err)
		return err;

	f2fs_lock_op(sbi);
	err = f2fs_transfer_project_quota(inode, kprojid);
	if (err)
		goto out_unlock;

	F2FS_I(inode)->i_projid = kprojid;
	inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
out_unlock:
	f2fs_unlock_op(sbi);
	return err;
}
#else
int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
{
	return 0;
}

static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
{
	if (projid != F2FS_DEF_PROJID)
		return -EOPNOTSUPP;
	return 0;
}
#endif

/* FS_IOC_FSGETXATTR and FS_IOC_FSSETXATTR support */

/*
 * To make a new on-disk f2fs i_flag gettable via FS_IOC_FSGETXATTR and settable
 * via FS_IOC_FSSETXATTR, add an entry for it to f2fs_xflags_map[], and add its
 * FS_XFLAG_* equivalent to F2FS_SUPPORTED_XFLAGS.
 */

static const struct {
	u32 iflag;
	u32 xflag;
} f2fs_xflags_map[] = {
	{ F2FS_SYNC_FL,		FS_XFLAG_SYNC },
	{ F2FS_IMMUTABLE_FL,	FS_XFLAG_IMMUTABLE },
	{ F2FS_APPEND_FL,	FS_XFLAG_APPEND },
	{ F2FS_NODUMP_FL,	FS_XFLAG_NODUMP },
	{ F2FS_NOATIME_FL,	FS_XFLAG_NOATIME },
	{ F2FS_PROJINHERIT_FL,	FS_XFLAG_PROJINHERIT },
};

#define F2FS_SUPPORTED_XFLAGS (		\
		FS_XFLAG_SYNC |		\
		FS_XFLAG_IMMUTABLE |	\
		FS_XFLAG_APPEND |	\
		FS_XFLAG_NODUMP |	\
		FS_XFLAG_NOATIME |	\
		FS_XFLAG_PROJINHERIT)

/* Convert f2fs on-disk i_flags to FS_IOC_FS{GET,SET}XATTR flags */
static inline u32 f2fs_iflags_to_xflags(u32 iflags)
{
	u32 xflags = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++)
		if (iflags & f2fs_xflags_map[i].iflag)
			xflags |= f2fs_xflags_map[i].xflag;
	return xflags;
}

/* Convert FS_IOC_FS{GET,SET}XATTR flags to f2fs on-disk i_flags */
static inline u32 f2fs_xflags_to_iflags(u32 xflags)
{
	u32 iflags = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++)
		if (xflags & f2fs_xflags_map[i].xflag)
			iflags |= f2fs_xflags_map[i].iflag;
	return iflags;
}

static void f2fs_fill_fsxattr(struct inode *inode, struct fsxattr *fa)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	simple_fill_fsxattr(fa, f2fs_iflags_to_xflags(fi->i_flags));

	if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
		fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid);
}

static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct fsxattr fa;

	f2fs_fill_fsxattr(inode, &fa);

	if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa)))
		return -EFAULT;
	return 0;
}

static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct fsxattr fa, old_fa;
	u32 iflags;
	int err;

	if (copy_from_user(&fa, (struct fsxattr __user *)arg, sizeof(fa)))
		return -EFAULT;

	/* Make sure caller has proper permission */
	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (fa.fsx_xflags & ~F2FS_SUPPORTED_XFLAGS)
		return -EOPNOTSUPP;

	iflags = f2fs_xflags_to_iflags(fa.fsx_xflags);
	if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
		return -EOPNOTSUPP;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	inode_lock(inode);

	f2fs_fill_fsxattr(inode, &old_fa);
	err = vfs_ioc_fssetxattr_check(inode, &old_fa, &fa);
	if (err)
		goto out;

	err = f2fs_setflags_common(inode, iflags,
			f2fs_xflags_to_iflags(F2FS_SUPPORTED_XFLAGS));
	if (err)
		goto out;

	err = f2fs_ioc_setproject(filp, fa.fsx_projid);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return err;
}

int f2fs_pin_file_control(struct inode *inode, bool inc)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	/* Use i_gc_failures for normal file as a risk signal. */
	if (inc)
		f2fs_i_gc_failures_write(inode,
				fi->i_gc_failures[GC_FAILURE_PIN] + 1);

	if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
		f2fs_warn(sbi, "%s: Enable GC = ino %lx after %x GC trials",
			  __func__, inode->i_ino,
			  fi->i_gc_failures[GC_FAILURE_PIN]);
		clear_inode_flag(inode, FI_PIN_FILE);
		return -EAGAIN;
	}
	return 0;
}

static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	__u32 pin;
	int ret = 0;

	if (get_user(pin, (__u32 __user *)arg))
		return -EFAULT;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (f2fs_readonly(F2FS_I_SB(inode)->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_should_update_outplace(inode, NULL)) {
		ret = -EINVAL;
		goto out;
	}

	if (!pin) {
		clear_inode_flag(inode, FI_PIN_FILE);
		f2fs_i_gc_failures_write(inode, 0);
		goto done;
	}

	if (f2fs_pin_file_control(inode, false)) {
		ret = -EAGAIN;
		goto out;
	}

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	if (!f2fs_disable_compressed_file(inode)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	set_inode_flag(inode, FI_PIN_FILE);
	ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
done:
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	__u32 pin = 0;

	if (is_inode_flag_set(inode, FI_PIN_FILE))
		pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
	return put_user(pin, (u32 __user *)arg);
}
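
/*
 * Illustrative userspace sketch (not part of the kernel) for the pin-file
 * pair. F2FS_IOC_SET_PIN_FILE takes a __u32 (nonzero pins, zero unpins);
 * F2FS_IOC_GET_PIN_FILE reports the GC-failure count of a pinned file.
 * The helper name is an assumption.
 */
#if 0	/* userspace usage sketch */
#include <sys/ioctl.h>
#include <linux/f2fs.h>

static int pin_example(int fd, int pin)
{
	__u32 val = pin ? 1 : 0;

	if (ioctl(fd, F2FS_IOC_SET_PIN_FILE, &val) < 0)
		return -1;

	/* pinned files report i_gc_failures[GC_FAILURE_PIN] here */
	return ioctl(fd, F2FS_IOC_GET_PIN_FILE, &val);
}
#endif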

int f2fs_precache_extents(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_map_blocks map;
	pgoff_t m_next_extent;
	loff_t end;
	int err;

	if (is_inode_flag_set(inode, FI_NO_EXTENT))
		return -EOPNOTSUPP;

	map.m_lblk = 0;
	map.m_next_pgofs = NULL;
	map.m_next_extent = &m_next_extent;
	map.m_seg_type = NO_CHECK_TYPE;
	map.m_may_create = false;
	end = max_file_blocks(inode);

	while (map.m_lblk < end) {
		map.m_len = end - map.m_lblk;

		down_write(&fi->i_gc_rwsem[WRITE]);
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
		up_write(&fi->i_gc_rwsem[WRITE]);
		if (err)
			return err;

		map.m_lblk = m_next_extent;
	}

	return 0;
}

static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
{
	return f2fs_precache_extents(file_inode(filp));
}

static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
	__u64 block_count;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (copy_from_user(&block_count, (void __user *)arg,
			   sizeof(block_count)))
		return -EFAULT;

	return f2fs_resize_fs(sbi, block_count);
}
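
/*
 * Illustrative userspace sketch (not part of the kernel) of
 * F2FS_IOC_RESIZE_FS, which passes the new total block count straight
 * into f2fs_resize_fs(). The helper name is an assumption; CAP_SYS_ADMIN
 * is required as above.
 */
#if 0	/* userspace usage sketch */
#include <sys/ioctl.h>
#include <linux/f2fs.h>

static int resize_example(int fd, __u64 new_block_count)
{
	return ioctl(fd, F2FS_IOC_RESIZE_FS, &new_block_count);
}
#endif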

static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) {
		f2fs_warn(F2FS_I_SB(inode),
			  "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem",
			  inode->i_ino);
		return -EOPNOTSUPP;
	}

	return fsverity_ioctl_enable(filp, (const void __user *)arg);
}

static int f2fs_ioc_measure_verity(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fsverity_ioctl_measure(filp, (void __user *)arg);
}

static int f2fs_ioc_read_verity_metadata(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fsverity_ioctl_read_metadata(filp, (const void __user *)arg);
}

static int f2fs_ioc_getfslabel(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	char *vbuf;
	int count;
	int err = 0;

	vbuf = f2fs_kzalloc(sbi, MAX_VOLUME_NAME, GFP_KERNEL);
	if (!vbuf)
		return -ENOMEM;

	down_read(&sbi->sb_lock);
	count = utf16s_to_utf8s(sbi->raw_super->volume_name,
			ARRAY_SIZE(sbi->raw_super->volume_name),
			UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME);
	up_read(&sbi->sb_lock);

	if (copy_to_user((char __user *)arg, vbuf,
				min(FSLABEL_MAX, count)))
		err = -EFAULT;

	kfree(vbuf);
	return err;
}

static int f2fs_ioc_setfslabel(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	char *vbuf;
	int err = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	vbuf = strndup_user((const char __user *)arg, FSLABEL_MAX);
	if (IS_ERR(vbuf))
		return PTR_ERR(vbuf);

	err = mnt_want_write_file(filp);
	if (err)
		goto out;

	down_write(&sbi->sb_lock);

	memset(sbi->raw_super->volume_name, 0,
			sizeof(sbi->raw_super->volume_name));
	utf8s_to_utf16s(vbuf, strlen(vbuf), UTF16_LITTLE_ENDIAN,
			sbi->raw_super->volume_name,
			ARRAY_SIZE(sbi->raw_super->volume_name));

	err = f2fs_commit_super(sbi, false);

	up_write(&sbi->sb_lock);

	mnt_drop_write_file(filp);
out:
	kfree(vbuf);
	return err;
}
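
/*
 * Illustrative userspace sketch (not part of the kernel) for the generic
 * label ioctls handled above. FSLABEL_MAX comes from <linux/fs.h>; f2fs
 * stores the label as UTF-16LE in the superblock and converts on the fly.
 * The helper name and buffer handling are assumptions.
 */
#if 0	/* userspace usage sketch */
#include <sys/ioctl.h>
#include <linux/fs.h>

static int relabel_example(int fd, const char *new_label)
{
	char old_label[FSLABEL_MAX] = "";

	if (ioctl(fd, FS_IOC_GETFSLABEL, old_label) < 0)
		return -1;
	/* CAP_SYS_ADMIN required; over-long labels are truncated on disk */
	return ioctl(fd, FS_IOC_SETFSLABEL, new_label);
}
#endif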

static int f2fs_get_compress_blocks(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	__u64 blocks;

	if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
		return -EOPNOTSUPP;

	if (!f2fs_compressed_file(inode))
		return -EINVAL;

	blocks = atomic_read(&F2FS_I(inode)->i_compr_blocks);
	return put_user(blocks, (u64 __user *)arg);
}
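
/*
 * Illustrative userspace sketch (not part of the kernel) of
 * F2FS_IOC_GET_COMPRESS_BLOCKS, which reads back i_compr_blocks as a
 * __u64. The helper name is an assumption.
 */
#if 0	/* userspace usage sketch */
#include <sys/ioctl.h>
#include <linux/f2fs.h>

static long long compr_blocks_example(int fd)
{
	__u64 blocks = 0;

	if (ioctl(fd, F2FS_IOC_GET_COMPRESS_BLOCKS, &blocks) < 0)
		return -1;
	return (long long)blocks;	/* blocks saved by compression */
}
#endif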

static int release_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	unsigned int released_blocks = 0;
	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
	block_t blkaddr;
	int i;

	for (i = 0; i < count; i++) {
		blkaddr = data_blkaddr(dn->inode, dn->node_page,
						dn->ofs_in_node + i);

		if (!__is_valid_data_blkaddr(blkaddr))
			continue;
		if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
					DATA_GENERIC_ENHANCE)))
			return -EFSCORRUPTED;
	}

	while (count) {
		int compr_blocks = 0;

		for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
			blkaddr = f2fs_data_blkaddr(dn);

			if (i == 0) {
				if (blkaddr == COMPRESS_ADDR)
					continue;
				dn->ofs_in_node += cluster_size;
				goto next;
			}

			if (__is_valid_data_blkaddr(blkaddr))
				compr_blocks++;

			if (blkaddr != NEW_ADDR)
				continue;

			dn->data_blkaddr = NULL_ADDR;
			f2fs_set_data_blkaddr(dn);
		}

		f2fs_i_compr_blocks_update(dn->inode, compr_blocks, false);
		dec_valid_block_count(sbi, dn->inode,
					cluster_size - compr_blocks);

		released_blocks += cluster_size - compr_blocks;
next:
		count -= cluster_size;
	}

	return released_blocks;
}

static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t page_idx = 0, last_idx;
	unsigned int released_blocks = 0;
	int ret;
	int writecount;

	if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
		return -EOPNOTSUPP;

	if (!f2fs_compressed_file(inode))
		return -EINVAL;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	f2fs_balance_fs(F2FS_I_SB(inode), true);

	inode_lock(inode);

	writecount = atomic_read(&inode->i_writecount);
	if ((filp->f_mode & FMODE_WRITE && writecount != 1) ||
			(!(filp->f_mode & FMODE_WRITE) && writecount)) {
		ret = -EBUSY;
		goto out;
	}

	if (IS_IMMUTABLE(inode)) {
		ret = -EINVAL;
		goto out;
	}

	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (ret)
		goto out;

	F2FS_I(inode)->i_flags |= F2FS_IMMUTABLE_FL;
	f2fs_set_inode_flags(inode);
	inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, true);

	if (!atomic_read(&F2FS_I(inode)->i_compr_blocks))
		goto out;

	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);

	last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	while (page_idx < last_idx) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
		if (ret) {
			if (ret == -ENOENT) {
				page_idx = f2fs_get_next_page_offset(&dn,
								page_idx);
				ret = 0;
				continue;
			}
			break;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
		count = round_up(count, F2FS_I(inode)->i_cluster_size);

		ret = release_compress_blocks(&dn, count);

		f2fs_put_dnode(&dn);

		if (ret < 0)
			break;

		page_idx += count;
		released_blocks += ret;
	}

	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	up_write(&F2FS_I(inode)->i_mmap_sem);
out:
	inode_unlock(inode);

	mnt_drop_write_file(filp);

	if (ret >= 0) {
		ret = put_user(released_blocks, (u64 __user *)arg);
	} else if (released_blocks &&
			atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
			"iblocks=%llu, released=%u, compr_blocks=%u, "
			"run fsck to fix.",
			__func__, inode->i_ino, inode->i_blocks,
			released_blocks,
			atomic_read(&F2FS_I(inode)->i_compr_blocks));
	}

	return ret;
}

static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	unsigned int reserved_blocks = 0;
	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
	block_t blkaddr;
	int i;

	for (i = 0; i < count; i++) {
		blkaddr = data_blkaddr(dn->inode, dn->node_page,
						dn->ofs_in_node + i);

		if (!__is_valid_data_blkaddr(blkaddr))
			continue;
		if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
					DATA_GENERIC_ENHANCE)))
			return -EFSCORRUPTED;
	}

	while (count) {
		int compr_blocks = 0;
		blkcnt_t reserved;
		int ret;

		for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
			blkaddr = f2fs_data_blkaddr(dn);

			if (i == 0) {
				if (blkaddr == COMPRESS_ADDR)
					continue;
				dn->ofs_in_node += cluster_size;
				goto next;
			}

			if (__is_valid_data_blkaddr(blkaddr)) {
				compr_blocks++;
				continue;
			}

			dn->data_blkaddr = NEW_ADDR;
			f2fs_set_data_blkaddr(dn);
		}

		reserved = cluster_size - compr_blocks;
		ret = inc_valid_block_count(sbi, dn->inode, &reserved);
		if (ret)
			return ret;

		if (reserved != cluster_size - compr_blocks)
			return -ENOSPC;

		f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true);

		reserved_blocks += reserved;
next:
		count -= cluster_size;
	}

	return reserved_blocks;
}

static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t page_idx = 0, last_idx;
	unsigned int reserved_blocks = 0;
	int ret;

	if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
		return -EOPNOTSUPP;

	if (!f2fs_compressed_file(inode))
		return -EINVAL;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (atomic_read(&F2FS_I(inode)->i_compr_blocks))
		goto out;

	f2fs_balance_fs(F2FS_I_SB(inode), true);

	inode_lock(inode);

	if (!IS_IMMUTABLE(inode)) {
		ret = -EINVAL;
		goto unlock_inode;
	}

	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);

	last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	while (page_idx < last_idx) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
		if (ret) {
			if (ret == -ENOENT) {
				page_idx = f2fs_get_next_page_offset(&dn,
								page_idx);
				ret = 0;
				continue;
			}
			break;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
		count = round_up(count, F2FS_I(inode)->i_cluster_size);

		ret = reserve_compress_blocks(&dn, count);

		f2fs_put_dnode(&dn);

		if (ret < 0)
			break;

		page_idx += count;
		reserved_blocks += ret;
	}

	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	up_write(&F2FS_I(inode)->i_mmap_sem);

	if (ret >= 0) {
		F2FS_I(inode)->i_flags &= ~F2FS_IMMUTABLE_FL;
		f2fs_set_inode_flags(inode);
		inode->i_ctime = current_time(inode);
		f2fs_mark_inode_dirty_sync(inode, true);
	}
unlock_inode:
	inode_unlock(inode);
out:
	mnt_drop_write_file(filp);

	if (ret >= 0) {
		ret = put_user(reserved_blocks, (u64 __user *)arg);
	} else if (reserved_blocks &&
			atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: partial blocks were reserved i_ino=%lx "
			"iblocks=%llu, reserved=%u, compr_blocks=%u, "
			"run fsck to fix.",
			__func__, inode->i_ino, inode->i_blocks,
			reserved_blocks,
			atomic_read(&F2FS_I(inode)->i_compr_blocks));
	}

	return ret;
}

static int f2fs_secure_erase(struct block_device *bdev, struct inode *inode,
		pgoff_t off, block_t block, block_t len, u32 flags)
{
	struct request_queue *q = bdev_get_queue(bdev);
	sector_t sector = SECTOR_FROM_BLOCK(block);
	sector_t nr_sects = SECTOR_FROM_BLOCK(len);
	int ret = 0;

	if (!q)
		return -ENXIO;

	if (flags & F2FS_TRIM_FILE_DISCARD)
		ret = blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOFS,
						blk_queue_secure_erase(q) ?
						BLKDEV_DISCARD_SECURE : 0);

	if (!ret && (flags & F2FS_TRIM_FILE_ZEROOUT)) {
		if (IS_ENCRYPTED(inode))
			ret = fscrypt_zeroout_range(inode, off, block, len);
		else
			ret = blkdev_issue_zeroout(bdev, sector, nr_sects,
					GFP_NOFS, 0);
	}

	return ret;
}

static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	struct block_device *prev_bdev = NULL;
	struct f2fs_sectrim_range range;
	pgoff_t index, pg_end, prev_index = 0;
	block_t prev_block = 0, len = 0;
	loff_t end_addr;
	bool to_end = false;
	int ret = 0;

	if (!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&range, (struct f2fs_sectrim_range __user *)arg,
				sizeof(range)))
		return -EFAULT;

	if (range.flags == 0 || (range.flags & ~F2FS_TRIM_FILE_MASK) ||
			!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (((range.flags & F2FS_TRIM_FILE_DISCARD) &&
			!f2fs_hw_support_discard(sbi)) ||
			((range.flags & F2FS_TRIM_FILE_ZEROOUT) &&
			 IS_ENCRYPTED(inode) && f2fs_is_multi_device(sbi)))
		return -EOPNOTSUPP;

	file_start_write(filp);
	inode_lock(inode);

	if (f2fs_is_atomic_file(inode) || f2fs_compressed_file(inode) ||
			range.start >= inode->i_size) {
		ret = -EINVAL;
		goto err;
	}

	if (range.len == 0)
		goto err;

	if (inode->i_size - range.start > range.len) {
		end_addr = range.start + range.len;
	} else {
		end_addr = range.len == (u64)-1 ?
			sbi->sb->s_maxbytes : inode->i_size;
		to_end = true;
	}

	if (!IS_ALIGNED(range.start, F2FS_BLKSIZE) ||
			(!to_end && !IS_ALIGNED(end_addr, F2FS_BLKSIZE))) {
		ret = -EINVAL;
		goto err;
	}

	index = F2FS_BYTES_TO_BLK(range.start);
	pg_end = DIV_ROUND_UP(end_addr, F2FS_BLKSIZE);

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto err;

	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);

	ret = filemap_write_and_wait_range(mapping, range.start,
			to_end ? LLONG_MAX : end_addr - 1);
	if (ret)
		goto out;

	truncate_inode_pages_range(mapping, range.start,
			to_end ? -1 : end_addr - 1);

	while (index < pg_end) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;
		int i;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
		if (ret) {
			if (ret == -ENOENT) {
				index = f2fs_get_next_page_offset(&dn, index);
				continue;
			}
			goto out;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, pg_end - index);
		for (i = 0; i < count; i++, index++, dn.ofs_in_node++) {
			struct block_device *cur_bdev;
			block_t blkaddr = f2fs_data_blkaddr(&dn);

			if (!__is_valid_data_blkaddr(blkaddr))
				continue;

			if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
						DATA_GENERIC_ENHANCE)) {
				ret = -EFSCORRUPTED;
				f2fs_put_dnode(&dn);
				goto out;
			}

			cur_bdev = f2fs_target_device(sbi, blkaddr, NULL);
			if (f2fs_is_multi_device(sbi)) {
				int di = f2fs_target_device_index(sbi, blkaddr);

				blkaddr -= FDEV(di).start_blk;
			}

			if (len) {
				if (prev_bdev == cur_bdev &&
						index == prev_index + len &&
						blkaddr == prev_block + len) {
					len++;
				} else {
					ret = f2fs_secure_erase(prev_bdev,
						inode, prev_index, prev_block,
						len, range.flags);
					if (ret) {
						f2fs_put_dnode(&dn);
						goto out;
					}

					len = 0;
				}
			}

			if (!len) {
				prev_bdev = cur_bdev;
				prev_index = index;
				prev_block = blkaddr;
				len = 1;
			}
		}

		f2fs_put_dnode(&dn);

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}
		cond_resched();
	}

	if (len)
		ret = f2fs_secure_erase(prev_bdev, inode, prev_index,
				prev_block, len, range.flags);
out:
	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
err:
	inode_unlock(inode);
	file_end_write(filp);

	return ret;
}
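
/*
 * Illustrative userspace sketch (not part of the kernel) of
 * F2FS_IOC_SEC_TRIM_FILE. struct f2fs_sectrim_range and the
 * F2FS_TRIM_FILE_* flags are uapi; start must be block-aligned, and
 * len == (u64)-1 means "to end of file", per the checks above. The
 * helper name is an assumption.
 */
#if 0	/* userspace usage sketch */
#include <sys/ioctl.h>
#include <linux/f2fs.h>

static int sec_trim_example(int fd)
{
	struct f2fs_sectrim_range range = {
		.start = 0,
		.len = (__u64)-1,			/* erase to EOF */
		.flags = F2FS_TRIM_FILE_DISCARD |	/* (secure) discard, */
			 F2FS_TRIM_FILE_ZEROOUT,	/* then zero out */
	};

	/* fd must be open for write; see the FMODE_WRITE check above */
	return ioctl(fd, F2FS_IOC_SEC_TRIM_FILE, &range);
}
#endif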

static int f2fs_ioc_get_compress_option(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_comp_option option;

	if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
		return -EOPNOTSUPP;

	inode_lock_shared(inode);

	if (!f2fs_compressed_file(inode)) {
		inode_unlock_shared(inode);
		return -ENODATA;
	}

	option.algorithm = F2FS_I(inode)->i_compress_algorithm;
	option.log_cluster_size = F2FS_I(inode)->i_log_cluster_size;

	inode_unlock_shared(inode);

	if (copy_to_user((struct f2fs_comp_option __user *)arg, &option,
				sizeof(option)))
		return -EFAULT;

	return 0;
}

static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_comp_option option;
	int ret = 0;

	if (!f2fs_sb_has_compression(sbi))
		return -EOPNOTSUPP;

	if (!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&option, (struct f2fs_comp_option __user *)arg,
				sizeof(option)))
		return -EFAULT;

	if (!f2fs_compressed_file(inode) ||
			option.log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
			option.log_cluster_size > MAX_COMPRESS_LOG_SIZE ||
			option.algorithm >= COMPRESS_MAX)
		return -EINVAL;

	file_start_write(filp);
	inode_lock(inode);

	if (f2fs_is_mmap_file(inode) || get_dirty_pages(inode)) {
		ret = -EBUSY;
		goto out;
	}

	if (inode->i_size != 0) {
		ret = -EFBIG;
		goto out;
	}

	F2FS_I(inode)->i_compress_algorithm = option.algorithm;
	F2FS_I(inode)->i_log_cluster_size = option.log_cluster_size;
	F2FS_I(inode)->i_cluster_size = 1 << option.log_cluster_size;
	f2fs_mark_inode_dirty_sync(inode, true);

	if (!f2fs_is_compress_backend_ready(inode))
		f2fs_warn(sbi, "compression algorithm is successfully set, "
			"but the current kernel doesn't support this algorithm.");
out:
	inode_unlock(inode);
	file_end_write(filp);

	return ret;
}
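
/*
 * Illustrative userspace sketch (not part of the kernel) for the
 * compress-option pair. struct f2fs_comp_option is uapi; the algorithm
 * numbering follows the kernel-internal COMPRESS_* enum, so the value
 * 1 == LZ4 below is an assumption based on the current ordering, not a
 * stable contract.
 */
#if 0	/* userspace usage sketch */
#include <sys/ioctl.h>
#include <linux/f2fs.h>

static int set_compress_option_example(int fd)
{
	struct f2fs_comp_option opt = {
		.algorithm = 1,		/* assumed: COMPRESS_LZ4 */
		.log_cluster_size = 2,	/* 4-block clusters */
	};

	/* only allowed on an empty compressed file, per the checks above */
	if (ioctl(fd, F2FS_IOC_SET_COMPRESS_OPTION, &opt) < 0)
		return -1;
	return ioctl(fd, F2FS_IOC_GET_COMPRESS_OPTION, &opt);
}
#endif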

static int redirty_blocks(struct inode *inode, pgoff_t page_idx, int len)
{
	DEFINE_READAHEAD(ractl, NULL, inode->i_mapping, page_idx);
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	pgoff_t redirty_idx = page_idx;
	int i, page_len = 0, ret = 0;

	page_cache_ra_unbounded(&ractl, len, 0);

	for (i = 0; i < len; i++, page_idx++) {
		page = read_cache_page(mapping, page_idx, NULL, NULL);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			break;
		}
		page_len++;
	}

	for (i = 0; i < page_len; i++, redirty_idx++) {
		page = find_lock_page(mapping, redirty_idx);
		if (!page) {
			ret = -ENOMEM;
			break;
		}
		set_page_dirty(page);
		f2fs_put_page(page, 1);
		f2fs_put_page(page, 0);
	}

	return ret;
}

static int f2fs_ioc_decompress_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	pgoff_t page_idx = 0, last_idx;
	unsigned int blk_per_seg = sbi->blocks_per_seg;
	int cluster_size = F2FS_I(inode)->i_cluster_size;
	int count, ret;

	if (!f2fs_sb_has_compression(sbi) ||
			F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
		return -EOPNOTSUPP;

	if (!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (!f2fs_compressed_file(inode))
		return -EINVAL;

	f2fs_balance_fs(F2FS_I_SB(inode), true);

	file_start_write(filp);
	inode_lock(inode);

	if (!f2fs_is_compress_backend_ready(inode)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (f2fs_is_mmap_file(inode)) {
		ret = -EBUSY;
		goto out;
	}

	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (ret)
		goto out;

	if (!atomic_read(&fi->i_compr_blocks))
		goto out;

	last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	count = last_idx - page_idx;
	while (count) {
		int len = min(cluster_size, count);

		ret = redirty_blocks(inode, page_idx, len);
		if (ret < 0)
			break;

		if (get_dirty_pages(inode) >= blk_per_seg)
			filemap_fdatawrite(inode->i_mapping);

		count -= len;
		page_idx += len;
	}

	if (!ret)
		ret = filemap_write_and_wait_range(inode->i_mapping, 0,
							LLONG_MAX);

	if (ret)
		f2fs_warn(sbi, "%s: The file might be partially decompressed "
				"(errno=%d). Please delete the file.",
				__func__, ret);
out:
	inode_unlock(inode);
	file_end_write(filp);

	return ret;
}

static int f2fs_ioc_compress_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t page_idx = 0, last_idx;
	unsigned int blk_per_seg = sbi->blocks_per_seg;
	int cluster_size = F2FS_I(inode)->i_cluster_size;
	int count, ret;

	if (!f2fs_sb_has_compression(sbi) ||
			F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
		return -EOPNOTSUPP;

	if (!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (!f2fs_compressed_file(inode))
		return -EINVAL;

	f2fs_balance_fs(F2FS_I_SB(inode), true);

	file_start_write(filp);
	inode_lock(inode);

	if (!f2fs_is_compress_backend_ready(inode)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (f2fs_is_mmap_file(inode)) {
		ret = -EBUSY;
		goto out;
	}

	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (ret)
		goto out;

	set_inode_flag(inode, FI_ENABLE_COMPRESS);

	last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	count = last_idx - page_idx;
	while (count) {
		int len = min(cluster_size, count);

		ret = redirty_blocks(inode, page_idx, len);
		if (ret < 0)
			break;

		if (get_dirty_pages(inode) >= blk_per_seg)
			filemap_fdatawrite(inode->i_mapping);

		count -= len;
		page_idx += len;
	}

	if (!ret)
		ret = filemap_write_and_wait_range(inode->i_mapping, 0,
							LLONG_MAX);

	clear_inode_flag(inode, FI_ENABLE_COMPRESS);

	if (ret)
		f2fs_warn(sbi, "%s: The file might be partially compressed "
				"(errno=%d). Please delete the file.",
				__func__, ret);
out:
	inode_unlock(inode);
	file_end_write(filp);

	return ret;
}
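
/*
 * Illustrative userspace sketch (not part of the kernel) of the
 * argument-less F2FS_IOC_COMPRESS_FILE / F2FS_IOC_DECOMPRESS_FILE pair.
 * Both require the filesystem to be mounted with compress_mode=user, per
 * the COMPR_MODE_USER check above. The helper name is an assumption.
 */
#if 0	/* userspace usage sketch */
#include <sys/ioctl.h>
#include <linux/f2fs.h>

static int recompress_example(int fd)
{
	/* rewrite clusters uncompressed, then compress them again */
	if (ioctl(fd, F2FS_IOC_DECOMPRESS_FILE) < 0)
		return -1;
	return ioctl(fd, F2FS_IOC_COMPRESS_FILE);
}
#endif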

static long __f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case FS_IOC_GETFLAGS:
		return f2fs_ioc_getflags(filp, arg);
	case FS_IOC_SETFLAGS:
		return f2fs_ioc_setflags(filp, arg);
	case FS_IOC_GETVERSION:
		return f2fs_ioc_getversion(filp, arg);
	case F2FS_IOC_START_ATOMIC_WRITE:
		return f2fs_ioc_start_atomic_write(filp);
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
		return f2fs_ioc_commit_atomic_write(filp);
	case F2FS_IOC_START_VOLATILE_WRITE:
		return f2fs_ioc_start_volatile_write(filp);
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
		return f2fs_ioc_release_volatile_write(filp);
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
		return f2fs_ioc_abort_volatile_write(filp);
	case F2FS_IOC_SHUTDOWN:
		return f2fs_ioc_shutdown(filp, arg);
	case FITRIM:
		return f2fs_ioc_fitrim(filp, arg);
	case FS_IOC_SET_ENCRYPTION_POLICY:
		return f2fs_ioc_set_encryption_policy(filp, arg);
	case FS_IOC_GET_ENCRYPTION_POLICY:
		return f2fs_ioc_get_encryption_policy(filp, arg);
	case FS_IOC_GET_ENCRYPTION_PWSALT:
		return f2fs_ioc_get_encryption_pwsalt(filp, arg);
	case FS_IOC_GET_ENCRYPTION_POLICY_EX:
		return f2fs_ioc_get_encryption_policy_ex(filp, arg);
	case FS_IOC_ADD_ENCRYPTION_KEY:
		return f2fs_ioc_add_encryption_key(filp, arg);
	case FS_IOC_REMOVE_ENCRYPTION_KEY:
		return f2fs_ioc_remove_encryption_key(filp, arg);
	case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
		return f2fs_ioc_remove_encryption_key_all_users(filp, arg);
	case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
		return f2fs_ioc_get_encryption_key_status(filp, arg);
	case FS_IOC_GET_ENCRYPTION_NONCE:
		return f2fs_ioc_get_encryption_nonce(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT:
		return f2fs_ioc_gc(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
		return f2fs_ioc_gc_range(filp, arg);
	case F2FS_IOC_WRITE_CHECKPOINT:
		return f2fs_ioc_write_checkpoint(filp, arg);
	case F2FS_IOC_DEFRAGMENT:
		return f2fs_ioc_defragment(filp, arg);
	case F2FS_IOC_MOVE_RANGE:
		return f2fs_ioc_move_range(filp, arg);
	case F2FS_IOC_FLUSH_DEVICE:
		return f2fs_ioc_flush_device(filp, arg);
	case F2FS_IOC_GET_FEATURES:
		return f2fs_ioc_get_features(filp, arg);
	case FS_IOC_FSGETXATTR:
		return f2fs_ioc_fsgetxattr(filp, arg);
	case FS_IOC_FSSETXATTR:
		return f2fs_ioc_fssetxattr(filp, arg);
	case F2FS_IOC_GET_PIN_FILE:
		return f2fs_ioc_get_pin_file(filp, arg);
	case F2FS_IOC_SET_PIN_FILE:
		return f2fs_ioc_set_pin_file(filp, arg);
	case F2FS_IOC_PRECACHE_EXTENTS:
		return f2fs_ioc_precache_extents(filp, arg);
	case F2FS_IOC_RESIZE_FS:
		return f2fs_ioc_resize_fs(filp, arg);
	case FS_IOC_ENABLE_VERITY:
		return f2fs_ioc_enable_verity(filp, arg);
	case FS_IOC_MEASURE_VERITY:
		return f2fs_ioc_measure_verity(filp, arg);
	case FS_IOC_READ_VERITY_METADATA:
		return f2fs_ioc_read_verity_metadata(filp, arg);
	case FS_IOC_GETFSLABEL:
		return f2fs_ioc_getfslabel(filp, arg);
	case FS_IOC_SETFSLABEL:
		return f2fs_ioc_setfslabel(filp, arg);
	case F2FS_IOC_GET_COMPRESS_BLOCKS:
		return f2fs_get_compress_blocks(filp, arg);
	case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
		return f2fs_release_compress_blocks(filp, arg);
	case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
		return f2fs_reserve_compress_blocks(filp, arg);
	case F2FS_IOC_SEC_TRIM_FILE:
		return f2fs_sec_trim_file(filp, arg);
	case F2FS_IOC_GET_COMPRESS_OPTION:
		return f2fs_ioc_get_compress_option(filp, arg);
	case F2FS_IOC_SET_COMPRESS_OPTION:
		return f2fs_ioc_set_compress_option(filp, arg);
	case F2FS_IOC_DECOMPRESS_FILE:
		return f2fs_ioc_decompress_file(filp, arg);
	case F2FS_IOC_COMPRESS_FILE:
		return f2fs_ioc_compress_file(filp, arg);
	default:
		return -ENOTTY;
	}
}

long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
		return -EIO;
	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp))))
		return -ENOSPC;

	return __f2fs_ioctl(filp, cmd, arg);
}

static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	ssize_t ret;

	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	ret = generic_file_read_iter(iocb, iter);

	if (ret > 0)
		f2fs_update_iostat(F2FS_I_SB(inode), APP_READ_IO, ret);

	return ret;
}

static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	ssize_t ret;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
		ret = -EIO;
		goto out;
	}

	if (!f2fs_is_compress_backend_ready(inode)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock(inode)) {
			ret = -EAGAIN;
			goto out;
		}
	} else {
		inode_lock(inode);
	}

	if (unlikely(IS_IMMUTABLE(inode))) {
		ret = -EPERM;
		goto unlock;
	}

	ret = generic_write_checks(iocb, from);
	if (ret > 0) {
		bool preallocated = false;
		size_t target_size = 0;
		int err;

		if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
			set_inode_flag(inode, FI_NO_PREALLOC);

		if ((iocb->ki_flags & IOCB_NOWAIT)) {
			if (!f2fs_overwrite_io(inode, iocb->ki_pos,
						iov_iter_count(from)) ||
				f2fs_has_inline_data(inode) ||
				f2fs_force_buffered_io(inode, iocb, from)) {
				clear_inode_flag(inode, FI_NO_PREALLOC);
				inode_unlock(inode);
				ret = -EAGAIN;
				goto out;
			}
			goto write;
		}

		if (is_inode_flag_set(inode, FI_NO_PREALLOC))
			goto write;

		if (iocb->ki_flags & IOCB_DIRECT) {
			/*
			 * Convert inline data for Direct I/O before entering
			 * f2fs_direct_IO().
			 */
			err = f2fs_convert_inline_inode(inode);
			if (err)
				goto out_err;
			/*
			 * If f2fs_force_buffered_io() is true, we have to
			 * allocate blocks all the time, since f2fs_direct_IO
			 * will fall back to buffered IO.
			 */
			if (!f2fs_force_buffered_io(inode, iocb, from) &&
					allow_outplace_dio(inode, iocb, from))
				goto write;
		}
		preallocated = true;
		target_size = iocb->ki_pos + iov_iter_count(from);

		err = f2fs_preallocate_blocks(iocb, from);
		if (err) {
out_err:
			clear_inode_flag(inode, FI_NO_PREALLOC);
			inode_unlock(inode);
			ret = err;
			goto out;
		}
write:
		ret = __generic_file_write_iter(iocb, from);
		clear_inode_flag(inode, FI_NO_PREALLOC);

		/* if we couldn't write data, we should deallocate blocks. */
		if (preallocated && i_size_read(inode) < target_size)
			f2fs_truncate(inode);

		if (ret > 0)
			f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
	}
unlock:
	inode_unlock(inode);
out:
	trace_f2fs_file_write_iter(inode, iocb->ki_pos,
					iov_iter_count(from), ret);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}

#ifdef CONFIG_COMPAT
struct compat_f2fs_gc_range {
	u32 sync;
	compat_u64 start;
	compat_u64 len;
};
#define F2FS_IOC32_GARBAGE_COLLECT_RANGE	_IOW(F2FS_IOCTL_MAGIC, 11,\
						struct compat_f2fs_gc_range)

static int f2fs_compat_ioc_gc_range(struct file *file, unsigned long arg)
{
	struct compat_f2fs_gc_range __user *urange;
	struct f2fs_gc_range range;
	int err;

	urange = compat_ptr(arg);
	err = get_user(range.sync, &urange->sync);
	err |= get_user(range.start, &urange->start);
	err |= get_user(range.len, &urange->len);
	if (err)
		return -EFAULT;

	return __f2fs_ioc_gc_range(file, &range);
}

struct compat_f2fs_move_range {
	u32 dst_fd;
	compat_u64 pos_in;
	compat_u64 pos_out;
	compat_u64 len;
};
#define F2FS_IOC32_MOVE_RANGE	_IOWR(F2FS_IOCTL_MAGIC, 9, \
					struct compat_f2fs_move_range)

static int f2fs_compat_ioc_move_range(struct file *file, unsigned long arg)
{
	struct compat_f2fs_move_range __user *urange;
	struct f2fs_move_range range;
	int err;

	urange = compat_ptr(arg);
	err = get_user(range.dst_fd, &urange->dst_fd);
	err |= get_user(range.pos_in, &urange->pos_in);
	err |= get_user(range.pos_out, &urange->pos_out);
	err |= get_user(range.len, &urange->len);
	if (err)
		return -EFAULT;

	return __f2fs_ioc_move_range(file, &range);
}

long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
		return -EIO;
	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(file))))
		return -ENOSPC;

	switch (cmd) {
	case FS_IOC32_GETFLAGS:
		cmd = FS_IOC_GETFLAGS;
		break;
	case FS_IOC32_SETFLAGS:
		cmd = FS_IOC_SETFLAGS;
		break;
	case FS_IOC32_GETVERSION:
		cmd = FS_IOC_GETVERSION;
		break;
	case F2FS_IOC32_GARBAGE_COLLECT_RANGE:
		return f2fs_compat_ioc_gc_range(file, arg);
	case F2FS_IOC32_MOVE_RANGE:
		return f2fs_compat_ioc_move_range(file, arg);
	case F2FS_IOC_START_ATOMIC_WRITE:
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
	case F2FS_IOC_START_VOLATILE_WRITE:
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
	case F2FS_IOC_SHUTDOWN:
	case FITRIM:
	case FS_IOC_SET_ENCRYPTION_POLICY:
	case FS_IOC_GET_ENCRYPTION_PWSALT:
	case FS_IOC_GET_ENCRYPTION_POLICY:
	case FS_IOC_GET_ENCRYPTION_POLICY_EX:
	case FS_IOC_ADD_ENCRYPTION_KEY:
	case FS_IOC_REMOVE_ENCRYPTION_KEY:
	case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
	case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
	case FS_IOC_GET_ENCRYPTION_NONCE:
	case F2FS_IOC_GARBAGE_COLLECT:
	case F2FS_IOC_WRITE_CHECKPOINT:
	case F2FS_IOC_DEFRAGMENT:
	case F2FS_IOC_FLUSH_DEVICE:
	case F2FS_IOC_GET_FEATURES:
	case FS_IOC_FSGETXATTR:
	case FS_IOC_FSSETXATTR:
	case F2FS_IOC_GET_PIN_FILE:
	case F2FS_IOC_SET_PIN_FILE:
	case F2FS_IOC_PRECACHE_EXTENTS:
	case F2FS_IOC_RESIZE_FS:
	case FS_IOC_ENABLE_VERITY:
	case FS_IOC_MEASURE_VERITY:
	case FS_IOC_READ_VERITY_METADATA:
	case FS_IOC_GETFSLABEL:
	case FS_IOC_SETFSLABEL:
	case F2FS_IOC_GET_COMPRESS_BLOCKS:
	case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
	case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
	case F2FS_IOC_SEC_TRIM_FILE:
	case F2FS_IOC_GET_COMPRESS_OPTION:
	case F2FS_IOC_SET_COMPRESS_OPTION:
	case F2FS_IOC_DECOMPRESS_FILE:
	case F2FS_IOC_COMPRESS_FILE:
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return __f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif

const struct file_operations f2fs_file_operations = {
	.llseek		= f2fs_llseek,
	.read_iter	= f2fs_file_read_iter,
	.write_iter	= f2fs_file_write_iter,
	.open		= f2fs_file_open,
	.release	= f2fs_release_file,
	.mmap		= f2fs_file_mmap,
	.flush		= f2fs_file_flush,
	.fsync		= f2fs_sync_file,
	.fallocate	= f2fs_fallocate,
	.unlocked_ioctl	= f2fs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= f2fs_compat_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
};