// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/uuid.h>
#include <linux/file.h>
#include <linux/nls.h>
#include <linux/sched/signal.h>
#include <linux/fileattr.h>
#include <linux/fadvise.h>
#include <linux/iomap.h>

#include <trace/events/f2fs.h>
#include <uapi/linux/f2fs.h>
static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
	struct inode *inode = file_inode(vmf->vma->vm_file);

	ret = filemap_fault(vmf);
	if (ret & VM_FAULT_LOCKED)
		f2fs_update_iostat(F2FS_I_SB(inode), inode,
					APP_MAPPED_READ_IO, F2FS_BLKSIZE);

	trace_f2fs_filemap_fault(inode, vmf->pgoff, vmf->vma->vm_flags, ret);
static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	bool need_alloc = true;

	if (unlikely(IS_IMMUTABLE(inode)))
		return VM_FAULT_SIGBUS;

	if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {

	if (unlikely(f2fs_cp_error(sbi))) {

	if (!f2fs_is_checkpoint_ready(sbi)) {

	err = f2fs_convert_inline_inode(inode);

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (f2fs_compressed_file(inode)) {
		int ret = f2fs_is_compressed_cluster(inode, page->index);
	/* should be done outside of any locked page */
	f2fs_balance_fs(sbi, true);

	sb_start_pagefault(inode->i_sb);

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	file_update_time(vmf->vma->vm_file);
	filemap_invalidate_lock_shared(inode->i_mapping);

	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {

	/* block allocation */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_block_locked(&dn, page->index);
#ifdef CONFIG_F2FS_FS_COMPRESSION
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);

	f2fs_wait_on_page_writeback(page, DATA, false, true);

	/* wait for GCed page writeback via META_MAPPING */
	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))

	/* page is wholly or partially inside EOF */
	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
						i_size_read(inode)) {
		offset = i_size_read(inode) & ~PAGE_MASK;
		zero_user_segment(page, offset, PAGE_SIZE);

	set_page_dirty(page);

	f2fs_update_iostat(sbi, inode, APP_MAPPED_IO, F2FS_BLKSIZE);
	f2fs_update_time(sbi, REQ_TIME);

	filemap_invalidate_unlock_shared(inode->i_mapping);

	sb_end_pagefault(inode->i_sb);

	ret = vmf_fs_error(err);

	trace_f2fs_vm_page_mkwrite(inode, page->index, vmf->vma->vm_flags, ret);
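/*
 * Illustrative sketch (not part of the original source): a write fault on a
 * shared, writable mapping is what drives f2fs_vm_page_mkwrite() above, which
 * allocates the backing block, waits for any (GC) writeback, zeroes the part
 * of the page beyond EOF, then dirties the page. Assuming a file on an f2fs
 * mount at the hypothetical path /mnt/f2fs/file:
 *
 *	int fd = open("/mnt/f2fs/file", O_RDWR);
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	p[0] = 'x';			// first store triggers ->page_mkwrite
 *	msync(p, 4096, MS_SYNC);	// write the now-dirty page back
 */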
static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= f2fs_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
static int get_parent_ino(struct inode *inode, nid_t *pino)
	struct dentry *dentry;

	/*
	 * Make sure to get the non-deleted alias. The alias associated with
	 * the open file descriptor being fsync()'ed may be deleted already.
	 */
	dentry = d_find_alias(inode);

	*pino = parent_ino(dentry);
static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	enum cp_reason_type cp_reason = CP_NO_NEEDED;

	if (!S_ISREG(inode->i_mode))
		cp_reason = CP_NON_REGULAR;
	else if (f2fs_compressed_file(inode))
		cp_reason = CP_COMPRESSED;
	else if (inode->i_nlink != 1)
		cp_reason = CP_HARDLINK;
	else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
		cp_reason = CP_SB_NEED_CP;
	else if (file_wrong_pino(inode))
		cp_reason = CP_WRONG_PINO;
	else if (!f2fs_space_for_roll_forward(sbi))
		cp_reason = CP_NO_SPC_ROLL;
	else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		cp_reason = CP_NODE_NEED_CP;
	else if (test_opt(sbi, FASTBOOT))
		cp_reason = CP_FASTBOOT_MODE;
	else if (F2FS_OPTION(sbi).active_logs == 2)
		cp_reason = CP_SPEC_LOG_NUM;
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
		f2fs_need_dentry_mark(sbi, inode->i_ino) &&
		f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
				TRANS_DIR_INO))
		cp_reason = CP_RECOVER_DIR;
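/*
 * Reading the chain above: any reason other than CP_NO_NEEDED means
 * roll-forward recovery cannot safely replay this inode's fsync, so
 * f2fs_do_sync_file() below falls back to a full checkpoint via
 * f2fs_sync_fs(). For example, a hard-linked file (i_nlink != 1) cannot be
 * recovered through its single parent-ino link, hence CP_HARDLINK forces the
 * checkpoint path.
 */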
static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);

	/* but we need to avoid the case where some inode updates are still pending */
	if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino))
static void try_to_fix_pino(struct inode *inode)
	struct f2fs_inode_info *fi = F2FS_I(inode);

	f2fs_down_write(&fi->i_sem);
	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
			get_parent_ino(inode, &pino)) {
		f2fs_i_pino_write(inode, pino);
		file_got_pino(inode);

	f2fs_up_write(&fi->i_sem);
static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
						int datasync, bool atomic)
	struct inode *inode = file->f_mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	enum cp_reason_type cp_reason = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
	};
	unsigned int seq_id = 0;

	if (unlikely(f2fs_readonly(inode->i_sb)))

	trace_f2fs_sync_file_enter(inode);

	if (S_ISDIR(inode->i_mode))

	/* if fdatasync is triggered, let's do in-place-update */
	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
		set_inode_flag(inode, FI_NEED_IPU);
	ret = file_write_and_wait_range(file, start, end);
	clear_inode_flag(inode, FI_NEED_IPU);

	if (ret || is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
		trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
	/* if the inode is dirty, let's recover all the time */
	if (!f2fs_skip_inode_update(inode, datasync)) {
		f2fs_write_inode(inode, NULL);

	/*
	 * if there is no written data, don't waste time writing recovery info.
	 */
	if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
			!f2fs_exist_written_data(sbi, ino, APPEND_INO)) {

		/* it may call write_inode just prior to fsync */
		if (need_inode_page_update(sbi, ino))

		if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
				f2fs_exist_written_data(sbi, ino, UPDATE_INO))

	/*
	 * For the OPU case, during fsync() a node can be persisted before the
	 * data when the lower device doesn't support write barriers, resulting
	 * in data corruption after SPO. So in strict fsync mode, force atomic
	 * write semantics to keep the write order between data/node and the
	 * last node, to avoid potential data corruption.
	 */
	if (F2FS_OPTION(sbi).fsync_mode ==
				FSYNC_MODE_STRICT && !atomic)
	/*
	 * Both fdatasync() and fsync() are able to be recovered from a
	 * sudden power-off.
	 */
	f2fs_down_read(&F2FS_I(inode)->i_sem);
	cp_reason = need_do_checkpoint(inode);
	f2fs_up_read(&F2FS_I(inode)->i_sem);

	/* all the dirty node pages should be flushed for POR */
	ret = f2fs_sync_fs(inode->i_sb, 1);

	/*
	 * We've secured consistency through sync_fs. The following pino
	 * will be used only for fsynced inodes after checkpoint.
	 */
	try_to_fix_pino(inode);
	clear_inode_flag(inode, FI_APPEND_WRITE);
	clear_inode_flag(inode, FI_UPDATE_WRITE);

	atomic_inc(&sbi->wb_sync_req[NODE]);
	ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
	atomic_dec(&sbi->wb_sync_req[NODE]);

	/* if cp_error was enabled, we should avoid an infinite loop */
	if (unlikely(f2fs_cp_error(sbi))) {

	if (f2fs_need_inode_block_update(sbi, ino)) {
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_write_inode(inode, NULL);

	/*
	 * If it's an atomic write, it's fine to keep the write ordering, so we
	 * don't need to wait for node write completion here, since we use a
	 * node chain which serializes node blocks. If one of the node writes is
	 * reordered, we simply see a broken chain, which stops roll-forward
	 * recovery: we recover either all of the node blocks or none of them.
	 */
	ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);

	/* once recovery info is written, we don't need to track this */
	f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
	clear_inode_flag(inode, FI_APPEND_WRITE);

	if ((!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER) ||
	    (atomic && !test_opt(sbi, NOBARRIER) && f2fs_sb_has_blkzoned(sbi)))
		ret = f2fs_issue_flush(sbi, inode->i_ino);

	f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
	clear_inode_flag(inode, FI_UPDATE_WRITE);
	f2fs_remove_ino_entry(sbi, ino, FLUSH_INO);

	f2fs_update_time(sbi, REQ_TIME);

	trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
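/*
 * Illustrative sketch: f2fs_do_sync_file() backs both fsync(2) and
 * fdatasync(2); the datasync argument selects whether a clean inode lets the
 * sync be skipped. Assuming fd refers to a file on f2fs:
 *
 *	write(fd, buf, len);
 *	fsync(fd);	// datasync == 0: inode metadata is synced too
 *	fdatasync(fd);	// datasync == 1: may take the cheaper IPU path
 */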
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))

	return f2fs_do_sync_file(file, start, end, datasync, false);
static bool __found_offset(struct address_space *mapping,
		struct dnode_of_data *dn, pgoff_t index, int whence)
	block_t blkaddr = f2fs_data_blkaddr(dn);
	struct inode *inode = mapping->host;
	bool compressed_cluster = false;

	if (f2fs_compressed_file(inode)) {
		block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_page,
		    ALIGN_DOWN(dn->ofs_in_node, F2FS_I(inode)->i_cluster_size));

		compressed_cluster = first_blkaddr == COMPRESS_ADDR;

	if (__is_valid_data_blkaddr(blkaddr))
	if (blkaddr == NEW_ADDR &&
	    xa_get_mark(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY))
	if (compressed_cluster)
	if (compressed_cluster)
	if (blkaddr == NULL_ADDR)
static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
	pgoff_t pgofs, end_offset;
	loff_t data_ofs = offset;

	inode_lock_shared(inode);

	isize = i_size_read(inode);

	/* handle inline data case */
	if (f2fs_has_inline_data(inode)) {
		if (whence == SEEK_HOLE) {
		} else if (whence == SEEK_DATA) {

	pgofs = (pgoff_t)(offset >> PAGE_SHIFT);

	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
		if (err && err != -ENOENT) {
		} else if (err == -ENOENT) {
			/* direct node does not exist */
			if (whence == SEEK_DATA) {
				pgofs = f2fs_get_next_page_offset(&dn, pgofs);

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

		/* find data/hole in dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
				data_ofs = (loff_t)pgofs << PAGE_SHIFT) {

			blkaddr = f2fs_data_blkaddr(&dn);

			if (__is_valid_data_blkaddr(blkaddr) &&
				!f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
					blkaddr, DATA_GENERIC_ENHANCE)) {

			if (__found_offset(file->f_mapping, &dn,

	if (whence == SEEK_DATA)

	if (whence == SEEK_HOLE && data_ofs > isize)

	inode_unlock_shared(inode);
	return vfs_setpos(file, data_ofs, maxbytes);

	inode_unlock_shared(inode);
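/*
 * Illustrative sketch: the block-granular scan above is what services
 * lseek(2) with SEEK_DATA/SEEK_HOLE on f2fs, e.g. for sparse-file copies:
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);		// first allocated block
 *	off_t hole = lseek(fd, data, SEEK_HOLE);	// end of that extent
 */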
static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;

	if (f2fs_compressed_file(inode))
		maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;

		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));

		return f2fs_seek_block(file, offset, whence);
static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
	struct inode *inode = file_inode(file);

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))

	if (!f2fs_is_compress_backend_ready(inode))

	vma->vm_ops = &f2fs_file_vm_ops;

	f2fs_down_read(&F2FS_I(inode)->i_sem);
	set_inode_flag(inode, FI_MMAP_FILE);
	f2fs_up_read(&F2FS_I(inode)->i_sem);
static int f2fs_file_open(struct inode *inode, struct file *filp)
	int err = fscrypt_file_open(inode, filp);

	if (!f2fs_is_compress_backend_ready(inode))

	err = fsverity_file_open(inode, filp);

	filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;
	filp->f_mode |= FMODE_CAN_ODIRECT;

	return dquot_file_open(inode, filp);
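/*
 * Note on the f_mode bits set above (a short gloss, not from the original
 * source): FMODE_NOWAIT advertises RWF_NOWAIT support, FMODE_BUF_RASYNC
 * enables async buffered reads (e.g. via io_uring), and FMODE_CAN_ODIRECT
 * marks that O_DIRECT is honored, subject to f2fs_force_buffered_io() below.
 */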
void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
	bool compressed_cluster = false;
	int cluster_index = 0, valid_blocks = 0;
	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
	bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);

	addr = get_dnode_addr(dn->inode, dn->node_page) + ofs;

	/* Assumption: truncation starts with cluster */
	for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
		block_t blkaddr = le32_to_cpu(*addr);

		if (f2fs_compressed_file(dn->inode) &&
					!(cluster_index & (cluster_size - 1))) {
			if (compressed_cluster)
				f2fs_i_compr_blocks_update(dn->inode,
							valid_blocks, false);
			compressed_cluster = (blkaddr == COMPRESS_ADDR);

		if (blkaddr == NULL_ADDR)

		f2fs_set_data_blkaddr(dn, NULL_ADDR);

		if (__is_valid_data_blkaddr(blkaddr)) {
			if (time_to_inject(sbi, FAULT_BLKADDR_CONSISTENCE))
			if (!f2fs_is_valid_blkaddr_raw(sbi, blkaddr,
						DATA_GENERIC_ENHANCE)) {
				f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
			if (compressed_cluster)

		f2fs_invalidate_blocks(sbi, blkaddr);

		if (!released || blkaddr != COMPRESS_ADDR)

	if (compressed_cluster)
		f2fs_i_compr_blocks_update(dn->inode, valid_blocks, false);

	/*
	 * once we invalidate valid blkaddr in range [ofs, ofs + count],
	 * we will invalidate all blkaddr in the whole range.
	 */
	fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
	f2fs_update_read_extent_cache_range(dn, fofs, 0, len);
	f2fs_update_age_extent_cache_range(dn, fofs, len);
	dec_valid_block_count(sbi, dn->inode, nr_free);

	dn->ofs_in_node = ofs;

	f2fs_update_time(sbi, REQ_TIME);
	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					dn->ofs_in_node, nr_free);
static int truncate_partial_data_page(struct inode *inode, u64 from,
							bool cache_only)
	loff_t offset = from & (PAGE_SIZE - 1);
	pgoff_t index = from >> PAGE_SHIFT;
	struct address_space *mapping = inode->i_mapping;

	if (!offset && !cache_only)

	page = find_lock_page(mapping, index);
	if (page && PageUptodate(page))

	f2fs_put_page(page, 1);

	page = f2fs_get_lock_data_page(inode, index, true);
		return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);

	f2fs_wait_on_page_writeback(page, DATA, true, true);
	zero_user(page, offset, PAGE_SIZE - offset);

	/* An encrypted inode should have a key and truncate the last page. */
	f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));

	set_page_dirty(page);
	f2fs_put_page(page, 1);
int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int count = 0, err = 0;
	bool truncate_page = false;

	trace_f2fs_truncate_blocks_enter(inode, from);

	free_from = (pgoff_t)F2FS_BLK_ALIGN(from);

	if (free_from >= max_file_blocks(inode))

	ipage = f2fs_get_node_page(sbi, inode->i_ino);
		err = PTR_ERR(ipage);

	if (f2fs_has_inline_data(inode)) {
		f2fs_truncate_inline_inode(inode, ipage, from);
		f2fs_put_page(ipage, 1);
		truncate_page = true;

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);

	count = ADDRS_PER_PAGE(dn.node_page, inode);

	count -= dn.ofs_in_node;
	f2fs_bug_on(sbi, count < 0);

	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		f2fs_truncate_data_blocks_range(&dn, count);

	err = f2fs_truncate_inode_blocks(inode, free_from);

	/* lastly zero out the first data page */
		err = truncate_partial_data_page(inode, from, truncate_page);

	trace_f2fs_truncate_blocks_exit(inode, err);
int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
	u64 free_from = from;

#ifdef CONFIG_F2FS_FS_COMPRESSION
	/*
	 * for compressed files, only cluster-size aligned truncation
	 * is supported.
	 */
	if (f2fs_compressed_file(inode))
		free_from = round_up(from,
				F2FS_I(inode)->i_cluster_size << PAGE_SHIFT);

	err = f2fs_do_truncate_blocks(inode, free_from, lock);

#ifdef CONFIG_F2FS_FS_COMPRESSION
	/*
	 * For a compressed file, direct write is not allowed once its
	 * compressed blocks have been released, but it should be allowed
	 * again after the file is truncated to zero.
	 */
	if (f2fs_compressed_file(inode) && !free_from
			&& is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
		clear_inode_flag(inode, FI_COMPRESS_RELEASED);

	if (from != free_from) {
		err = f2fs_truncate_partial_cluster(inode, from, lock);
int f2fs_truncate(struct inode *inode)
	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))

	trace_f2fs_truncate(inode);

	if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE))

	err = f2fs_dquot_initialize(inode);

	/* we should check inline_data size */
	if (!f2fs_may_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);

	err = f2fs_truncate_blocks(inode, i_size_read(inode), true);

	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	f2fs_mark_inode_dirty_sync(inode, false);
static bool f2fs_force_buffered_io(struct inode *inode, int rw)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (!fscrypt_dio_supported(inode))
	if (fsverity_active(inode))
	if (f2fs_compressed_file(inode))

	/* disallow direct IO if any of the devices has an unaligned blksize */
	if (f2fs_is_multi_device(sbi) && !sbi->aligned_blksize)

	/*
	 * for a blkzoned device, fall back from direct IO to buffered IO, so
	 * all IOs can be serialized by the log-structured write.
	 */
	if (f2fs_sb_has_blkzoned(sbi) && (rw == WRITE))
	if (is_sbi_flag_set(sbi, SBI_CP_DISABLED))
int f2fs_getattr(struct mnt_idmap *idmap, const struct path *path,
		struct kstat *stat, u32 request_mask, unsigned int query_flags)
	struct inode *inode = d_inode(path->dentry);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_inode *ri = NULL;

	if (f2fs_has_extra_attr(inode) &&
			f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
		stat->result_mask |= STATX_BTIME;
		stat->btime.tv_sec = fi->i_crtime.tv_sec;
		stat->btime.tv_nsec = fi->i_crtime.tv_nsec;

	/*
	 * Return the DIO alignment restrictions if requested. We only return
	 * this information when requested, since on encrypted files it might
	 * take a fair bit of work to get if the file wasn't opened recently.
	 *
	 * f2fs sometimes supports DIO reads but not DIO writes. STATX_DIOALIGN
	 * cannot represent that, so in that case we report no DIO support.
	 */
	if ((request_mask & STATX_DIOALIGN) && S_ISREG(inode->i_mode)) {
		unsigned int bsize = i_blocksize(inode);

		stat->result_mask |= STATX_DIOALIGN;
		if (!f2fs_force_buffered_io(inode, WRITE)) {
			stat->dio_mem_align = bsize;
			stat->dio_offset_align = bsize;

	if (flags & F2FS_COMPR_FL)
		stat->attributes |= STATX_ATTR_COMPRESSED;
	if (flags & F2FS_APPEND_FL)
		stat->attributes |= STATX_ATTR_APPEND;
	if (IS_ENCRYPTED(inode))
		stat->attributes |= STATX_ATTR_ENCRYPTED;
	if (flags & F2FS_IMMUTABLE_FL)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (flags & F2FS_NODUMP_FL)
		stat->attributes |= STATX_ATTR_NODUMP;
	if (IS_VERITY(inode))
		stat->attributes |= STATX_ATTR_VERITY;

	stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
				  STATX_ATTR_ENCRYPTED |
				  STATX_ATTR_IMMUTABLE |

	generic_fillattr(idmap, request_mask, inode, stat);

	/* we need to show initial sectors used for inline_data/dentries */
	if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
					f2fs_has_inline_dentry(inode))
		stat->blocks += (stat->size + 511) >> 9;
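/*
 * Illustrative sketch: the STATX_BTIME and STATX_DIOALIGN fields filled in
 * above are reachable from userspace via statx(2), e.g.:
 *
 *	struct statx stx;
 *	statx(AT_FDCWD, "/mnt/f2fs/file", 0,
 *	      STATX_BTIME | STATX_DIOALIGN, &stx);
 *	// stx.stx_btime, stx.stx_dio_mem_align, stx.stx_dio_offset_align
 */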
#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct mnt_idmap *idmap,
			   struct inode *inode, const struct iattr *attr)
	unsigned int ia_valid = attr->ia_valid;

	i_uid_update(idmap, attr, inode);
	i_gid_update(idmap, attr, inode);
	if (ia_valid & ATTR_ATIME)
		inode_set_atime_to_ts(inode, attr->ia_atime);
	if (ia_valid & ATTR_MTIME)
		inode_set_mtime_to_ts(inode, attr->ia_mtime);
	if (ia_valid & ATTR_CTIME)
		inode_set_ctime_to_ts(inode, attr->ia_ctime);
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;
		vfsgid_t vfsgid = i_gid_into_vfsgid(idmap, inode);

		if (!vfsgid_in_group_p(vfsgid) &&
		    !capable_wrt_inode_uidgid(idmap, inode, CAP_FSETID))
		set_acl_inode(inode, mode);

#define __setattr_copy setattr_copy
int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
			struct iattr *attr)
	struct inode *inode = d_inode(dentry);

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))

	if (unlikely(IS_IMMUTABLE(inode)))

	if (unlikely(IS_APPEND(inode) &&
			(attr->ia_valid & (ATTR_MODE | ATTR_UID |
					   ATTR_GID | ATTR_TIMES_SET))))

	if ((attr->ia_valid & ATTR_SIZE) &&
		!f2fs_is_compress_backend_ready(inode))

	err = setattr_prepare(idmap, dentry, attr);

	err = fscrypt_prepare_setattr(dentry, attr);

	err = fsverity_prepare_setattr(dentry, attr);

	if (is_quota_modification(idmap, inode, attr)) {
		err = f2fs_dquot_initialize(inode);

	if (i_uid_needs_update(idmap, attr, inode) ||
	    i_gid_needs_update(idmap, attr, inode)) {
		f2fs_lock_op(F2FS_I_SB(inode));
		err = dquot_transfer(idmap, inode, attr);
			set_sbi_flag(F2FS_I_SB(inode),
					SBI_QUOTA_NEED_REPAIR);
			f2fs_unlock_op(F2FS_I_SB(inode));

		/*
		 * update uid/gid under lock_op(), so that dquot and inode can
		 * be updated atomically.
		 */
		i_uid_update(idmap, attr, inode);
		i_gid_update(idmap, attr, inode);
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_unlock_op(F2FS_I_SB(inode));

	if (attr->ia_valid & ATTR_SIZE) {
		loff_t old_size = i_size_read(inode);

		if (attr->ia_size > MAX_INLINE_DATA(inode)) {
			/*
			 * we should convert the inline inode before
			 * i_size_write, to keep the size below the inline_data
			 * limit while the inline flag is still set.
			 */
			err = f2fs_convert_inline_inode(inode);

		f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		filemap_invalidate_lock(inode->i_mapping);

		truncate_setsize(inode, attr->ia_size);

		if (attr->ia_size <= old_size)
			err = f2fs_truncate(inode);
		/*
		 * do not trim all blocks after i_size if target size is
		 * larger than i_size.
		 */
		filemap_invalidate_unlock(inode->i_mapping);
		f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

		spin_lock(&F2FS_I(inode)->i_size_lock);
		inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
		F2FS_I(inode)->last_disk_size = i_size_read(inode);
		spin_unlock(&F2FS_I(inode)->i_size_lock);

	__setattr_copy(idmap, inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(idmap, dentry, f2fs_get_inode_mode(inode));

		if (is_inode_flag_set(inode, FI_ACL_MODE)) {
			inode->i_mode = F2FS_I(inode)->i_acl_mode;
			clear_inode_flag(inode, FI_ACL_MODE);

	/* file size may have changed here */
	f2fs_mark_inode_dirty_sync(inode, true);

	/* inode changes will produce dirty node pages, flushed by checkpoint */
	f2fs_balance_fs(F2FS_I_SB(inode), true);
const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_inode_acl	= f2fs_get_acl,
	.set_acl	= f2fs_set_acl,
	.listxattr	= f2fs_listxattr,
	.fiemap		= f2fs_fiemap,
	.fileattr_get	= f2fs_fileattr_get,
	.fileattr_set	= f2fs_fileattr_set,
static int fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	f2fs_balance_fs(sbi, true);

	page = f2fs_get_new_data_page(inode, NULL, index, false);
	f2fs_unlock_op(sbi);

		return PTR_ERR(page);

	f2fs_wait_on_page_writeback(page, DATA, true, true);
	zero_user(page, start, len);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
	while (pg_start < pg_end) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
			if (err == -ENOENT) {
				pg_start = f2fs_get_next_page_offset(&dn,

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);

		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);

		f2fs_truncate_data_blocks_range(&dn, count);
		f2fs_put_dnode(&dn);
static int f2fs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;

	ret = f2fs_convert_inline_inode(inode);

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);

			ret = fill_zero(inode, pg_start++, off_start,
							PAGE_SIZE - off_start);

			ret = fill_zero(inode, pg_end, 0, off_end);

		if (pg_start < pg_end) {
			loff_t blk_start, blk_end;
			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

			f2fs_balance_fs(sbi, true);

			blk_start = (loff_t)pg_start << PAGE_SHIFT;
			blk_end = (loff_t)pg_end << PAGE_SHIFT;

			f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			filemap_invalidate_lock(inode->i_mapping);

			truncate_pagecache_range(inode, blk_start, blk_end - 1);

			ret = f2fs_truncate_hole(inode, pg_start, pg_end);
			f2fs_unlock_op(sbi);

			filemap_invalidate_unlock(inode->i_mapping);
			f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
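/*
 * Illustrative sketch: f2fs_punch_hole() zeroes the partial head/tail pages
 * with fill_zero() and drops the whole blocks in between. From userspace
 * (hole punching requires KEEP_SIZE):
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  offset, len);
 */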
static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, pgoff_t len)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
	if (ret && ret != -ENOENT) {
	} else if (ret == -ENOENT) {
		if (dn.max_level == 0)
		done = min((pgoff_t)ADDRS_PER_BLOCK(inode) -
						dn.ofs_in_node, len);

	done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
					dn.ofs_in_node, len);
	for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
		*blkaddr = f2fs_data_blkaddr(&dn);

		if (__is_valid_data_blkaddr(*blkaddr) &&
			!f2fs_is_valid_blkaddr(sbi, *blkaddr,
					DATA_GENERIC_ENHANCE)) {
			f2fs_put_dnode(&dn);
			f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
			return -EFSCORRUPTED;

		if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {
			if (f2fs_lfs_mode(sbi)) {
				f2fs_put_dnode(&dn);

			/* do not invalidate this block address */
			f2fs_update_data_blkaddr(&dn, NULL_ADDR);

	f2fs_put_dnode(&dn);
static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, int len)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;

	for (i = 0; i < len; i++, do_replace++, blkaddr++) {
		if (*do_replace == 0)

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
		dec_valid_block_count(sbi, inode, 1);
		f2fs_invalidate_blocks(sbi, *blkaddr);
		f2fs_update_data_blkaddr(&dn, *blkaddr);
		f2fs_put_dnode(&dn);
static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
			block_t *blkaddr, int *do_replace,
			pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
	struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);

	if (blkaddr[i] == NULL_ADDR && !full) {

	if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
		struct dnode_of_data dn;
		struct node_info ni;

		set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
		ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE);

		ret = f2fs_get_node_info(sbi, dn.nid, &ni, false);
			f2fs_put_dnode(&dn);

		ilen = min((pgoff_t)
			ADDRS_PER_PAGE(dn.node_page, dst_inode) -
					dn.ofs_in_node, len - i);
		do {
			dn.data_blkaddr = f2fs_data_blkaddr(&dn);
			f2fs_truncate_data_blocks_range(&dn, 1);

			if (do_replace[i]) {
				f2fs_i_blocks_write(src_inode,
				f2fs_i_blocks_write(dst_inode,
				f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
					blkaddr[i], ni.version, true, false);

			new_size = (loff_t)(dst + i) << PAGE_SHIFT;
			if (dst_inode->i_size < new_size)
				f2fs_i_size_write(dst_inode, new_size);
		} while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));

		f2fs_put_dnode(&dn);
	} else {
		struct page *psrc, *pdst;

		psrc = f2fs_get_lock_data_page(src_inode,
			return PTR_ERR(psrc);
		pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i,
			f2fs_put_page(psrc, 1);
			return PTR_ERR(pdst);

		memcpy_page(pdst, 0, psrc, 0, PAGE_SIZE);
		set_page_dirty(pdst);
		set_page_private_gcing(pdst);
		f2fs_put_page(pdst, 1);
		f2fs_put_page(psrc, 1);

		ret = f2fs_truncate_hole(src_inode,
				src + i, src + i + 1);
static int __exchange_data_block(struct inode *src_inode,
			struct inode *dst_inode, pgoff_t src, pgoff_t dst,
			pgoff_t len, bool full)
	block_t *src_blkaddr;

	olen = min((pgoff_t)4 * ADDRS_PER_BLOCK(src_inode), len);

	src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
				array_size(olen, sizeof(block_t)),

	do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
				array_size(olen, sizeof(int)),
		kvfree(src_blkaddr);

	ret = __read_out_blkaddrs(src_inode, src_blkaddr,
				do_replace, src, olen);

	ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
				do_replace, src, dst, olen, full);

	kvfree(src_blkaddr);

	__roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen);
	kvfree(src_blkaddr);
static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
	pgoff_t start = offset >> PAGE_SHIFT;
	pgoff_t end = (offset + len) >> PAGE_SHIFT;

	f2fs_balance_fs(sbi, true);

	/* avoid gc operation during block exchange */
	f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	filemap_invalidate_lock(inode->i_mapping);

	f2fs_drop_extent_tree(inode);
	truncate_pagecache(inode, offset);
	ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
	f2fs_unlock_op(sbi);

	filemap_invalidate_unlock(inode->i_mapping);
	f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
	if (offset + len >= i_size_read(inode))

	/* collapse range should be aligned to the f2fs block size */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))

	ret = f2fs_convert_inline_inode(inode);

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);

	ret = f2fs_do_collapse(inode, offset, len);

	/* write out all moved pages, if possible */
	filemap_invalidate_lock(inode->i_mapping);
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	new_size = i_size_read(inode) - len;
	ret = f2fs_truncate_blocks(inode, new_size, true);
	filemap_invalidate_unlock(inode->i_mapping);

	f2fs_i_size_write(inode, new_size);
static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
								pgoff_t end)
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	pgoff_t index = start;
	unsigned int ofs_in_node = dn->ofs_in_node;

	for (; index < end; index++, dn->ofs_in_node++) {
		if (f2fs_data_blkaddr(dn) == NULL_ADDR)

	dn->ofs_in_node = ofs_in_node;
	ret = f2fs_reserve_new_blocks(dn, count);

	dn->ofs_in_node = ofs_in_node;
	for (index = start; index < end; index++, dn->ofs_in_node++) {
		dn->data_blkaddr = f2fs_data_blkaddr(dn);
		/*
		 * f2fs_reserve_new_blocks() will not guarantee entire block
		 * allocation.
		 */
		if (dn->data_blkaddr == NULL_ADDR) {

		if (dn->data_blkaddr == NEW_ADDR)

		if (!f2fs_is_valid_blkaddr(sbi, dn->data_blkaddr,
					DATA_GENERIC_ENHANCE)) {
			ret = -EFSCORRUPTED;
			f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);

		f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
		f2fs_set_data_blkaddr(dn, NEW_ADDR);

	f2fs_update_read_extent_cache_range(dn, start, 0, index - start);
	f2fs_update_age_extent_cache_range(dn, start, index - start);
static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
								int mode)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;

	ret = inode_newsize_ok(inode, (len + offset));

	ret = f2fs_convert_inline_inode(inode);

	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);

		new_size = max_t(loff_t, new_size, offset + len);

			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);

			new_size = max_t(loff_t, new_size,
					(loff_t)pg_start << PAGE_SHIFT);

		for (index = pg_start; index < pg_end;) {
			struct dnode_of_data dn;
			unsigned int end_offset;

			f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			filemap_invalidate_lock(mapping);

			truncate_pagecache_range(inode,
				(loff_t)index << PAGE_SHIFT,
				((loff_t)pg_end << PAGE_SHIFT) - 1);

			set_new_dnode(&dn, inode, NULL, NULL, 0);
			ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
				f2fs_unlock_op(sbi);
				filemap_invalidate_unlock(mapping);
				f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

			end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
			end = min(pg_end, end_offset - dn.ofs_in_node + index);

			ret = f2fs_do_zero_range(&dn, index, end);
			f2fs_put_dnode(&dn);

			f2fs_unlock_op(sbi);
			filemap_invalidate_unlock(mapping);
			f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

			f2fs_balance_fs(sbi, dn.node_changed);

			new_size = max_t(loff_t, new_size,
					(loff_t)index << PAGE_SHIFT);

		ret = fill_zero(inode, pg_end, 0, off_end);

		new_size = max_t(loff_t, new_size, offset + len);

	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);
static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t nr, pg_start, pg_end, delta, idx;

	new_size = i_size_read(inode) + len;
	ret = inode_newsize_ok(inode, new_size);

	if (offset >= i_size_read(inode))

	/* insert range should be aligned to the f2fs block size */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))

	ret = f2fs_convert_inline_inode(inode);

	f2fs_balance_fs(sbi, true);

	filemap_invalidate_lock(mapping);
	ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
	filemap_invalidate_unlock(mapping);

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(mapping, offset, LLONG_MAX);

	pg_start = offset >> PAGE_SHIFT;
	pg_end = (offset + len) >> PAGE_SHIFT;
	delta = pg_end - pg_start;
	idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	/* avoid gc operation during block exchange */
	f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	filemap_invalidate_lock(mapping);
	truncate_pagecache(inode, offset);

	while (!ret && idx > pg_start) {
		nr = idx - pg_start;

		f2fs_drop_extent_tree(inode);

		ret = __exchange_data_block(inode, inode, idx,
					idx + delta, nr, false);
		f2fs_unlock_op(sbi);

	filemap_invalidate_unlock(mapping);
	f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	/* write out all moved pages, if possible */
	filemap_invalidate_lock(mapping);
	ret = filemap_write_and_wait_range(mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);
	filemap_invalidate_unlock(mapping);

	f2fs_i_size_write(inode, new_size);
static int f2fs_expand_inode_data(struct inode *inode, loff_t offset,
					loff_t len, int mode)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
			.m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
			.m_may_create = true };
	struct f2fs_gc_control gc_control = { .victim_segno = NULL_SEGNO,
			.init_gc_type = FG_GC,
			.should_migrate_blocks = false,
			.err_gc_skipped = true,
			.nr_free_secs = 0 };
	pgoff_t pg_start, pg_end;
	block_t expanded = 0;

	err = inode_newsize_ok(inode, (len + offset));

	err = f2fs_convert_inline_inode(inode);

	f2fs_balance_fs(sbi, true);

	pg_start = ((unsigned long long)offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
	off_end = (offset + len) & (PAGE_SIZE - 1);

	map.m_lblk = pg_start;
	map.m_len = pg_end - pg_start;

	if (f2fs_is_pinned_file(inode)) {
		block_t sec_blks = CAP_BLKS_PER_SEC(sbi);
		block_t sec_len = roundup(map.m_len, sec_blks);

		map.m_len = sec_blks;

		if (has_not_enough_free_secs(sbi, 0,
			GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
			f2fs_down_write(&sbi->gc_lock);
			stat_inc_gc_call_count(sbi, FOREGROUND);
			err = f2fs_gc(sbi, &gc_control);
			if (err && err != -ENODATA)

		f2fs_down_write(&sbi->pin_sem);

		err = f2fs_allocate_pinning_section(sbi);
			f2fs_up_write(&sbi->pin_sem);

		map.m_seg_type = CURSEG_COLD_DATA_PINNED;
		err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_PRE_DIO);
		file_dont_truncate(inode);

		f2fs_up_write(&sbi->pin_sem);

		expanded += map.m_len;
		sec_len -= map.m_len;
		map.m_lblk += map.m_len;
		if (!err && sec_len)

		map.m_len = expanded;
	} else {
		err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_PRE_AIO);
		expanded = map.m_len;

	last_off = pg_start + expanded - 1;

	/* update new size to the failed position */
	new_size = (last_off == pg_end) ? offset + len :
				(loff_t)(last_off + 1) << PAGE_SHIFT;
	new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;

	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);
static long f2fs_fallocate(struct file *file, int mode,
				loff_t offset, loff_t len)
	struct inode *inode = file_inode(file);

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
	if (!f2fs_is_compress_backend_ready(inode))

	/* f2fs only supports ->fallocate for regular files */
	if (!S_ISREG(inode->i_mode))

	if (IS_ENCRYPTED(inode) &&
		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))

	/*
	 * Pinned files should not support partial truncation, since their
	 * blocks can be in use by applications.
	 */
	if ((f2fs_compressed_file(inode) || f2fs_is_pinned_file(inode)) &&
		(mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
			FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE)))

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
			FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
			FALLOC_FL_INSERT_RANGE))

	ret = file_modified(file);

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset >= inode->i_size)

		ret = f2fs_punch_hole(inode, offset, len);
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		ret = f2fs_collapse_range(inode, offset, len);
	} else if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = f2fs_zero_range(inode, offset, len, mode);
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		ret = f2fs_insert_range(inode, offset, len);
	} else {
		ret = f2fs_expand_inode_data(inode, offset, len, mode);

		inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
		f2fs_mark_inode_dirty_sync(inode, false);
		f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	inode_unlock(inode);

	trace_f2fs_fallocate(inode, mode, offset, len, ret);
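/*
 * Illustrative sketch: the mode dispatch above maps one-to-one onto
 * fallocate(2) flags, e.g.:
 *
 *	fallocate(fd, 0, 0, 1 << 20);			// preallocate 1MiB
 *	fallocate(fd, FALLOC_FL_ZERO_RANGE, off, len);	// zero, keep blocks
 *	fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, off, len); // remove range
 */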
static int f2fs_release_file(struct inode *inode, struct file *filp)
	/*
	 * f2fs_release_file() is called on every close. So we should not drop
	 * any in-memory pages on a close issued by another process.
	 */
	if (!(filp->f_mode & FMODE_WRITE) ||
			atomic_read(&inode->i_writecount) != 1)

	f2fs_abort_atomic_write(inode, true);
	inode_unlock(inode);
static int f2fs_file_flush(struct file *file, fl_owner_t id)
	struct inode *inode = file_inode(file);

	/*
	 * If the process doing a transaction crashes, we should do the
	 * roll-back. Otherwise, other readers/writers can see a corrupted
	 * database until all the writers close their files. Since this should
	 * be done before dropping the file lock, it needs to happen in
	 * ->flush.
	 */
	if (F2FS_I(inode)->atomic_write_task == current &&
				(current->flags & PF_EXITING)) {
		f2fs_abort_atomic_write(inode, true);
		inode_unlock(inode);
static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
	struct f2fs_inode_info *fi = F2FS_I(inode);
	u32 masked_flags = fi->i_flags & mask;

	/* mask can be shrunk by flags_valid selector */

	/* Is it a quota file? Do not allow users to mess with it */
	if (IS_NOQUOTA(inode))

	if ((iflags ^ masked_flags) & F2FS_CASEFOLD_FL) {
		if (!f2fs_sb_has_casefold(F2FS_I_SB(inode)))
		if (!f2fs_empty_dir(inode))

	if (iflags & (F2FS_COMPR_FL | F2FS_NOCOMP_FL)) {
		if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
		if ((iflags & F2FS_COMPR_FL) && (iflags & F2FS_NOCOMP_FL))

	if ((iflags ^ masked_flags) & F2FS_COMPR_FL) {
		if (masked_flags & F2FS_COMPR_FL) {
			if (!f2fs_disable_compressed_file(inode))
		} else {
			/* try to convert inline_data to support compression */
			int err = f2fs_convert_inline_inode(inode);

			f2fs_down_write(&F2FS_I(inode)->i_sem);
			if (!f2fs_may_compress(inode) ||
					(S_ISREG(inode->i_mode) &&
					F2FS_HAS_BLOCKS(inode))) {
				f2fs_up_write(&F2FS_I(inode)->i_sem);
			err = set_compress_context(inode);
			f2fs_up_write(&F2FS_I(inode)->i_sem);

	fi->i_flags = iflags | (fi->i_flags & ~mask);
	f2fs_bug_on(F2FS_I_SB(inode), (fi->i_flags & F2FS_COMPR_FL) &&
					(fi->i_flags & F2FS_NOCOMP_FL));

	if (fi->i_flags & F2FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);
	else
		clear_inode_flag(inode, FI_PROJ_INHERIT);

	inode_set_ctime_current(inode);
	f2fs_set_inode_flags(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
/* FS_IOC_[GS]ETFLAGS and FS_IOC_FS[GS]ETXATTR support */

/*
 * To make a new on-disk f2fs i_flag gettable via FS_IOC_GETFLAGS, add an entry
 * for it to f2fs_fsflags_map[], and add its FS_*_FL equivalent to
 * F2FS_GETTABLE_FS_FL. To also make it settable via FS_IOC_SETFLAGS, also add
 * its FS_*_FL equivalent to F2FS_SETTABLE_FS_FL.
 *
 * Translating flags to fsx_flags value used by FS_IOC_FSGETXATTR and
 * FS_IOC_FSSETXATTR is done by the VFS.
 */

static const struct {
} f2fs_fsflags_map[] = {
	{ F2FS_COMPR_FL,	FS_COMPR_FL },
	{ F2FS_SYNC_FL,		FS_SYNC_FL },
	{ F2FS_IMMUTABLE_FL,	FS_IMMUTABLE_FL },
	{ F2FS_APPEND_FL,	FS_APPEND_FL },
	{ F2FS_NODUMP_FL,	FS_NODUMP_FL },
	{ F2FS_NOATIME_FL,	FS_NOATIME_FL },
	{ F2FS_NOCOMP_FL,	FS_NOCOMP_FL },
	{ F2FS_INDEX_FL,	FS_INDEX_FL },
	{ F2FS_DIRSYNC_FL,	FS_DIRSYNC_FL },
	{ F2FS_PROJINHERIT_FL,	FS_PROJINHERIT_FL },
	{ F2FS_CASEFOLD_FL,	FS_CASEFOLD_FL },

#define F2FS_GETTABLE_FS_FL (		\
		FS_PROJINHERIT_FL |	\
		FS_INLINE_DATA_FL |	\

#define F2FS_SETTABLE_FS_FL (		\
		FS_PROJINHERIT_FL |	\
/* Convert f2fs on-disk i_flags to FS_IOC_{GET,SET}FLAGS flags */
static inline u32 f2fs_iflags_to_fsflags(u32 iflags)
	for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
		if (iflags & f2fs_fsflags_map[i].iflag)
			fsflags |= f2fs_fsflags_map[i].fsflag;

/* Convert FS_IOC_{GET,SET}FLAGS flags to f2fs on-disk i_flags */
static inline u32 f2fs_fsflags_to_iflags(u32 fsflags)
	for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
		if (fsflags & f2fs_fsflags_map[i].fsflag)
			iflags |= f2fs_fsflags_map[i].iflag;
static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
	struct inode *inode = file_inode(filp);

	return put_user(inode->i_generation, (int __user *)arg);
static int f2fs_ioc_start_atomic_write(struct file *filp, bool truncate)
	struct inode *inode = file_inode(filp);
	struct mnt_idmap *idmap = file_mnt_idmap(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct inode *pinode;

	if (!inode_owner_or_capable(idmap, inode))

	if (!S_ISREG(inode->i_mode))

	if (filp->f_flags & O_DIRECT)

	ret = mnt_want_write_file(filp);

	if (!f2fs_disable_compressed_file(inode)) {

	if (f2fs_is_atomic_file(inode))

	ret = f2fs_convert_inline_inode(inode);

	f2fs_down_write(&fi->i_gc_rwsem[WRITE]);

	/*
	 * Should wait for end_io to count F2FS_WB_CP_DATA correctly by
	 * f2fs_is_atomic_file().
	 */
	if (get_dirty_pages(inode))
		f2fs_warn(sbi, "Unexpected flush for atomic writes: ino=%lu, npages=%u",
			  inode->i_ino, get_dirty_pages(inode));
	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
		f2fs_up_write(&fi->i_gc_rwsem[WRITE]);

	/* Check if the inode already has a COW inode */
	if (fi->cow_inode == NULL) {
		/* Create a COW inode for atomic write */
		pinode = f2fs_iget(inode->i_sb, fi->i_pino);
		if (IS_ERR(pinode)) {
			f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
			ret = PTR_ERR(pinode);

		ret = f2fs_get_tmpfile(idmap, pinode, &fi->cow_inode);
			f2fs_up_write(&fi->i_gc_rwsem[WRITE]);

		set_inode_flag(fi->cow_inode, FI_COW_FILE);
		clear_inode_flag(fi->cow_inode, FI_INLINE_DATA);
	} else {
		/* Reuse the already created COW inode */
		ret = f2fs_do_truncate_blocks(fi->cow_inode, 0, true);
			f2fs_up_write(&fi->i_gc_rwsem[WRITE]);

	f2fs_write_inode(inode, NULL);

	stat_inc_atomic_inode(inode);

	set_inode_flag(inode, FI_ATOMIC_FILE);

	isize = i_size_read(inode);
	fi->original_i_size = isize;

	set_inode_flag(inode, FI_ATOMIC_REPLACE);
	truncate_inode_pages_final(inode->i_mapping);
	f2fs_i_size_write(inode, 0);

	f2fs_i_size_write(fi->cow_inode, isize);

	f2fs_up_write(&fi->i_gc_rwsem[WRITE]);

	f2fs_update_time(sbi, REQ_TIME);
	fi->atomic_write_task = current;
	stat_update_max_atomic_write(inode);
	fi->atomic_write_cnt = 0;

	inode_unlock(inode);
	mnt_drop_write_file(filp);
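/*
 * Illustrative sketch: the atomic-write ioctls are typically used by
 * databases (e.g. SQLite on Android) in place of their own journaling:
 *
 *	ioctl(fd, F2FS_IOC_START_ATOMIC_WRITE);
 *	pwrite(fd, buf, len, off);	// buffered writes land in the COW inode
 *	ioctl(fd, F2FS_IOC_COMMIT_ATOMIC_WRITE);	// all-or-nothing
 *	// on error: ioctl(fd, F2FS_IOC_ABORT_ATOMIC_WRITE);
 */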
static int f2fs_ioc_commit_atomic_write(struct file *filp)
	struct inode *inode = file_inode(filp);
	struct mnt_idmap *idmap = file_mnt_idmap(filp);

	if (!inode_owner_or_capable(idmap, inode))

	ret = mnt_want_write_file(filp);

	f2fs_balance_fs(F2FS_I_SB(inode), true);

	if (f2fs_is_atomic_file(inode)) {
		ret = f2fs_commit_atomic_write(inode);
		if (!ret)
			ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);

		f2fs_abort_atomic_write(inode, ret);
	} else {
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
	}

	inode_unlock(inode);
	mnt_drop_write_file(filp);
static int f2fs_ioc_abort_atomic_write(struct file *filp)
	struct inode *inode = file_inode(filp);
	struct mnt_idmap *idmap = file_mnt_idmap(filp);

	if (!inode_owner_or_capable(idmap, inode))

	ret = mnt_want_write_file(filp);

	f2fs_abort_atomic_write(inode, true);

	inode_unlock(inode);

	mnt_drop_write_file(filp);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;

	if (!capable(CAP_SYS_ADMIN))

	if (get_user(in, (__u32 __user *)arg))

	if (in != F2FS_GOING_DOWN_FULLSYNC) {
		ret = mnt_want_write_file(filp);
		if (ret == -EROFS) {
			f2fs_stop_checkpoint(sbi, false,
					STOP_CP_REASON_SHUTDOWN);
			trace_f2fs_shutdown(sbi, in, ret);

	switch (in) {
	case F2FS_GOING_DOWN_FULLSYNC:
		ret = bdev_freeze(sb->s_bdev);
		f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
		bdev_thaw(sb->s_bdev);
	case F2FS_GOING_DOWN_METASYNC:
		/* do checkpoint only */
		ret = f2fs_sync_fs(sb, 1);
		f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
	case F2FS_GOING_DOWN_NOSYNC:
		f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
	case F2FS_GOING_DOWN_METAFLUSH:
		f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
		f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
	case F2FS_GOING_DOWN_NEED_FSCK:
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
		set_sbi_flag(sbi, SBI_IS_DIRTY);
		/* do checkpoint only */
		ret = f2fs_sync_fs(sb, 1);

	f2fs_stop_gc_thread(sbi);
	f2fs_stop_discard_thread(sbi);

	f2fs_drop_discard_cmd(sbi);
	clear_opt(sbi, DISCARD);

	f2fs_update_time(sbi, REQ_TIME);

	if (in != F2FS_GOING_DOWN_FULLSYNC)
		mnt_drop_write_file(filp);

	trace_f2fs_shutdown(sbi, in, ret);
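/*
 * Illustrative sketch: the switch above handles the levels accepted by the
 * shutdown ioctl, e.g. from a test harness:
 *
 *	__u32 how = F2FS_GOING_DOWN_METASYNC;	// checkpoint, then stop
 *	ioctl(fd, F2FS_IOC_SHUTDOWN, &how);
 */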
static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	struct fstrim_range range;

	if (!capable(CAP_SYS_ADMIN))

	if (!f2fs_hw_support_discard(F2FS_SB(sb)))

	if (copy_from_user(&range, (struct fstrim_range __user *)arg,

	ret = mnt_want_write_file(filp);

	range.minlen = max((unsigned int)range.minlen,
			   bdev_discard_granularity(sb->s_bdev));
	ret = f2fs_trim_fs(F2FS_SB(sb), &range);
	mnt_drop_write_file(filp);

	if (copy_to_user((struct fstrim_range __user *)arg, &range,

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
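/*
 * Illustrative sketch: this is the generic FITRIM interface, the same one
 * fstrim(8) uses:
 *
 *	struct fstrim_range range = { .start = 0, .len = ULLONG_MAX,
 *				      .minlen = 0 };
 *	ioctl(fd, FITRIM, &range);	// range.len returns bytes trimmed
 */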
static bool uuid_is_nonzero(__u8 u[16])
	for (i = 0; i < 16; i++)

static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
	struct inode *inode = file_inode(filp);

	if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode)))

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);

static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
	return fscrypt_ioctl_get_policy(filp, (void __user *)arg);

static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	u8 encrypt_pw_salt[16];

	if (!f2fs_sb_has_encrypt(sbi))

	err = mnt_want_write_file(filp);

	f2fs_down_write(&sbi->sb_lock);

	if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))

	/* update superblock with uuid */
	generate_random_uuid(sbi->raw_super->encrypt_pw_salt);

	err = f2fs_commit_super(sbi, false);
		memset(sbi->raw_super->encrypt_pw_salt, 0, 16);

	memcpy(encrypt_pw_salt, sbi->raw_super->encrypt_pw_salt, 16);

	f2fs_up_write(&sbi->sb_lock);
	mnt_drop_write_file(filp);

	if (!err && copy_to_user((__u8 __user *)arg, encrypt_pw_salt, 16))

static int f2fs_ioc_get_encryption_policy_ex(struct file *filp,
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
	return fscrypt_ioctl_get_policy_ex(filp, (void __user *)arg);

static int f2fs_ioc_add_encryption_key(struct file *filp, unsigned long arg)
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
	return fscrypt_ioctl_add_key(filp, (void __user *)arg);

static int f2fs_ioc_remove_encryption_key(struct file *filp, unsigned long arg)
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
	return fscrypt_ioctl_remove_key(filp, (void __user *)arg);

static int f2fs_ioc_remove_encryption_key_all_users(struct file *filp,
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
	return fscrypt_ioctl_remove_key_all_users(filp, (void __user *)arg);

static int f2fs_ioc_get_encryption_key_status(struct file *filp,
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
	return fscrypt_ioctl_get_key_status(filp, (void __user *)arg);

static int f2fs_ioc_get_encryption_nonce(struct file *filp, unsigned long arg)
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
	return fscrypt_ioctl_get_nonce(filp, (void __user *)arg);
static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_gc_control gc_control = { .victim_segno = NULL_SEGNO,
			.should_migrate_blocks = false,
			.nr_free_secs = 0 };

	if (!capable(CAP_SYS_ADMIN))

	if (get_user(sync, (__u32 __user *)arg))

	if (f2fs_readonly(sbi->sb))

	ret = mnt_want_write_file(filp);

		if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
	} else {
		f2fs_down_write(&sbi->gc_lock);
	}

	gc_control.init_gc_type = sync ? FG_GC : BG_GC;
	gc_control.err_gc_skipped = sync;
	stat_inc_gc_call_count(sbi, FOREGROUND);
	ret = f2fs_gc(sbi, &gc_control);

	mnt_drop_write_file(filp);
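/*
 * Illustrative sketch: userspace (e.g. a tuning daemon) can trigger GC with:
 *
 *	__u32 sync = 1;		// 1: foreground GC, 0: background GC
 *	ioctl(fd, F2FS_IOC_GARBAGE_COLLECT, &sync);
 */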
static int __f2fs_ioc_gc_range(struct file *filp, struct f2fs_gc_range *range)
	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
	struct f2fs_gc_control gc_control = {
			.init_gc_type = range->sync ? FG_GC : BG_GC,
			.should_migrate_blocks = false,
			.err_gc_skipped = range->sync,
			.nr_free_secs = 0 };

	if (!capable(CAP_SYS_ADMIN))
	if (f2fs_readonly(sbi->sb))

	end = range->start + range->len;
	if (end < range->start || range->start < MAIN_BLKADDR(sbi) ||
					end >= MAX_BLKADDR(sbi))

	ret = mnt_want_write_file(filp);

		if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
	} else {
		f2fs_down_write(&sbi->gc_lock);
	}

	gc_control.victim_segno = GET_SEGNO(sbi, range->start);
	stat_inc_gc_call_count(sbi, FOREGROUND);
	ret = f2fs_gc(sbi, &gc_control);

	range->start += CAP_BLKS_PER_SEC(sbi);
	if (range->start <= end)

	mnt_drop_write_file(filp);

static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
	struct f2fs_gc_range range;

	if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,

	return __f2fs_ioc_gc_range(filp, &range);
static int f2fs_ioc_write_checkpoint(struct file *filp)
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (!capable(CAP_SYS_ADMIN))

	if (f2fs_readonly(sbi->sb))

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		f2fs_info(sbi, "Skipping Checkpoint. Checkpoints currently disabled.");

	ret = mnt_want_write_file(filp);

	ret = f2fs_sync_fs(sbi->sb, 1);

	mnt_drop_write_file(filp);
2597 static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
2599 struct f2fs_defragment *range)
2601 struct inode *inode = file_inode(filp);
2602 struct f2fs_map_blocks map = { .m_next_extent = NULL,
2603 .m_seg_type = NO_CHECK_TYPE,
2604 .m_may_create = false };
2605 struct extent_info ei = {};
2606 pgoff_t pg_start, pg_end, next_pgofs;
2607 unsigned int total = 0, sec_num;
2608 block_t blk_end = 0;
2609 bool fragmented = false;
2612 pg_start = range->start >> PAGE_SHIFT;
2613 pg_end = (range->start + range->len) >> PAGE_SHIFT;
2615 f2fs_balance_fs(sbi, true);
2619 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
2624 /* if in-place-update policy is enabled, don't waste time here */
2625 set_inode_flag(inode, FI_OPU_WRITE);
2626 if (f2fs_should_update_inplace(inode, NULL)) {
2631 /* writeback all dirty pages in the range */
2632 err = filemap_write_and_wait_range(inode->i_mapping, range->start,
2633 range->start + range->len - 1);
2638 * lookup mapping info in extent cache, skip defragmenting if physical
2639 * block addresses are continuous.
2641 if (f2fs_lookup_read_extent_cache(inode, pg_start, &ei)) {
2642 if (ei.fofs + ei.len >= pg_end)
2646 map.m_lblk = pg_start;
2647 map.m_next_pgofs = &next_pgofs;
2650 * lookup mapping info in dnode page cache, skip defragmenting if all
2651 * physical block addresses are continuous even if there are hole(s)
2652 * in logical blocks.
2654 while (map.m_lblk < pg_end) {
2655 map.m_len = pg_end - map.m_lblk;
2656 err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DEFAULT);
2660 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
2661 map.m_lblk = next_pgofs;
2665 if (blk_end && blk_end != map.m_pblk)
2668		/* record total count of blocks that we're going to move */
2671 blk_end = map.m_pblk + map.m_len;
2673 map.m_lblk += map.m_len;
2681 sec_num = DIV_ROUND_UP(total, CAP_BLKS_PER_SEC(sbi));
2684	 * make sure there are enough free sections for LFS allocation; this can
2685	 * avoid defragmenting in SSR mode when free sections are allocated intensively
2688 if (has_not_enough_free_secs(sbi, 0, sec_num)) {
2693 map.m_lblk = pg_start;
2694 map.m_len = pg_end - pg_start;
2697 while (map.m_lblk < pg_end) {
2702 map.m_len = pg_end - map.m_lblk;
2703 err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DEFAULT);
2707 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
2708 map.m_lblk = next_pgofs;
2712 set_inode_flag(inode, FI_SKIP_WRITES);
2715 while (idx < map.m_lblk + map.m_len &&
2716 cnt < BLKS_PER_SEG(sbi)) {
2719 page = f2fs_get_lock_data_page(inode, idx, true);
2721 err = PTR_ERR(page);
2725 set_page_dirty(page);
2726 set_page_private_gcing(page);
2727 f2fs_put_page(page, 1);
2736 if (map.m_lblk < pg_end && cnt < BLKS_PER_SEG(sbi))
2739 clear_inode_flag(inode, FI_SKIP_WRITES);
2741 err = filemap_fdatawrite(inode->i_mapping);
2746 clear_inode_flag(inode, FI_SKIP_WRITES);
2748 clear_inode_flag(inode, FI_OPU_WRITE);
2750 inode_unlock(inode);
2752 range->len = (u64)total << PAGE_SHIFT;
2756 static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
2758 struct inode *inode = file_inode(filp);
2759 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2760 struct f2fs_defragment range;
2763 if (!capable(CAP_SYS_ADMIN))
2766 if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
2769 if (f2fs_readonly(sbi->sb))
2772 if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
2776 /* verify alignment of offset & size */
2777 if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
2780 if (unlikely((range.start + range.len) >> PAGE_SHIFT >
2781 max_file_blocks(inode)))
2784 err = mnt_want_write_file(filp);
2788 err = f2fs_defragment_range(sbi, filp, &range);
2789 mnt_drop_write_file(filp);
2791 f2fs_update_time(sbi, REQ_TIME);
2795 if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
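/*
 * A hedged user-space sketch of F2FS_IOC_DEFRAGMENT: start/len are byte
 * offsets and must be F2FS_BLKSIZE-aligned (checked above); on success
 * range.len is rewritten with the number of bytes actually moved:
 *
 *	struct f2fs_defragment range = { .start = 0, .len = 1 << 20 };
 *	if (!ioctl(fd, F2FS_IOC_DEFRAGMENT, &range))
 *		printf("moved %llu bytes\n", (unsigned long long)range.len);
 */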
2802 static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
2803 struct file *file_out, loff_t pos_out, size_t len)
2805 struct inode *src = file_inode(file_in);
2806 struct inode *dst = file_inode(file_out);
2807 struct f2fs_sb_info *sbi = F2FS_I_SB(src);
2808 size_t olen = len, dst_max_i_size = 0;
2812 if (file_in->f_path.mnt != file_out->f_path.mnt ||
2813 src->i_sb != dst->i_sb)
2816 if (unlikely(f2fs_readonly(src->i_sb)))
2819 if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
2822 if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst))
2825 if (pos_out < 0 || pos_in < 0)
2829 if (pos_in == pos_out)
2831 if (pos_out > pos_in && pos_out < pos_in + len)
2838 if (!inode_trylock(dst))
2842 if (f2fs_compressed_file(src) || f2fs_compressed_file(dst)) {
2848 if (pos_in + len > src->i_size || pos_in + len < pos_in)
2851 olen = len = src->i_size - pos_in;
2852 if (pos_in + len == src->i_size)
2853 len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
2859 dst_osize = dst->i_size;
2860 if (pos_out + olen > dst->i_size)
2861 dst_max_i_size = pos_out + olen;
2863 /* verify the end result is block aligned */
2864 if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
2865 !IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
2866 !IS_ALIGNED(pos_out, F2FS_BLKSIZE))
2869 ret = f2fs_convert_inline_inode(src);
2873 ret = f2fs_convert_inline_inode(dst);
2877 /* write out all dirty pages from offset */
2878 ret = filemap_write_and_wait_range(src->i_mapping,
2879 pos_in, pos_in + len);
2883 ret = filemap_write_and_wait_range(dst->i_mapping,
2884 pos_out, pos_out + len);
2888 f2fs_balance_fs(sbi, true);
2890 f2fs_down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2893 if (!f2fs_down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
2898 ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
2899 pos_out >> F2FS_BLKSIZE_BITS,
2900 len >> F2FS_BLKSIZE_BITS, false);
2904 f2fs_i_size_write(dst, dst_max_i_size);
2905 else if (dst_osize != dst->i_size)
2906 f2fs_i_size_write(dst, dst_osize);
2908 f2fs_unlock_op(sbi);
2911 f2fs_up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
2913 f2fs_up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2917 inode_set_mtime_to_ts(src, inode_set_ctime_current(src));
2918 f2fs_mark_inode_dirty_sync(src, false);
2920 inode_set_mtime_to_ts(dst, inode_set_ctime_current(dst));
2921 f2fs_mark_inode_dirty_sync(dst, false);
2923 f2fs_update_time(sbi, REQ_TIME);
2933 static int __f2fs_ioc_move_range(struct file *filp,
2934 struct f2fs_move_range *range)
2939 if (!(filp->f_mode & FMODE_READ) ||
2940 !(filp->f_mode & FMODE_WRITE))
2943 dst = fdget(range->dst_fd);
2947 if (!(dst.file->f_mode & FMODE_WRITE)) {
2952 err = mnt_want_write_file(filp);
2956 err = f2fs_move_file_range(filp, range->pos_in, dst.file,
2957 range->pos_out, range->len);
2959 mnt_drop_write_file(filp);
2965 static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
2967 struct f2fs_move_range range;
2969 if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
2972 return __f2fs_ioc_move_range(filp, &range);
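/*
 * A hedged sketch of F2FS_IOC_MOVE_RANGE, which copies blocks from the fd
 * it is issued on into dst_fd; the ioctl fd must be open read/write and
 * dst_fd writable (checked in __f2fs_ioc_move_range above), and
 * pos_in/pos_out/len must work out block-aligned:
 *
 *	struct f2fs_move_range mr = {
 *		.dst_fd = dfd, .pos_in = 0, .pos_out = 0, .len = 1 << 20,
 *	};
 *	ioctl(sfd, F2FS_IOC_MOVE_RANGE, &mr);
 */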
2975 static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
2977 struct inode *inode = file_inode(filp);
2978 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2979 struct sit_info *sm = SIT_I(sbi);
2980 unsigned int start_segno = 0, end_segno = 0;
2981 unsigned int dev_start_segno = 0, dev_end_segno = 0;
2982 struct f2fs_flush_device range;
2983 struct f2fs_gc_control gc_control = {
2984 .init_gc_type = FG_GC,
2985 .should_migrate_blocks = true,
2986 .err_gc_skipped = true,
2987 .nr_free_secs = 0 };
2990 if (!capable(CAP_SYS_ADMIN))
2993 if (f2fs_readonly(sbi->sb))
2996 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2999 if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
3003 if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num ||
3004 __is_large_section(sbi)) {
3005 f2fs_warn(sbi, "Can't flush %u in %d for SEGS_PER_SEC %u != 1",
3006 range.dev_num, sbi->s_ndevs, SEGS_PER_SEC(sbi));
3010 ret = mnt_want_write_file(filp);
3014 if (range.dev_num != 0)
3015 dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
3016 dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);
3018 start_segno = sm->last_victim[FLUSH_DEVICE];
3019 if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
3020 start_segno = dev_start_segno;
3021 end_segno = min(start_segno + range.segments, dev_end_segno);
3023 while (start_segno < end_segno) {
3024 if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
3028 sm->last_victim[GC_CB] = end_segno + 1;
3029 sm->last_victim[GC_GREEDY] = end_segno + 1;
3030 sm->last_victim[ALLOC_NEXT] = end_segno + 1;
3032 gc_control.victim_segno = start_segno;
3033 stat_inc_gc_call_count(sbi, FOREGROUND);
3034 ret = f2fs_gc(sbi, &gc_control);
3042 mnt_drop_write_file(filp);
3046 static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
3048 struct inode *inode = file_inode(filp);
3049 u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);
3051	/* Always report atomic-write support, which SQLite on Android relies on. */
3052 sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;
3054 return put_user(sb_feature, (u32 __user *)arg);
3058 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
3060 struct dquot *transfer_to[MAXQUOTAS] = {};
3061 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3062 struct super_block *sb = sbi->sb;
3065 transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
3066 if (IS_ERR(transfer_to[PRJQUOTA]))
3067 return PTR_ERR(transfer_to[PRJQUOTA]);
3069 err = __dquot_transfer(inode, transfer_to);
3071 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
3072 dqput(transfer_to[PRJQUOTA]);
3076 static int f2fs_ioc_setproject(struct inode *inode, __u32 projid)
3078 struct f2fs_inode_info *fi = F2FS_I(inode);
3079 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3080 struct f2fs_inode *ri = NULL;
3084 if (!f2fs_sb_has_project_quota(sbi)) {
3085 if (projid != F2FS_DEF_PROJID)
3091 if (!f2fs_has_extra_attr(inode))
3094 kprojid = make_kprojid(&init_user_ns, (projid_t)projid);
3096 if (projid_eq(kprojid, fi->i_projid))
3100	/* Is it a quota file? Do not allow the user to mess with it */
3101 if (IS_NOQUOTA(inode))
3104 if (!F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid))
3107 err = f2fs_dquot_initialize(inode);
3112 err = f2fs_transfer_project_quota(inode, kprojid);
3116 fi->i_projid = kprojid;
3117 inode_set_ctime_current(inode);
3118 f2fs_mark_inode_dirty_sync(inode, true);
3120 f2fs_unlock_op(sbi);
3124 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
3129 static int f2fs_ioc_setproject(struct inode *inode, __u32 projid)
3131 if (projid != F2FS_DEF_PROJID)
3137 int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
3139 struct inode *inode = d_inode(dentry);
3140 struct f2fs_inode_info *fi = F2FS_I(inode);
3141 u32 fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
3143 if (IS_ENCRYPTED(inode))
3144 fsflags |= FS_ENCRYPT_FL;
3145 if (IS_VERITY(inode))
3146 fsflags |= FS_VERITY_FL;
3147 if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
3148 fsflags |= FS_INLINE_DATA_FL;
3149 if (is_inode_flag_set(inode, FI_PIN_FILE))
3150 fsflags |= FS_NOCOW_FL;
3152 fileattr_fill_flags(fa, fsflags & F2FS_GETTABLE_FS_FL);
3154 if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
3155 fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid);
3160 int f2fs_fileattr_set(struct mnt_idmap *idmap,
3161 struct dentry *dentry, struct fileattr *fa)
3163 struct inode *inode = d_inode(dentry);
3164 u32 fsflags = fa->flags, mask = F2FS_SETTABLE_FS_FL;
3168 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
3170 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
3172 if (fsflags & ~F2FS_GETTABLE_FS_FL)
3174 fsflags &= F2FS_SETTABLE_FS_FL;
3175 if (!fa->flags_valid)
3176 mask &= FS_COMMON_FL;
3178 iflags = f2fs_fsflags_to_iflags(fsflags);
3179 if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
3182 err = f2fs_setflags_common(inode, iflags, f2fs_fsflags_to_iflags(mask));
3184 err = f2fs_ioc_setproject(inode, fa->fsx_projid);
3189 int f2fs_pin_file_control(struct inode *inode, bool inc)
3191 struct f2fs_inode_info *fi = F2FS_I(inode);
3192 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3194	/* Use i_gc_failures as a risk signal for a normal file. */
3196 f2fs_i_gc_failures_write(inode,
3197 fi->i_gc_failures[GC_FAILURE_PIN] + 1);
3199 if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
3200 f2fs_warn(sbi, "%s: Enable GC = ino %lx after %x GC trials",
3201 __func__, inode->i_ino,
3202 fi->i_gc_failures[GC_FAILURE_PIN]);
3203 clear_inode_flag(inode, FI_PIN_FILE);
3209 static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
3211 struct inode *inode = file_inode(filp);
3212 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3216 if (get_user(pin, (__u32 __user *)arg))
3219 if (!S_ISREG(inode->i_mode))
3222 if (f2fs_readonly(sbi->sb))
3225 ret = mnt_want_write_file(filp);
3232 clear_inode_flag(inode, FI_PIN_FILE);
3233 f2fs_i_gc_failures_write(inode, 0);
3235 } else if (f2fs_is_pinned_file(inode)) {
3239 if (f2fs_sb_has_blkzoned(sbi) && F2FS_HAS_BLOCKS(inode)) {
3244		/* Let's allow file pinning on a zoned device. */
3245 if (!f2fs_sb_has_blkzoned(sbi) &&
3246 f2fs_should_update_outplace(inode, NULL)) {
3251 if (f2fs_pin_file_control(inode, false)) {
3256 ret = f2fs_convert_inline_inode(inode);
3260 if (!f2fs_disable_compressed_file(inode)) {
3265 set_inode_flag(inode, FI_PIN_FILE);
3266 ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3268 f2fs_update_time(sbi, REQ_TIME);
3270 inode_unlock(inode);
3271 mnt_drop_write_file(filp);
3275 static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
3277 struct inode *inode = file_inode(filp);
3280 if (is_inode_flag_set(inode, FI_PIN_FILE))
3281 pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3282 return put_user(pin, (u32 __user *)arg);
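/*
 * A hedged sketch of the pin-file pair: F2FS_IOC_SET_PIN_FILE takes a
 * __u32 (non-zero pins, zero unpins), and F2FS_IOC_GET_PIN_FILE reports
 * the pinned file's GC-failure count as read back above:
 *
 *	__u32 pin = 1, trials = 0;
 *	ioctl(fd, F2FS_IOC_SET_PIN_FILE, &pin);
 *	ioctl(fd, F2FS_IOC_GET_PIN_FILE, &trials);
 */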
3285 int f2fs_precache_extents(struct inode *inode)
3287 struct f2fs_inode_info *fi = F2FS_I(inode);
3288 struct f2fs_map_blocks map;
3289 pgoff_t m_next_extent;
3293 if (is_inode_flag_set(inode, FI_NO_EXTENT))
3298 map.m_next_pgofs = NULL;
3299 map.m_next_extent = &m_next_extent;
3300 map.m_seg_type = NO_CHECK_TYPE;
3301 map.m_may_create = false;
3302 end = F2FS_BLK_ALIGN(i_size_read(inode));
3304 while (map.m_lblk < end) {
3305 map.m_len = end - map.m_lblk;
3307 f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
3308 err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_PRECACHE);
3309 f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
3310 if (err || !map.m_len)
3313 map.m_lblk = m_next_extent;
3319 static int f2fs_ioc_precache_extents(struct file *filp)
3321 return f2fs_precache_extents(file_inode(filp));
3324 static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
3326 struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
3329 if (!capable(CAP_SYS_ADMIN))
3332 if (f2fs_readonly(sbi->sb))
3335 if (copy_from_user(&block_count, (void __user *)arg,
3336 sizeof(block_count)))
3339 return f2fs_resize_fs(filp, block_count);
3342 static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg)
3344 struct inode *inode = file_inode(filp);
3346 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3348 if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) {
3349 f2fs_warn(F2FS_I_SB(inode),
3350 "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem",
3355 return fsverity_ioctl_enable(filp, (const void __user *)arg);
3358 static int f2fs_ioc_measure_verity(struct file *filp, unsigned long arg)
3360 if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
3363 return fsverity_ioctl_measure(filp, (void __user *)arg);
3366 static int f2fs_ioc_read_verity_metadata(struct file *filp, unsigned long arg)
3368 if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
3371 return fsverity_ioctl_read_metadata(filp, (const void __user *)arg);
3374 static int f2fs_ioc_getfslabel(struct file *filp, unsigned long arg)
3376 struct inode *inode = file_inode(filp);
3377 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3382 vbuf = f2fs_kzalloc(sbi, MAX_VOLUME_NAME, GFP_KERNEL);
3386 f2fs_down_read(&sbi->sb_lock);
3387 count = utf16s_to_utf8s(sbi->raw_super->volume_name,
3388 ARRAY_SIZE(sbi->raw_super->volume_name),
3389 UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME);
3390 f2fs_up_read(&sbi->sb_lock);
3392 if (copy_to_user((char __user *)arg, vbuf,
3393 min(FSLABEL_MAX, count)))
3400 static int f2fs_ioc_setfslabel(struct file *filp, unsigned long arg)
3402 struct inode *inode = file_inode(filp);
3403 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3407 if (!capable(CAP_SYS_ADMIN))
3410 vbuf = strndup_user((const char __user *)arg, FSLABEL_MAX);
3412 return PTR_ERR(vbuf);
3414 err = mnt_want_write_file(filp);
3418 f2fs_down_write(&sbi->sb_lock);
3420 memset(sbi->raw_super->volume_name, 0,
3421 sizeof(sbi->raw_super->volume_name));
3422 utf8s_to_utf16s(vbuf, strlen(vbuf), UTF16_LITTLE_ENDIAN,
3423 sbi->raw_super->volume_name,
3424 ARRAY_SIZE(sbi->raw_super->volume_name));
3426 err = f2fs_commit_super(sbi, false);
3428 f2fs_up_write(&sbi->sb_lock);
3430 mnt_drop_write_file(filp);
3436 static int f2fs_get_compress_blocks(struct inode *inode, __u64 *blocks)
3438 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3441 if (!f2fs_compressed_file(inode))
3444 *blocks = atomic_read(&F2FS_I(inode)->i_compr_blocks);
3449 static int f2fs_ioc_get_compress_blocks(struct file *filp, unsigned long arg)
3451 struct inode *inode = file_inode(filp);
3455 ret = f2fs_get_compress_blocks(inode, &blocks);
3459 return put_user(blocks, (u64 __user *)arg);
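/*
 * A hedged sketch of querying the compression saving from user space; the
 * value written by put_user() above is the inode's i_compr_blocks counter:
 *
 *	unsigned long long blocks;
 *	if (!ioctl(fd, F2FS_IOC_GET_COMPRESS_BLOCKS, &blocks))
 *		printf("%llu compressed blocks\n", blocks);
 */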
3462 static int release_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
3464 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3465 unsigned int released_blocks = 0;
3466 int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3470 for (i = 0; i < count; i++) {
3471 blkaddr = data_blkaddr(dn->inode, dn->node_page,
3472 dn->ofs_in_node + i);
3474 if (!__is_valid_data_blkaddr(blkaddr))
3476 if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
3477 DATA_GENERIC_ENHANCE))) {
3478 f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
3479 return -EFSCORRUPTED;
3484 int compr_blocks = 0;
3486 for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
3487 blkaddr = f2fs_data_blkaddr(dn);
3490 if (blkaddr == COMPRESS_ADDR)
3492 dn->ofs_in_node += cluster_size;
3496 if (__is_valid_data_blkaddr(blkaddr))
3499 if (blkaddr != NEW_ADDR)
3502 f2fs_set_data_blkaddr(dn, NULL_ADDR);
3505 f2fs_i_compr_blocks_update(dn->inode, compr_blocks, false);
3506 dec_valid_block_count(sbi, dn->inode,
3507 cluster_size - compr_blocks);
3509 released_blocks += cluster_size - compr_blocks;
3511 count -= cluster_size;
3514 return released_blocks;
3517 static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
3519 struct inode *inode = file_inode(filp);
3520 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3521 pgoff_t page_idx = 0, last_idx;
3522 unsigned int released_blocks = 0;
3526 if (!f2fs_sb_has_compression(sbi))
3529 if (!f2fs_compressed_file(inode))
3532 if (f2fs_readonly(sbi->sb))
3535 ret = mnt_want_write_file(filp);
3539 f2fs_balance_fs(sbi, true);
3543 writecount = atomic_read(&inode->i_writecount);
3544 if ((filp->f_mode & FMODE_WRITE && writecount != 1) ||
3545 (!(filp->f_mode & FMODE_WRITE) && writecount)) {
3550 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
3555 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
3559 if (!atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
3564 set_inode_flag(inode, FI_COMPRESS_RELEASED);
3565 inode_set_ctime_current(inode);
3566 f2fs_mark_inode_dirty_sync(inode, true);
3568 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3569 filemap_invalidate_lock(inode->i_mapping);
3571 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3573 while (page_idx < last_idx) {
3574 struct dnode_of_data dn;
3575 pgoff_t end_offset, count;
3577 set_new_dnode(&dn, inode, NULL, NULL, 0);
3578 ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
3580 if (ret == -ENOENT) {
3581 page_idx = f2fs_get_next_page_offset(&dn,
3589 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3590 count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
3591 count = round_up(count, F2FS_I(inode)->i_cluster_size);
3593 ret = release_compress_blocks(&dn, count);
3595 f2fs_put_dnode(&dn);
3601 released_blocks += ret;
3604 filemap_invalidate_unlock(inode->i_mapping);
3605 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3607 inode_unlock(inode);
3609 mnt_drop_write_file(filp);
3612 ret = put_user(released_blocks, (u64 __user *)arg);
3613 } else if (released_blocks &&
3614 atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
3615 set_sbi_flag(sbi, SBI_NEED_FSCK);
3616 f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
3617 "iblocks=%llu, released=%u, compr_blocks=%u, "
3619 __func__, inode->i_ino, inode->i_blocks,
3621 atomic_read(&F2FS_I(inode)->i_compr_blocks));
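/*
 * Releasing and reserving compressed blocks form a pair:
 * F2FS_IOC_RELEASE_COMPRESS_BLOCKS hands the blocks saved by compression
 * back to the free-space pool and sets FI_COMPRESS_RELEASED, after which
 * writes to the file are rejected; F2FS_IOC_RESERVE_COMPRESS_BLOCKS
 * (below) re-accounts those blocks and clears the flag, making the file
 * writable again.
 */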
3627 static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count,
3628 unsigned int *reserved_blocks)
3630 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3631 int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3635 for (i = 0; i < count; i++) {
3636 blkaddr = data_blkaddr(dn->inode, dn->node_page,
3637 dn->ofs_in_node + i);
3639 if (!__is_valid_data_blkaddr(blkaddr))
3641 if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
3642 DATA_GENERIC_ENHANCE))) {
3643 f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
3644 return -EFSCORRUPTED;
3649 int compr_blocks = 0;
3653 for (i = 0; i < cluster_size; i++) {
3654 blkaddr = data_blkaddr(dn->inode, dn->node_page,
3655 dn->ofs_in_node + i);
3658 if (blkaddr != COMPRESS_ADDR) {
3659 dn->ofs_in_node += cluster_size;
3666			 * the compressed cluster was not released because
3667			 * release_compress_blocks() failed, so NEW_ADDR
3668			 * is a possible case.
3670 if (blkaddr == NEW_ADDR ||
3671 __is_valid_data_blkaddr(blkaddr)) {
3677 reserved = cluster_size - compr_blocks;
3679			/* for the case where all blocks in the cluster were reserved */
3683 ret = inc_valid_block_count(sbi, dn->inode, &reserved, false);
3687 for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
3688 if (f2fs_data_blkaddr(dn) == NULL_ADDR)
3689 f2fs_set_data_blkaddr(dn, NEW_ADDR);
3692 f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true);
3694 *reserved_blocks += reserved;
3696 count -= cluster_size;
3702 static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
3704 struct inode *inode = file_inode(filp);
3705 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3706 pgoff_t page_idx = 0, last_idx;
3707 unsigned int reserved_blocks = 0;
3710 if (!f2fs_sb_has_compression(sbi))
3713 if (!f2fs_compressed_file(inode))
3716 if (f2fs_readonly(sbi->sb))
3719 ret = mnt_want_write_file(filp);
3723 f2fs_balance_fs(sbi, true);
3727 if (!is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
3732 if (atomic_read(&F2FS_I(inode)->i_compr_blocks))
3735 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3736 filemap_invalidate_lock(inode->i_mapping);
3738 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3740 while (page_idx < last_idx) {
3741 struct dnode_of_data dn;
3742 pgoff_t end_offset, count;
3744 set_new_dnode(&dn, inode, NULL, NULL, 0);
3745 ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
3747 if (ret == -ENOENT) {
3748 page_idx = f2fs_get_next_page_offset(&dn,
3756 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3757 count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
3758 count = round_up(count, F2FS_I(inode)->i_cluster_size);
3760 ret = reserve_compress_blocks(&dn, count, &reserved_blocks);
3762 f2fs_put_dnode(&dn);
3770 filemap_invalidate_unlock(inode->i_mapping);
3771 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3774 clear_inode_flag(inode, FI_COMPRESS_RELEASED);
3775 inode_set_ctime_current(inode);
3776 f2fs_mark_inode_dirty_sync(inode, true);
3779 inode_unlock(inode);
3780 mnt_drop_write_file(filp);
3783 ret = put_user(reserved_blocks, (u64 __user *)arg);
3784 } else if (reserved_blocks &&
3785 atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
3786 set_sbi_flag(sbi, SBI_NEED_FSCK);
3787		f2fs_warn(sbi, "%s: partial blocks were reserved i_ino=%lx "
3788 "iblocks=%llu, reserved=%u, compr_blocks=%u, "
3790 __func__, inode->i_ino, inode->i_blocks,
3792 atomic_read(&F2FS_I(inode)->i_compr_blocks));
3798 static int f2fs_secure_erase(struct block_device *bdev, struct inode *inode,
3799 pgoff_t off, block_t block, block_t len, u32 flags)
3801 sector_t sector = SECTOR_FROM_BLOCK(block);
3802 sector_t nr_sects = SECTOR_FROM_BLOCK(len);
3805 if (flags & F2FS_TRIM_FILE_DISCARD) {
3806 if (bdev_max_secure_erase_sectors(bdev))
3807 ret = blkdev_issue_secure_erase(bdev, sector, nr_sects,
3810 ret = blkdev_issue_discard(bdev, sector, nr_sects,
3814 if (!ret && (flags & F2FS_TRIM_FILE_ZEROOUT)) {
3815 if (IS_ENCRYPTED(inode))
3816 ret = fscrypt_zeroout_range(inode, off, block, len);
3818 ret = blkdev_issue_zeroout(bdev, sector, nr_sects,
3825 static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
3827 struct inode *inode = file_inode(filp);
3828 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3829 struct address_space *mapping = inode->i_mapping;
3830 struct block_device *prev_bdev = NULL;
3831 struct f2fs_sectrim_range range;
3832 pgoff_t index, pg_end, prev_index = 0;
3833 block_t prev_block = 0, len = 0;
3835 bool to_end = false;
3838 if (!(filp->f_mode & FMODE_WRITE))
3841 if (copy_from_user(&range, (struct f2fs_sectrim_range __user *)arg,
3845 if (range.flags == 0 || (range.flags & ~F2FS_TRIM_FILE_MASK) ||
3846 !S_ISREG(inode->i_mode))
3849 if (((range.flags & F2FS_TRIM_FILE_DISCARD) &&
3850 !f2fs_hw_support_discard(sbi)) ||
3851 ((range.flags & F2FS_TRIM_FILE_ZEROOUT) &&
3852 IS_ENCRYPTED(inode) && f2fs_is_multi_device(sbi)))
3855 file_start_write(filp);
3858 if (f2fs_is_atomic_file(inode) || f2fs_compressed_file(inode) ||
3859 range.start >= inode->i_size) {
3867 if (inode->i_size - range.start > range.len) {
3868 end_addr = range.start + range.len;
3870 end_addr = range.len == (u64)-1 ?
3871 sbi->sb->s_maxbytes : inode->i_size;
3875 if (!IS_ALIGNED(range.start, F2FS_BLKSIZE) ||
3876 (!to_end && !IS_ALIGNED(end_addr, F2FS_BLKSIZE))) {
3881 index = F2FS_BYTES_TO_BLK(range.start);
3882 pg_end = DIV_ROUND_UP(end_addr, F2FS_BLKSIZE);
3884 ret = f2fs_convert_inline_inode(inode);
3888 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3889 filemap_invalidate_lock(mapping);
3891 ret = filemap_write_and_wait_range(mapping, range.start,
3892 to_end ? LLONG_MAX : end_addr - 1);
3896 truncate_inode_pages_range(mapping, range.start,
3897 to_end ? -1 : end_addr - 1);
3899 while (index < pg_end) {
3900 struct dnode_of_data dn;
3901 pgoff_t end_offset, count;
3904 set_new_dnode(&dn, inode, NULL, NULL, 0);
3905 ret = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
3907 if (ret == -ENOENT) {
3908 index = f2fs_get_next_page_offset(&dn, index);
3914 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3915 count = min(end_offset - dn.ofs_in_node, pg_end - index);
3916 for (i = 0; i < count; i++, index++, dn.ofs_in_node++) {
3917 struct block_device *cur_bdev;
3918 block_t blkaddr = f2fs_data_blkaddr(&dn);
3920 if (!__is_valid_data_blkaddr(blkaddr))
3923 if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
3924 DATA_GENERIC_ENHANCE)) {
3925 ret = -EFSCORRUPTED;
3926 f2fs_put_dnode(&dn);
3927 f2fs_handle_error(sbi,
3928 ERROR_INVALID_BLKADDR);
3932 cur_bdev = f2fs_target_device(sbi, blkaddr, NULL);
3933 if (f2fs_is_multi_device(sbi)) {
3934 int di = f2fs_target_device_index(sbi, blkaddr);
3936 blkaddr -= FDEV(di).start_blk;
3940 if (prev_bdev == cur_bdev &&
3941 index == prev_index + len &&
3942 blkaddr == prev_block + len) {
3945 ret = f2fs_secure_erase(prev_bdev,
3946 inode, prev_index, prev_block,
3949 f2fs_put_dnode(&dn);
3958 prev_bdev = cur_bdev;
3960 prev_block = blkaddr;
3965 f2fs_put_dnode(&dn);
3967 if (fatal_signal_pending(current)) {
3975 ret = f2fs_secure_erase(prev_bdev, inode, prev_index,
3976 prev_block, len, range.flags);
3978 filemap_invalidate_unlock(mapping);
3979 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3981 inode_unlock(inode);
3982 file_end_write(filp);
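/*
 * A hedged sketch of F2FS_IOC_SEC_TRIM_FILE: start/len are byte offsets
 * (len == (u64)-1 means "to EOF", per the checks above), flags is a mask
 * of F2FS_TRIM_FILE_DISCARD and/or F2FS_TRIM_FILE_ZEROOUT, and the fd
 * must be open for write:
 *
 *	struct f2fs_sectrim_range tr = {
 *		.start = 0, .len = (__u64)-1,
 *		.flags = F2FS_TRIM_FILE_ZEROOUT,
 *	};
 *	ioctl(fd, F2FS_IOC_SEC_TRIM_FILE, &tr);
 */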
3987 static int f2fs_ioc_get_compress_option(struct file *filp, unsigned long arg)
3989 struct inode *inode = file_inode(filp);
3990 struct f2fs_comp_option option;
3992 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3995 inode_lock_shared(inode);
3997 if (!f2fs_compressed_file(inode)) {
3998 inode_unlock_shared(inode);
4002 option.algorithm = F2FS_I(inode)->i_compress_algorithm;
4003 option.log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
4005 inode_unlock_shared(inode);
4007 if (copy_to_user((struct f2fs_comp_option __user *)arg, &option,
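/*
 * A hedged sketch of the get/set compression-option pair; struct
 * f2fs_comp_option carries the algorithm id and log2 of the cluster size.
 * The numeric algorithm ids follow the kernel's internal COMPRESS_* enum
 * (an assumption here, since that enum is not exported to uapi):
 *
 *	struct f2fs_comp_option opt;
 *	ioctl(fd, F2FS_IOC_GET_COMPRESS_OPTION, &opt);
 *	opt.log_cluster_size = 2;	(4-block clusters)
 *	ioctl(fd, F2FS_IOC_SET_COMPRESS_OPTION, &opt);
 */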
4014 static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
4016 struct inode *inode = file_inode(filp);
4017 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4018 struct f2fs_comp_option option;
4021 if (!f2fs_sb_has_compression(sbi))
4024 if (!(filp->f_mode & FMODE_WRITE))
4027 if (copy_from_user(&option, (struct f2fs_comp_option __user *)arg,
4031 if (option.log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
4032 option.log_cluster_size > MAX_COMPRESS_LOG_SIZE ||
4033 option.algorithm >= COMPRESS_MAX)
4036 file_start_write(filp);
4039 f2fs_down_write(&F2FS_I(inode)->i_sem);
4040 if (!f2fs_compressed_file(inode)) {
4045 if (f2fs_is_mmap_file(inode) || get_dirty_pages(inode)) {
4050 if (F2FS_HAS_BLOCKS(inode)) {
4055 F2FS_I(inode)->i_compress_algorithm = option.algorithm;
4056 F2FS_I(inode)->i_log_cluster_size = option.log_cluster_size;
4057 F2FS_I(inode)->i_cluster_size = BIT(option.log_cluster_size);
4058 /* Set default level */
4059 if (F2FS_I(inode)->i_compress_algorithm == COMPRESS_ZSTD)
4060 F2FS_I(inode)->i_compress_level = F2FS_ZSTD_DEFAULT_CLEVEL;
4062 F2FS_I(inode)->i_compress_level = 0;
4063 /* Adjust mount option level */
4064 if (option.algorithm == F2FS_OPTION(sbi).compress_algorithm &&
4065 F2FS_OPTION(sbi).compress_level)
4066 F2FS_I(inode)->i_compress_level = F2FS_OPTION(sbi).compress_level;
4067 f2fs_mark_inode_dirty_sync(inode, true);
4069 if (!f2fs_is_compress_backend_ready(inode))
4070		f2fs_warn(sbi, "compression algorithm was set successfully, "
4071			"but the current kernel doesn't support this algorithm.");
4073 f2fs_up_write(&F2FS_I(inode)->i_sem);
4074 inode_unlock(inode);
4075 file_end_write(filp);
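/*
 * Pull @len pages starting at @page_idx into the page cache via
 * readahead, then lock each one and mark it dirty and as a GC target so
 * the next writeback rewrites the data.  The decompress/compress ioctls
 * below use this to force whole clusters back through the write path.
 */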
4080 static int redirty_blocks(struct inode *inode, pgoff_t page_idx, int len)
4082 DEFINE_READAHEAD(ractl, NULL, NULL, inode->i_mapping, page_idx);
4083 struct address_space *mapping = inode->i_mapping;
4085 pgoff_t redirty_idx = page_idx;
4086 int i, page_len = 0, ret = 0;
4088 page_cache_ra_unbounded(&ractl, len, 0);
4090 for (i = 0; i < len; i++, page_idx++) {
4091 page = read_cache_page(mapping, page_idx, NULL, NULL);
4093 ret = PTR_ERR(page);
4099 for (i = 0; i < page_len; i++, redirty_idx++) {
4100 page = find_lock_page(mapping, redirty_idx);
4102			/* It will never fail, since the page was pinned above */
4103 f2fs_bug_on(F2FS_I_SB(inode), !page);
4105 set_page_dirty(page);
4106 set_page_private_gcing(page);
4107 f2fs_put_page(page, 1);
4108 f2fs_put_page(page, 0);
4114 static int f2fs_ioc_decompress_file(struct file *filp)
4116 struct inode *inode = file_inode(filp);
4117 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4118 struct f2fs_inode_info *fi = F2FS_I(inode);
4119 pgoff_t page_idx = 0, last_idx;
4120 int cluster_size = fi->i_cluster_size;
4123 if (!f2fs_sb_has_compression(sbi) ||
4124 F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
4127 if (!(filp->f_mode & FMODE_WRITE))
4130 if (!f2fs_compressed_file(inode))
4133 f2fs_balance_fs(sbi, true);
4135 file_start_write(filp);
4138 if (!f2fs_is_compress_backend_ready(inode)) {
4143 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
4148 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
4152 if (!atomic_read(&fi->i_compr_blocks))
4155 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
4157 count = last_idx - page_idx;
4158 while (count && count >= cluster_size) {
4159 ret = redirty_blocks(inode, page_idx, cluster_size);
4163 if (get_dirty_pages(inode) >= BLKS_PER_SEG(sbi)) {
4164 ret = filemap_fdatawrite(inode->i_mapping);
4169 count -= cluster_size;
4170 page_idx += cluster_size;
4173 if (fatal_signal_pending(current)) {
4180 ret = filemap_write_and_wait_range(inode->i_mapping, 0,
4184 f2fs_warn(sbi, "%s: The file might be partially decompressed (errno=%d). Please delete the file.",
4187 inode_unlock(inode);
4188 file_end_write(filp);
4193 static int f2fs_ioc_compress_file(struct file *filp)
4195 struct inode *inode = file_inode(filp);
4196 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4197 pgoff_t page_idx = 0, last_idx;
4198 int cluster_size = F2FS_I(inode)->i_cluster_size;
4201 if (!f2fs_sb_has_compression(sbi) ||
4202 F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
4205 if (!(filp->f_mode & FMODE_WRITE))
4208 if (!f2fs_compressed_file(inode))
4211 f2fs_balance_fs(sbi, true);
4213 file_start_write(filp);
4216 if (!f2fs_is_compress_backend_ready(inode)) {
4221 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
4226 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
4230 set_inode_flag(inode, FI_ENABLE_COMPRESS);
4232 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
4234 count = last_idx - page_idx;
4235 while (count && count >= cluster_size) {
4236 ret = redirty_blocks(inode, page_idx, cluster_size);
4240 if (get_dirty_pages(inode) >= BLKS_PER_SEG(sbi)) {
4241 ret = filemap_fdatawrite(inode->i_mapping);
4246 count -= cluster_size;
4247 page_idx += cluster_size;
4250 if (fatal_signal_pending(current)) {
4257 ret = filemap_write_and_wait_range(inode->i_mapping, 0,
4260 clear_inode_flag(inode, FI_ENABLE_COMPRESS);
4263 f2fs_warn(sbi, "%s: The file might be partially compressed (errno=%d). Please delete the file.",
4266 inode_unlock(inode);
4267 file_end_write(filp);
4272 static long __f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4275 case FS_IOC_GETVERSION:
4276 return f2fs_ioc_getversion(filp, arg);
4277 case F2FS_IOC_START_ATOMIC_WRITE:
4278 return f2fs_ioc_start_atomic_write(filp, false);
4279 case F2FS_IOC_START_ATOMIC_REPLACE:
4280 return f2fs_ioc_start_atomic_write(filp, true);
4281 case F2FS_IOC_COMMIT_ATOMIC_WRITE:
4282 return f2fs_ioc_commit_atomic_write(filp);
4283 case F2FS_IOC_ABORT_ATOMIC_WRITE:
4284 return f2fs_ioc_abort_atomic_write(filp);
4285 case F2FS_IOC_START_VOLATILE_WRITE:
4286 case F2FS_IOC_RELEASE_VOLATILE_WRITE:
4288 case F2FS_IOC_SHUTDOWN:
4289 return f2fs_ioc_shutdown(filp, arg);
4291 return f2fs_ioc_fitrim(filp, arg);
4292 case FS_IOC_SET_ENCRYPTION_POLICY:
4293 return f2fs_ioc_set_encryption_policy(filp, arg);
4294 case FS_IOC_GET_ENCRYPTION_POLICY:
4295 return f2fs_ioc_get_encryption_policy(filp, arg);
4296 case FS_IOC_GET_ENCRYPTION_PWSALT:
4297 return f2fs_ioc_get_encryption_pwsalt(filp, arg);
4298 case FS_IOC_GET_ENCRYPTION_POLICY_EX:
4299 return f2fs_ioc_get_encryption_policy_ex(filp, arg);
4300 case FS_IOC_ADD_ENCRYPTION_KEY:
4301 return f2fs_ioc_add_encryption_key(filp, arg);
4302 case FS_IOC_REMOVE_ENCRYPTION_KEY:
4303 return f2fs_ioc_remove_encryption_key(filp, arg);
4304 case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
4305 return f2fs_ioc_remove_encryption_key_all_users(filp, arg);
4306 case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
4307 return f2fs_ioc_get_encryption_key_status(filp, arg);
4308 case FS_IOC_GET_ENCRYPTION_NONCE:
4309 return f2fs_ioc_get_encryption_nonce(filp, arg);
4310 case F2FS_IOC_GARBAGE_COLLECT:
4311 return f2fs_ioc_gc(filp, arg);
4312 case F2FS_IOC_GARBAGE_COLLECT_RANGE:
4313 return f2fs_ioc_gc_range(filp, arg);
4314 case F2FS_IOC_WRITE_CHECKPOINT:
4315 return f2fs_ioc_write_checkpoint(filp);
4316 case F2FS_IOC_DEFRAGMENT:
4317 return f2fs_ioc_defragment(filp, arg);
4318 case F2FS_IOC_MOVE_RANGE:
4319 return f2fs_ioc_move_range(filp, arg);
4320 case F2FS_IOC_FLUSH_DEVICE:
4321 return f2fs_ioc_flush_device(filp, arg);
4322 case F2FS_IOC_GET_FEATURES:
4323 return f2fs_ioc_get_features(filp, arg);
4324 case F2FS_IOC_GET_PIN_FILE:
4325 return f2fs_ioc_get_pin_file(filp, arg);
4326 case F2FS_IOC_SET_PIN_FILE:
4327 return f2fs_ioc_set_pin_file(filp, arg);
4328 case F2FS_IOC_PRECACHE_EXTENTS:
4329 return f2fs_ioc_precache_extents(filp);
4330 case F2FS_IOC_RESIZE_FS:
4331 return f2fs_ioc_resize_fs(filp, arg);
4332 case FS_IOC_ENABLE_VERITY:
4333 return f2fs_ioc_enable_verity(filp, arg);
4334 case FS_IOC_MEASURE_VERITY:
4335 return f2fs_ioc_measure_verity(filp, arg);
4336 case FS_IOC_READ_VERITY_METADATA:
4337 return f2fs_ioc_read_verity_metadata(filp, arg);
4338 case FS_IOC_GETFSLABEL:
4339 return f2fs_ioc_getfslabel(filp, arg);
4340 case FS_IOC_SETFSLABEL:
4341 return f2fs_ioc_setfslabel(filp, arg);
4342 case F2FS_IOC_GET_COMPRESS_BLOCKS:
4343 return f2fs_ioc_get_compress_blocks(filp, arg);
4344 case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
4345 return f2fs_release_compress_blocks(filp, arg);
4346 case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
4347 return f2fs_reserve_compress_blocks(filp, arg);
4348 case F2FS_IOC_SEC_TRIM_FILE:
4349 return f2fs_sec_trim_file(filp, arg);
4350 case F2FS_IOC_GET_COMPRESS_OPTION:
4351 return f2fs_ioc_get_compress_option(filp, arg);
4352 case F2FS_IOC_SET_COMPRESS_OPTION:
4353 return f2fs_ioc_set_compress_option(filp, arg);
4354 case F2FS_IOC_DECOMPRESS_FILE:
4355 return f2fs_ioc_decompress_file(filp);
4356 case F2FS_IOC_COMPRESS_FILE:
4357 return f2fs_ioc_compress_file(filp);
4363 long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4365 if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
4367 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp))))
4370 return __f2fs_ioctl(filp, cmd, arg);
4374 * Return %true if the given read or write request should use direct I/O, or
4375 * %false if it should use buffered I/O.
4377 static bool f2fs_should_use_dio(struct inode *inode, struct kiocb *iocb,
4378 struct iov_iter *iter)
4382 if (!(iocb->ki_flags & IOCB_DIRECT))
4385 if (f2fs_force_buffered_io(inode, iov_iter_rw(iter)))
4389 * Direct I/O not aligned to the disk's logical_block_size will be
4390 * attempted, but will fail with -EINVAL.
4392 * f2fs additionally requires that direct I/O be aligned to the
4393 * filesystem block size, which is often a stricter requirement.
4394 * However, f2fs traditionally falls back to buffered I/O on requests
4395 * that are logical_block_size-aligned but not fs-block aligned.
4397 * The below logic implements this behavior.
4399 align = iocb->ki_pos | iov_iter_alignment(iter);
4400 if (!IS_ALIGNED(align, i_blocksize(inode)) &&
4401 IS_ALIGNED(align, bdev_logical_block_size(inode->i_sb->s_bdev)))
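	/*
	 * For example, with 4KiB filesystem blocks on a device with 512-byte
	 * logical sectors, a direct read at pos 512 is sector-aligned but not
	 * fs-block-aligned and so falls back to buffered I/O rather than
	 * failing, while a direct read at pos 4096 stays direct.
	 */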
4407 static int f2fs_dio_read_end_io(struct kiocb *iocb, ssize_t size, int error,
4410 struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(iocb->ki_filp));
4412 dec_page_count(sbi, F2FS_DIO_READ);
4415 f2fs_update_iostat(sbi, NULL, APP_DIRECT_READ_IO, size);
4419 static const struct iomap_dio_ops f2fs_iomap_dio_read_ops = {
4420 .end_io = f2fs_dio_read_end_io,
4423 static ssize_t f2fs_dio_read_iter(struct kiocb *iocb, struct iov_iter *to)
4425 struct file *file = iocb->ki_filp;
4426 struct inode *inode = file_inode(file);
4427 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4428 struct f2fs_inode_info *fi = F2FS_I(inode);
4429 const loff_t pos = iocb->ki_pos;
4430 const size_t count = iov_iter_count(to);
4431 struct iomap_dio *dio;
4435 return 0; /* skip atime update */
4437 trace_f2fs_direct_IO_enter(inode, iocb, count, READ);
4439 if (iocb->ki_flags & IOCB_NOWAIT) {
4440 if (!f2fs_down_read_trylock(&fi->i_gc_rwsem[READ])) {
4445 f2fs_down_read(&fi->i_gc_rwsem[READ]);
4449 * We have to use __iomap_dio_rw() and iomap_dio_complete() instead of
4450 * the higher-level function iomap_dio_rw() in order to ensure that the
4451 * F2FS_DIO_READ counter will be decremented correctly in all cases.
4453 inc_page_count(sbi, F2FS_DIO_READ);
4454 dio = __iomap_dio_rw(iocb, to, &f2fs_iomap_ops,
4455 &f2fs_iomap_dio_read_ops, 0, NULL, 0);
4456 if (IS_ERR_OR_NULL(dio)) {
4457 ret = PTR_ERR_OR_ZERO(dio);
4458 if (ret != -EIOCBQUEUED)
4459 dec_page_count(sbi, F2FS_DIO_READ);
4461 ret = iomap_dio_complete(dio);
4464 f2fs_up_read(&fi->i_gc_rwsem[READ]);
4466 file_accessed(file);
4468 trace_f2fs_direct_IO_exit(inode, pos, count, READ, ret);
4472 static void f2fs_trace_rw_file_path(struct file *file, loff_t pos, size_t count,
4475 struct inode *inode = file_inode(file);
4478 buf = f2fs_getname(F2FS_I_SB(inode));
4481 path = dentry_path_raw(file_dentry(file), buf, PATH_MAX);
4485 trace_f2fs_datawrite_start(inode, pos, count,
4486 current->pid, path, current->comm);
4488 trace_f2fs_dataread_start(inode, pos, count,
4489 current->pid, path, current->comm);
4494 static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
4496 struct inode *inode = file_inode(iocb->ki_filp);
4497 const loff_t pos = iocb->ki_pos;
4500 if (!f2fs_is_compress_backend_ready(inode))
4503 if (trace_f2fs_dataread_start_enabled())
4504 f2fs_trace_rw_file_path(iocb->ki_filp, iocb->ki_pos,
4505 iov_iter_count(to), READ);
4507 if (f2fs_should_use_dio(inode, iocb, to)) {
4508 ret = f2fs_dio_read_iter(iocb, to);
4510 ret = filemap_read(iocb, to, 0);
4512 f2fs_update_iostat(F2FS_I_SB(inode), inode,
4513 APP_BUFFERED_READ_IO, ret);
4515 if (trace_f2fs_dataread_end_enabled())
4516 trace_f2fs_dataread_end(inode, pos, ret);
4520 static ssize_t f2fs_file_splice_read(struct file *in, loff_t *ppos,
4521 struct pipe_inode_info *pipe,
4522 size_t len, unsigned int flags)
4524 struct inode *inode = file_inode(in);
4525 const loff_t pos = *ppos;
4528 if (!f2fs_is_compress_backend_ready(inode))
4531 if (trace_f2fs_dataread_start_enabled())
4532 f2fs_trace_rw_file_path(in, pos, len, READ);
4534 ret = filemap_splice_read(in, ppos, pipe, len, flags);
4536 f2fs_update_iostat(F2FS_I_SB(inode), inode,
4537 APP_BUFFERED_READ_IO, ret);
4539 if (trace_f2fs_dataread_end_enabled())
4540 trace_f2fs_dataread_end(inode, pos, ret);
4544 static ssize_t f2fs_write_checks(struct kiocb *iocb, struct iov_iter *from)
4546 struct file *file = iocb->ki_filp;
4547 struct inode *inode = file_inode(file);
4551 if (IS_IMMUTABLE(inode))
4554 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
4557 count = generic_write_checks(iocb, from);
4561 err = file_modified(file);
4568 * Preallocate blocks for a write request, if it is possible and helpful to do
4569 * so. Returns a positive number if blocks may have been preallocated, 0 if no
4570 * blocks were preallocated, or a negative errno value if something went
4571 * seriously wrong. Also sets FI_PREALLOCATED_ALL on the inode if *all* the
4572 * requested blocks (not just some of them) have been allocated.
4574 static int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *iter,
4577 struct inode *inode = file_inode(iocb->ki_filp);
4578 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4579 const loff_t pos = iocb->ki_pos;
4580 const size_t count = iov_iter_count(iter);
4581 struct f2fs_map_blocks map = {};
4585 /* If it will be an out-of-place direct write, don't bother. */
4586 if (dio && f2fs_lfs_mode(sbi))
4589	 * Don't preallocate holes aligned to DIO_SKIP_HOLES; DIO falls back to
4590	 * buffered I/O if it meets any hole.
4592 if (dio && i_size_read(inode) &&
4593 (F2FS_BYTES_TO_BLK(pos) < F2FS_BLK_ALIGN(i_size_read(inode))))
4596 /* No-wait I/O can't allocate blocks. */
4597 if (iocb->ki_flags & IOCB_NOWAIT)
4600 /* If it will be a short write, don't bother. */
4601 if (fault_in_iov_iter_readable(iter, count))
4604 if (f2fs_has_inline_data(inode)) {
4605 /* If the data will fit inline, don't bother. */
4606 if (pos + count <= MAX_INLINE_DATA(inode))
4608 ret = f2fs_convert_inline_inode(inode);
4613 /* Do not preallocate blocks that will be written partially in 4KB. */
4614 map.m_lblk = F2FS_BLK_ALIGN(pos);
4615 map.m_len = F2FS_BYTES_TO_BLK(pos + count);
4616 if (map.m_len > map.m_lblk)
4617 map.m_len -= map.m_lblk;
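	/*
	 * Worked example, assuming 4KiB blocks with F2FS_BLK_ALIGN() rounding
	 * up and F2FS_BYTES_TO_BLK() truncating: for pos = 5000 and
	 * count = 20000, m_lblk = F2FS_BLK_ALIGN(5000) = 2 and
	 * m_len = F2FS_BYTES_TO_BLK(25000) - m_lblk = 6 - 2 = 4, so only the
	 * four fully-overwritten blocks 2..5 are preallocated; the partially
	 * written head and tail blocks are skipped.
	 */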
4621 map.m_may_create = true;
4623 map.m_seg_type = f2fs_rw_hint_to_seg_type(inode->i_write_hint);
4624 flag = F2FS_GET_BLOCK_PRE_DIO;
4626 map.m_seg_type = NO_CHECK_TYPE;
4627 flag = F2FS_GET_BLOCK_PRE_AIO;
4630 ret = f2fs_map_blocks(inode, &map, flag);
4631	/* On -ENOSPC or -EDQUOT, still report the number of blocks already allocated. */
4632 if (ret < 0 && !((ret == -ENOSPC || ret == -EDQUOT) && map.m_len > 0))
4635 set_inode_flag(inode, FI_PREALLOCATED_ALL);
4639 static ssize_t f2fs_buffered_write_iter(struct kiocb *iocb,
4640 struct iov_iter *from)
4642 struct file *file = iocb->ki_filp;
4643 struct inode *inode = file_inode(file);
4646 if (iocb->ki_flags & IOCB_NOWAIT)
4649 ret = generic_perform_write(iocb, from);
4652 f2fs_update_iostat(F2FS_I_SB(inode), inode,
4653 APP_BUFFERED_IO, ret);
4658 static int f2fs_dio_write_end_io(struct kiocb *iocb, ssize_t size, int error,
4661 struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(iocb->ki_filp));
4663 dec_page_count(sbi, F2FS_DIO_WRITE);
4666 f2fs_update_time(sbi, REQ_TIME);
4667 f2fs_update_iostat(sbi, NULL, APP_DIRECT_IO, size);
4671 static const struct iomap_dio_ops f2fs_iomap_dio_write_ops = {
4672 .end_io = f2fs_dio_write_end_io,
4675 static void f2fs_flush_buffered_write(struct address_space *mapping,
4676 loff_t start_pos, loff_t end_pos)
4680 ret = filemap_write_and_wait_range(mapping, start_pos, end_pos);
4683 invalidate_mapping_pages(mapping,
4684 start_pos >> PAGE_SHIFT,
4685 end_pos >> PAGE_SHIFT);
4688 static ssize_t f2fs_dio_write_iter(struct kiocb *iocb, struct iov_iter *from,
4689 bool *may_need_sync)
4691 struct file *file = iocb->ki_filp;
4692 struct inode *inode = file_inode(file);
4693 struct f2fs_inode_info *fi = F2FS_I(inode);
4694 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4695 const bool do_opu = f2fs_lfs_mode(sbi);
4696 const loff_t pos = iocb->ki_pos;
4697 const ssize_t count = iov_iter_count(from);
4698 unsigned int dio_flags;
4699 struct iomap_dio *dio;
4702 trace_f2fs_direct_IO_enter(inode, iocb, count, WRITE);
4704 if (iocb->ki_flags & IOCB_NOWAIT) {
4705 /* f2fs_convert_inline_inode() and block allocation can block */
4706 if (f2fs_has_inline_data(inode) ||
4707 !f2fs_overwrite_io(inode, pos, count)) {
4712 if (!f2fs_down_read_trylock(&fi->i_gc_rwsem[WRITE])) {
4716 if (do_opu && !f2fs_down_read_trylock(&fi->i_gc_rwsem[READ])) {
4717 f2fs_up_read(&fi->i_gc_rwsem[WRITE]);
4722 ret = f2fs_convert_inline_inode(inode);
4726 f2fs_down_read(&fi->i_gc_rwsem[WRITE]);
4728 f2fs_down_read(&fi->i_gc_rwsem[READ]);
4732 * We have to use __iomap_dio_rw() and iomap_dio_complete() instead of
4733 * the higher-level function iomap_dio_rw() in order to ensure that the
4734 * F2FS_DIO_WRITE counter will be decremented correctly in all cases.
4736 inc_page_count(sbi, F2FS_DIO_WRITE);
4738 if (pos + count > inode->i_size)
4739 dio_flags |= IOMAP_DIO_FORCE_WAIT;
4740 dio = __iomap_dio_rw(iocb, from, &f2fs_iomap_ops,
4741 &f2fs_iomap_dio_write_ops, dio_flags, NULL, 0);
4742 if (IS_ERR_OR_NULL(dio)) {
4743 ret = PTR_ERR_OR_ZERO(dio);
4744 if (ret == -ENOTBLK)
4746 if (ret != -EIOCBQUEUED)
4747 dec_page_count(sbi, F2FS_DIO_WRITE);
4749 ret = iomap_dio_complete(dio);
4753 f2fs_up_read(&fi->i_gc_rwsem[READ]);
4754 f2fs_up_read(&fi->i_gc_rwsem[WRITE]);
4758 if (pos + ret > inode->i_size)
4759 f2fs_i_size_write(inode, pos + ret);
4761 set_inode_flag(inode, FI_UPDATE_WRITE);
4763 if (iov_iter_count(from)) {
4765 loff_t bufio_start_pos = iocb->ki_pos;
4768 * The direct write was partial, so we need to fall back to a
4769 * buffered write for the remainder.
4772 ret2 = f2fs_buffered_write_iter(iocb, from);
4773 if (iov_iter_count(from))
4774 f2fs_write_failed(inode, iocb->ki_pos);
4779 * Ensure that the pagecache pages are written to disk and
4780 * invalidated to preserve the expected O_DIRECT semantics.
4783 loff_t bufio_end_pos = bufio_start_pos + ret2 - 1;
4787 f2fs_flush_buffered_write(file->f_mapping,
4792 /* iomap_dio_rw() already handled the generic_write_sync(). */
4793 *may_need_sync = false;
4796 trace_f2fs_direct_IO_exit(inode, pos, count, WRITE, ret);
4800 static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
4802 struct inode *inode = file_inode(iocb->ki_filp);
4803 const loff_t orig_pos = iocb->ki_pos;
4804 const size_t orig_count = iov_iter_count(from);
4807 bool may_need_sync = true;
4811 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
4816 if (!f2fs_is_compress_backend_ready(inode)) {
4821 if (iocb->ki_flags & IOCB_NOWAIT) {
4822 if (!inode_trylock(inode)) {
4830 ret = f2fs_write_checks(iocb, from);
4834 /* Determine whether we will do a direct write or a buffered write. */
4835 dio = f2fs_should_use_dio(inode, iocb, from);
4837 /* Possibly preallocate the blocks for the write. */
4838 target_size = iocb->ki_pos + iov_iter_count(from);
4839 preallocated = f2fs_preallocate_blocks(iocb, from, dio);
4840 if (preallocated < 0) {
4843 if (trace_f2fs_datawrite_start_enabled())
4844 f2fs_trace_rw_file_path(iocb->ki_filp, iocb->ki_pos,
4847 /* Do the actual write. */
4849 f2fs_dio_write_iter(iocb, from, &may_need_sync) :
4850 f2fs_buffered_write_iter(iocb, from);
4852 if (trace_f2fs_datawrite_end_enabled())
4853 trace_f2fs_datawrite_end(inode, orig_pos, ret);
4856 /* Don't leave any preallocated blocks around past i_size. */
4857 if (preallocated && i_size_read(inode) < target_size) {
4858 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
4859 filemap_invalidate_lock(inode->i_mapping);
4860 if (!f2fs_truncate(inode))
4861 file_dont_truncate(inode);
4862 filemap_invalidate_unlock(inode->i_mapping);
4863 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
4865 file_dont_truncate(inode);
4868 clear_inode_flag(inode, FI_PREALLOCATED_ALL);
4870 inode_unlock(inode);
4872 trace_f2fs_file_write_iter(inode, orig_pos, orig_count, ret);
4874 if (ret > 0 && may_need_sync)
4875 ret = generic_write_sync(iocb, ret);
4877 /* If buffered IO was forced, flush and drop the data from
4878 * the page cache to preserve O_DIRECT semantics
4880 if (ret > 0 && !dio && (iocb->ki_flags & IOCB_DIRECT))
4881 f2fs_flush_buffered_write(iocb->ki_filp->f_mapping,
4883 orig_pos + ret - 1);
4888 static int f2fs_file_fadvise(struct file *filp, loff_t offset, loff_t len,
4891 struct address_space *mapping;
4892 struct backing_dev_info *bdi;
4893 struct inode *inode = file_inode(filp);
4896 if (advice == POSIX_FADV_SEQUENTIAL) {
4897 if (S_ISFIFO(inode->i_mode))
4900 mapping = filp->f_mapping;
4901 if (!mapping || len < 0)
4904 bdi = inode_to_bdi(mapping->host);
4905 filp->f_ra.ra_pages = bdi->ra_pages *
4906 F2FS_I_SB(inode)->seq_file_ra_mul;
4907 spin_lock(&filp->f_lock);
4908 filp->f_mode &= ~FMODE_RANDOM;
4909 spin_unlock(&filp->f_lock);
4911 } else if (advice == POSIX_FADV_WILLNEED && offset == 0) {
4912 /* Load extent cache at the first readahead. */
4913 f2fs_precache_extents(inode);
4916 err = generic_fadvise(filp, offset, len, advice);
4917 if (!err && advice == POSIX_FADV_DONTNEED &&
4918 test_opt(F2FS_I_SB(inode), COMPRESS_CACHE) &&
4919 f2fs_compressed_file(inode))
4920 f2fs_invalidate_compress_pages(F2FS_I_SB(inode), inode->i_ino);
4925 #ifdef CONFIG_COMPAT
4926 struct compat_f2fs_gc_range {
4931 #define F2FS_IOC32_GARBAGE_COLLECT_RANGE _IOW(F2FS_IOCTL_MAGIC, 11,\
4932 struct compat_f2fs_gc_range)
4934 static int f2fs_compat_ioc_gc_range(struct file *file, unsigned long arg)
4936 struct compat_f2fs_gc_range __user *urange;
4937 struct f2fs_gc_range range;
4940 urange = compat_ptr(arg);
4941 err = get_user(range.sync, &urange->sync);
4942 err |= get_user(range.start, &urange->start);
4943 err |= get_user(range.len, &urange->len);
4947 return __f2fs_ioc_gc_range(file, &range);
4950 struct compat_f2fs_move_range {
4956 #define F2FS_IOC32_MOVE_RANGE _IOWR(F2FS_IOCTL_MAGIC, 9, \
4957 struct compat_f2fs_move_range)
4959 static int f2fs_compat_ioc_move_range(struct file *file, unsigned long arg)
4961 struct compat_f2fs_move_range __user *urange;
4962 struct f2fs_move_range range;
4965 urange = compat_ptr(arg);
4966 err = get_user(range.dst_fd, &urange->dst_fd);
4967 err |= get_user(range.pos_in, &urange->pos_in);
4968 err |= get_user(range.pos_out, &urange->pos_out);
4969 err |= get_user(range.len, &urange->len);
4973 return __f2fs_ioc_move_range(file, &range);
4976 long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
4978 if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
4980 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(file))))
4984 case FS_IOC32_GETVERSION:
4985 cmd = FS_IOC_GETVERSION;
4987 case F2FS_IOC32_GARBAGE_COLLECT_RANGE:
4988 return f2fs_compat_ioc_gc_range(file, arg);
4989 case F2FS_IOC32_MOVE_RANGE:
4990 return f2fs_compat_ioc_move_range(file, arg);
4991 case F2FS_IOC_START_ATOMIC_WRITE:
4992 case F2FS_IOC_START_ATOMIC_REPLACE:
4993 case F2FS_IOC_COMMIT_ATOMIC_WRITE:
4994 case F2FS_IOC_START_VOLATILE_WRITE:
4995 case F2FS_IOC_RELEASE_VOLATILE_WRITE:
4996 case F2FS_IOC_ABORT_ATOMIC_WRITE:
4997 case F2FS_IOC_SHUTDOWN:
4999 case FS_IOC_SET_ENCRYPTION_POLICY:
5000 case FS_IOC_GET_ENCRYPTION_PWSALT:
5001 case FS_IOC_GET_ENCRYPTION_POLICY:
5002 case FS_IOC_GET_ENCRYPTION_POLICY_EX:
5003 case FS_IOC_ADD_ENCRYPTION_KEY:
5004 case FS_IOC_REMOVE_ENCRYPTION_KEY:
5005 case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
5006 case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
5007 case FS_IOC_GET_ENCRYPTION_NONCE:
5008 case F2FS_IOC_GARBAGE_COLLECT:
5009 case F2FS_IOC_WRITE_CHECKPOINT:
5010 case F2FS_IOC_DEFRAGMENT:
5011 case F2FS_IOC_FLUSH_DEVICE:
5012 case F2FS_IOC_GET_FEATURES:
5013 case F2FS_IOC_GET_PIN_FILE:
5014 case F2FS_IOC_SET_PIN_FILE:
5015 case F2FS_IOC_PRECACHE_EXTENTS:
5016 case F2FS_IOC_RESIZE_FS:
5017 case FS_IOC_ENABLE_VERITY:
5018 case FS_IOC_MEASURE_VERITY:
5019 case FS_IOC_READ_VERITY_METADATA:
5020 case FS_IOC_GETFSLABEL:
5021 case FS_IOC_SETFSLABEL:
5022 case F2FS_IOC_GET_COMPRESS_BLOCKS:
5023 case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
5024 case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
5025 case F2FS_IOC_SEC_TRIM_FILE:
5026 case F2FS_IOC_GET_COMPRESS_OPTION:
5027 case F2FS_IOC_SET_COMPRESS_OPTION:
5028 case F2FS_IOC_DECOMPRESS_FILE:
5029 case F2FS_IOC_COMPRESS_FILE:
5032 return -ENOIOCTLCMD;
5034 return __f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
5038 const struct file_operations f2fs_file_operations = {
5039 .llseek = f2fs_llseek,
5040 .read_iter = f2fs_file_read_iter,
5041 .write_iter = f2fs_file_write_iter,
5042 .iopoll = iocb_bio_iopoll,
5043 .open = f2fs_file_open,
5044 .release = f2fs_release_file,
5045 .mmap = f2fs_file_mmap,
5046 .flush = f2fs_file_flush,
5047 .fsync = f2fs_sync_file,
5048 .fallocate = f2fs_fallocate,
5049 .unlocked_ioctl = f2fs_ioctl,
5050 #ifdef CONFIG_COMPAT
5051 .compat_ioctl = f2fs_compat_ioctl,
5053 .splice_read = f2fs_file_splice_read,
5054 .splice_write = iter_file_splice_write,
5055 .fadvise = f2fs_file_fadvise,