// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/file.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/uuid.h>
#include <linux/file.h>
#include <linux/nls.h>
#include <linux/sched/signal.h>
#include <linux/fileattr.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"
#include "gc.h"
#include <trace/events/f2fs.h>
#include <uapi/linux/f2fs.h>

static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
{
        struct inode *inode = file_inode(vmf->vma->vm_file);
        vm_fault_t ret;

        ret = filemap_fault(vmf);
        if (!ret)
                f2fs_update_iostat(F2FS_I_SB(inode), APP_MAPPED_READ_IO,
                                                        F2FS_BLKSIZE);

        trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret);

        return ret;
}

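/*
 * Write fault handler: after verifying checkpoint health and converting any
 * inline data, allocate a backing block (unless the page belongs to a
 * compressed cluster), zero the part of the page beyond EOF, and dirty it.
 */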
static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
{
        struct page *page = vmf->page;
        struct inode *inode = file_inode(vmf->vma->vm_file);
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct dnode_of_data dn;
        bool need_alloc = true;
        int err = 0;

        if (unlikely(IS_IMMUTABLE(inode)))
                return VM_FAULT_SIGBUS;

        if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
                return VM_FAULT_SIGBUS;

        if (unlikely(f2fs_cp_error(sbi))) {
                err = -EIO;
                goto err;
        }

        if (!f2fs_is_checkpoint_ready(sbi)) {
                err = -ENOSPC;
                goto err;
        }

        err = f2fs_convert_inline_inode(inode);
        if (err)
                goto err;

#ifdef CONFIG_F2FS_FS_COMPRESSION
        if (f2fs_compressed_file(inode)) {
                int ret = f2fs_is_compressed_cluster(inode, page->index);

                if (ret < 0) {
                        err = ret;
                        goto err;
                } else if (ret) {
                        need_alloc = false;
                }
        }
#endif
        /* should be done outside of any locked page */
        if (need_alloc)
                f2fs_balance_fs(sbi, true);

        sb_start_pagefault(inode->i_sb);

        f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

        file_update_time(vmf->vma->vm_file);
        filemap_invalidate_lock_shared(inode->i_mapping);
        lock_page(page);
        if (unlikely(page->mapping != inode->i_mapping ||
                        page_offset(page) > i_size_read(inode) ||
                        !PageUptodate(page))) {
                unlock_page(page);
                err = -EFAULT;
                goto out_sem;
        }

        if (need_alloc) {
                /* block allocation */
                f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
                set_new_dnode(&dn, inode, NULL, NULL, 0);
                err = f2fs_get_block(&dn, page->index);
                f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
        }

#ifdef CONFIG_F2FS_FS_COMPRESSION
        if (!need_alloc) {
                set_new_dnode(&dn, inode, NULL, NULL, 0);
                err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
                f2fs_put_dnode(&dn);
        }
#endif
        if (err) {
                unlock_page(page);
                goto out_sem;
        }

        f2fs_wait_on_page_writeback(page, DATA, false, true);

        /* wait for GCed page writeback via META_MAPPING */
        f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

        /*
         * check to see if the page is mapped already (no holes)
         */
        if (PageMappedToDisk(page))
                goto out_sem;

        /* page is wholly or partially inside EOF */
        if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
                                                i_size_read(inode)) {
                loff_t offset;

                offset = i_size_read(inode) & ~PAGE_MASK;
                zero_user_segment(page, offset, PAGE_SIZE);
        }
        set_page_dirty(page);
        if (!PageUptodate(page))
                SetPageUptodate(page);

        f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);
        f2fs_update_time(sbi, REQ_TIME);

        trace_f2fs_vm_page_mkwrite(page, DATA);
out_sem:
        filemap_invalidate_unlock_shared(inode->i_mapping);

        sb_end_pagefault(inode->i_sb);
err:
        return block_page_mkwrite_return(err);
}

static const struct vm_operations_struct f2fs_file_vm_ops = {
        .fault          = f2fs_filemap_fault,
        .map_pages      = filemap_map_pages,
        .page_mkwrite   = f2fs_vm_page_mkwrite,
};

static int get_parent_ino(struct inode *inode, nid_t *pino)
{
        struct dentry *dentry;

        /*
         * Make sure to get the non-deleted alias.  The alias associated with
         * the open file descriptor being fsync()'ed may be deleted already.
         */
        dentry = d_find_alias(inode);
        if (!dentry)
                return 0;

        *pino = parent_ino(dentry);
        dput(dentry);
        return 1;
}

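/*
 * Decide whether an fsync can be satisfied by roll-forward recovery of the
 * fsync-marked node chain alone. Any nonzero reason returned here makes
 * f2fs_do_sync_file() fall back to a full checkpoint.
 */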
static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        enum cp_reason_type cp_reason = CP_NO_NEEDED;

        if (!S_ISREG(inode->i_mode))
                cp_reason = CP_NON_REGULAR;
        else if (f2fs_compressed_file(inode))
                cp_reason = CP_COMPRESSED;
        else if (inode->i_nlink != 1)
                cp_reason = CP_HARDLINK;
        else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
                cp_reason = CP_SB_NEED_CP;
        else if (file_wrong_pino(inode))
                cp_reason = CP_WRONG_PINO;
        else if (!f2fs_space_for_roll_forward(sbi))
                cp_reason = CP_NO_SPC_ROLL;
        else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
                cp_reason = CP_NODE_NEED_CP;
        else if (test_opt(sbi, FASTBOOT))
                cp_reason = CP_FASTBOOT_MODE;
        else if (F2FS_OPTION(sbi).active_logs == 2)
                cp_reason = CP_SPEC_LOG_NUM;
        else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
                f2fs_need_dentry_mark(sbi, inode->i_ino) &&
                f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
                                                        TRANS_DIR_INO))
                cp_reason = CP_RECOVER_DIR;

        return cp_reason;
}

static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
{
        struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
        bool ret = false;
        /* but we still need to check for pending inode updates */
        if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino))
                ret = true;
        f2fs_put_page(i, 0);
        return ret;
}

static void try_to_fix_pino(struct inode *inode)
{
        struct f2fs_inode_info *fi = F2FS_I(inode);
        nid_t pino;

        down_write(&fi->i_sem);
        if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
                        get_parent_ino(inode, &pino)) {
                f2fs_i_pino_write(inode, pino);
                file_got_pino(inode);
        }
        up_write(&fi->i_sem);
}

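/*
 * Core fsync/fdatasync path: write back dirty data, then either trigger a
 * full checkpoint (when need_do_checkpoint() reports a reason) or write the
 * inode's node pages with an fsync mark for roll-forward recovery, and
 * finally issue a cache flush to the device unless nobarrier mode is set.
 */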
static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
                                                int datasync, bool atomic)
{
        struct inode *inode = file->f_mapping->host;
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        nid_t ino = inode->i_ino;
        int ret = 0;
        enum cp_reason_type cp_reason = 0;
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_ALL,
                .nr_to_write = LONG_MAX,
                .for_reclaim = 0,
        };
        unsigned int seq_id = 0;

        if (unlikely(f2fs_readonly(inode->i_sb) ||
                                is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
                return 0;

        trace_f2fs_sync_file_enter(inode);

        if (S_ISDIR(inode->i_mode))
                goto go_write;

        /* if fdatasync is triggered, let's do in-place-update */
        if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
                set_inode_flag(inode, FI_NEED_IPU);
        ret = file_write_and_wait_range(file, start, end);
        clear_inode_flag(inode, FI_NEED_IPU);

        if (ret) {
                trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
                return ret;
        }

        /* if the inode is dirty, let's recover all the time */
        if (!f2fs_skip_inode_update(inode, datasync)) {
                f2fs_write_inode(inode, NULL);
                goto go_write;
        }

        /*
         * if there is no written data, don't waste time writing recovery
         * info.
         */
        if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
                        !f2fs_exist_written_data(sbi, ino, APPEND_INO)) {

                /* it may call write_inode just prior to fsync */
                if (need_inode_page_update(sbi, ino))
                        goto go_write;

                if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
                                f2fs_exist_written_data(sbi, ino, UPDATE_INO))
                        goto flush_out;
                goto out;
        }
go_write:
        /*
         * Both fdatasync() and fsync() must be recoverable after a
         * sudden power-off.
         */
        down_read(&F2FS_I(inode)->i_sem);
        cp_reason = need_do_checkpoint(inode);
        up_read(&F2FS_I(inode)->i_sem);

        if (cp_reason) {
                /* all the dirty node pages should be flushed for POR */
                ret = f2fs_sync_fs(inode->i_sb, 1);

                /*
                 * We've secured consistency through sync_fs. Following pino
                 * will be used only for fsynced inodes after checkpoint.
                 */
                try_to_fix_pino(inode);
                clear_inode_flag(inode, FI_APPEND_WRITE);
                clear_inode_flag(inode, FI_UPDATE_WRITE);
                goto out;
        }
sync_nodes:
        atomic_inc(&sbi->wb_sync_req[NODE]);
        ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
        atomic_dec(&sbi->wb_sync_req[NODE]);
        if (ret)
                goto out;

        /* if cp_error was enabled, we should avoid infinite loop */
        if (unlikely(f2fs_cp_error(sbi))) {
                ret = -EIO;
                goto out;
        }

        if (f2fs_need_inode_block_update(sbi, ino)) {
                f2fs_mark_inode_dirty_sync(inode, true);
                f2fs_write_inode(inode, NULL);
                goto sync_nodes;
        }

        /*
         * For atomic writes it is fine to keep the write ordering, so we
         * don't need to wait for node write completion here: the node chain
         * serializes node blocks. If any node write is reordered, recovery
         * simply sees a broken chain and stops roll-forward there, meaning
         * we recover either all or none of the node blocks up to the fsync
         * mark.
         */
        if (!atomic) {
                ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
                if (ret)
                        goto out;
        }

        /* once recovery info is written, don't need to track this */
        f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
        clear_inode_flag(inode, FI_APPEND_WRITE);
flush_out:
        if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
                ret = f2fs_issue_flush(sbi, inode->i_ino);
        if (!ret) {
                f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
                clear_inode_flag(inode, FI_UPDATE_WRITE);
                f2fs_remove_ino_entry(sbi, ino, FLUSH_INO);
        }
        f2fs_update_time(sbi, REQ_TIME);
out:
        trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
        return ret;
}

int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
        if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
                return -EIO;
        return f2fs_do_sync_file(file, start, end, datasync, false);
}

static bool __found_offset(struct address_space *mapping, block_t blkaddr,
                                pgoff_t index, int whence)
{
        switch (whence) {
        case SEEK_DATA:
                if (__is_valid_data_blkaddr(blkaddr))
                        return true;
                if (blkaddr == NEW_ADDR &&
                    xa_get_mark(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY))
                        return true;
                break;
        case SEEK_HOLE:
                if (blkaddr == NULL_ADDR)
                        return true;
                break;
        }
        return false;
}

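/*
 * Implement SEEK_DATA/SEEK_HOLE by walking the dnode blocks covering the
 * file range and testing each block address with __found_offset(); dirty
 * pages over NEW_ADDR blocks also count as data.
 */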
static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
        struct inode *inode = file->f_mapping->host;
        loff_t maxbytes = inode->i_sb->s_maxbytes;
        struct dnode_of_data dn;
        pgoff_t pgofs, end_offset;
        loff_t data_ofs = offset;
        loff_t isize;
        int err = 0;

        inode_lock(inode);

        isize = i_size_read(inode);
        if (offset >= isize)
                goto fail;

        /* handle inline data case */
        if (f2fs_has_inline_data(inode)) {
                if (whence == SEEK_HOLE) {
                        data_ofs = isize;
                        goto found;
                } else if (whence == SEEK_DATA) {
                        data_ofs = offset;
                        goto found;
                }
        }

        pgofs = (pgoff_t)(offset >> PAGE_SHIFT);

        for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
                set_new_dnode(&dn, inode, NULL, NULL, 0);
                err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
                if (err && err != -ENOENT) {
                        goto fail;
                } else if (err == -ENOENT) {
                        /* direct node does not exist */
                        if (whence == SEEK_DATA) {
                                pgofs = f2fs_get_next_page_offset(&dn, pgofs);
                                continue;
                        } else {
                                goto found;
                        }
                }

                end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

                /* find data/hole in dnode block */
                for (; dn.ofs_in_node < end_offset;
                                dn.ofs_in_node++, pgofs++,
                                data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
                        block_t blkaddr;

                        blkaddr = f2fs_data_blkaddr(&dn);

                        if (__is_valid_data_blkaddr(blkaddr) &&
                                !f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
                                        blkaddr, DATA_GENERIC_ENHANCE)) {
                                f2fs_put_dnode(&dn);
                                goto fail;
                        }

                        if (__found_offset(file->f_mapping, blkaddr,
                                                        pgofs, whence)) {
                                f2fs_put_dnode(&dn);
                                goto found;
                        }
                }
                f2fs_put_dnode(&dn);
        }

        if (whence == SEEK_DATA)
                goto fail;
found:
        if (whence == SEEK_HOLE && data_ofs > isize)
                data_ofs = isize;
        inode_unlock(inode);
        return vfs_setpos(file, data_ofs, maxbytes);
fail:
        inode_unlock(inode);
        return -ENXIO;
}

static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
        struct inode *inode = file->f_mapping->host;
        loff_t maxbytes = inode->i_sb->s_maxbytes;

        if (f2fs_compressed_file(inode))
                maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;

        switch (whence) {
        case SEEK_SET:
        case SEEK_CUR:
        case SEEK_END:
                return generic_file_llseek_size(file, offset, whence,
                                                maxbytes, i_size_read(inode));
        case SEEK_DATA:
        case SEEK_HOLE:
                if (offset < 0)
                        return -ENXIO;
                return f2fs_seek_block(file, offset, whence);
        }

        return -EINVAL;
}

static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct inode *inode = file_inode(file);

        if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
                return -EIO;

        if (!f2fs_is_compress_backend_ready(inode))
                return -EOPNOTSUPP;

        file_accessed(file);
        vma->vm_ops = &f2fs_file_vm_ops;
        set_inode_flag(inode, FI_MMAP_FILE);
        return 0;
}

static int f2fs_file_open(struct inode *inode, struct file *filp)
{
        int err = fscrypt_file_open(inode, filp);

        if (err)
                return err;

        if (!f2fs_is_compress_backend_ready(inode))
                return -EOPNOTSUPP;

        err = fsverity_file_open(inode, filp);
        if (err)
                return err;

        filp->f_mode |= FMODE_NOWAIT;

        return dquot_file_open(inode, filp);
}

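/*
 * Invalidate 'count' block addresses starting at dn->ofs_in_node. For
 * compressed files this also tracks how many valid blocks each cluster
 * loses so i_compr_blocks stays accurate; the extent cache and valid-block
 * count are updated once at the end.
 */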
void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
        struct f2fs_node *raw_node;
        int nr_free = 0, ofs = dn->ofs_in_node, len = count;
        __le32 *addr;
        int base = 0;
        bool compressed_cluster = false;
        int cluster_index = 0, valid_blocks = 0;
        int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
        bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);

        if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
                base = get_extra_isize(dn->inode);

        raw_node = F2FS_NODE(dn->node_page);
        addr = blkaddr_in_node(raw_node) + base + ofs;

        /* Assumption: truncation starts at a cluster boundary */
        for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
                block_t blkaddr = le32_to_cpu(*addr);

                if (f2fs_compressed_file(dn->inode) &&
                                        !(cluster_index & (cluster_size - 1))) {
                        if (compressed_cluster)
                                f2fs_i_compr_blocks_update(dn->inode,
                                                        valid_blocks, false);
                        compressed_cluster = (blkaddr == COMPRESS_ADDR);
                        valid_blocks = 0;
                }

                if (blkaddr == NULL_ADDR)
                        continue;

                dn->data_blkaddr = NULL_ADDR;
                f2fs_set_data_blkaddr(dn);

                if (__is_valid_data_blkaddr(blkaddr)) {
                        if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
                                        DATA_GENERIC_ENHANCE))
                                continue;
                        if (compressed_cluster)
                                valid_blocks++;
                }

                if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
                        clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);

                f2fs_invalidate_blocks(sbi, blkaddr);

                if (!released || blkaddr != COMPRESS_ADDR)
                        nr_free++;
        }

        if (compressed_cluster)
                f2fs_i_compr_blocks_update(dn->inode, valid_blocks, false);

        if (nr_free) {
                pgoff_t fofs;
                /*
                 * once we invalidate valid blkaddr in range [ofs, ofs + count],
                 * we will invalidate all blkaddr in the whole range.
                 */
                fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
                                                        dn->inode) + ofs;
                f2fs_update_extent_cache_range(dn, fofs, 0, len);
                dec_valid_block_count(sbi, dn->inode, nr_free);
        }
        dn->ofs_in_node = ofs;

        f2fs_update_time(sbi, REQ_TIME);
        trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
                                         dn->ofs_in_node, nr_free);
}

void f2fs_truncate_data_blocks(struct dnode_of_data *dn)
{
        f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
}

static int truncate_partial_data_page(struct inode *inode, u64 from,
                                                                bool cache_only)
{
        loff_t offset = from & (PAGE_SIZE - 1);
        pgoff_t index = from >> PAGE_SHIFT;
        struct address_space *mapping = inode->i_mapping;
        struct page *page;

        if (!offset && !cache_only)
                return 0;

        if (cache_only) {
                page = find_lock_page(mapping, index);
                if (page && PageUptodate(page))
                        goto truncate_out;
                f2fs_put_page(page, 1);
                return 0;
        }

        page = f2fs_get_lock_data_page(inode, index, true);
        if (IS_ERR(page))
                return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
truncate_out:
        f2fs_wait_on_page_writeback(page, DATA, true, true);
        zero_user(page, offset, PAGE_SIZE - offset);

        /* An encrypted inode should have a key and must truncate the last page. */
        f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));
        if (!cache_only)
                set_page_dirty(page);
        f2fs_put_page(page, 1);
        return 0;
}

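/*
 * Free all blocks from the block-aligned offset 'from' to the end of the
 * file: truncate inline data in place, trim the partially covered dnode,
 * drop the remaining node blocks, and finally zero out the partial last
 * page.
 */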
int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct dnode_of_data dn;
        pgoff_t free_from;
        int count = 0, err = 0;
        struct page *ipage;
        bool truncate_page = false;

        trace_f2fs_truncate_blocks_enter(inode, from);

        free_from = (pgoff_t)F2FS_BLK_ALIGN(from);

        if (free_from >= max_file_blocks(inode))
                goto free_partial;

        if (lock)
                f2fs_lock_op(sbi);

        ipage = f2fs_get_node_page(sbi, inode->i_ino);
        if (IS_ERR(ipage)) {
                err = PTR_ERR(ipage);
                goto out;
        }

        if (f2fs_has_inline_data(inode)) {
                f2fs_truncate_inline_inode(inode, ipage, from);
                f2fs_put_page(ipage, 1);
                truncate_page = true;
                goto out;
        }

        set_new_dnode(&dn, inode, ipage, NULL, 0);
        err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
        if (err) {
                if (err == -ENOENT)
                        goto free_next;
                goto out;
        }

        count = ADDRS_PER_PAGE(dn.node_page, inode);

        count -= dn.ofs_in_node;
        f2fs_bug_on(sbi, count < 0);

        if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
                f2fs_truncate_data_blocks_range(&dn, count);
                free_from += count;
        }

        f2fs_put_dnode(&dn);
free_next:
        err = f2fs_truncate_inode_blocks(inode, free_from);
out:
        if (lock)
                f2fs_unlock_op(sbi);
free_partial:
        /* lastly zero out the partially truncated data page */
        if (!err)
                err = truncate_partial_data_page(inode, from, truncate_page);

        trace_f2fs_truncate_blocks_exit(inode, err);
        return err;
}

int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
{
        u64 free_from = from;
        int err;

#ifdef CONFIG_F2FS_FS_COMPRESSION
        /*
         * For compressed files, only cluster-size-aligned truncation is
         * supported.
         */
        if (f2fs_compressed_file(inode))
                free_from = round_up(from,
                                F2FS_I(inode)->i_cluster_size << PAGE_SHIFT);
#endif

        err = f2fs_do_truncate_blocks(inode, free_from, lock);
        if (err)
                return err;

#ifdef CONFIG_F2FS_FS_COMPRESSION
        if (from != free_from) {
                err = f2fs_truncate_partial_cluster(inode, from, lock);
                if (err)
                        return err;
        }
#endif

        return 0;
}

int f2fs_truncate(struct inode *inode)
{
        int err;

        if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
                return -EIO;

        if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
                                S_ISLNK(inode->i_mode)))
                return 0;

        trace_f2fs_truncate(inode);

        if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
                f2fs_show_injection_info(F2FS_I_SB(inode), FAULT_TRUNCATE);
                return -EIO;
        }

        err = dquot_initialize(inode);
        if (err)
                return err;

        /* we should check inline_data size */
        if (!f2fs_may_inline_data(inode)) {
                err = f2fs_convert_inline_inode(inode);
                if (err)
                        return err;
        }

        err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
        if (err)
                return err;

        inode->i_mtime = inode->i_ctime = current_time(inode);
        f2fs_mark_inode_dirty_sync(inode, false);
        return 0;
}

int f2fs_getattr(struct user_namespace *mnt_userns, const struct path *path,
                 struct kstat *stat, u32 request_mask, unsigned int query_flags)
{
        struct inode *inode = d_inode(path->dentry);
        struct f2fs_inode_info *fi = F2FS_I(inode);
        struct f2fs_inode *ri;
        unsigned int flags;

        if (f2fs_has_extra_attr(inode) &&
                        f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
                        F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
                stat->result_mask |= STATX_BTIME;
                stat->btime.tv_sec = fi->i_crtime.tv_sec;
                stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
        }

        flags = fi->i_flags;
        if (flags & F2FS_COMPR_FL)
                stat->attributes |= STATX_ATTR_COMPRESSED;
        if (flags & F2FS_APPEND_FL)
                stat->attributes |= STATX_ATTR_APPEND;
        if (IS_ENCRYPTED(inode))
                stat->attributes |= STATX_ATTR_ENCRYPTED;
        if (flags & F2FS_IMMUTABLE_FL)
                stat->attributes |= STATX_ATTR_IMMUTABLE;
        if (flags & F2FS_NODUMP_FL)
                stat->attributes |= STATX_ATTR_NODUMP;
        if (IS_VERITY(inode))
                stat->attributes |= STATX_ATTR_VERITY;

        stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
                                  STATX_ATTR_APPEND |
                                  STATX_ATTR_ENCRYPTED |
                                  STATX_ATTR_IMMUTABLE |
                                  STATX_ATTR_NODUMP |
                                  STATX_ATTR_VERITY);

        generic_fillattr(&init_user_ns, inode, stat);

        /* we need to show initial sectors used for inline_data/dentries */
        if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
                                        f2fs_has_inline_dentry(inode))
                stat->blocks += (stat->size + 511) >> 9;

        return 0;
}

#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct user_namespace *mnt_userns,
                           struct inode *inode, const struct iattr *attr)
{
        unsigned int ia_valid = attr->ia_valid;

        if (ia_valid & ATTR_UID)
                inode->i_uid = attr->ia_uid;
        if (ia_valid & ATTR_GID)
                inode->i_gid = attr->ia_gid;
        if (ia_valid & ATTR_ATIME)
                inode->i_atime = attr->ia_atime;
        if (ia_valid & ATTR_MTIME)
                inode->i_mtime = attr->ia_mtime;
        if (ia_valid & ATTR_CTIME)
                inode->i_ctime = attr->ia_ctime;
        if (ia_valid & ATTR_MODE) {
                umode_t mode = attr->ia_mode;
                kgid_t kgid = i_gid_into_mnt(mnt_userns, inode);

                if (!in_group_p(kgid) && !capable_wrt_inode_uidgid(mnt_userns, inode, CAP_FSETID))
                        mode &= ~S_ISGID;
                set_acl_inode(inode, mode);
        }
}
#else
#define __setattr_copy setattr_copy
#endif

int f2fs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
                 struct iattr *attr)
{
        struct inode *inode = d_inode(dentry);
        int err;

        if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
                return -EIO;

        if (unlikely(IS_IMMUTABLE(inode)))
                return -EPERM;

        if (unlikely(IS_APPEND(inode) &&
                        (attr->ia_valid & (ATTR_MODE | ATTR_UID |
                                  ATTR_GID | ATTR_TIMES_SET))))
                return -EPERM;

        if ((attr->ia_valid & ATTR_SIZE) &&
                !f2fs_is_compress_backend_ready(inode))
                return -EOPNOTSUPP;

        err = setattr_prepare(&init_user_ns, dentry, attr);
        if (err)
                return err;

        err = fscrypt_prepare_setattr(dentry, attr);
        if (err)
                return err;

        err = fsverity_prepare_setattr(dentry, attr);
        if (err)
                return err;

        if (is_quota_modification(inode, attr)) {
                err = dquot_initialize(inode);
                if (err)
                        return err;
        }
        if ((attr->ia_valid & ATTR_UID &&
                !uid_eq(attr->ia_uid, inode->i_uid)) ||
                (attr->ia_valid & ATTR_GID &&
                !gid_eq(attr->ia_gid, inode->i_gid))) {
                f2fs_lock_op(F2FS_I_SB(inode));
                err = dquot_transfer(inode, attr);
                if (err) {
                        set_sbi_flag(F2FS_I_SB(inode),
                                        SBI_QUOTA_NEED_REPAIR);
                        f2fs_unlock_op(F2FS_I_SB(inode));
                        return err;
                }
                /*
                 * update uid/gid under lock_op(), so that dquot and inode can
                 * be updated atomically.
                 */
                if (attr->ia_valid & ATTR_UID)
                        inode->i_uid = attr->ia_uid;
                if (attr->ia_valid & ATTR_GID)
                        inode->i_gid = attr->ia_gid;
                f2fs_mark_inode_dirty_sync(inode, true);
                f2fs_unlock_op(F2FS_I_SB(inode));
        }

        if (attr->ia_valid & ATTR_SIZE) {
                loff_t old_size = i_size_read(inode);

                if (attr->ia_size > MAX_INLINE_DATA(inode)) {
                        /*
                         * Convert the inline inode before i_size_write(); an
                         * inode keeping the inline flag must stay smaller
                         * than the inline_data size.
                         */
                        err = f2fs_convert_inline_inode(inode);
                        if (err)
                                return err;
                }

                down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
                filemap_invalidate_lock(inode->i_mapping);

                truncate_setsize(inode, attr->ia_size);

                if (attr->ia_size <= old_size)
                        err = f2fs_truncate(inode);
                /*
                 * do not trim all blocks after i_size if target size is
                 * larger than i_size.
                 */
                filemap_invalidate_unlock(inode->i_mapping);
                up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
                if (err)
                        return err;

                spin_lock(&F2FS_I(inode)->i_size_lock);
                inode->i_mtime = inode->i_ctime = current_time(inode);
                F2FS_I(inode)->last_disk_size = i_size_read(inode);
                spin_unlock(&F2FS_I(inode)->i_size_lock);
        }

        __setattr_copy(&init_user_ns, inode, attr);

        if (attr->ia_valid & ATTR_MODE) {
                err = posix_acl_chmod(&init_user_ns, inode, f2fs_get_inode_mode(inode));

                if (is_inode_flag_set(inode, FI_ACL_MODE)) {
                        if (!err)
                                inode->i_mode = F2FS_I(inode)->i_acl_mode;
                        clear_inode_flag(inode, FI_ACL_MODE);
                }
        }

        /* file size may have changed here */
        f2fs_mark_inode_dirty_sync(inode, true);

        /* inode change will produce dirty node pages flushed by checkpoint */
        f2fs_balance_fs(F2FS_I_SB(inode), true);

        return err;
}

const struct inode_operations f2fs_file_inode_operations = {
        .getattr        = f2fs_getattr,
        .setattr        = f2fs_setattr,
        .get_acl        = f2fs_get_acl,
        .set_acl        = f2fs_set_acl,
        .listxattr      = f2fs_listxattr,
        .fiemap         = f2fs_fiemap,
        .fileattr_get   = f2fs_fileattr_get,
        .fileattr_set   = f2fs_fileattr_set,
};

static int fill_zero(struct inode *inode, pgoff_t index,
                                        loff_t start, loff_t len)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct page *page;

        if (!len)
                return 0;

        f2fs_balance_fs(sbi, true);

        f2fs_lock_op(sbi);
        page = f2fs_get_new_data_page(inode, NULL, index, false);
        f2fs_unlock_op(sbi);

        if (IS_ERR(page))
                return PTR_ERR(page);

        f2fs_wait_on_page_writeback(page, DATA, true, true);
        zero_user(page, start, len);
        set_page_dirty(page);
        f2fs_put_page(page, 1);
        return 0;
}

int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
        int err;

        while (pg_start < pg_end) {
                struct dnode_of_data dn;
                pgoff_t end_offset, count;

                set_new_dnode(&dn, inode, NULL, NULL, 0);
                err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
                if (err) {
                        if (err == -ENOENT) {
                                pg_start = f2fs_get_next_page_offset(&dn,
                                                                pg_start);
                                continue;
                        }
                        return err;
                }

                end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
                count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);

                f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);

                f2fs_truncate_data_blocks_range(&dn, count);
                f2fs_put_dnode(&dn);

                pg_start += count;
        }
        return 0;
}

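/*
 * FALLOC_FL_PUNCH_HOLE: zero the partial pages at both edges of the range
 * with fill_zero(), then drop the whole pages in between via
 * f2fs_truncate_hole() while holding i_gc_rwsem and the invalidate lock.
 */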
static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
        pgoff_t pg_start, pg_end;
        loff_t off_start, off_end;
        int ret;

        ret = f2fs_convert_inline_inode(inode);
        if (ret)
                return ret;

        pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
        pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

        off_start = offset & (PAGE_SIZE - 1);
        off_end = (offset + len) & (PAGE_SIZE - 1);

        if (pg_start == pg_end) {
                ret = fill_zero(inode, pg_start, off_start,
                                                off_end - off_start);
                if (ret)
                        return ret;
        } else {
                if (off_start) {
                        ret = fill_zero(inode, pg_start++, off_start,
                                                PAGE_SIZE - off_start);
                        if (ret)
                                return ret;
                }
                if (off_end) {
                        ret = fill_zero(inode, pg_end, 0, off_end);
                        if (ret)
                                return ret;
                }

                if (pg_start < pg_end) {
                        struct address_space *mapping = inode->i_mapping;
                        loff_t blk_start, blk_end;
                        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

                        f2fs_balance_fs(sbi, true);

                        blk_start = (loff_t)pg_start << PAGE_SHIFT;
                        blk_end = (loff_t)pg_end << PAGE_SHIFT;

                        down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
                        filemap_invalidate_lock(mapping);

                        truncate_inode_pages_range(mapping, blk_start,
                                        blk_end - 1);

                        f2fs_lock_op(sbi);
                        ret = f2fs_truncate_hole(inode, pg_start, pg_end);
                        f2fs_unlock_op(sbi);

                        filemap_invalidate_unlock(mapping);
                        up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
                }
        }

        return ret;
}

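/*
 * First stage of a block exchange: record the block address of every page
 * in [off, off + len) and, for non-checkpointed blocks, detach the address
 * from the dnode (marking do_replace) so it can be rebound at the
 * destination without being invalidated.
 */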
static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
                                int *do_replace, pgoff_t off, pgoff_t len)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct dnode_of_data dn;
        int ret, done, i;

next_dnode:
        set_new_dnode(&dn, inode, NULL, NULL, 0);
        ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
        if (ret && ret != -ENOENT) {
                return ret;
        } else if (ret == -ENOENT) {
                if (dn.max_level == 0)
                        return -ENOENT;
                done = min((pgoff_t)ADDRS_PER_BLOCK(inode) -
                                                dn.ofs_in_node, len);
                blkaddr += done;
                do_replace += done;
                goto next;
        }

        done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
                                                        dn.ofs_in_node, len);
        for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
                *blkaddr = f2fs_data_blkaddr(&dn);

                if (__is_valid_data_blkaddr(*blkaddr) &&
                        !f2fs_is_valid_blkaddr(sbi, *blkaddr,
                                        DATA_GENERIC_ENHANCE)) {
                        f2fs_put_dnode(&dn);
                        return -EFSCORRUPTED;
                }

                if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {

                        if (f2fs_lfs_mode(sbi)) {
                                f2fs_put_dnode(&dn);
                                return -EOPNOTSUPP;
                        }

                        /* do not invalidate this block address */
                        f2fs_update_data_blkaddr(&dn, NULL_ADDR);
                        *do_replace = 1;
                }
        }
        f2fs_put_dnode(&dn);
next:
        len -= done;
        off += done;
        if (len)
                goto next_dnode;
        return 0;
}

static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
                                int *do_replace, pgoff_t off, int len)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct dnode_of_data dn;
        int ret, i;

        for (i = 0; i < len; i++, do_replace++, blkaddr++) {
                if (*do_replace == 0)
                        continue;

                set_new_dnode(&dn, inode, NULL, NULL, 0);
                ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
                if (ret) {
                        dec_valid_block_count(sbi, inode, 1);
                        f2fs_invalidate_blocks(sbi, *blkaddr);
                } else {
                        f2fs_update_data_blkaddr(&dn, *blkaddr);
                }
                f2fs_put_dnode(&dn);
        }
        return 0;
}

static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
                        block_t *blkaddr, int *do_replace,
                        pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
        pgoff_t i = 0;
        int ret;

        while (i < len) {
                if (blkaddr[i] == NULL_ADDR && !full) {
                        i++;
                        continue;
                }

                if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
                        struct dnode_of_data dn;
                        struct node_info ni;
                        size_t new_size;
                        pgoff_t ilen;

                        set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
                        ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
                        if (ret)
                                return ret;

                        ret = f2fs_get_node_info(sbi, dn.nid, &ni);
                        if (ret) {
                                f2fs_put_dnode(&dn);
                                return ret;
                        }

                        ilen = min((pgoff_t)
                                ADDRS_PER_PAGE(dn.node_page, dst_inode) -
                                                dn.ofs_in_node, len - i);
                        do {
                                dn.data_blkaddr = f2fs_data_blkaddr(&dn);
                                f2fs_truncate_data_blocks_range(&dn, 1);

                                if (do_replace[i]) {
                                        f2fs_i_blocks_write(src_inode,
                                                        1, false, false);
                                        f2fs_i_blocks_write(dst_inode,
                                                        1, true, false);
                                        f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
                                        blkaddr[i], ni.version, true, false);

                                        do_replace[i] = 0;
                                }
                                dn.ofs_in_node++;
                                i++;
                                new_size = (loff_t)(dst + i) << PAGE_SHIFT;
                                if (dst_inode->i_size < new_size)
                                        f2fs_i_size_write(dst_inode, new_size);
                        } while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));

                        f2fs_put_dnode(&dn);
                } else {
                        struct page *psrc, *pdst;

                        psrc = f2fs_get_lock_data_page(src_inode,
                                                        src + i, true);
                        if (IS_ERR(psrc))
                                return PTR_ERR(psrc);
                        pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i,
                                                                true);
                        if (IS_ERR(pdst)) {
                                f2fs_put_page(psrc, 1);
                                return PTR_ERR(pdst);
                        }
                        f2fs_copy_page(psrc, pdst);
                        set_page_dirty(pdst);
                        f2fs_put_page(pdst, 1);
                        f2fs_put_page(psrc, 1);

                        ret = f2fs_truncate_hole(src_inode,
                                                src + i, src + i + 1);
                        if (ret)
                                return ret;
                        i++;
                }
        }
        return 0;
}

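/*
 * Move 'len' blocks from 'src' to 'dst', processing up to four dnodes'
 * worth of addresses per batch; on failure the already-detached addresses
 * are restored by __roll_back_blkaddrs().
 */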
static int __exchange_data_block(struct inode *src_inode,
                        struct inode *dst_inode, pgoff_t src, pgoff_t dst,
                        pgoff_t len, bool full)
{
        block_t *src_blkaddr;
        int *do_replace;
        pgoff_t olen;
        int ret;

        while (len) {
                olen = min((pgoff_t)4 * ADDRS_PER_BLOCK(src_inode), len);

                src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
                                        array_size(olen, sizeof(block_t)),
                                        GFP_NOFS);
                if (!src_blkaddr)
                        return -ENOMEM;

                do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
                                        array_size(olen, sizeof(int)),
                                        GFP_NOFS);
                if (!do_replace) {
                        kvfree(src_blkaddr);
                        return -ENOMEM;
                }

                ret = __read_out_blkaddrs(src_inode, src_blkaddr,
                                        do_replace, src, olen);
                if (ret)
                        goto roll_back;

                ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
                                        do_replace, src, dst, olen, full);
                if (ret)
                        goto roll_back;

                src += olen;
                dst += olen;
                len -= olen;

                kvfree(src_blkaddr);
                kvfree(do_replace);
        }
        return 0;

roll_back:
        __roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen);
        kvfree(src_blkaddr);
        kvfree(do_replace);
        return ret;
}

static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
        pgoff_t start = offset >> PAGE_SHIFT;
        pgoff_t end = (offset + len) >> PAGE_SHIFT;
        int ret;

        f2fs_balance_fs(sbi, true);

        /* avoid gc operation during block exchange */
        down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
        filemap_invalidate_lock(inode->i_mapping);

        f2fs_lock_op(sbi);
        f2fs_drop_extent_tree(inode);
        truncate_pagecache(inode, offset);
        ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
        f2fs_unlock_op(sbi);

        filemap_invalidate_unlock(inode->i_mapping);
        up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
        return ret;
}

static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
        loff_t new_size;
        int ret;

        if (offset + len >= i_size_read(inode))
                return -EINVAL;

        /* collapse range must be aligned to the f2fs block size */
        if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
                return -EINVAL;

        ret = f2fs_convert_inline_inode(inode);
        if (ret)
                return ret;

        /* write out all dirty pages from offset */
        ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
        if (ret)
                return ret;

        ret = f2fs_do_collapse(inode, offset, len);
        if (ret)
                return ret;

        /* write out all moved pages, if possible */
        filemap_invalidate_lock(inode->i_mapping);
        filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
        truncate_pagecache(inode, offset);

        new_size = i_size_read(inode) - len;
        ret = f2fs_truncate_blocks(inode, new_size, true);
        filemap_invalidate_unlock(inode->i_mapping);
        if (!ret)
                f2fs_i_size_write(inode, new_size);
        return ret;
}

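/*
 * Reserve new blocks for every hole in [start, end) within one dnode, then
 * convert each allocated address to NEW_ADDR (invalidating any old block)
 * so that subsequent reads see zeroes.
 */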
static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
                                                                pgoff_t end)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
        pgoff_t index = start;
        unsigned int ofs_in_node = dn->ofs_in_node;
        blkcnt_t count = 0;
        int ret;

        for (; index < end; index++, dn->ofs_in_node++) {
                if (f2fs_data_blkaddr(dn) == NULL_ADDR)
                        count++;
        }

        dn->ofs_in_node = ofs_in_node;
        ret = f2fs_reserve_new_blocks(dn, count);
        if (ret)
                return ret;

        dn->ofs_in_node = ofs_in_node;
        for (index = start; index < end; index++, dn->ofs_in_node++) {
                dn->data_blkaddr = f2fs_data_blkaddr(dn);
                /*
                 * f2fs_reserve_new_blocks will not guarantee entire block
                 * allocation.
                 */
                if (dn->data_blkaddr == NULL_ADDR) {
                        ret = -ENOSPC;
                        break;
                }
                if (dn->data_blkaddr != NEW_ADDR) {
                        f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
                        dn->data_blkaddr = NEW_ADDR;
                        f2fs_set_data_blkaddr(dn);
                }
        }

        f2fs_update_extent_cache_range(dn, start, 0, index - start);

        return ret;
}

static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
                                                                int mode)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct address_space *mapping = inode->i_mapping;
        pgoff_t index, pg_start, pg_end;
        loff_t new_size = i_size_read(inode);
        loff_t off_start, off_end;
        int ret = 0;

        ret = inode_newsize_ok(inode, (len + offset));
        if (ret)
                return ret;

        ret = f2fs_convert_inline_inode(inode);
        if (ret)
                return ret;

        ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
        if (ret)
                return ret;

        pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
        pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

        off_start = offset & (PAGE_SIZE - 1);
        off_end = (offset + len) & (PAGE_SIZE - 1);

        if (pg_start == pg_end) {
                ret = fill_zero(inode, pg_start, off_start,
                                                off_end - off_start);
                if (ret)
                        return ret;

                new_size = max_t(loff_t, new_size, offset + len);
        } else {
                if (off_start) {
                        ret = fill_zero(inode, pg_start++, off_start,
                                                PAGE_SIZE - off_start);
                        if (ret)
                                return ret;

                        new_size = max_t(loff_t, new_size,
                                        (loff_t)pg_start << PAGE_SHIFT);
                }

                for (index = pg_start; index < pg_end;) {
                        struct dnode_of_data dn;
                        unsigned int end_offset;
                        pgoff_t end;

                        down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
                        filemap_invalidate_lock(mapping);

                        truncate_pagecache_range(inode,
                                (loff_t)index << PAGE_SHIFT,
                                ((loff_t)pg_end << PAGE_SHIFT) - 1);

                        f2fs_lock_op(sbi);

                        set_new_dnode(&dn, inode, NULL, NULL, 0);
                        ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
                        if (ret) {
                                f2fs_unlock_op(sbi);
                                filemap_invalidate_unlock(mapping);
                                up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
                                goto out;
                        }

                        end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
                        end = min(pg_end, end_offset - dn.ofs_in_node + index);

                        ret = f2fs_do_zero_range(&dn, index, end);
                        f2fs_put_dnode(&dn);

                        f2fs_unlock_op(sbi);
                        filemap_invalidate_unlock(mapping);
                        up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

                        f2fs_balance_fs(sbi, dn.node_changed);

                        if (ret)
                                goto out;

                        index = end;
                        new_size = max_t(loff_t, new_size,
                                        (loff_t)index << PAGE_SHIFT);
                }

                if (off_end) {
                        ret = fill_zero(inode, pg_end, 0, off_end);
                        if (ret)
                                goto out;

                        new_size = max_t(loff_t, new_size, offset + len);
                }
        }

out:
        if (new_size > i_size_read(inode)) {
                if (mode & FALLOC_FL_KEEP_SIZE)
                        file_set_keep_isize(inode);
                else
                        f2fs_i_size_write(inode, new_size);
        }
        return ret;
}

static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct address_space *mapping = inode->i_mapping;
        pgoff_t nr, pg_start, pg_end, delta, idx;
        loff_t new_size;
        int ret = 0;

        new_size = i_size_read(inode) + len;
        ret = inode_newsize_ok(inode, new_size);
        if (ret)
                return ret;

        if (offset >= i_size_read(inode))
                return -EINVAL;

1556         /* the insert range must be aligned to the f2fs block size. */
1557         if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
1558                 return -EINVAL;
1559
1560         ret = f2fs_convert_inline_inode(inode);
1561         if (ret)
1562                 return ret;
1563
1564         f2fs_balance_fs(sbi, true);
1565
1566         filemap_invalidate_lock(mapping);
1567         ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
1568         filemap_invalidate_unlock(mapping);
1569         if (ret)
1570                 return ret;
1571
1572         /* write out all dirty pages from offset */
1573         ret = filemap_write_and_wait_range(mapping, offset, LLONG_MAX);
1574         if (ret)
1575                 return ret;
1576
1577         pg_start = offset >> PAGE_SHIFT;
1578         pg_end = (offset + len) >> PAGE_SHIFT;
1579         delta = pg_end - pg_start;
1580         idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1581
1582         /* avoid gc operation during block exchange */
1583         down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1584         filemap_invalidate_lock(mapping);
1585         truncate_pagecache(inode, offset);
1586
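        /*
         * Shift blocks toward the larger offsets starting from the tail of
         * the file, moving at most @delta pages per pass so that the source
         * and destination ranges never overlap.
         */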
1587         while (!ret && idx > pg_start) {
1588                 nr = idx - pg_start;
1589                 if (nr > delta)
1590                         nr = delta;
1591                 idx -= nr;
1592
1593                 f2fs_lock_op(sbi);
1594                 f2fs_drop_extent_tree(inode);
1595
1596                 ret = __exchange_data_block(inode, inode, idx,
1597                                         idx + delta, nr, false);
1598                 f2fs_unlock_op(sbi);
1599         }
1600         filemap_invalidate_unlock(mapping);
1601         up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1602
1603         /* write out all moved pages, if possible */
1604         filemap_invalidate_lock(mapping);
1605         filemap_write_and_wait_range(mapping, offset, LLONG_MAX);
1606         truncate_pagecache(inode, offset);
1607         filemap_invalidate_unlock(mapping);
1608
1609         if (!ret)
1610                 f2fs_i_size_write(inode, new_size);
1611         return ret;
1612 }
1613
1614 static int expand_inode_data(struct inode *inode, loff_t offset,
1615                                         loff_t len, int mode)
1616 {
1617         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1618         struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
1619                         .m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
1620                         .m_may_create = true };
1621         pgoff_t pg_start, pg_end;
1622         loff_t new_size = i_size_read(inode);
1623         loff_t off_end;
1624         block_t expanded = 0;
1625         int err;
1626
1627         err = inode_newsize_ok(inode, (len + offset));
1628         if (err)
1629                 return err;
1630
1631         err = f2fs_convert_inline_inode(inode);
1632         if (err)
1633                 return err;
1634
1635         f2fs_balance_fs(sbi, true);
1636
1637         pg_start = ((unsigned long long)offset) >> PAGE_SHIFT;
1638         pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
1639         off_end = (offset + len) & (PAGE_SIZE - 1);
1640
1641         map.m_lblk = pg_start;
1642         map.m_len = pg_end - pg_start;
1643         if (off_end)
1644                 map.m_len++;
1645
1646         if (!map.m_len)
1647                 return 0;
1648
1649         if (f2fs_is_pinned_file(inode)) {
1650                 block_t sec_blks = BLKS_PER_SEC(sbi);
1651                 block_t sec_len = roundup(map.m_len, sec_blks);
1652
1653                 map.m_len = sec_blks;
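                /*
                 * Pinned files are allocated one full section at a time from
                 * the dedicated cold-pinned log, keeping their blocks
                 * physically contiguous and out of GC's reach.
                 */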
1654 next_alloc:
1655                 if (has_not_enough_free_secs(sbi, 0,
1656                         GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
1657                         down_write(&sbi->gc_lock);
1658                         err = f2fs_gc(sbi, true, false, false, NULL_SEGNO);
1659                         if (err && err != -ENODATA && err != -EAGAIN)
1660                                 goto out_err;
1661                 }
1662
1663                 down_write(&sbi->pin_sem);
1664
1665                 f2fs_lock_op(sbi);
1666                 f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
1667                 f2fs_unlock_op(sbi);
1668
1669                 map.m_seg_type = CURSEG_COLD_DATA_PINNED;
1670                 err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
1671
1672                 up_write(&sbi->pin_sem);
1673
1674                 expanded += map.m_len;
1675                 sec_len -= map.m_len;
1676                 map.m_lblk += map.m_len;
1677                 if (!err && sec_len)
1678                         goto next_alloc;
1679
1680                 map.m_len = expanded;
1681         } else {
1682                 err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
1683                 expanded = map.m_len;
1684         }
1685 out_err:
1686         if (err) {
1687                 pgoff_t last_off;
1688
1689                 if (!expanded)
1690                         return err;
1691
1692                 last_off = pg_start + expanded - 1;
1693
1694                 /* update new size to the failed position */
1695                 new_size = (last_off == pg_end) ? offset + len :
1696                                         (loff_t)(last_off + 1) << PAGE_SHIFT;
1697         } else {
1698                 new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
1699         }
1700
1701         if (new_size > i_size_read(inode)) {
1702                 if (mode & FALLOC_FL_KEEP_SIZE)
1703                         file_set_keep_isize(inode);
1704                 else
1705                         f2fs_i_size_write(inode, new_size);
1706         }
1707
1708         return err;
1709 }
1710
1711 static long f2fs_fallocate(struct file *file, int mode,
1712                                 loff_t offset, loff_t len)
1713 {
1714         struct inode *inode = file_inode(file);
1715         long ret = 0;
1716
1717         if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
1718                 return -EIO;
1719         if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
1720                 return -ENOSPC;
1721         if (!f2fs_is_compress_backend_ready(inode))
1722                 return -EOPNOTSUPP;
1723
1724         /* f2fs only supports ->fallocate for regular files */
1725         if (!S_ISREG(inode->i_mode))
1726                 return -EINVAL;
1727
1728         if (IS_ENCRYPTED(inode) &&
1729                 (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
1730                 return -EOPNOTSUPP;
1731
1732         if (f2fs_compressed_file(inode) &&
1733                 (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
1734                         FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE)))
1735                 return -EOPNOTSUPP;
1736
1737         if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
1738                         FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
1739                         FALLOC_FL_INSERT_RANGE))
1740                 return -EOPNOTSUPP;
1741
1742         inode_lock(inode);
1743
1744         if (mode & FALLOC_FL_PUNCH_HOLE) {
1745                 if (offset >= inode->i_size)
1746                         goto out;
1747
1748                 ret = punch_hole(inode, offset, len);
1749         } else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
1750                 ret = f2fs_collapse_range(inode, offset, len);
1751         } else if (mode & FALLOC_FL_ZERO_RANGE) {
1752                 ret = f2fs_zero_range(inode, offset, len, mode);
1753         } else if (mode & FALLOC_FL_INSERT_RANGE) {
1754                 ret = f2fs_insert_range(inode, offset, len);
1755         } else {
1756                 ret = expand_inode_data(inode, offset, len, mode);
1757         }
1758
1759         if (!ret) {
1760                 inode->i_mtime = inode->i_ctime = current_time(inode);
1761                 f2fs_mark_inode_dirty_sync(inode, false);
1762                 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
1763         }
1764
1765 out:
1766         inode_unlock(inode);
1767
1768         trace_f2fs_fallocate(inode, mode, offset, len, ret);
1769         return ret;
1770 }
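
/*
 * Illustrative userspace sketch (an assumption, not part of this file; the
 * helper name is made up): exercising two of the fallocate() modes handled
 * above. Both ranges are block-aligned, and FALLOC_FL_PUNCH_HOLE must be
 * paired with FALLOC_FL_KEEP_SIZE.
 *
 *      #define _GNU_SOURCE
 *      #include <fcntl.h>
 *
 *      static int drop_then_zero(int fd)
 *      {
 *              if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *                            0, 4096))
 *                      return -1;
 *              return fallocate(fd, FALLOC_FL_ZERO_RANGE, 4096, 4096);
 *      }
 */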
1771
1772 static int f2fs_release_file(struct inode *inode, struct file *filp)
1773 {
1774         /*
1775          * f2fs_release_file is called on every close, so we must not drop
1776          * any in-memory pages when the file is closed by another process.
1777          */
1778         if (!(filp->f_mode & FMODE_WRITE) ||
1779                         atomic_read(&inode->i_writecount) != 1)
1780                 return 0;
1781
1782         /* any remaining atomic pages should be discarded */
1783         if (f2fs_is_atomic_file(inode))
1784                 f2fs_drop_inmem_pages(inode);
1785         if (f2fs_is_volatile_file(inode)) {
1786                 set_inode_flag(inode, FI_DROP_CACHE);
1787                 filemap_fdatawrite(inode->i_mapping);
1788                 clear_inode_flag(inode, FI_DROP_CACHE);
1789                 clear_inode_flag(inode, FI_VOLATILE_FILE);
1790                 stat_dec_volatile_write(inode);
1791         }
1792         return 0;
1793 }
1794
1795 static int f2fs_file_flush(struct file *file, fl_owner_t id)
1796 {
1797         struct inode *inode = file_inode(file);
1798
1799         /*
1800          * If the process doing a transaction crashes, we should roll it
1801          * back. Otherwise, other readers/writers can see a corrupted
1802          * database until all the writers close their files. Since this must
1803          * be done before dropping the file lock, it has to happen in ->flush.
1804          */
1805         if (f2fs_is_atomic_file(inode) &&
1806                         F2FS_I(inode)->inmem_task == current)
1807                 f2fs_drop_inmem_pages(inode);
1808         return 0;
1809 }
1810
1811 static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
1812 {
1813         struct f2fs_inode_info *fi = F2FS_I(inode);
1814         u32 masked_flags = fi->i_flags & mask;
1815
1816         /* mask can be shrunk by flags_valid selector */
1817         iflags &= mask;
1818
1819         /* Is it a quota file? Do not allow the user to mess with it */
1820         if (IS_NOQUOTA(inode))
1821                 return -EPERM;
1822
1823         if ((iflags ^ masked_flags) & F2FS_CASEFOLD_FL) {
1824                 if (!f2fs_sb_has_casefold(F2FS_I_SB(inode)))
1825                         return -EOPNOTSUPP;
1826                 if (!f2fs_empty_dir(inode))
1827                         return -ENOTEMPTY;
1828         }
1829
1830         if (iflags & (F2FS_COMPR_FL | F2FS_NOCOMP_FL)) {
1831                 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
1832                         return -EOPNOTSUPP;
1833                 if ((iflags & F2FS_COMPR_FL) && (iflags & F2FS_NOCOMP_FL))
1834                         return -EINVAL;
1835         }
1836
1837         if ((iflags ^ masked_flags) & F2FS_COMPR_FL) {
1838                 if (masked_flags & F2FS_COMPR_FL) {
1839                         if (!f2fs_disable_compressed_file(inode))
1840                                 return -EINVAL;
1841                 }
1842                 if (iflags & F2FS_NOCOMP_FL)
1843                         return -EINVAL;
1844                 if (iflags & F2FS_COMPR_FL) {
1845                         if (!f2fs_may_compress(inode))
1846                                 return -EINVAL;
1847                         if (S_ISREG(inode->i_mode) && inode->i_size)
1848                                 return -EINVAL;
1849
1850                         set_compress_context(inode);
1851                 }
1852         }
1853         if ((iflags ^ masked_flags) & F2FS_NOCOMP_FL) {
1854                 if (masked_flags & F2FS_COMPR_FL)
1855                         return -EINVAL;
1856         }
1857
1858         fi->i_flags = iflags | (fi->i_flags & ~mask);
1859         f2fs_bug_on(F2FS_I_SB(inode), (fi->i_flags & F2FS_COMPR_FL) &&
1860                                         (fi->i_flags & F2FS_NOCOMP_FL));
1861
1862         if (fi->i_flags & F2FS_PROJINHERIT_FL)
1863                 set_inode_flag(inode, FI_PROJ_INHERIT);
1864         else
1865                 clear_inode_flag(inode, FI_PROJ_INHERIT);
1866
1867         inode->i_ctime = current_time(inode);
1868         f2fs_set_inode_flags(inode);
1869         f2fs_mark_inode_dirty_sync(inode, true);
1870         return 0;
1871 }
1872
1873 /* FS_IOC_[GS]ETFLAGS and FS_IOC_FS[GS]ETXATTR support */
1874
1875 /*
1876  * To make a new on-disk f2fs i_flag gettable via FS_IOC_GETFLAGS, add an entry
1877  * for it to f2fs_fsflags_map[], and add its FS_*_FL equivalent to
1878  * F2FS_GETTABLE_FS_FL.  To also make it settable via FS_IOC_SETFLAGS, also add
1879  * its FS_*_FL equivalent to F2FS_SETTABLE_FS_FL.
1880  *
1881  * Translating flags to fsx_flags value used by FS_IOC_FSGETXATTR and
1882  * FS_IOC_FSSETXATTR is done by the VFS.
1883  */
1884
1885 static const struct {
1886         u32 iflag;
1887         u32 fsflag;
1888 } f2fs_fsflags_map[] = {
1889         { F2FS_COMPR_FL,        FS_COMPR_FL },
1890         { F2FS_SYNC_FL,         FS_SYNC_FL },
1891         { F2FS_IMMUTABLE_FL,    FS_IMMUTABLE_FL },
1892         { F2FS_APPEND_FL,       FS_APPEND_FL },
1893         { F2FS_NODUMP_FL,       FS_NODUMP_FL },
1894         { F2FS_NOATIME_FL,      FS_NOATIME_FL },
1895         { F2FS_NOCOMP_FL,       FS_NOCOMP_FL },
1896         { F2FS_INDEX_FL,        FS_INDEX_FL },
1897         { F2FS_DIRSYNC_FL,      FS_DIRSYNC_FL },
1898         { F2FS_PROJINHERIT_FL,  FS_PROJINHERIT_FL },
1899         { F2FS_CASEFOLD_FL,     FS_CASEFOLD_FL },
1900 };
1901
1902 #define F2FS_GETTABLE_FS_FL (           \
1903                 FS_COMPR_FL |           \
1904                 FS_SYNC_FL |            \
1905                 FS_IMMUTABLE_FL |       \
1906                 FS_APPEND_FL |          \
1907                 FS_NODUMP_FL |          \
1908                 FS_NOATIME_FL |         \
1909                 FS_NOCOMP_FL |          \
1910                 FS_INDEX_FL |           \
1911                 FS_DIRSYNC_FL |         \
1912                 FS_PROJINHERIT_FL |     \
1913                 FS_ENCRYPT_FL |         \
1914                 FS_INLINE_DATA_FL |     \
1915                 FS_NOCOW_FL |           \
1916                 FS_VERITY_FL |          \
1917                 FS_CASEFOLD_FL)
1918
1919 #define F2FS_SETTABLE_FS_FL (           \
1920                 FS_COMPR_FL |           \
1921                 FS_SYNC_FL |            \
1922                 FS_IMMUTABLE_FL |       \
1923                 FS_APPEND_FL |          \
1924                 FS_NODUMP_FL |          \
1925                 FS_NOATIME_FL |         \
1926                 FS_NOCOMP_FL |          \
1927                 FS_DIRSYNC_FL |         \
1928                 FS_PROJINHERIT_FL |     \
1929                 FS_CASEFOLD_FL)
1930
1931 /* Convert f2fs on-disk i_flags to FS_IOC_{GET,SET}FLAGS flags */
1932 static inline u32 f2fs_iflags_to_fsflags(u32 iflags)
1933 {
1934         u32 fsflags = 0;
1935         int i;
1936
1937         for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
1938                 if (iflags & f2fs_fsflags_map[i].iflag)
1939                         fsflags |= f2fs_fsflags_map[i].fsflag;
1940
1941         return fsflags;
1942 }
1943
1944 /* Convert FS_IOC_{GET,SET}FLAGS flags to f2fs on-disk i_flags */
1945 static inline u32 f2fs_fsflags_to_iflags(u32 fsflags)
1946 {
1947         u32 iflags = 0;
1948         int i;
1949
1950         for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
1951                 if (fsflags & f2fs_fsflags_map[i].fsflag)
1952                         iflags |= f2fs_fsflags_map[i].iflag;
1953
1954         return iflags;
1955 }
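
/*
 * Illustrative userspace sketch (an assumption, not part of this file; the
 * helper name is made up): toggling one of the settable flags via the
 * legacy flags ioctls, which reach f2fs_fileattr_{get,set}() through the
 * VFS and end up in the translation helpers above.
 *
 *      #include <sys/ioctl.h>
 *      #include <linux/fs.h>
 *
 *      static int set_nodump(int fd)
 *      {
 *              int fsflags;
 *
 *              if (ioctl(fd, FS_IOC_GETFLAGS, &fsflags))
 *                      return -1;
 *              fsflags |= FS_NODUMP_FL;
 *              return ioctl(fd, FS_IOC_SETFLAGS, &fsflags);
 *      }
 */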
1956
1957 static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
1958 {
1959         struct inode *inode = file_inode(filp);
1960
1961         return put_user(inode->i_generation, (int __user *)arg);
1962 }
1963
1964 static int f2fs_ioc_start_atomic_write(struct file *filp)
1965 {
1966         struct inode *inode = file_inode(filp);
1967         struct f2fs_inode_info *fi = F2FS_I(inode);
1968         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1969         int ret;
1970
1971         if (!inode_owner_or_capable(&init_user_ns, inode))
1972                 return -EACCES;
1973
1974         if (!S_ISREG(inode->i_mode))
1975                 return -EINVAL;
1976
1977         if (filp->f_flags & O_DIRECT)
1978                 return -EINVAL;
1979
1980         ret = mnt_want_write_file(filp);
1981         if (ret)
1982                 return ret;
1983
1984         inode_lock(inode);
1985
1986         f2fs_disable_compressed_file(inode);
1987
1988         if (f2fs_is_atomic_file(inode)) {
1989                 if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))
1990                         ret = -EINVAL;
1991                 goto out;
1992         }
1993
1994         ret = f2fs_convert_inline_inode(inode);
1995         if (ret)
1996                 goto out;
1997
1998         down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1999
2000         /*
2001          * We should wait for end_io so that F2FS_WB_CP_DATA is counted
2002          * correctly once f2fs_is_atomic_file() starts returning true.
2003          */
2004         if (get_dirty_pages(inode))
2005                 f2fs_warn(F2FS_I_SB(inode), "Unexpected flush for atomic writes: ino=%lu, npages=%u",
2006                           inode->i_ino, get_dirty_pages(inode));
2007         ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
2008         if (ret) {
2009                 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2010                 goto out;
2011         }
2012
2013         spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
2014         if (list_empty(&fi->inmem_ilist))
2015                 list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]);
2016         sbi->atomic_files++;
2017         spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
2018
2019         /* add the inode to inmem_list first, then set FI_ATOMIC_FILE */
2020         set_inode_flag(inode, FI_ATOMIC_FILE);
2021         clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2022         up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2023
2024         f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2025         F2FS_I(inode)->inmem_task = current;
2026         stat_update_max_atomic_write(inode);
2027 out:
2028         inode_unlock(inode);
2029         mnt_drop_write_file(filp);
2030         return ret;
2031 }
2032
2033 static int f2fs_ioc_commit_atomic_write(struct file *filp)
2034 {
2035         struct inode *inode = file_inode(filp);
2036         int ret;
2037
2038         if (!inode_owner_or_capable(&init_user_ns, inode))
2039                 return -EACCES;
2040
2041         ret = mnt_want_write_file(filp);
2042         if (ret)
2043                 return ret;
2044
2045         f2fs_balance_fs(F2FS_I_SB(inode), true);
2046
2047         inode_lock(inode);
2048
2049         if (f2fs_is_volatile_file(inode)) {
2050                 ret = -EINVAL;
2051                 goto err_out;
2052         }
2053
2054         if (f2fs_is_atomic_file(inode)) {
2055                 ret = f2fs_commit_inmem_pages(inode);
2056                 if (ret)
2057                         goto err_out;
2058
2059                 ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
2060                 if (!ret)
2061                         f2fs_drop_inmem_pages(inode);
2062         } else {
2063                 ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
2064         }
2065 err_out:
2066         if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
2067                 clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2068                 ret = -EINVAL;
2069         }
2070         inode_unlock(inode);
2071         mnt_drop_write_file(filp);
2072         return ret;
2073 }
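
/*
 * Illustrative userspace sketch (an assumption, not part of this file; the
 * helper name is made up): the transaction pattern the two atomic-write
 * ioctls above implement, as used by SQLite on Android. Data written
 * between start and commit becomes visible all-or-nothing.
 *
 *      #include <unistd.h>
 *      #include <sys/ioctl.h>
 *      #include <linux/f2fs.h>
 *
 *      static int atomic_update(int fd, const void *buf, size_t len)
 *      {
 *              if (ioctl(fd, F2FS_IOC_START_ATOMIC_WRITE))
 *                      return -1;
 *              if (pwrite(fd, buf, len, 0) != (ssize_t)len)
 *                      return -1;
 *              return ioctl(fd, F2FS_IOC_COMMIT_ATOMIC_WRITE);
 *      }
 */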
2074
2075 static int f2fs_ioc_start_volatile_write(struct file *filp)
2076 {
2077         struct inode *inode = file_inode(filp);
2078         int ret;
2079
2080         if (!inode_owner_or_capable(&init_user_ns, inode))
2081                 return -EACCES;
2082
2083         if (!S_ISREG(inode->i_mode))
2084                 return -EINVAL;
2085
2086         ret = mnt_want_write_file(filp);
2087         if (ret)
2088                 return ret;
2089
2090         inode_lock(inode);
2091
2092         if (f2fs_is_volatile_file(inode))
2093                 goto out;
2094
2095         ret = f2fs_convert_inline_inode(inode);
2096         if (ret)
2097                 goto out;
2098
2099         stat_inc_volatile_write(inode);
2100         stat_update_max_volatile_write(inode);
2101
2102         set_inode_flag(inode, FI_VOLATILE_FILE);
2103         f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2104 out:
2105         inode_unlock(inode);
2106         mnt_drop_write_file(filp);
2107         return ret;
2108 }
2109
2110 static int f2fs_ioc_release_volatile_write(struct file *filp)
2111 {
2112         struct inode *inode = file_inode(filp);
2113         int ret;
2114
2115         if (!inode_owner_or_capable(&init_user_ns, inode))
2116                 return -EACCES;
2117
2118         ret = mnt_want_write_file(filp);
2119         if (ret)
2120                 return ret;
2121
2122         inode_lock(inode);
2123
2124         if (!f2fs_is_volatile_file(inode))
2125                 goto out;
2126
2127         if (!f2fs_is_first_block_written(inode)) {
2128                 ret = truncate_partial_data_page(inode, 0, true);
2129                 goto out;
2130         }
2131
2132         ret = punch_hole(inode, 0, F2FS_BLKSIZE);
2133 out:
2134         inode_unlock(inode);
2135         mnt_drop_write_file(filp);
2136         return ret;
2137 }
2138
2139 static int f2fs_ioc_abort_volatile_write(struct file *filp)
2140 {
2141         struct inode *inode = file_inode(filp);
2142         int ret;
2143
2144         if (!inode_owner_or_capable(&init_user_ns, inode))
2145                 return -EACCES;
2146
2147         ret = mnt_want_write_file(filp);
2148         if (ret)
2149                 return ret;
2150
2151         inode_lock(inode);
2152
2153         if (f2fs_is_atomic_file(inode))
2154                 f2fs_drop_inmem_pages(inode);
2155         if (f2fs_is_volatile_file(inode)) {
2156                 clear_inode_flag(inode, FI_VOLATILE_FILE);
2157                 stat_dec_volatile_write(inode);
2158                 ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
2159         }
2160
2161         clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2162
2163         inode_unlock(inode);
2164
2165         mnt_drop_write_file(filp);
2166         f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2167         return ret;
2168 }
2169
2170 static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
2171 {
2172         struct inode *inode = file_inode(filp);
2173         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2174         struct super_block *sb = sbi->sb;
2175         __u32 in;
2176         int ret = 0;
2177
2178         if (!capable(CAP_SYS_ADMIN))
2179                 return -EPERM;
2180
2181         if (get_user(in, (__u32 __user *)arg))
2182                 return -EFAULT;
2183
2184         if (in != F2FS_GOING_DOWN_FULLSYNC) {
2185                 ret = mnt_want_write_file(filp);
2186                 if (ret) {
2187                         if (ret == -EROFS) {
2188                                 ret = 0;
2189                                 f2fs_stop_checkpoint(sbi, false);
2190                                 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2191                                 trace_f2fs_shutdown(sbi, in, ret);
2192                         }
2193                         return ret;
2194                 }
2195         }
2196
2197         switch (in) {
2198         case F2FS_GOING_DOWN_FULLSYNC:
2199                 ret = freeze_bdev(sb->s_bdev);
2200                 if (ret)
2201                         goto out;
2202                 f2fs_stop_checkpoint(sbi, false);
2203                 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2204                 thaw_bdev(sb->s_bdev);
2205                 break;
2206         case F2FS_GOING_DOWN_METASYNC:
2207                 /* do checkpoint only */
2208                 ret = f2fs_sync_fs(sb, 1);
2209                 if (ret)
2210                         goto out;
2211                 f2fs_stop_checkpoint(sbi, false);
2212                 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2213                 break;
2214         case F2FS_GOING_DOWN_NOSYNC:
2215                 f2fs_stop_checkpoint(sbi, false);
2216                 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2217                 break;
2218         case F2FS_GOING_DOWN_METAFLUSH:
2219                 f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
2220                 f2fs_stop_checkpoint(sbi, false);
2221                 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2222                 break;
2223         case F2FS_GOING_DOWN_NEED_FSCK:
2224                 set_sbi_flag(sbi, SBI_NEED_FSCK);
2225                 set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
2226                 set_sbi_flag(sbi, SBI_IS_DIRTY);
2227                 /* do checkpoint only */
2228                 ret = f2fs_sync_fs(sb, 1);
2229                 goto out;
2230         default:
2231                 ret = -EINVAL;
2232                 goto out;
2233         }
2234
2235         f2fs_stop_gc_thread(sbi);
2236         f2fs_stop_discard_thread(sbi);
2237
2238         f2fs_drop_discard_cmd(sbi);
2239         clear_opt(sbi, DISCARD);
2240
2241         f2fs_update_time(sbi, REQ_TIME);
2242 out:
2243         if (in != F2FS_GOING_DOWN_FULLSYNC)
2244                 mnt_drop_write_file(filp);
2245
2246         trace_f2fs_shutdown(sbi, in, ret);
2247
2248         return ret;
2249 }
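
/*
 * Illustrative userspace sketch (an assumption, not part of this file; the
 * helper name is made up): requesting an emergency shutdown after a
 * metadata checkpoint. The mode is read with get_user(), so it is passed
 * by pointer.
 *
 *      #include <sys/ioctl.h>
 *      #include <linux/f2fs.h>
 *
 *      static int shutdown_metasync(int fd)
 *      {
 *              __u32 how = F2FS_GOING_DOWN_METASYNC;
 *
 *              return ioctl(fd, F2FS_IOC_SHUTDOWN, &how);
 *      }
 */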
2250
2251 static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
2252 {
2253         struct inode *inode = file_inode(filp);
2254         struct super_block *sb = inode->i_sb;
2255         struct request_queue *q = bdev_get_queue(sb->s_bdev);
2256         struct fstrim_range range;
2257         int ret;
2258
2259         if (!capable(CAP_SYS_ADMIN))
2260                 return -EPERM;
2261
2262         if (!f2fs_hw_support_discard(F2FS_SB(sb)))
2263                 return -EOPNOTSUPP;
2264
2265         if (copy_from_user(&range, (struct fstrim_range __user *)arg,
2266                                 sizeof(range)))
2267                 return -EFAULT;
2268
2269         ret = mnt_want_write_file(filp);
2270         if (ret)
2271                 return ret;
2272
2273         range.minlen = max((unsigned int)range.minlen,
2274                                 q->limits.discard_granularity);
2275         ret = f2fs_trim_fs(F2FS_SB(sb), &range);
2276         mnt_drop_write_file(filp);
2277         if (ret < 0)
2278                 return ret;
2279
2280         if (copy_to_user((struct fstrim_range __user *)arg, &range,
2281                                 sizeof(range)))
2282                 return -EFAULT;
2283         f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2284         return 0;
2285 }
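
/*
 * Illustrative userspace sketch (an assumption, not part of this file; the
 * helper name is made up): trimming the whole filesystem, as fstrim(8)
 * does. On return, range.len holds the number of bytes actually trimmed.
 *
 *      #include <sys/ioctl.h>
 *      #include <linux/fs.h>
 *
 *      static int trim_all(int fd)
 *      {
 *              struct fstrim_range range = {
 *                      .start = 0,
 *                      .len = (__u64)-1,
 *                      .minlen = 0,
 *              };
 *
 *              return ioctl(fd, FITRIM, &range);
 *      }
 */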
2286
2287 static bool uuid_is_nonzero(__u8 u[16])
2288 {
2289         int i;
2290
2291         for (i = 0; i < 16; i++)
2292                 if (u[i])
2293                         return true;
2294         return false;
2295 }
2296
2297 static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
2298 {
2299         struct inode *inode = file_inode(filp);
2300
2301         if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode)))
2302                 return -EOPNOTSUPP;
2303
2304         f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2305
2306         return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
2307 }
2308
2309 static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
2310 {
2311         if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2312                 return -EOPNOTSUPP;
2313         return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
2314 }
2315
2316 static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
2317 {
2318         struct inode *inode = file_inode(filp);
2319         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2320         int err;
2321
2322         if (!f2fs_sb_has_encrypt(sbi))
2323                 return -EOPNOTSUPP;
2324
2325         err = mnt_want_write_file(filp);
2326         if (err)
2327                 return err;
2328
2329         down_write(&sbi->sb_lock);
2330
2331         if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
2332                 goto got_it;
2333
2334         /* update superblock with uuid */
2335         generate_random_uuid(sbi->raw_super->encrypt_pw_salt);
2336
2337         err = f2fs_commit_super(sbi, false);
2338         if (err) {
2339                 /* undo new data */
2340                 memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
2341                 goto out_err;
2342         }
2343 got_it:
2344         if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
2345                                                                         16))
2346                 err = -EFAULT;
2347 out_err:
2348         up_write(&sbi->sb_lock);
2349         mnt_drop_write_file(filp);
2350         return err;
2351 }
2352
2353 static int f2fs_ioc_get_encryption_policy_ex(struct file *filp,
2354                                              unsigned long arg)
2355 {
2356         if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2357                 return -EOPNOTSUPP;
2358
2359         return fscrypt_ioctl_get_policy_ex(filp, (void __user *)arg);
2360 }
2361
2362 static int f2fs_ioc_add_encryption_key(struct file *filp, unsigned long arg)
2363 {
2364         if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2365                 return -EOPNOTSUPP;
2366
2367         return fscrypt_ioctl_add_key(filp, (void __user *)arg);
2368 }
2369
2370 static int f2fs_ioc_remove_encryption_key(struct file *filp, unsigned long arg)
2371 {
2372         if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2373                 return -EOPNOTSUPP;
2374
2375         return fscrypt_ioctl_remove_key(filp, (void __user *)arg);
2376 }
2377
2378 static int f2fs_ioc_remove_encryption_key_all_users(struct file *filp,
2379                                                     unsigned long arg)
2380 {
2381         if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2382                 return -EOPNOTSUPP;
2383
2384         return fscrypt_ioctl_remove_key_all_users(filp, (void __user *)arg);
2385 }
2386
2387 static int f2fs_ioc_get_encryption_key_status(struct file *filp,
2388                                               unsigned long arg)
2389 {
2390         if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2391                 return -EOPNOTSUPP;
2392
2393         return fscrypt_ioctl_get_key_status(filp, (void __user *)arg);
2394 }
2395
2396 static int f2fs_ioc_get_encryption_nonce(struct file *filp, unsigned long arg)
2397 {
2398         if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2399                 return -EOPNOTSUPP;
2400
2401         return fscrypt_ioctl_get_nonce(filp, (void __user *)arg);
2402 }
2403
2404 static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
2405 {
2406         struct inode *inode = file_inode(filp);
2407         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2408         __u32 sync;
2409         int ret;
2410
2411         if (!capable(CAP_SYS_ADMIN))
2412                 return -EPERM;
2413
2414         if (get_user(sync, (__u32 __user *)arg))
2415                 return -EFAULT;
2416
2417         if (f2fs_readonly(sbi->sb))
2418                 return -EROFS;
2419
2420         ret = mnt_want_write_file(filp);
2421         if (ret)
2422                 return ret;
2423
2424         if (!sync) {
2425                 if (!down_write_trylock(&sbi->gc_lock)) {
2426                         ret = -EBUSY;
2427                         goto out;
2428                 }
2429         } else {
2430                 down_write(&sbi->gc_lock);
2431         }
2432
2433         ret = f2fs_gc(sbi, sync, true, false, NULL_SEGNO);
2434 out:
2435         mnt_drop_write_file(filp);
2436         return ret;
2437 }
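
/*
 * Illustrative userspace sketch (an assumption, not part of this file; the
 * helper name is made up): triggering one round of garbage collection.
 * With sync == 0 the call returns -EBUSY rather than blocking on gc_lock.
 *
 *      #include <sys/ioctl.h>
 *      #include <linux/f2fs.h>
 *
 *      static int run_gc(int fd, int wait)
 *      {
 *              __u32 sync = wait ? 1 : 0;
 *
 *              return ioctl(fd, F2FS_IOC_GARBAGE_COLLECT, &sync);
 *      }
 */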
2438
2439 static int __f2fs_ioc_gc_range(struct file *filp, struct f2fs_gc_range *range)
2440 {
2441         struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
2442         u64 end;
2443         int ret;
2444
2445         if (!capable(CAP_SYS_ADMIN))
2446                 return -EPERM;
2447         if (f2fs_readonly(sbi->sb))
2448                 return -EROFS;
2449
2450         end = range->start + range->len;
2451         if (end < range->start || range->start < MAIN_BLKADDR(sbi) ||
2452                                         end >= MAX_BLKADDR(sbi))
2453                 return -EINVAL;
2454
2455         ret = mnt_want_write_file(filp);
2456         if (ret)
2457                 return ret;
2458
2459 do_more:
2460         if (!range->sync) {
2461                 if (!down_write_trylock(&sbi->gc_lock)) {
2462                         ret = -EBUSY;
2463                         goto out;
2464                 }
2465         } else {
2466                 down_write(&sbi->gc_lock);
2467         }
2468
2469         ret = f2fs_gc(sbi, range->sync, true, false,
2470                                 GET_SEGNO(sbi, range->start));
2471         if (ret) {
2472                 if (ret == -EBUSY)
2473                         ret = -EAGAIN;
2474                 goto out;
2475         }
2476         range->start += BLKS_PER_SEC(sbi);
2477         if (range->start <= end)
2478                 goto do_more;
2479 out:
2480         mnt_drop_write_file(filp);
2481         return ret;
2482 }
2483
2484 static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
2485 {
2486         struct f2fs_gc_range range;
2487
2488         if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
2489                                                         sizeof(range)))
2490                 return -EFAULT;
2491         return __f2fs_ioc_gc_range(filp, &range);
2492 }
2493
2494 static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
2495 {
2496         struct inode *inode = file_inode(filp);
2497         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2498         int ret;
2499
2500         if (!capable(CAP_SYS_ADMIN))
2501                 return -EPERM;
2502
2503         if (f2fs_readonly(sbi->sb))
2504                 return -EROFS;
2505
2506         if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2507                 f2fs_info(sbi, "Skipping Checkpoint. Checkpoints currently disabled.");
2508                 return -EINVAL;
2509         }
2510
2511         ret = mnt_want_write_file(filp);
2512         if (ret)
2513                 return ret;
2514
2515         ret = f2fs_sync_fs(sbi->sb, 1);
2516
2517         mnt_drop_write_file(filp);
2518         return ret;
2519 }
2520
2521 static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
2522                                         struct file *filp,
2523                                         struct f2fs_defragment *range)
2524 {
2525         struct inode *inode = file_inode(filp);
2526         struct f2fs_map_blocks map = { .m_next_extent = NULL,
2527                                         .m_seg_type = NO_CHECK_TYPE,
2528                                         .m_may_create = false };
2529         struct extent_info ei = {0, 0, 0};
2530         pgoff_t pg_start, pg_end, next_pgofs;
2531         unsigned int blk_per_seg = sbi->blocks_per_seg;
2532         unsigned int total = 0, sec_num;
2533         block_t blk_end = 0;
2534         bool fragmented = false;
2535         int err;
2536
2537         /* if in-place-update policy is enabled, don't waste time here */
2538         if (f2fs_should_update_inplace(inode, NULL))
2539                 return -EINVAL;
2540
2541         pg_start = range->start >> PAGE_SHIFT;
2542         pg_end = (range->start + range->len) >> PAGE_SHIFT;
2543
2544         f2fs_balance_fs(sbi, true);
2545
2546         inode_lock(inode);
2547
2548         /* write back all dirty pages in the range */
2549         err = filemap_write_and_wait_range(inode->i_mapping, range->start,
2550                                                 range->start + range->len - 1);
2551         if (err)
2552                 goto out;
2553
2554         /*
2555          * look up mapping info in the extent cache; skip defragmenting if
2556          * the physical block addresses are contiguous.
2557          */
2558         if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
2559                 if (ei.fofs + ei.len >= pg_end)
2560                         goto out;
2561         }
2562
2563         map.m_lblk = pg_start;
2564         map.m_next_pgofs = &next_pgofs;
2565
2566         /*
2567          * look up mapping info in the dnode page cache; skip defragmenting
2568          * if all physical block addresses are contiguous, even when there
2569          * are holes in the logical blocks.
2570          */
2571         while (map.m_lblk < pg_end) {
2572                 map.m_len = pg_end - map.m_lblk;
2573                 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
2574                 if (err)
2575                         goto out;
2576
2577                 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
2578                         map.m_lblk = next_pgofs;
2579                         continue;
2580                 }
2581
2582                 if (blk_end && blk_end != map.m_pblk)
2583                         fragmented = true;
2584
2585                 /* record the total count of blocks that we're going to move */
2586                 total += map.m_len;
2587
2588                 blk_end = map.m_pblk + map.m_len;
2589
2590                 map.m_lblk += map.m_len;
2591         }
2592
2593         if (!fragmented) {
2594                 total = 0;
2595                 goto out;
2596         }
2597
2598         sec_num = DIV_ROUND_UP(total, BLKS_PER_SEC(sbi));
2599
2600         /*
2601          * make sure there are enough free sections for LFS allocation; this
2602          * avoids defragmentation running in SSR mode when free sections are
2603          * being consumed intensively
2604          */
2605         if (has_not_enough_free_secs(sbi, 0, sec_num)) {
2606                 err = -EAGAIN;
2607                 goto out;
2608         }
2609
2610         map.m_lblk = pg_start;
2611         map.m_len = pg_end - pg_start;
2612         total = 0;
2613
2614         while (map.m_lblk < pg_end) {
2615                 pgoff_t idx;
2616                 int cnt = 0;
2617
2618 do_map:
2619                 map.m_len = pg_end - map.m_lblk;
2620                 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
2621                 if (err)
2622                         goto clear_out;
2623
2624                 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
2625                         map.m_lblk = next_pgofs;
2626                         goto check;
2627                 }
2628
2629                 set_inode_flag(inode, FI_DO_DEFRAG);
2630
2631                 idx = map.m_lblk;
2632                 while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
2633                         struct page *page;
2634
2635                         page = f2fs_get_lock_data_page(inode, idx, true);
2636                         if (IS_ERR(page)) {
2637                                 err = PTR_ERR(page);
2638                                 goto clear_out;
2639                         }
2640
2641                         set_page_dirty(page);
2642                         f2fs_put_page(page, 1);
2643
2644                         idx++;
2645                         cnt++;
2646                         total++;
2647                 }
2648
2649                 map.m_lblk = idx;
2650 check:
2651                 if (map.m_lblk < pg_end && cnt < blk_per_seg)
2652                         goto do_map;
2653
2654                 clear_inode_flag(inode, FI_DO_DEFRAG);
2655
2656                 err = filemap_fdatawrite(inode->i_mapping);
2657                 if (err)
2658                         goto out;
2659         }
2660 clear_out:
2661         clear_inode_flag(inode, FI_DO_DEFRAG);
2662 out:
2663         inode_unlock(inode);
2664         if (!err)
2665                 range->len = (u64)total << PAGE_SHIFT;
2666         return err;
2667 }
2668
2669 static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
2670 {
2671         struct inode *inode = file_inode(filp);
2672         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2673         struct f2fs_defragment range;
2674         int err;
2675
2676         if (!capable(CAP_SYS_ADMIN))
2677                 return -EPERM;
2678
2679         if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
2680                 return -EINVAL;
2681
2682         if (f2fs_readonly(sbi->sb))
2683                 return -EROFS;
2684
2685         if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
2686                                                         sizeof(range)))
2687                 return -EFAULT;
2688
2689         /* verify alignment of offset & size */
2690         if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
2691                 return -EINVAL;
2692
2693         if (unlikely((range.start + range.len) >> PAGE_SHIFT >
2694                                         max_file_blocks(inode)))
2695                 return -EINVAL;
2696
2697         err = mnt_want_write_file(filp);
2698         if (err)
2699                 return err;
2700
2701         err = f2fs_defragment_range(sbi, filp, &range);
2702         mnt_drop_write_file(filp);
2703
2704         f2fs_update_time(sbi, REQ_TIME);
2705         if (err < 0)
2706                 return err;
2707
2708         if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
2709                                                         sizeof(range)))
2710                 return -EFAULT;
2711
2712         return 0;
2713 }
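
/*
 * Illustrative userspace sketch (an assumption, not part of this file; the
 * helper name is made up): asking f2fs to defragment the first 64MB of a
 * file. start and len must be block-aligned; on return, range.len holds
 * the number of bytes queued for relocation.
 *
 *      #include <sys/ioctl.h>
 *      #include <linux/f2fs.h>
 *
 *      static int defrag_head(int fd)
 *      {
 *              struct f2fs_defragment range = {
 *                      .start = 0,
 *                      .len = 64 << 20,
 *              };
 *
 *              return ioctl(fd, F2FS_IOC_DEFRAGMENT, &range);
 *      }
 */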
2714
2715 static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
2716                         struct file *file_out, loff_t pos_out, size_t len)
2717 {
2718         struct inode *src = file_inode(file_in);
2719         struct inode *dst = file_inode(file_out);
2720         struct f2fs_sb_info *sbi = F2FS_I_SB(src);
2721         size_t olen = len, dst_max_i_size = 0;
2722         size_t dst_osize;
2723         int ret;
2724
2725         if (file_in->f_path.mnt != file_out->f_path.mnt ||
2726                                 src->i_sb != dst->i_sb)
2727                 return -EXDEV;
2728
2729         if (unlikely(f2fs_readonly(src->i_sb)))
2730                 return -EROFS;
2731
2732         if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
2733                 return -EINVAL;
2734
2735         if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst))
2736                 return -EOPNOTSUPP;
2737
2738         if (pos_out < 0 || pos_in < 0)
2739                 return -EINVAL;
2740
2741         if (src == dst) {
2742                 if (pos_in == pos_out)
2743                         return 0;
2744                 if (pos_out > pos_in && pos_out < pos_in + len)
2745                         return -EINVAL;
2746         }
2747
2748         inode_lock(src);
2749         if (src != dst) {
2750                 ret = -EBUSY;
2751                 if (!inode_trylock(dst))
2752                         goto out;
2753         }
2754
2755         ret = -EINVAL;
2756         if (pos_in + len > src->i_size || pos_in + len < pos_in)
2757                 goto out_unlock;
2758         if (len == 0)
2759                 olen = len = src->i_size - pos_in;
2760         if (pos_in + len == src->i_size)
2761                 len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
2762         if (len == 0) {
2763                 ret = 0;
2764                 goto out_unlock;
2765         }
2766
2767         dst_osize = dst->i_size;
2768         if (pos_out + olen > dst->i_size)
2769                 dst_max_i_size = pos_out + olen;
2770
2771         /* verify the end result is block aligned */
2772         if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
2773                         !IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
2774                         !IS_ALIGNED(pos_out, F2FS_BLKSIZE))
2775                 goto out_unlock;
2776
2777         ret = f2fs_convert_inline_inode(src);
2778         if (ret)
2779                 goto out_unlock;
2780
2781         ret = f2fs_convert_inline_inode(dst);
2782         if (ret)
2783                 goto out_unlock;
2784
2785         /* write out all dirty pages from offset */
2786         ret = filemap_write_and_wait_range(src->i_mapping,
2787                                         pos_in, pos_in + len);
2788         if (ret)
2789                 goto out_unlock;
2790
2791         ret = filemap_write_and_wait_range(dst->i_mapping,
2792                                         pos_out, pos_out + len);
2793         if (ret)
2794                 goto out_unlock;
2795
2796         f2fs_balance_fs(sbi, true);
2797
2798         down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2799         if (src != dst) {
2800                 ret = -EBUSY;
2801                 if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
2802                         goto out_src;
2803         }
2804
2805         f2fs_lock_op(sbi);
2806         ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
2807                                 pos_out >> F2FS_BLKSIZE_BITS,
2808                                 len >> F2FS_BLKSIZE_BITS, false);
2809
2810         if (!ret) {
2811                 if (dst_max_i_size)
2812                         f2fs_i_size_write(dst, dst_max_i_size);
2813                 else if (dst_osize != dst->i_size)
2814                         f2fs_i_size_write(dst, dst_osize);
2815         }
2816         f2fs_unlock_op(sbi);
2817
2818         if (src != dst)
2819                 up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
2820 out_src:
2821         up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2822 out_unlock:
2823         if (src != dst)
2824                 inode_unlock(dst);
2825 out:
2826         inode_unlock(src);
2827         return ret;
2828 }
2829
2830 static int __f2fs_ioc_move_range(struct file *filp,
2831                                 struct f2fs_move_range *range)
2832 {
2833         struct fd dst;
2834         int err;
2835
2836         if (!(filp->f_mode & FMODE_READ) ||
2837                         !(filp->f_mode & FMODE_WRITE))
2838                 return -EBADF;
2839
2840         dst = fdget(range->dst_fd);
2841         if (!dst.file)
2842                 return -EBADF;
2843
2844         if (!(dst.file->f_mode & FMODE_WRITE)) {
2845                 err = -EBADF;
2846                 goto err_out;
2847         }
2848
2849         err = mnt_want_write_file(filp);
2850         if (err)
2851                 goto err_out;
2852
2853         err = f2fs_move_file_range(filp, range->pos_in, dst.file,
2854                                         range->pos_out, range->len);
2855
2856         mnt_drop_write_file(filp);
2857 err_out:
2858         fdput(dst);
2859         return err;
2860 }
2861
2862 static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
2863 {
2864         struct f2fs_move_range range;
2865
2866         if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
2867                                                         sizeof(range)))
2868                 return -EFAULT;
2869         return __f2fs_ioc_move_range(filp, &range);
2870 }
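
/*
 * Illustrative userspace sketch (an assumption, not part of this file; the
 * helper name is made up): moving block-aligned data from one file into
 * another on the same f2fs mount. The source fd needs read and write
 * access, the destination fd write access.
 *
 *      #include <sys/ioctl.h>
 *      #include <linux/f2fs.h>
 *
 *      static int move_blocks(int src_fd, int dst_fd, __u64 len)
 *      {
 *              struct f2fs_move_range range = {
 *                      .dst_fd = dst_fd,
 *                      .pos_in = 0,
 *                      .pos_out = 0,
 *                      .len = len,
 *              };
 *
 *              return ioctl(src_fd, F2FS_IOC_MOVE_RANGE, &range);
 *      }
 */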
2871
2872 static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
2873 {
2874         struct inode *inode = file_inode(filp);
2875         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2876         struct sit_info *sm = SIT_I(sbi);
2877         unsigned int start_segno = 0, end_segno = 0;
2878         unsigned int dev_start_segno = 0, dev_end_segno = 0;
2879         struct f2fs_flush_device range;
2880         int ret;
2881
2882         if (!capable(CAP_SYS_ADMIN))
2883                 return -EPERM;
2884
2885         if (f2fs_readonly(sbi->sb))
2886                 return -EROFS;
2887
2888         if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2889                 return -EINVAL;
2890
2891         if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
2892                                                         sizeof(range)))
2893                 return -EFAULT;
2894
2895         if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num ||
2896                         __is_large_section(sbi)) {
2897                 f2fs_warn(sbi, "Can't flush %u in %d for segs_per_sec %u != 1",
2898                           range.dev_num, sbi->s_ndevs, sbi->segs_per_sec);
2899                 return -EINVAL;
2900         }
2901
2902         ret = mnt_want_write_file(filp);
2903         if (ret)
2904                 return ret;
2905
2906         if (range.dev_num != 0)
2907                 dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
2908         dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);
2909
2910         start_segno = sm->last_victim[FLUSH_DEVICE];
2911         if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
2912                 start_segno = dev_start_segno;
2913         end_segno = min(start_segno + range.segments, dev_end_segno);
2914
2915         while (start_segno < end_segno) {
2916                 if (!down_write_trylock(&sbi->gc_lock)) {
2917                         ret = -EBUSY;
2918                         goto out;
2919                 }
2920                 sm->last_victim[GC_CB] = end_segno + 1;
2921                 sm->last_victim[GC_GREEDY] = end_segno + 1;
2922                 sm->last_victim[ALLOC_NEXT] = end_segno + 1;
2923                 ret = f2fs_gc(sbi, true, true, true, start_segno);
2924                 if (ret == -EAGAIN)
2925                         ret = 0;
2926                 else if (ret < 0)
2927                         break;
2928                 start_segno++;
2929         }
2930 out:
2931         mnt_drop_write_file(filp);
2932         return ret;
2933 }
2934
2935 static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
2936 {
2937         struct inode *inode = file_inode(filp);
2938         u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);
2939
2940         /* Always report atomic-write support; SQLite on Android relies on it. */
2941         sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;
2942
2943         return put_user(sb_feature, (u32 __user *)arg);
2944 }
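
/*
 * Illustrative userspace sketch (an assumption, not part of this file; the
 * helper name and open-coded bit are assumptions): probing the feature
 * mask. The F2FS_FEATURE_* values live in fs/f2fs/f2fs.h rather than the
 * uapi header, so 0x0004 (F2FS_FEATURE_ATOMIC_WRITE) is open-coded here.
 *
 *      #include <sys/ioctl.h>
 *      #include <linux/f2fs.h>
 *
 *      static int has_atomic_write(int fd)
 *      {
 *              __u32 feat = 0;
 *
 *              if (ioctl(fd, F2FS_IOC_GET_FEATURES, &feat))
 *                      return -1;
 *              return !!(feat & 0x0004);
 *      }
 */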
2945
2946 #ifdef CONFIG_QUOTA
2947 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
2948 {
2949         struct dquot *transfer_to[MAXQUOTAS] = {};
2950         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2951         struct super_block *sb = sbi->sb;
2952         int err = 0;
2953
2954         transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
2955         if (!IS_ERR(transfer_to[PRJQUOTA])) {
2956                 err = __dquot_transfer(inode, transfer_to);
2957                 if (err)
2958                         set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2959                 dqput(transfer_to[PRJQUOTA]);
2960         }
2961         return err;
2962 }
2963
2964 static int f2fs_ioc_setproject(struct inode *inode, __u32 projid)
2965 {
2966         struct f2fs_inode_info *fi = F2FS_I(inode);
2967         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2968         struct page *ipage;
2969         kprojid_t kprojid;
2970         int err;
2971
2972         if (!f2fs_sb_has_project_quota(sbi)) {
2973                 if (projid != F2FS_DEF_PROJID)
2974                         return -EOPNOTSUPP;
2975                 else
2976                         return 0;
2977         }
2978
2979         if (!f2fs_has_extra_attr(inode))
2980                 return -EOPNOTSUPP;
2981
2982         kprojid = make_kprojid(&init_user_ns, (projid_t)projid);
2983
2984         if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
2985                 return 0;
2986
2987         err = -EPERM;
2988         /* Is it a quota file? Do not allow the user to mess with it */
2989         if (IS_NOQUOTA(inode))
2990                 return err;
2991
2992         ipage = f2fs_get_node_page(sbi, inode->i_ino);
2993         if (IS_ERR(ipage))
2994                 return PTR_ERR(ipage);
2995
2996         if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize,
2997                                                                 i_projid)) {
2998                 err = -EOVERFLOW;
2999                 f2fs_put_page(ipage, 1);
3000                 return err;
3001         }
3002         f2fs_put_page(ipage, 1);
3003
3004         err = dquot_initialize(inode);
3005         if (err)
3006                 return err;
3007
3008         f2fs_lock_op(sbi);
3009         err = f2fs_transfer_project_quota(inode, kprojid);
3010         if (err)
3011                 goto out_unlock;
3012
3013         F2FS_I(inode)->i_projid = kprojid;
3014         inode->i_ctime = current_time(inode);
3015         f2fs_mark_inode_dirty_sync(inode, true);
3016 out_unlock:
3017         f2fs_unlock_op(sbi);
3018         return err;
3019 }
3020 #else
3021 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
3022 {
3023         return 0;
3024 }
3025
3026 static int f2fs_ioc_setproject(struct inode *inode, __u32 projid)
3027 {
3028         if (projid != F2FS_DEF_PROJID)
3029                 return -EOPNOTSUPP;
3030         return 0;
3031 }
3032 #endif
3033
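/*
 * ->fileattr_get() hook (backs FS_IOC_GETFLAGS and FS_IOC_FSGETXATTR):
 * translate f2fs inode flags to FS_*_FL, synthesize the flags tracked
 * elsewhere (encryption, verity, inline data, pinned file as NOCOW),
 * and report the project ID when project quota is enabled.
 */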
3034 int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
3035 {
3036         struct inode *inode = d_inode(dentry);
3037         struct f2fs_inode_info *fi = F2FS_I(inode);
3038         u32 fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
3039
3040         if (IS_ENCRYPTED(inode))
3041                 fsflags |= FS_ENCRYPT_FL;
3042         if (IS_VERITY(inode))
3043                 fsflags |= FS_VERITY_FL;
3044         if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
3045                 fsflags |= FS_INLINE_DATA_FL;
3046         if (is_inode_flag_set(inode, FI_PIN_FILE))
3047                 fsflags |= FS_NOCOW_FL;
3048
3049         fileattr_fill_flags(fa, fsflags & F2FS_GETTABLE_FS_FL);
3050
3051         if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
3052                 fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid);
3053
3054         return 0;
3055 }
3056
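/*
 * ->fileattr_set() hook: reject flags outside the gettable set, limit
 * the settable mask to FS_COMMON_FL when the request carries xflags
 * rather than full flags, then apply the flag change and any project ID
 * update together.
 */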
3057 int f2fs_fileattr_set(struct user_namespace *mnt_userns,
3058                       struct dentry *dentry, struct fileattr *fa)
3059 {
3060         struct inode *inode = d_inode(dentry);
3061         u32 fsflags = fa->flags, mask = F2FS_SETTABLE_FS_FL;
3062         u32 iflags;
3063         int err;
3064
3065         if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
3066                 return -EIO;
3067         if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
3068                 return -ENOSPC;
3069         if (fsflags & ~F2FS_GETTABLE_FS_FL)
3070                 return -EOPNOTSUPP;
3071         fsflags &= F2FS_SETTABLE_FS_FL;
3072         if (!fa->flags_valid)
3073                 mask &= FS_COMMON_FL;
3074
3075         iflags = f2fs_fsflags_to_iflags(fsflags);
3076         if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
3077                 return -EOPNOTSUPP;
3078
3079         err = f2fs_setflags_common(inode, iflags, f2fs_fsflags_to_iflags(mask));
3080         if (!err)
3081                 err = f2fs_ioc_setproject(inode, fa->fsx_projid);
3082
3083         return err;
3084 }
3085
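/*
 * Bump (if @inc) and check the pinned file's GC failure count. Once the
 * count exceeds gc_pin_file_threshold, the pin is dropped so GC can
 * migrate the file's blocks again, and -EAGAIN is returned.
 */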
3086 int f2fs_pin_file_control(struct inode *inode, bool inc)
3087 {
3088         struct f2fs_inode_info *fi = F2FS_I(inode);
3089         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3090
3091         /* Count GC failures on this pinned file as a signal that it is blocking GC. */
3092         if (inc)
3093                 f2fs_i_gc_failures_write(inode,
3094                                 fi->i_gc_failures[GC_FAILURE_PIN] + 1);
3095
3096         if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
3097                 f2fs_warn(sbi, "%s: Enable GC for ino %lx after %x GC trials",
3098                           __func__, inode->i_ino,
3099                           fi->i_gc_failures[GC_FAILURE_PIN]);
3100                 clear_inode_flag(inode, FI_PIN_FILE);
3101                 return -EAGAIN;
3102         }
3103         return 0;
3104 }
3105
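/*
 * F2FS_IOC_SET_PIN_FILE: pin (pin != 0) or unpin a regular file so GC
 * will not migrate its blocks. Pinning fails with -EINVAL if the file
 * must be updated out of place, converts away inline data, and returns
 * -EOPNOTSUPP if compression cannot be disabled. On success, pinning
 * returns the file's pinned-GC failure count.
 *
 * Illustrative userspace call:
 *
 *	__u32 pin = 1;
 *	ioctl(fd, F2FS_IOC_SET_PIN_FILE, &pin);
 */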
3106 static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
3107 {
3108         struct inode *inode = file_inode(filp);
3109         __u32 pin;
3110         int ret = 0;
3111
3112         if (get_user(pin, (__u32 __user *)arg))
3113                 return -EFAULT;
3114
3115         if (!S_ISREG(inode->i_mode))
3116                 return -EINVAL;
3117
3118         if (f2fs_readonly(F2FS_I_SB(inode)->sb))
3119                 return -EROFS;
3120
3121         ret = mnt_want_write_file(filp);
3122         if (ret)
3123                 return ret;
3124
3125         inode_lock(inode);
3126
3127         if (f2fs_should_update_outplace(inode, NULL)) {
3128                 ret = -EINVAL;
3129                 goto out;
3130         }
3131
3132         if (!pin) {
3133                 clear_inode_flag(inode, FI_PIN_FILE);
3134                 f2fs_i_gc_failures_write(inode, 0);
3135                 goto done;
3136         }
3137
3138         if (f2fs_pin_file_control(inode, false)) {
3139                 ret = -EAGAIN;
3140                 goto out;
3141         }
3142
3143         ret = f2fs_convert_inline_inode(inode);
3144         if (ret)
3145                 goto out;
3146
3147         if (!f2fs_disable_compressed_file(inode)) {
3148                 ret = -EOPNOTSUPP;
3149                 goto out;
3150         }
3151
3152         set_inode_flag(inode, FI_PIN_FILE);
3153         ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3154 done:
3155         f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3156 out:
3157         inode_unlock(inode);
3158         mnt_drop_write_file(filp);
3159         return ret;
3160 }
3161
3162 static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
3163 {
3164         struct inode *inode = file_inode(filp);
3165         __u32 pin = 0;
3166
3167         if (is_inode_flag_set(inode, FI_PIN_FILE))
3168                 pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3169         return put_user(pin, (u32 __user *)arg);
3170 }
3171
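/*
 * Walk the whole file with F2FS_GET_BLOCK_PRECACHE lookups to populate
 * the extent cache up front; i_gc_rwsem[WRITE] is held across each
 * mapping call to serialize with GC. Not supported once the inode has
 * its extent cache disabled (FI_NO_EXTENT).
 */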
3172 int f2fs_precache_extents(struct inode *inode)
3173 {
3174         struct f2fs_inode_info *fi = F2FS_I(inode);
3175         struct f2fs_map_blocks map;
3176         pgoff_t m_next_extent;
3177         loff_t end;
3178         int err;
3179
3180         if (is_inode_flag_set(inode, FI_NO_EXTENT))
3181                 return -EOPNOTSUPP;
3182
3183         map.m_lblk = 0;
3184         map.m_next_pgofs = NULL;
3185         map.m_next_extent = &m_next_extent;
3186         map.m_seg_type = NO_CHECK_TYPE;
3187         map.m_may_create = false;
3188         end = max_file_blocks(inode);
3189
3190         while (map.m_lblk < end) {
3191                 map.m_len = end - map.m_lblk;
3192
3193                 down_write(&fi->i_gc_rwsem[WRITE]);
3194                 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
3195                 up_write(&fi->i_gc_rwsem[WRITE]);
3196                 if (err)
3197                         return err;
3198
3199                 map.m_lblk = m_next_extent;
3200         }
3201
3202         return 0;
3203 }
3204
3205 static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
3206 {
3207         return f2fs_precache_extents(file_inode(filp));
3208 }
3209
3210 static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
3211 {
3212         struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
3213         __u64 block_count;
3214
3215         if (!capable(CAP_SYS_ADMIN))
3216                 return -EPERM;
3217
3218         if (f2fs_readonly(sbi->sb))
3219                 return -EROFS;
3220
3221         if (copy_from_user(&block_count, (void __user *)arg,
3222                            sizeof(block_count)))
3223                 return -EFAULT;
3224
3225         return f2fs_resize_fs(sbi, block_count);
3226 }
3227
3228 static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg)
3229 {
3230         struct inode *inode = file_inode(filp);
3231
3232         f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3233
3234         if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) {
3235                 f2fs_warn(F2FS_I_SB(inode),
3236                           "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem",
3237                           inode->i_ino);
3238                 return -EOPNOTSUPP;
3239         }
3240
3241         return fsverity_ioctl_enable(filp, (const void __user *)arg);
3242 }
3243
3244 static int f2fs_ioc_measure_verity(struct file *filp, unsigned long arg)
3245 {
3246         if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
3247                 return -EOPNOTSUPP;
3248
3249         return fsverity_ioctl_measure(filp, (void __user *)arg);
3250 }
3251
3252 static int f2fs_ioc_read_verity_metadata(struct file *filp, unsigned long arg)
3253 {
3254         if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
3255                 return -EOPNOTSUPP;
3256
3257         return fsverity_ioctl_read_metadata(filp, (const void __user *)arg);
3258 }
3259
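/*
 * FS_IOC_GETFSLABEL: copy the volume label to userspace, converting the
 * on-disk UTF-16LE volume_name to UTF-8 under sb_lock.
 */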
3260 static int f2fs_ioc_getfslabel(struct file *filp, unsigned long arg)
3261 {
3262         struct inode *inode = file_inode(filp);
3263         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3264         char *vbuf;
3265         int count;
3266         int err = 0;
3267
3268         vbuf = f2fs_kzalloc(sbi, MAX_VOLUME_NAME, GFP_KERNEL);
3269         if (!vbuf)
3270                 return -ENOMEM;
3271
3272         down_read(&sbi->sb_lock);
3273         count = utf16s_to_utf8s(sbi->raw_super->volume_name,
3274                         ARRAY_SIZE(sbi->raw_super->volume_name),
3275                         UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME);
3276         up_read(&sbi->sb_lock);
3277
3278         if (copy_to_user((char __user *)arg, vbuf,
3279                                 min(FSLABEL_MAX, count)))
3280                 err = -EFAULT;
3281
3282         kfree(vbuf);
3283         return err;
3284 }
3285
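/*
 * FS_IOC_SETFSLABEL: CAP_SYS_ADMIN only. Convert the new UTF-8 label to
 * UTF-16LE in the raw superblock under sb_lock and commit the
 * superblock.
 */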
3286 static int f2fs_ioc_setfslabel(struct file *filp, unsigned long arg)
3287 {
3288         struct inode *inode = file_inode(filp);
3289         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3290         char *vbuf;
3291         int err = 0;
3292
3293         if (!capable(CAP_SYS_ADMIN))
3294                 return -EPERM;
3295
3296         vbuf = strndup_user((const char __user *)arg, FSLABEL_MAX);
3297         if (IS_ERR(vbuf))
3298                 return PTR_ERR(vbuf);
3299
3300         err = mnt_want_write_file(filp);
3301         if (err)
3302                 goto out;
3303
3304         down_write(&sbi->sb_lock);
3305
3306         memset(sbi->raw_super->volume_name, 0,
3307                         sizeof(sbi->raw_super->volume_name));
3308         utf8s_to_utf16s(vbuf, strlen(vbuf), UTF16_LITTLE_ENDIAN,
3309                         sbi->raw_super->volume_name,
3310                         ARRAY_SIZE(sbi->raw_super->volume_name));
3311
3312         err = f2fs_commit_super(sbi, false);
3313
3314         up_write(&sbi->sb_lock);
3315
3316         mnt_drop_write_file(filp);
3317 out:
3318         kfree(vbuf);
3319         return err;
3320 }
3321
3322 static int f2fs_get_compress_blocks(struct file *filp, unsigned long arg)
3323 {
3324         struct inode *inode = file_inode(filp);
3325         __u64 blocks;
3326
3327         if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3328                 return -EOPNOTSUPP;
3329
3330         if (!f2fs_compressed_file(inode))
3331                 return -EINVAL;
3332
3333         blocks = atomic_read(&F2FS_I(inode)->i_compr_blocks);
3334         return put_user(blocks, (u64 __user *)arg);
3335 }
3336
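/*
 * Release the blocks saved by compression in @count blocks' worth of
 * clusters: after validating every mapped block address, walk a cluster
 * at a time, turn the NEW_ADDR placeholders of non-compressed slots back
 * into NULL_ADDR, and return how many blocks were given back. Clusters
 * whose first slot is not COMPRESS_ADDR are skipped.
 */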
3337 static int release_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
3338 {
3339         struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3340         unsigned int released_blocks = 0;
3341         int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3342         block_t blkaddr;
3343         int i;
3344
3345         for (i = 0; i < count; i++) {
3346                 blkaddr = data_blkaddr(dn->inode, dn->node_page,
3347                                                 dn->ofs_in_node + i);
3348
3349                 if (!__is_valid_data_blkaddr(blkaddr))
3350                         continue;
3351                 if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
3352                                         DATA_GENERIC_ENHANCE)))
3353                         return -EFSCORRUPTED;
3354         }
3355
3356         while (count) {
3357                 int compr_blocks = 0;
3358
3359                 for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
3360                         blkaddr = f2fs_data_blkaddr(dn);
3361
3362                         if (i == 0) {
3363                                 if (blkaddr == COMPRESS_ADDR)
3364                                         continue;
3365                                 dn->ofs_in_node += cluster_size;
3366                                 goto next;
3367                         }
3368
3369                         if (__is_valid_data_blkaddr(blkaddr))
3370                                 compr_blocks++;
3371
3372                         if (blkaddr != NEW_ADDR)
3373                                 continue;
3374
3375                         dn->data_blkaddr = NULL_ADDR;
3376                         f2fs_set_data_blkaddr(dn);
3377                 }
3378
3379                 f2fs_i_compr_blocks_update(dn->inode, compr_blocks, false);
3380                 dec_valid_block_count(sbi, dn->inode,
3381                                         cluster_size - compr_blocks);
3382
3383                 released_blocks += cluster_size - compr_blocks;
3384 next:
3385                 count -= cluster_size;
3386         }
3387
3388         return released_blocks;
3389 }
3390
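/*
 * F2FS_IOC_RELEASE_COMPRESS_BLOCKS: mark the file FI_COMPRESS_RELEASED
 * (further writes are refused) and give the space saved by compression
 * back to the free block count. The caller must be the only writer; the
 * number of released blocks is copied back through @arg.
 *
 * Illustrative userspace call:
 *
 *	__u64 released;
 *	ioctl(fd, F2FS_IOC_RELEASE_COMPRESS_BLOCKS, &released);
 */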
3391 static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
3392 {
3393         struct inode *inode = file_inode(filp);
3394         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3395         pgoff_t page_idx = 0, last_idx;
3396         unsigned int released_blocks = 0;
3397         int ret;
3398         int writecount;
3399
3400         if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3401                 return -EOPNOTSUPP;
3402
3403         if (!f2fs_compressed_file(inode))
3404                 return -EINVAL;
3405
3406         if (f2fs_readonly(sbi->sb))
3407                 return -EROFS;
3408
3409         ret = mnt_want_write_file(filp);
3410         if (ret)
3411                 return ret;
3412
3413         f2fs_balance_fs(F2FS_I_SB(inode), true);
3414
3415         inode_lock(inode);
3416
3417         writecount = atomic_read(&inode->i_writecount);
3418         if ((filp->f_mode & FMODE_WRITE && writecount != 1) ||
3419                         (!(filp->f_mode & FMODE_WRITE) && writecount)) {
3420                 ret = -EBUSY;
3421                 goto out;
3422         }
3423
3424         if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
3425                 ret = -EINVAL;
3426                 goto out;
3427         }
3428
3429         ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
3430         if (ret)
3431                 goto out;
3432
3433         set_inode_flag(inode, FI_COMPRESS_RELEASED);
3434         inode->i_ctime = current_time(inode);
3435         f2fs_mark_inode_dirty_sync(inode, true);
3436
3437         if (!atomic_read(&F2FS_I(inode)->i_compr_blocks))
3438                 goto out;
3439
3440         down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3441         filemap_invalidate_lock(inode->i_mapping);
3442
3443         last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3444
3445         while (page_idx < last_idx) {
3446                 struct dnode_of_data dn;
3447                 pgoff_t end_offset, count;
3448
3449                 set_new_dnode(&dn, inode, NULL, NULL, 0);
3450                 ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
3451                 if (ret) {
3452                         if (ret == -ENOENT) {
3453                                 page_idx = f2fs_get_next_page_offset(&dn,
3454                                                                 page_idx);
3455                                 ret = 0;
3456                                 continue;
3457                         }
3458                         break;
3459                 }
3460
3461                 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3462                 count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
3463                 count = round_up(count, F2FS_I(inode)->i_cluster_size);
3464
3465                 ret = release_compress_blocks(&dn, count);
3466
3467                 f2fs_put_dnode(&dn);
3468
3469                 if (ret < 0)
3470                         break;
3471
3472                 page_idx += count;
3473                 released_blocks += ret;
3474         }
3475
3476         up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3477         filemap_invalidate_unlock(inode->i_mapping);
3478 out:
3479         inode_unlock(inode);
3480
3481         mnt_drop_write_file(filp);
3482
3483         if (ret >= 0) {
3484                 ret = put_user(released_blocks, (u64 __user *)arg);
3485         } else if (released_blocks &&
3486                         atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
3487                 set_sbi_flag(sbi, SBI_NEED_FSCK);
3488                 f2fs_warn(sbi, "%s: partial blocks were released, i_ino=%lx "
3489                         "iblocks=%llu, released=%u, compr_blocks=%u, "
3490                         "run fsck to fix.",
3491                         __func__, inode->i_ino, inode->i_blocks,
3492                         released_blocks,
3493                         atomic_read(&F2FS_I(inode)->i_compr_blocks));
3494         }
3495
3496         return ret;
3497 }
3498
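/*
 * Counterpart of release_compress_blocks(): re-reserve the released
 * slots of each cluster as NEW_ADDR and charge them against the valid
 * block count again, returning how many blocks were reserved, or
 * -ENOSPC if a cluster could not be fully re-reserved.
 */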
3499 static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
3500 {
3501         struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3502         unsigned int reserved_blocks = 0;
3503         int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3504         block_t blkaddr;
3505         int i;
3506
3507         for (i = 0; i < count; i++) {
3508                 blkaddr = data_blkaddr(dn->inode, dn->node_page,
3509                                                 dn->ofs_in_node + i);
3510
3511                 if (!__is_valid_data_blkaddr(blkaddr))
3512                         continue;
3513                 if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
3514                                         DATA_GENERIC_ENHANCE)))
3515                         return -EFSCORRUPTED;
3516         }
3517
3518         while (count) {
3519                 int compr_blocks = 0;
3520                 blkcnt_t reserved;
3521                 int ret;
3522
3523                 for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
3524                         blkaddr = f2fs_data_blkaddr(dn);
3525
3526                         if (i == 0) {
3527                                 if (blkaddr == COMPRESS_ADDR)
3528                                         continue;
3529                                 dn->ofs_in_node += cluster_size;
3530                                 goto next;
3531                         }
3532
3533                         if (__is_valid_data_blkaddr(blkaddr)) {
3534                                 compr_blocks++;
3535                                 continue;
3536                         }
3537
3538                         dn->data_blkaddr = NEW_ADDR;
3539                         f2fs_set_data_blkaddr(dn);
3540                 }
3541
3542                 reserved = cluster_size - compr_blocks;
3543                 ret = inc_valid_block_count(sbi, dn->inode, &reserved);
3544                 if (ret)
3545                         return ret;
3546
3547                 if (reserved != cluster_size - compr_blocks)
3548                         return -ENOSPC;
3549
3550                 f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true);
3551
3552                 reserved_blocks += reserved;
3553 next:
3554                 count -= cluster_size;
3555         }
3556
3557         return reserved_blocks;
3558 }
3559
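/*
 * F2FS_IOC_RESERVE_COMPRESS_BLOCKS: undo a prior release; re-reserve the
 * saved blocks and clear FI_COMPRESS_RELEASED so the file accepts writes
 * again. The number of reserved blocks is copied back through @arg.
 */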
3560 static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
3561 {
3562         struct inode *inode = file_inode(filp);
3563         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3564         pgoff_t page_idx = 0, last_idx;
3565         unsigned int reserved_blocks = 0;
3566         int ret;
3567
3568         if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3569                 return -EOPNOTSUPP;
3570
3571         if (!f2fs_compressed_file(inode))
3572                 return -EINVAL;
3573
3574         if (f2fs_readonly(sbi->sb))
3575                 return -EROFS;
3576
3577         ret = mnt_want_write_file(filp);
3578         if (ret)
3579                 return ret;
3580
3581         if (atomic_read(&F2FS_I(inode)->i_compr_blocks))
3582                 goto out;
3583
3584         f2fs_balance_fs(F2FS_I_SB(inode), true);
3585
3586         inode_lock(inode);
3587
3588         if (!is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
3589                 ret = -EINVAL;
3590                 goto unlock_inode;
3591         }
3592
3593         down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3594         filemap_invalidate_lock(inode->i_mapping);
3595
3596         last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3597
3598         while (page_idx < last_idx) {
3599                 struct dnode_of_data dn;
3600                 pgoff_t end_offset, count;
3601
3602                 set_new_dnode(&dn, inode, NULL, NULL, 0);
3603                 ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
3604                 if (ret) {
3605                         if (ret == -ENOENT) {
3606                                 page_idx = f2fs_get_next_page_offset(&dn,
3607                                                                 page_idx);
3608                                 ret = 0;
3609                                 continue;
3610                         }
3611                         break;
3612                 }
3613
3614                 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3615                 count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
3616                 count = round_up(count, F2FS_I(inode)->i_cluster_size);
3617
3618                 ret = reserve_compress_blocks(&dn, count);
3619
3620                 f2fs_put_dnode(&dn);
3621
3622                 if (ret < 0)
3623                         break;
3624
3625                 page_idx += count;
3626                 reserved_blocks += ret;
3627         }
3628
3629         up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3630         filemap_invalidate_unlock(inode->i_mapping);
3631
3632         if (ret >= 0) {
3633                 clear_inode_flag(inode, FI_COMPRESS_RELEASED);
3634                 inode->i_ctime = current_time(inode);
3635                 f2fs_mark_inode_dirty_sync(inode, true);
3636         }
3637 unlock_inode:
3638         inode_unlock(inode);
3639 out:
3640         mnt_drop_write_file(filp);
3641
3642         if (ret >= 0) {
3643                 ret = put_user(reserved_blocks, (u64 __user *)arg);
3644         } else if (reserved_blocks &&
3645                         atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
3646                 set_sbi_flag(sbi, SBI_NEED_FSCK);
3647         f2fs_warn(sbi, "%s: partial blocks were reserved, i_ino=%lx "
3648                         "iblocks=%llu, reserved=%u, compr_blocks=%u, "
3649                         "run fsck to fix.",
3650                         __func__, inode->i_ino, inode->i_blocks,
3651                         reserved_blocks,
3652                         atomic_read(&F2FS_I(inode)->i_compr_blocks));
3653         }
3654
3655         return ret;
3656 }
3657
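/*
 * Erase one physically contiguous extent on @bdev: issue a discard
 * (secure, if the queue supports it) when F2FS_TRIM_FILE_DISCARD is set,
 * then zero the range when F2FS_TRIM_FILE_ZEROOUT is set, going through
 * fscrypt_zeroout_range() for encrypted files so encrypted zeroes are
 * written.
 */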
3658 static int f2fs_secure_erase(struct block_device *bdev, struct inode *inode,
3659                 pgoff_t off, block_t block, block_t len, u32 flags)
3660 {
3661         struct request_queue *q = bdev_get_queue(bdev);
3662         sector_t sector = SECTOR_FROM_BLOCK(block);
3663         sector_t nr_sects = SECTOR_FROM_BLOCK(len);
3664         int ret = 0;
3665
3666         if (!q)
3667                 return -ENXIO;
3668
3669         if (flags & F2FS_TRIM_FILE_DISCARD)
3670                 ret = blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOFS,
3671                                                 blk_queue_secure_erase(q) ?
3672                                                 BLKDEV_DISCARD_SECURE : 0);
3673
3674         if (!ret && (flags & F2FS_TRIM_FILE_ZEROOUT)) {
3675                 if (IS_ENCRYPTED(inode))
3676                         ret = fscrypt_zeroout_range(inode, off, block, len);
3677                 else
3678                         ret = blkdev_issue_zeroout(bdev, sector, nr_sects,
3679                                         GFP_NOFS, 0);
3680         }
3681
3682         return ret;
3683 }
3684
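/*
 * F2FS_IOC_SEC_TRIM_FILE: securely erase the data blocks backing a
 * block-aligned range of a regular file by discarding and/or zeroing
 * them on the device. Runs of blocks that are contiguous on the same
 * device are merged into a single f2fs_secure_erase() call. Atomic and
 * compressed files are rejected.
 */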
3685 static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
3686 {
3687         struct inode *inode = file_inode(filp);
3688         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3689         struct address_space *mapping = inode->i_mapping;
3690         struct block_device *prev_bdev = NULL;
3691         struct f2fs_sectrim_range range;
3692         pgoff_t index, pg_end, prev_index = 0;
3693         block_t prev_block = 0, len = 0;
3694         loff_t end_addr;
3695         bool to_end = false;
3696         int ret = 0;
3697
3698         if (!(filp->f_mode & FMODE_WRITE))
3699                 return -EBADF;
3700
3701         if (copy_from_user(&range, (struct f2fs_sectrim_range __user *)arg,
3702                                 sizeof(range)))
3703                 return -EFAULT;
3704
3705         if (range.flags == 0 || (range.flags & ~F2FS_TRIM_FILE_MASK) ||
3706                         !S_ISREG(inode->i_mode))
3707                 return -EINVAL;
3708
3709         if (((range.flags & F2FS_TRIM_FILE_DISCARD) &&
3710                         !f2fs_hw_support_discard(sbi)) ||
3711                         ((range.flags & F2FS_TRIM_FILE_ZEROOUT) &&
3712                          IS_ENCRYPTED(inode) && f2fs_is_multi_device(sbi)))
3713                 return -EOPNOTSUPP;
3714
3715         file_start_write(filp);
3716         inode_lock(inode);
3717
3718         if (f2fs_is_atomic_file(inode) || f2fs_compressed_file(inode) ||
3719                         range.start >= inode->i_size) {
3720                 ret = -EINVAL;
3721                 goto err;
3722         }
3723
3724         if (range.len == 0)
3725                 goto err;
3726
3727         if (inode->i_size - range.start > range.len) {
3728                 end_addr = range.start + range.len;
3729         } else {
3730                 end_addr = range.len == (u64)-1 ?
3731                         sbi->sb->s_maxbytes : inode->i_size;
3732                 to_end = true;
3733         }
3734
3735         if (!IS_ALIGNED(range.start, F2FS_BLKSIZE) ||
3736                         (!to_end && !IS_ALIGNED(end_addr, F2FS_BLKSIZE))) {
3737                 ret = -EINVAL;
3738                 goto err;
3739         }
3740
3741         index = F2FS_BYTES_TO_BLK(range.start);
3742         pg_end = DIV_ROUND_UP(end_addr, F2FS_BLKSIZE);
3743
3744         ret = f2fs_convert_inline_inode(inode);
3745         if (ret)
3746                 goto err;
3747
3748         down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3749         filemap_invalidate_lock(mapping);
3750
3751         ret = filemap_write_and_wait_range(mapping, range.start,
3752                         to_end ? LLONG_MAX : end_addr - 1);
3753         if (ret)
3754                 goto out;
3755
3756         truncate_inode_pages_range(mapping, range.start,
3757                         to_end ? -1 : end_addr - 1);
3758
3759         while (index < pg_end) {
3760                 struct dnode_of_data dn;
3761                 pgoff_t end_offset, count;
3762                 int i;
3763
3764                 set_new_dnode(&dn, inode, NULL, NULL, 0);
3765                 ret = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
3766                 if (ret) {
3767                         if (ret == -ENOENT) {
3768                                 index = f2fs_get_next_page_offset(&dn, index);
3769                                 continue;
3770                         }
3771                         goto out;
3772                 }
3773
3774                 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3775                 count = min(end_offset - dn.ofs_in_node, pg_end - index);
3776                 for (i = 0; i < count; i++, index++, dn.ofs_in_node++) {
3777                         struct block_device *cur_bdev;
3778                         block_t blkaddr = f2fs_data_blkaddr(&dn);
3779
3780                         if (!__is_valid_data_blkaddr(blkaddr))
3781                                 continue;
3782
3783                         if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
3784                                                 DATA_GENERIC_ENHANCE)) {
3785                                 ret = -EFSCORRUPTED;
3786                                 f2fs_put_dnode(&dn);
3787                                 goto out;
3788                         }
3789
3790                         cur_bdev = f2fs_target_device(sbi, blkaddr, NULL);
3791                         if (f2fs_is_multi_device(sbi)) {
3792                                 int di = f2fs_target_device_index(sbi, blkaddr);
3793
3794                                 blkaddr -= FDEV(di).start_blk;
3795                         }
3796
3797                         if (len) {
3798                                 if (prev_bdev == cur_bdev &&
3799                                                 index == prev_index + len &&
3800                                                 blkaddr == prev_block + len) {
3801                                         len++;
3802                                 } else {
3803                                         ret = f2fs_secure_erase(prev_bdev,
3804                                                 inode, prev_index, prev_block,
3805                                                 len, range.flags);
3806                                         if (ret) {
3807                                                 f2fs_put_dnode(&dn);
3808                                                 goto out;
3809                                         }
3810
3811                                         len = 0;
3812                                 }
3813                         }
3814
3815                         if (!len) {
3816                                 prev_bdev = cur_bdev;
3817                                 prev_index = index;
3818                                 prev_block = blkaddr;
3819                                 len = 1;
3820                         }
3821                 }
3822
3823                 f2fs_put_dnode(&dn);
3824
3825                 if (fatal_signal_pending(current)) {
3826                         ret = -EINTR;
3827                         goto out;
3828                 }
3829                 cond_resched();
3830         }
3831
3832         if (len)
3833                 ret = f2fs_secure_erase(prev_bdev, inode, prev_index,
3834                                 prev_block, len, range.flags);
3835 out:
3836         filemap_invalidate_unlock(mapping);
3837         up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3838 err:
3839         inode_unlock(inode);
3840         file_end_write(filp);
3841
3842         return ret;
3843 }
3844
3845 static int f2fs_ioc_get_compress_option(struct file *filp, unsigned long arg)
3846 {
3847         struct inode *inode = file_inode(filp);
3848         struct f2fs_comp_option option;
3849
3850         if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3851                 return -EOPNOTSUPP;
3852
3853         inode_lock_shared(inode);
3854
3855         if (!f2fs_compressed_file(inode)) {
3856                 inode_unlock_shared(inode);
3857                 return -ENODATA;
3858         }
3859
3860         option.algorithm = F2FS_I(inode)->i_compress_algorithm;
3861         option.log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
3862
3863         inode_unlock_shared(inode);
3864
3865         if (copy_to_user((struct f2fs_comp_option __user *)arg, &option,
3866                                 sizeof(option)))
3867                 return -EFAULT;
3868
3869         return 0;
3870 }
3871
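/*
 * F2FS_IOC_SET_COMPRESS_OPTION: change the compression algorithm and
 * cluster size of a compressed file. Fails with -EBUSY on mmapped or
 * dirty files and with -EFBIG once the file already has data.
 *
 * Illustrative userspace call (1 selects LZ4 with this kernel's
 * algorithm numbering):
 *
 *	struct f2fs_comp_option opt = { .algorithm = 1, .log_cluster_size = 2 };
 *	ioctl(fd, F2FS_IOC_SET_COMPRESS_OPTION, &opt);
 */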
3872 static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
3873 {
3874         struct inode *inode = file_inode(filp);
3875         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3876         struct f2fs_comp_option option;
3877         int ret = 0;
3878
3879         if (!f2fs_sb_has_compression(sbi))
3880                 return -EOPNOTSUPP;
3881
3882         if (!(filp->f_mode & FMODE_WRITE))
3883                 return -EBADF;
3884
3885         if (copy_from_user(&option, (struct f2fs_comp_option __user *)arg,
3886                                 sizeof(option)))
3887                 return -EFAULT;
3888
3889         if (!f2fs_compressed_file(inode) ||
3890                         option.log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
3891                         option.log_cluster_size > MAX_COMPRESS_LOG_SIZE ||
3892                         option.algorithm >= COMPRESS_MAX)
3893                 return -EINVAL;
3894
3895         file_start_write(filp);
3896         inode_lock(inode);
3897
3898         if (f2fs_is_mmap_file(inode) || get_dirty_pages(inode)) {
3899                 ret = -EBUSY;
3900                 goto out;
3901         }
3902
3903         if (inode->i_size != 0) {
3904                 ret = -EFBIG;
3905                 goto out;
3906         }
3907
3908         F2FS_I(inode)->i_compress_algorithm = option.algorithm;
3909         F2FS_I(inode)->i_log_cluster_size = option.log_cluster_size;
3910         F2FS_I(inode)->i_cluster_size = 1 << option.log_cluster_size;
3911         f2fs_mark_inode_dirty_sync(inode, true);
3912
3913         if (!f2fs_is_compress_backend_ready(inode))
3914                 f2fs_warn(sbi, "compression algorithm is successfully set, "
3915                         "but the current kernel doesn't support this algorithm.");
3916 out:
3917         inode_unlock(inode);
3918         file_end_write(filp);
3919
3920         return ret;
3921 }
3922
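/*
 * Pull @len pages starting at @page_idx into the page cache and mark
 * them dirty again so that the next writeback rewrites those blocks;
 * the compress/decompress ioctls use this to push existing data back
 * through the (de)compression path.
 */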
3923 static int redirty_blocks(struct inode *inode, pgoff_t page_idx, int len)
3924 {
3925         DEFINE_READAHEAD(ractl, NULL, NULL, inode->i_mapping, page_idx);
3926         struct address_space *mapping = inode->i_mapping;
3927         struct page *page;
3928         pgoff_t redirty_idx = page_idx;
3929         int i, page_len = 0, ret = 0;
3930
3931         page_cache_ra_unbounded(&ractl, len, 0);
3932
3933         for (i = 0; i < len; i++, page_idx++) {
3934                 page = read_cache_page(mapping, page_idx, NULL, NULL);
3935                 if (IS_ERR(page)) {
3936                         ret = PTR_ERR(page);
3937                         break;
3938                 }
3939                 page_len++;
3940         }
3941
3942         for (i = 0; i < page_len; i++, redirty_idx++) {
3943                 page = find_lock_page(mapping, redirty_idx);
3944                 if (!page) {
3945                         ret = -ENOMEM;
3946                         break;
3947                 }
3948                 set_page_dirty(page);
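		/* drop find_lock_page()'s ref, then the ref read_cache_page() took */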
3949                 f2fs_put_page(page, 1);
3950                 f2fs_put_page(page, 0);
3951         }
3952
3953         return ret;
3954 }
3955
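/*
 * F2FS_IOC_DECOMPRESS_FILE: only valid with compress_mode=user. Rewrite
 * a compressed file so its data lands on disk uncompressed, by
 * redirtying it cluster by cluster and letting writeback do the work,
 * flushing whenever a segment's worth of pages is dirty.
 */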
3956 static int f2fs_ioc_decompress_file(struct file *filp, unsigned long arg)
3957 {
3958         struct inode *inode = file_inode(filp);
3959         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3960         struct f2fs_inode_info *fi = F2FS_I(inode);
3961         pgoff_t page_idx = 0, last_idx;
3962         unsigned int blk_per_seg = sbi->blocks_per_seg;
3963         int cluster_size = F2FS_I(inode)->i_cluster_size;
3964         int count, ret;
3965
3966         if (!f2fs_sb_has_compression(sbi) ||
3967                         F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
3968                 return -EOPNOTSUPP;
3969
3970         if (!(filp->f_mode & FMODE_WRITE))
3971                 return -EBADF;
3972
3973         if (!f2fs_compressed_file(inode))
3974                 return -EINVAL;
3975
3976         f2fs_balance_fs(F2FS_I_SB(inode), true);
3977
3978         file_start_write(filp);
3979         inode_lock(inode);
3980
3981         if (!f2fs_is_compress_backend_ready(inode)) {
3982                 ret = -EOPNOTSUPP;
3983                 goto out;
3984         }
3985
3986         if (f2fs_is_mmap_file(inode)) {
3987                 ret = -EBUSY;
3988                 goto out;
3989         }
3990
3991         ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
3992         if (ret)
3993                 goto out;
3994
3995         if (!atomic_read(&fi->i_compr_blocks))
3996                 goto out;
3997
3998         last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3999
4000         count = last_idx - page_idx;
4001         while (count) {
4002                 int len = min(cluster_size, count);
4003
4004                 ret = redirty_blocks(inode, page_idx, len);
4005                 if (ret < 0)
4006                         break;
4007
4008                 if (get_dirty_pages(inode) >= blk_per_seg)
4009                         filemap_fdatawrite(inode->i_mapping);
4010
4011                 count -= len;
4012                 page_idx += len;
4013         }
4014
4015         if (!ret)
4016                 ret = filemap_write_and_wait_range(inode->i_mapping, 0,
4017                                                         LLONG_MAX);
4018
4019         if (ret)
4020                 f2fs_warn(sbi, "%s: The file might be partially decompressed (errno=%d). Please delete the file.",
4021                           __func__, ret);
4022 out:
4023         inode_unlock(inode);
4024         file_end_write(filp);
4025
4026         return ret;
4027 }
4028
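/*
 * F2FS_IOC_COMPRESS_FILE: mirror of the decompress ioctl; temporarily
 * set FI_ENABLE_COMPRESS so writeback compresses the redirtied clusters,
 * then clear it again.
 */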
4029 static int f2fs_ioc_compress_file(struct file *filp, unsigned long arg)
4030 {
4031         struct inode *inode = file_inode(filp);
4032         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4033         pgoff_t page_idx = 0, last_idx;
4034         unsigned int blk_per_seg = sbi->blocks_per_seg;
4035         int cluster_size = F2FS_I(inode)->i_cluster_size;
4036         int count, ret;
4037
4038         if (!f2fs_sb_has_compression(sbi) ||
4039                         F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
4040                 return -EOPNOTSUPP;
4041
4042         if (!(filp->f_mode & FMODE_WRITE))
4043                 return -EBADF;
4044
4045         if (!f2fs_compressed_file(inode))
4046                 return -EINVAL;
4047
4048         f2fs_balance_fs(F2FS_I_SB(inode), true);
4049
4050         file_start_write(filp);
4051         inode_lock(inode);
4052
4053         if (!f2fs_is_compress_backend_ready(inode)) {
4054                 ret = -EOPNOTSUPP;
4055                 goto out;
4056         }
4057
4058         if (f2fs_is_mmap_file(inode)) {
4059                 ret = -EBUSY;
4060                 goto out;
4061         }
4062
4063         ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
4064         if (ret)
4065                 goto out;
4066
4067         set_inode_flag(inode, FI_ENABLE_COMPRESS);
4068
4069         last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
4070
4071         count = last_idx - page_idx;
4072         while (count) {
4073                 int len = min(cluster_size, count);
4074
4075                 ret = redirty_blocks(inode, page_idx, len);
4076                 if (ret < 0)
4077                         break;
4078
4079                 if (get_dirty_pages(inode) >= blk_per_seg)
4080                         filemap_fdatawrite(inode->i_mapping);
4081
4082                 count -= len;
4083                 page_idx += len;
4084         }
4085
4086         if (!ret)
4087                 ret = filemap_write_and_wait_range(inode->i_mapping, 0,
4088                                                         LLONG_MAX);
4089
4090         clear_inode_flag(inode, FI_ENABLE_COMPRESS);
4091
4092         if (ret)
4093                 f2fs_warn(sbi, "%s: The file might be partially compressed (errno=%d). Please delete the file.",
4094                           __func__, ret);
4095 out:
4096         inode_unlock(inode);
4097         file_end_write(filp);
4098
4099         return ret;
4100 }
4101
4102 static long __f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4103 {
4104         switch (cmd) {
4105         case FS_IOC_GETVERSION:
4106                 return f2fs_ioc_getversion(filp, arg);
4107         case F2FS_IOC_START_ATOMIC_WRITE:
4108                 return f2fs_ioc_start_atomic_write(filp);
4109         case F2FS_IOC_COMMIT_ATOMIC_WRITE:
4110                 return f2fs_ioc_commit_atomic_write(filp);
4111         case F2FS_IOC_START_VOLATILE_WRITE:
4112                 return f2fs_ioc_start_volatile_write(filp);
4113         case F2FS_IOC_RELEASE_VOLATILE_WRITE:
4114                 return f2fs_ioc_release_volatile_write(filp);
4115         case F2FS_IOC_ABORT_VOLATILE_WRITE:
4116                 return f2fs_ioc_abort_volatile_write(filp);
4117         case F2FS_IOC_SHUTDOWN:
4118                 return f2fs_ioc_shutdown(filp, arg);
4119         case FITRIM:
4120                 return f2fs_ioc_fitrim(filp, arg);
4121         case FS_IOC_SET_ENCRYPTION_POLICY:
4122                 return f2fs_ioc_set_encryption_policy(filp, arg);
4123         case FS_IOC_GET_ENCRYPTION_POLICY:
4124                 return f2fs_ioc_get_encryption_policy(filp, arg);
4125         case FS_IOC_GET_ENCRYPTION_PWSALT:
4126                 return f2fs_ioc_get_encryption_pwsalt(filp, arg);
4127         case FS_IOC_GET_ENCRYPTION_POLICY_EX:
4128                 return f2fs_ioc_get_encryption_policy_ex(filp, arg);
4129         case FS_IOC_ADD_ENCRYPTION_KEY:
4130                 return f2fs_ioc_add_encryption_key(filp, arg);
4131         case FS_IOC_REMOVE_ENCRYPTION_KEY:
4132                 return f2fs_ioc_remove_encryption_key(filp, arg);
4133         case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
4134                 return f2fs_ioc_remove_encryption_key_all_users(filp, arg);
4135         case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
4136                 return f2fs_ioc_get_encryption_key_status(filp, arg);
4137         case FS_IOC_GET_ENCRYPTION_NONCE:
4138                 return f2fs_ioc_get_encryption_nonce(filp, arg);
4139         case F2FS_IOC_GARBAGE_COLLECT:
4140                 return f2fs_ioc_gc(filp, arg);
4141         case F2FS_IOC_GARBAGE_COLLECT_RANGE:
4142                 return f2fs_ioc_gc_range(filp, arg);
4143         case F2FS_IOC_WRITE_CHECKPOINT:
4144                 return f2fs_ioc_write_checkpoint(filp, arg);
4145         case F2FS_IOC_DEFRAGMENT:
4146                 return f2fs_ioc_defragment(filp, arg);
4147         case F2FS_IOC_MOVE_RANGE:
4148                 return f2fs_ioc_move_range(filp, arg);
4149         case F2FS_IOC_FLUSH_DEVICE:
4150                 return f2fs_ioc_flush_device(filp, arg);
4151         case F2FS_IOC_GET_FEATURES:
4152                 return f2fs_ioc_get_features(filp, arg);
4153         case F2FS_IOC_GET_PIN_FILE:
4154                 return f2fs_ioc_get_pin_file(filp, arg);
4155         case F2FS_IOC_SET_PIN_FILE:
4156                 return f2fs_ioc_set_pin_file(filp, arg);
4157         case F2FS_IOC_PRECACHE_EXTENTS:
4158                 return f2fs_ioc_precache_extents(filp, arg);
4159         case F2FS_IOC_RESIZE_FS:
4160                 return f2fs_ioc_resize_fs(filp, arg);
4161         case FS_IOC_ENABLE_VERITY:
4162                 return f2fs_ioc_enable_verity(filp, arg);
4163         case FS_IOC_MEASURE_VERITY:
4164                 return f2fs_ioc_measure_verity(filp, arg);
4165         case FS_IOC_READ_VERITY_METADATA:
4166                 return f2fs_ioc_read_verity_metadata(filp, arg);
4167         case FS_IOC_GETFSLABEL:
4168                 return f2fs_ioc_getfslabel(filp, arg);
4169         case FS_IOC_SETFSLABEL:
4170                 return f2fs_ioc_setfslabel(filp, arg);
4171         case F2FS_IOC_GET_COMPRESS_BLOCKS:
4172                 return f2fs_get_compress_blocks(filp, arg);
4173         case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
4174                 return f2fs_release_compress_blocks(filp, arg);
4175         case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
4176                 return f2fs_reserve_compress_blocks(filp, arg);
4177         case F2FS_IOC_SEC_TRIM_FILE:
4178                 return f2fs_sec_trim_file(filp, arg);
4179         case F2FS_IOC_GET_COMPRESS_OPTION:
4180                 return f2fs_ioc_get_compress_option(filp, arg);
4181         case F2FS_IOC_SET_COMPRESS_OPTION:
4182                 return f2fs_ioc_set_compress_option(filp, arg);
4183         case F2FS_IOC_DECOMPRESS_FILE:
4184                 return f2fs_ioc_decompress_file(filp, arg);
4185         case F2FS_IOC_COMPRESS_FILE:
4186                 return f2fs_ioc_compress_file(filp, arg);
4187         default:
4188                 return -ENOTTY;
4189         }
4190 }
4191
4192 long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4193 {
4194         if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
4195                 return -EIO;
4196         if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp))))
4197                 return -ENOSPC;
4198
4199         return __f2fs_ioctl(filp, cmd, arg);
4200 }
4201
4202 static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
4203 {
4204         struct file *file = iocb->ki_filp;
4205         struct inode *inode = file_inode(file);
4206         int ret;
4207
4208         if (!f2fs_is_compress_backend_ready(inode))
4209                 return -EOPNOTSUPP;
4210
4211         ret = generic_file_read_iter(iocb, iter);
4212
4213         if (ret > 0)
4214                 f2fs_update_iostat(F2FS_I_SB(inode), APP_READ_IO, ret);
4215
4216         return ret;
4217 }
4218
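/*
 * Write entry point. Beyond generic_write_checks(), this handles
 * IOCB_NOWAIT (bail out with -EAGAIN rather than block or allocate),
 * preallocates blocks ahead of the common write path, and truncates the
 * preallocation again if the write came up short of the target size.
 */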
4219 static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
4220 {
4221         struct file *file = iocb->ki_filp;
4222         struct inode *inode = file_inode(file);
4223         ssize_t ret;
4224
4225         if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
4226                 ret = -EIO;
4227                 goto out;
4228         }
4229
4230         if (!f2fs_is_compress_backend_ready(inode)) {
4231                 ret = -EOPNOTSUPP;
4232                 goto out;
4233         }
4234
4235         if (iocb->ki_flags & IOCB_NOWAIT) {
4236                 if (!inode_trylock(inode)) {
4237                         ret = -EAGAIN;
4238                         goto out;
4239                 }
4240         } else {
4241                 inode_lock(inode);
4242         }
4243
4244         if (unlikely(IS_IMMUTABLE(inode))) {
4245                 ret = -EPERM;
4246                 goto unlock;
4247         }
4248
4249         if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
4250                 ret = -EPERM;
4251                 goto unlock;
4252         }
4253
4254         ret = generic_write_checks(iocb, from);
4255         if (ret > 0) {
4256                 bool preallocated = false;
4257                 size_t target_size = 0;
4258                 int err;
4259
4260                 if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
4261                         set_inode_flag(inode, FI_NO_PREALLOC);
4262
4263                 if ((iocb->ki_flags & IOCB_NOWAIT)) {
4264                         if (!f2fs_overwrite_io(inode, iocb->ki_pos,
4265                                                 iov_iter_count(from)) ||
4266                                 f2fs_has_inline_data(inode) ||
4267                                 f2fs_force_buffered_io(inode, iocb, from)) {
4268                                 clear_inode_flag(inode, FI_NO_PREALLOC);
4269                                 inode_unlock(inode);
4270                                 ret = -EAGAIN;
4271                                 goto out;
4272                         }
4273                         goto write;
4274                 }
4275
4276                 if (is_inode_flag_set(inode, FI_NO_PREALLOC))
4277                         goto write;
4278
4279                 if (iocb->ki_flags & IOCB_DIRECT) {
4280                         /*
4281                          * Convert inline data for Direct I/O before entering
4282                          * f2fs_direct_IO().
4283                          */
4284                         err = f2fs_convert_inline_inode(inode);
4285                         if (err)
4286                                 goto out_err;
4287                         /*
4288                          * If f2fs_force_buffered_io() is true, we have to allocate
4289                          * blocks all the time, since f2fs_direct_IO will fall
4290                          * back to buffered IO.
4291                          */
4292                         if (!f2fs_force_buffered_io(inode, iocb, from) &&
4293                                         allow_outplace_dio(inode, iocb, from))
4294                                 goto write;
4295                 }
4296                 preallocated = true;
4297                 target_size = iocb->ki_pos + iov_iter_count(from);
4298
4299                 err = f2fs_preallocate_blocks(iocb, from);
4300                 if (err) {
4301 out_err:
4302                         clear_inode_flag(inode, FI_NO_PREALLOC);
4303                         inode_unlock(inode);
4304                         ret = err;
4305                         goto out;
4306                 }
4307 write:
4308                 ret = __generic_file_write_iter(iocb, from);
4309                 clear_inode_flag(inode, FI_NO_PREALLOC);
4310
4311                 /* if we couldn't write data, we should deallocate blocks. */
4312                 if (preallocated && i_size_read(inode) < target_size) {
4313                         down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
4314                         filemap_invalidate_lock(inode->i_mapping);
4315                         f2fs_truncate(inode);
4316                         filemap_invalidate_unlock(inode->i_mapping);
4317                         up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
4318                 }
4319
4320                 if (ret > 0)
4321                         f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
4322         }
4323 unlock:
4324         inode_unlock(inode);
4325 out:
4326         trace_f2fs_file_write_iter(inode, iocb->ki_pos,
4327                                         iov_iter_count(from), ret);
4328         if (ret > 0)
4329                 ret = generic_write_sync(iocb, ret);
4330         return ret;
4331 }
4332
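/*
 * 32-bit compat ioctls: f2fs_gc_range and f2fs_move_range contain u64
 * members that are only 4-byte aligned in 32-bit ABIs, so those two
 * requests need explicit translation below; the remaining commands are
 * passed straight through with compat_ptr().
 */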
4333 #ifdef CONFIG_COMPAT
4334 struct compat_f2fs_gc_range {
4335         u32 sync;
4336         compat_u64 start;
4337         compat_u64 len;
4338 };
4339 #define F2FS_IOC32_GARBAGE_COLLECT_RANGE        _IOW(F2FS_IOCTL_MAGIC, 11,\
4340                                                 struct compat_f2fs_gc_range)
4341
4342 static int f2fs_compat_ioc_gc_range(struct file *file, unsigned long arg)
4343 {
4344         struct compat_f2fs_gc_range __user *urange;
4345         struct f2fs_gc_range range;
4346         int err;
4347
4348         urange = compat_ptr(arg);
4349         err = get_user(range.sync, &urange->sync);
4350         err |= get_user(range.start, &urange->start);
4351         err |= get_user(range.len, &urange->len);
4352         if (err)
4353                 return -EFAULT;
4354
4355         return __f2fs_ioc_gc_range(file, &range);
4356 }
4357
4358 struct compat_f2fs_move_range {
4359         u32 dst_fd;
4360         compat_u64 pos_in;
4361         compat_u64 pos_out;
4362         compat_u64 len;
4363 };
4364 #define F2FS_IOC32_MOVE_RANGE           _IOWR(F2FS_IOCTL_MAGIC, 9,      \
4365                                         struct compat_f2fs_move_range)
4366
4367 static int f2fs_compat_ioc_move_range(struct file *file, unsigned long arg)
4368 {
4369         struct compat_f2fs_move_range __user *urange;
4370         struct f2fs_move_range range;
4371         int err;
4372
4373         urange = compat_ptr(arg);
4374         err = get_user(range.dst_fd, &urange->dst_fd);
4375         err |= get_user(range.pos_in, &urange->pos_in);
4376         err |= get_user(range.pos_out, &urange->pos_out);
4377         err |= get_user(range.len, &urange->len);
4378         if (err)
4379                 return -EFAULT;
4380
4381         return __f2fs_ioc_move_range(file, &range);
4382 }
4383
4384 long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
4385 {
4386         if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
4387                 return -EIO;
4388         if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(file))))
4389                 return -ENOSPC;
4390
4391         switch (cmd) {
4392         case FS_IOC32_GETVERSION:
4393                 cmd = FS_IOC_GETVERSION;
4394                 break;
4395         case F2FS_IOC32_GARBAGE_COLLECT_RANGE:
4396                 return f2fs_compat_ioc_gc_range(file, arg);
4397         case F2FS_IOC32_MOVE_RANGE:
4398                 return f2fs_compat_ioc_move_range(file, arg);
4399         case F2FS_IOC_START_ATOMIC_WRITE:
4400         case F2FS_IOC_COMMIT_ATOMIC_WRITE:
4401         case F2FS_IOC_START_VOLATILE_WRITE:
4402         case F2FS_IOC_RELEASE_VOLATILE_WRITE:
4403         case F2FS_IOC_ABORT_VOLATILE_WRITE:
4404         case F2FS_IOC_SHUTDOWN:
4405         case FITRIM:
4406         case FS_IOC_SET_ENCRYPTION_POLICY:
4407         case FS_IOC_GET_ENCRYPTION_PWSALT:
4408         case FS_IOC_GET_ENCRYPTION_POLICY:
4409         case FS_IOC_GET_ENCRYPTION_POLICY_EX:
4410         case FS_IOC_ADD_ENCRYPTION_KEY:
4411         case FS_IOC_REMOVE_ENCRYPTION_KEY:
4412         case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
4413         case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
4414         case FS_IOC_GET_ENCRYPTION_NONCE:
4415         case F2FS_IOC_GARBAGE_COLLECT:
4416         case F2FS_IOC_WRITE_CHECKPOINT:
4417         case F2FS_IOC_DEFRAGMENT:
4418         case F2FS_IOC_FLUSH_DEVICE:
4419         case F2FS_IOC_GET_FEATURES:
4420         case F2FS_IOC_GET_PIN_FILE:
4421         case F2FS_IOC_SET_PIN_FILE:
4422         case F2FS_IOC_PRECACHE_EXTENTS:
4423         case F2FS_IOC_RESIZE_FS:
4424         case FS_IOC_ENABLE_VERITY:
4425         case FS_IOC_MEASURE_VERITY:
4426         case FS_IOC_READ_VERITY_METADATA:
4427         case FS_IOC_GETFSLABEL:
4428         case FS_IOC_SETFSLABEL:
4429         case F2FS_IOC_GET_COMPRESS_BLOCKS:
4430         case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
4431         case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
4432         case F2FS_IOC_SEC_TRIM_FILE:
4433         case F2FS_IOC_GET_COMPRESS_OPTION:
4434         case F2FS_IOC_SET_COMPRESS_OPTION:
4435         case F2FS_IOC_DECOMPRESS_FILE:
4436         case F2FS_IOC_COMPRESS_FILE:
4437                 break;
4438         default:
4439                 return -ENOIOCTLCMD;
4440         }
4441         return __f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
4442 }
4443 #endif
4444
4445 const struct file_operations f2fs_file_operations = {
4446         .llseek         = f2fs_llseek,
4447         .read_iter      = f2fs_file_read_iter,
4448         .write_iter     = f2fs_file_write_iter,
4449         .open           = f2fs_file_open,
4450         .release        = f2fs_release_file,
4451         .mmap           = f2fs_file_mmap,
4452         .flush          = f2fs_file_flush,
4453         .fsync          = f2fs_sync_file,
4454         .fallocate      = f2fs_fallocate,
4455         .unlocked_ioctl = f2fs_ioctl,
4456 #ifdef CONFIG_COMPAT
4457         .compat_ioctl   = f2fs_compat_ioctl,
4458 #endif
4459         .splice_read    = generic_file_splice_read,
4460         .splice_write   = iter_file_splice_write,
4461 };