// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/file.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/uuid.h>
#include <linux/file.h>
#include <linux/nls.h>
#include <linux/sched/signal.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"
#include "gc.h"
#include <trace/events/f2fs.h>
#include <uapi/linux/f2fs.h>

static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
{
        struct inode *inode = file_inode(vmf->vma->vm_file);
        vm_fault_t ret;

        down_read(&F2FS_I(inode)->i_mmap_sem);
        ret = filemap_fault(vmf);
        up_read(&F2FS_I(inode)->i_mmap_sem);

        if (!ret)
                f2fs_update_iostat(F2FS_I_SB(inode), APP_MAPPED_READ_IO,
                                                        F2FS_BLKSIZE);

        trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret);

        return ret;
}

static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
{
        struct page *page = vmf->page;
        struct inode *inode = file_inode(vmf->vma->vm_file);
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct dnode_of_data dn;
        bool need_alloc = true;
        int err = 0;

        if (unlikely(IS_IMMUTABLE(inode)))
                return VM_FAULT_SIGBUS;

        if (unlikely(f2fs_cp_error(sbi))) {
                err = -EIO;
                goto err;
        }

        if (!f2fs_is_checkpoint_ready(sbi)) {
                err = -ENOSPC;
                goto err;
        }

        err = f2fs_convert_inline_inode(inode);
        if (err)
                goto err;

#ifdef CONFIG_F2FS_FS_COMPRESSION
        if (f2fs_compressed_file(inode)) {
                int ret = f2fs_is_compressed_cluster(inode, page->index);

                if (ret < 0) {
                        err = ret;
                        goto err;
                } else if (ret) {
                        if (ret < F2FS_I(inode)->i_cluster_size) {
                                err = -EAGAIN;
                                goto err;
                        }
                        need_alloc = false;
                }
        }
#endif
        /* this should be done outside of any locked page */
        if (need_alloc)
                f2fs_balance_fs(sbi, true);

        sb_start_pagefault(inode->i_sb);

        f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

        file_update_time(vmf->vma->vm_file);
        down_read(&F2FS_I(inode)->i_mmap_sem);
        lock_page(page);
        if (unlikely(page->mapping != inode->i_mapping ||
                        page_offset(page) > i_size_read(inode) ||
                        !PageUptodate(page))) {
                unlock_page(page);
                err = -EFAULT;
                goto out_sem;
        }

        if (need_alloc) {
                /* block allocation */
                f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
                set_new_dnode(&dn, inode, NULL, NULL, 0);
                err = f2fs_get_block(&dn, page->index);
                f2fs_put_dnode(&dn);
                f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
        }

#ifdef CONFIG_F2FS_FS_COMPRESSION
        if (!need_alloc) {
                set_new_dnode(&dn, inode, NULL, NULL, 0);
                err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
                f2fs_put_dnode(&dn);
        }
#endif
        if (err) {
                unlock_page(page);
                goto out_sem;
        }

        f2fs_wait_on_page_writeback(page, DATA, false, true);

        /* wait for GCed page writeback via META_MAPPING */
        f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

        /*
         * check to see if the page is mapped already (no holes)
         */
        if (PageMappedToDisk(page))
                goto out_sem;

        /* page is wholly or partially inside EOF */
        if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
                                                i_size_read(inode)) {
                loff_t offset;

                offset = i_size_read(inode) & ~PAGE_MASK;
                zero_user_segment(page, offset, PAGE_SIZE);
        }
        set_page_dirty(page);
        if (!PageUptodate(page))
                SetPageUptodate(page);

        f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);
        f2fs_update_time(sbi, REQ_TIME);

        trace_f2fs_vm_page_mkwrite(page, DATA);
out_sem:
        up_read(&F2FS_I(inode)->i_mmap_sem);

        sb_end_pagefault(inode->i_sb);
err:
        return block_page_mkwrite_return(err);
}

static const struct vm_operations_struct f2fs_file_vm_ops = {
        .fault          = f2fs_filemap_fault,
        .map_pages      = filemap_map_pages,
        .page_mkwrite   = f2fs_vm_page_mkwrite,
};
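
/*
 * A rough sketch of how these ops are reached (hypothetical userspace
 * sequence, not part of this file): a store through a shared writable
 * mapping faults the page in via .fault and, on the first write to a
 * clean page, notifies the filesystem via .page_mkwrite:
 *
 *      char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *                     MAP_SHARED, fd, 0);
 *      p[0] = 1;       // read fault -> f2fs_filemap_fault(),
 *                      // write notify -> f2fs_vm_page_mkwrite()
 */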

static int get_parent_ino(struct inode *inode, nid_t *pino)
{
        struct dentry *dentry;

        /*
         * Make sure to get the non-deleted alias.  The alias associated with
         * the open file descriptor being fsync()'ed may be deleted already.
         */
        dentry = d_find_alias(inode);
        if (!dentry)
                return 0;

        *pino = parent_ino(dentry);
        dput(dentry);
        return 1;
}

static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        enum cp_reason_type cp_reason = CP_NO_NEEDED;

        if (!S_ISREG(inode->i_mode))
                cp_reason = CP_NON_REGULAR;
        else if (f2fs_compressed_file(inode))
                cp_reason = CP_COMPRESSED;
        else if (inode->i_nlink != 1)
                cp_reason = CP_HARDLINK;
        else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
                cp_reason = CP_SB_NEED_CP;
        else if (file_wrong_pino(inode))
                cp_reason = CP_WRONG_PINO;
        else if (!f2fs_space_for_roll_forward(sbi))
                cp_reason = CP_NO_SPC_ROLL;
        else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
                cp_reason = CP_NODE_NEED_CP;
        else if (test_opt(sbi, FASTBOOT))
                cp_reason = CP_FASTBOOT_MODE;
        else if (F2FS_OPTION(sbi).active_logs == 2)
                cp_reason = CP_SPEC_LOG_NUM;
        else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
                f2fs_need_dentry_mark(sbi, inode->i_ino) &&
                f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
                                                        TRANS_DIR_INO))
                cp_reason = CP_RECOVER_DIR;

        return cp_reason;
}
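
/*
 * Note: the chain above reports only the first matching reason. Any
 * nonzero (non-CP_NO_NEEDED) result makes f2fs_do_sync_file() below issue
 * a full checkpoint via f2fs_sync_fs() instead of the cheaper
 * roll-forward fsync path.
 */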

static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
{
        struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
        bool ret = false;
        /* but we need to check whether there are pending inode updates */
        if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino))
                ret = true;
        f2fs_put_page(i, 0);
        return ret;
}

static void try_to_fix_pino(struct inode *inode)
{
        struct f2fs_inode_info *fi = F2FS_I(inode);
        nid_t pino;

        down_write(&fi->i_sem);
        if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
                        get_parent_ino(inode, &pino)) {
                f2fs_i_pino_write(inode, pino);
                file_got_pino(inode);
        }
        up_write(&fi->i_sem);
}

static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
                                                int datasync, bool atomic)
{
        struct inode *inode = file->f_mapping->host;
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        nid_t ino = inode->i_ino;
        int ret = 0;
        enum cp_reason_type cp_reason = 0;
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_ALL,
                .nr_to_write = LONG_MAX,
                .for_reclaim = 0,
        };
        unsigned int seq_id = 0;

        if (unlikely(f2fs_readonly(inode->i_sb) ||
                                is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
                return 0;

        trace_f2fs_sync_file_enter(inode);

        if (S_ISDIR(inode->i_mode))
                goto go_write;

        /* if fdatasync is triggered, let's do in-place-update */
        if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
                set_inode_flag(inode, FI_NEED_IPU);
        ret = file_write_and_wait_range(file, start, end);
        clear_inode_flag(inode, FI_NEED_IPU);

        if (ret) {
                trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
                return ret;
        }

        /* if the inode is dirty, let's recover it at all times */
        if (!f2fs_skip_inode_update(inode, datasync)) {
                f2fs_write_inode(inode, NULL);
                goto go_write;
        }

        /*
         * if there is no written data, don't waste time writing recovery info.
         */
        if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
                        !f2fs_exist_written_data(sbi, ino, APPEND_INO)) {

                /* it may call write_inode just prior to fsync */
                if (need_inode_page_update(sbi, ino))
                        goto go_write;

                if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
                                f2fs_exist_written_data(sbi, ino, UPDATE_INO))
                        goto flush_out;
                goto out;
        }
go_write:
        /*
         * Both fdatasync() and fsync() must be recoverable after a
         * sudden power-off.
         */
        down_read(&F2FS_I(inode)->i_sem);
        cp_reason = need_do_checkpoint(inode);
        up_read(&F2FS_I(inode)->i_sem);

        if (cp_reason) {
                /* all the dirty node pages should be flushed for POR */
                ret = f2fs_sync_fs(inode->i_sb, 1);

                /*
                 * We've secured consistency through sync_fs. The following
                 * pino will be used only for fsynced inodes after checkpoint.
                 */
                try_to_fix_pino(inode);
                clear_inode_flag(inode, FI_APPEND_WRITE);
                clear_inode_flag(inode, FI_UPDATE_WRITE);
                goto out;
        }
sync_nodes:
        atomic_inc(&sbi->wb_sync_req[NODE]);
        ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
        atomic_dec(&sbi->wb_sync_req[NODE]);
        if (ret)
                goto out;

        /* if cp_error is set, we should avoid an infinite loop */
        if (unlikely(f2fs_cp_error(sbi))) {
                ret = -EIO;
                goto out;
        }

        if (f2fs_need_inode_block_update(sbi, ino)) {
                f2fs_mark_inode_dirty_sync(inode, true);
                f2fs_write_inode(inode, NULL);
                goto sync_nodes;
        }

        /*
         * If it's an atomic_write, it's fine to keep the write ordering. So
         * here we don't need to wait for node write completion, since we use
         * the node chain which serializes node blocks. If one of the node
         * writes is reordered, we can simply see a broken chain, which stops
         * roll-forward recovery. It means we'll recover either all or none
         * of the node blocks given the fsync mark.
         */
        if (!atomic) {
                ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
                if (ret)
                        goto out;
        }

        /* once recovery info is written, we don't need to track this */
        f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
        clear_inode_flag(inode, FI_APPEND_WRITE);
flush_out:
        if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
                ret = f2fs_issue_flush(sbi, inode->i_ino);
        if (!ret) {
                f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
                clear_inode_flag(inode, FI_UPDATE_WRITE);
                f2fs_remove_ino_entry(sbi, ino, FLUSH_INO);
        }
        f2fs_update_time(sbi, REQ_TIME);
out:
        trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
        return ret;
}

int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
        if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
                return -EIO;
        return f2fs_do_sync_file(file, start, end, datasync, false);
}
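
/*
 * Hypothetical userspace view (not part of this file): both calls below
 * end up in f2fs_do_sync_file(); fdatasync() merely passes datasync = 1,
 * which biases the flush toward in-place updates (FI_NEED_IPU above):
 *
 *      write(fd, buf, len);
 *      fsync(fd);      // datasync == 0
 *      fdatasync(fd);  // datasync == 1
 */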

static bool __found_offset(struct address_space *mapping, block_t blkaddr,
                                pgoff_t index, int whence)
{
        switch (whence) {
        case SEEK_DATA:
                if (__is_valid_data_blkaddr(blkaddr))
                        return true;
                if (blkaddr == NEW_ADDR &&
                    xa_get_mark(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY))
                        return true;
                break;
        case SEEK_HOLE:
                if (blkaddr == NULL_ADDR)
                        return true;
                break;
        }
        return false;
}

static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
        struct inode *inode = file->f_mapping->host;
        loff_t maxbytes = inode->i_sb->s_maxbytes;
        struct dnode_of_data dn;
        pgoff_t pgofs, end_offset;
        loff_t data_ofs = offset;
        loff_t isize;
        int err = 0;

        inode_lock(inode);

        isize = i_size_read(inode);
        if (offset >= isize)
                goto fail;

        /* handle inline data case */
        if (f2fs_has_inline_data(inode)) {
                if (whence == SEEK_HOLE) {
                        data_ofs = isize;
                        goto found;
                } else if (whence == SEEK_DATA) {
                        data_ofs = offset;
                        goto found;
                }
        }

        pgofs = (pgoff_t)(offset >> PAGE_SHIFT);

        for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
                set_new_dnode(&dn, inode, NULL, NULL, 0);
                err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
                if (err && err != -ENOENT) {
                        goto fail;
                } else if (err == -ENOENT) {
                        /* direct node does not exist */
                        if (whence == SEEK_DATA) {
                                pgofs = f2fs_get_next_page_offset(&dn, pgofs);
                                continue;
                        } else {
                                goto found;
                        }
                }

                end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

                /* find data/hole in dnode block */
                for (; dn.ofs_in_node < end_offset;
                                dn.ofs_in_node++, pgofs++,
                                data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
                        block_t blkaddr;

                        blkaddr = f2fs_data_blkaddr(&dn);

                        if (__is_valid_data_blkaddr(blkaddr) &&
                                !f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
                                        blkaddr, DATA_GENERIC_ENHANCE)) {
                                f2fs_put_dnode(&dn);
                                goto fail;
                        }

                        if (__found_offset(file->f_mapping, blkaddr,
                                                        pgofs, whence)) {
                                f2fs_put_dnode(&dn);
                                goto found;
                        }
                }
                f2fs_put_dnode(&dn);
        }

        if (whence == SEEK_DATA)
                goto fail;
found:
        if (whence == SEEK_HOLE && data_ofs > isize)
                data_ofs = isize;
        inode_unlock(inode);
        return vfs_setpos(file, data_ofs, maxbytes);
fail:
        inode_unlock(inode);
        return -ENXIO;
}

static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
        struct inode *inode = file->f_mapping->host;
        loff_t maxbytes = inode->i_sb->s_maxbytes;

        if (f2fs_compressed_file(inode))
                maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;

        switch (whence) {
        case SEEK_SET:
        case SEEK_CUR:
        case SEEK_END:
                return generic_file_llseek_size(file, offset, whence,
                                                maxbytes, i_size_read(inode));
        case SEEK_DATA:
        case SEEK_HOLE:
                if (offset < 0)
                        return -ENXIO;
                return f2fs_seek_block(file, offset, whence);
        }

        return -EINVAL;
}
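
/*
 * Illustrative only (hypothetical userspace snippet): SEEK_DATA/SEEK_HOLE
 * land in f2fs_seek_block(), which walks dnode blocks for the next mapped
 * or unmapped offset:
 *
 *      off_t data = lseek(fd, 0, SEEK_DATA);  // next non-hole offset >= 0
 *      off_t hole = lseek(fd, 0, SEEK_HOLE);  // next hole offset >= 0
 *      // either returns -1 with errno == ENXIO past i_size
 */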

static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct inode *inode = file_inode(file);

        if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
                return -EIO;

        if (!f2fs_is_compress_backend_ready(inode))
                return -EOPNOTSUPP;

        file_accessed(file);
        vma->vm_ops = &f2fs_file_vm_ops;
        set_inode_flag(inode, FI_MMAP_FILE);
        return 0;
}

static int f2fs_file_open(struct inode *inode, struct file *filp)
{
        int err = fscrypt_file_open(inode, filp);

        if (err)
                return err;

        if (!f2fs_is_compress_backend_ready(inode))
                return -EOPNOTSUPP;

        err = fsverity_file_open(inode, filp);
        if (err)
                return err;

        filp->f_mode |= FMODE_NOWAIT;

        return dquot_file_open(inode, filp);
}

void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
        struct f2fs_node *raw_node;
        int nr_free = 0, ofs = dn->ofs_in_node, len = count;
        __le32 *addr;
        int base = 0;
        bool compressed_cluster = false;
        int cluster_index = 0, valid_blocks = 0;
        int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
        bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);

        if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
                base = get_extra_isize(dn->inode);

        raw_node = F2FS_NODE(dn->node_page);
        addr = blkaddr_in_node(raw_node) + base + ofs;

        /* Assumption: truncation starts at a cluster boundary */
        for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
                block_t blkaddr = le32_to_cpu(*addr);

                if (f2fs_compressed_file(dn->inode) &&
                                        !(cluster_index & (cluster_size - 1))) {
                        if (compressed_cluster)
                                f2fs_i_compr_blocks_update(dn->inode,
                                                        valid_blocks, false);
                        compressed_cluster = (blkaddr == COMPRESS_ADDR);
                        valid_blocks = 0;
                }

                if (blkaddr == NULL_ADDR)
                        continue;

                dn->data_blkaddr = NULL_ADDR;
                f2fs_set_data_blkaddr(dn);

                if (__is_valid_data_blkaddr(blkaddr)) {
                        if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
                                        DATA_GENERIC_ENHANCE))
                                continue;
                        if (compressed_cluster)
                                valid_blocks++;
                }

                if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
                        clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);

                f2fs_invalidate_blocks(sbi, blkaddr);

                if (!released || blkaddr != COMPRESS_ADDR)
                        nr_free++;
        }

        if (compressed_cluster)
                f2fs_i_compr_blocks_update(dn->inode, valid_blocks, false);

        if (nr_free) {
                pgoff_t fofs;
                /*
                 * once we invalidate a valid blkaddr in range [ofs, ofs + count],
                 * we will invalidate all blkaddrs in the whole range.
                 */
                fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
                                                        dn->inode) + ofs;
                f2fs_update_extent_cache_range(dn, fofs, 0, len);
                dec_valid_block_count(sbi, dn->inode, nr_free);
        }
        dn->ofs_in_node = ofs;

        f2fs_update_time(sbi, REQ_TIME);
        trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
                                         dn->ofs_in_node, nr_free);
}

void f2fs_truncate_data_blocks(struct dnode_of_data *dn)
{
        f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
}

static int truncate_partial_data_page(struct inode *inode, u64 from,
                                                                bool cache_only)
{
        loff_t offset = from & (PAGE_SIZE - 1);
        pgoff_t index = from >> PAGE_SHIFT;
        struct address_space *mapping = inode->i_mapping;
        struct page *page;

        if (!offset && !cache_only)
                return 0;

        if (cache_only) {
                page = find_lock_page(mapping, index);
                if (page && PageUptodate(page))
                        goto truncate_out;
                f2fs_put_page(page, 1);
                return 0;
        }

        page = f2fs_get_lock_data_page(inode, index, true);
        if (IS_ERR(page))
                return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
truncate_out:
        f2fs_wait_on_page_writeback(page, DATA, true, true);
        zero_user(page, offset, PAGE_SIZE - offset);

        /* An encrypted inode should have a key and truncate the last page. */
        f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));
        if (!cache_only)
                set_page_dirty(page);
        f2fs_put_page(page, 1);
        return 0;
}

int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct dnode_of_data dn;
        pgoff_t free_from;
        int count = 0, err = 0;
        struct page *ipage;
        bool truncate_page = false;

        trace_f2fs_truncate_blocks_enter(inode, from);

        free_from = (pgoff_t)F2FS_BLK_ALIGN(from);

        if (free_from >= max_file_blocks(inode))
                goto free_partial;

        if (lock)
                f2fs_lock_op(sbi);

        ipage = f2fs_get_node_page(sbi, inode->i_ino);
        if (IS_ERR(ipage)) {
                err = PTR_ERR(ipage);
                goto out;
        }

        if (f2fs_has_inline_data(inode)) {
                f2fs_truncate_inline_inode(inode, ipage, from);
                f2fs_put_page(ipage, 1);
                truncate_page = true;
                goto out;
        }

        set_new_dnode(&dn, inode, ipage, NULL, 0);
        err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
        if (err) {
                if (err == -ENOENT)
                        goto free_next;
                goto out;
        }

        count = ADDRS_PER_PAGE(dn.node_page, inode);

        count -= dn.ofs_in_node;
        f2fs_bug_on(sbi, count < 0);

        if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
                f2fs_truncate_data_blocks_range(&dn, count);
                free_from += count;
        }

        f2fs_put_dnode(&dn);
free_next:
        err = f2fs_truncate_inode_blocks(inode, free_from);
out:
        if (lock)
                f2fs_unlock_op(sbi);
free_partial:
        /* lastly zero out the first data page */
        if (!err)
                err = truncate_partial_data_page(inode, from, truncate_page);

        trace_f2fs_truncate_blocks_exit(inode, err);
        return err;
}

int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
{
        u64 free_from = from;
        int err;

#ifdef CONFIG_F2FS_FS_COMPRESSION
        /*
         * for compressed file, only support cluster size
         * aligned truncation.
         */
        if (f2fs_compressed_file(inode))
                free_from = round_up(from,
                                F2FS_I(inode)->i_cluster_size << PAGE_SHIFT);
#endif

        err = f2fs_do_truncate_blocks(inode, free_from, lock);
        if (err)
                return err;

#ifdef CONFIG_F2FS_FS_COMPRESSION
        if (from != free_from) {
                err = f2fs_truncate_partial_cluster(inode, from, lock);
                if (err)
                        return err;
        }
#endif

        return 0;
}
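
/*
 * Worked example of the alignment above (illustrative numbers only): with
 * 4KiB pages and a cluster size of 4 blocks, truncating to from = 0x5000
 * first rounds free_from up to the 16KiB cluster boundary at 0x8000,
 * frees whole clusters from there, and then lets
 * f2fs_truncate_partial_cluster() handle the partially-truncated cluster
 * covering [0x4000, 0x8000).
 */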

int f2fs_truncate(struct inode *inode)
{
        int err;

        if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
                return -EIO;

        if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
                                S_ISLNK(inode->i_mode)))
                return 0;

        trace_f2fs_truncate(inode);

        if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
                f2fs_show_injection_info(F2FS_I_SB(inode), FAULT_TRUNCATE);
                return -EIO;
        }

        err = dquot_initialize(inode);
        if (err)
                return err;

        /* we should check inline_data size */
        if (!f2fs_may_inline_data(inode)) {
                err = f2fs_convert_inline_inode(inode);
                if (err)
                        return err;
        }

        err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
        if (err)
                return err;

        inode->i_mtime = inode->i_ctime = current_time(inode);
        f2fs_mark_inode_dirty_sync(inode, false);
        return 0;
}

int f2fs_getattr(const struct path *path, struct kstat *stat,
                 u32 request_mask, unsigned int query_flags)
{
        struct inode *inode = d_inode(path->dentry);
        struct f2fs_inode_info *fi = F2FS_I(inode);
        struct f2fs_inode *ri;
        unsigned int flags;

        if (f2fs_has_extra_attr(inode) &&
                        f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
                        F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
                stat->result_mask |= STATX_BTIME;
                stat->btime.tv_sec = fi->i_crtime.tv_sec;
                stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
        }

        flags = fi->i_flags;
        if (flags & F2FS_COMPR_FL)
                stat->attributes |= STATX_ATTR_COMPRESSED;
        if (flags & F2FS_APPEND_FL)
                stat->attributes |= STATX_ATTR_APPEND;
        if (IS_ENCRYPTED(inode))
                stat->attributes |= STATX_ATTR_ENCRYPTED;
        if (flags & F2FS_IMMUTABLE_FL)
                stat->attributes |= STATX_ATTR_IMMUTABLE;
        if (flags & F2FS_NODUMP_FL)
                stat->attributes |= STATX_ATTR_NODUMP;
        if (IS_VERITY(inode))
                stat->attributes |= STATX_ATTR_VERITY;

        stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
                                  STATX_ATTR_APPEND |
                                  STATX_ATTR_ENCRYPTED |
                                  STATX_ATTR_IMMUTABLE |
                                  STATX_ATTR_NODUMP |
                                  STATX_ATTR_VERITY);

        generic_fillattr(inode, stat);

        /* we need to show initial sectors used for inline_data/dentries */
        if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
                                        f2fs_has_inline_dentry(inode))
                stat->blocks += (stat->size + 511) >> 9;

        return 0;
}
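
/*
 * Hypothetical userspace probe (not part of this file): the attribute
 * bits set above surface through statx(2):
 *
 *      struct statx stx;
 *      statx(AT_FDCWD, "/mnt/f2fs/file", 0,
 *            STATX_BASIC_STATS | STATX_BTIME, &stx);
 *      if (stx.stx_attributes & STATX_ATTR_COMPRESSED)
 *              ;       // file uses f2fs compression
 */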

#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
        unsigned int ia_valid = attr->ia_valid;

        if (ia_valid & ATTR_UID)
                inode->i_uid = attr->ia_uid;
        if (ia_valid & ATTR_GID)
                inode->i_gid = attr->ia_gid;
        if (ia_valid & ATTR_ATIME)
                inode->i_atime = attr->ia_atime;
        if (ia_valid & ATTR_MTIME)
                inode->i_mtime = attr->ia_mtime;
        if (ia_valid & ATTR_CTIME)
                inode->i_ctime = attr->ia_ctime;
        if (ia_valid & ATTR_MODE) {
                umode_t mode = attr->ia_mode;

                if (!in_group_p(inode->i_gid) &&
                        !capable_wrt_inode_uidgid(inode, CAP_FSETID))
                        mode &= ~S_ISGID;
                set_acl_inode(inode, mode);
        }
}
#else
#define __setattr_copy setattr_copy
#endif

int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
        struct inode *inode = d_inode(dentry);
        int err;

        if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
                return -EIO;

        if (unlikely(IS_IMMUTABLE(inode)))
                return -EPERM;

        if (unlikely(IS_APPEND(inode) &&
                        (attr->ia_valid & (ATTR_MODE | ATTR_UID |
                                  ATTR_GID | ATTR_TIMES_SET))))
                return -EPERM;

        if ((attr->ia_valid & ATTR_SIZE) &&
                !f2fs_is_compress_backend_ready(inode))
                return -EOPNOTSUPP;

        err = setattr_prepare(dentry, attr);
        if (err)
                return err;

        err = fscrypt_prepare_setattr(dentry, attr);
        if (err)
                return err;

        err = fsverity_prepare_setattr(dentry, attr);
        if (err)
                return err;

        if (is_quota_modification(inode, attr)) {
                err = dquot_initialize(inode);
                if (err)
                        return err;
        }
        if ((attr->ia_valid & ATTR_UID &&
                !uid_eq(attr->ia_uid, inode->i_uid)) ||
                (attr->ia_valid & ATTR_GID &&
                !gid_eq(attr->ia_gid, inode->i_gid))) {
                f2fs_lock_op(F2FS_I_SB(inode));
                err = dquot_transfer(inode, attr);
                if (err) {
                        set_sbi_flag(F2FS_I_SB(inode),
                                        SBI_QUOTA_NEED_REPAIR);
                        f2fs_unlock_op(F2FS_I_SB(inode));
                        return err;
                }
                /*
                 * update uid/gid under lock_op(), so that dquot and inode can
                 * be updated atomically.
                 */
                if (attr->ia_valid & ATTR_UID)
                        inode->i_uid = attr->ia_uid;
                if (attr->ia_valid & ATTR_GID)
                        inode->i_gid = attr->ia_gid;
                f2fs_mark_inode_dirty_sync(inode, true);
                f2fs_unlock_op(F2FS_I_SB(inode));
        }

        if (attr->ia_valid & ATTR_SIZE) {
                loff_t old_size = i_size_read(inode);

                if (attr->ia_size > MAX_INLINE_DATA(inode)) {
                        /*
                         * convert the inline inode before i_size_write, so
                         * that the size never exceeds the inline_data limit
                         * while the inline flag is still set.
                         */
                        err = f2fs_convert_inline_inode(inode);
                        if (err)
                                return err;
                }

                down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
                down_write(&F2FS_I(inode)->i_mmap_sem);

                truncate_setsize(inode, attr->ia_size);

                if (attr->ia_size <= old_size)
                        err = f2fs_truncate(inode);
                /*
                 * do not trim all blocks after i_size if target size is
                 * larger than i_size.
                 */
                up_write(&F2FS_I(inode)->i_mmap_sem);
                up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
                if (err)
                        return err;

                spin_lock(&F2FS_I(inode)->i_size_lock);
                inode->i_mtime = inode->i_ctime = current_time(inode);
                F2FS_I(inode)->last_disk_size = i_size_read(inode);
                spin_unlock(&F2FS_I(inode)->i_size_lock);
        }

        __setattr_copy(inode, attr);

        if (attr->ia_valid & ATTR_MODE) {
                err = posix_acl_chmod(inode, f2fs_get_inode_mode(inode));

                if (is_inode_flag_set(inode, FI_ACL_MODE)) {
                        if (!err)
                                inode->i_mode = F2FS_I(inode)->i_acl_mode;
                        clear_inode_flag(inode, FI_ACL_MODE);
                }
        }

        /* the file size may have changed here */
        f2fs_mark_inode_dirty_sync(inode, true);

        /* inode change will produce dirty node pages flushed by checkpoint */
        f2fs_balance_fs(F2FS_I_SB(inode), true);

        return err;
}

const struct inode_operations f2fs_file_inode_operations = {
        .getattr        = f2fs_getattr,
        .setattr        = f2fs_setattr,
        .get_acl        = f2fs_get_acl,
        .set_acl        = f2fs_set_acl,
        .listxattr      = f2fs_listxattr,
        .fiemap         = f2fs_fiemap,
};

static int fill_zero(struct inode *inode, pgoff_t index,
                                        loff_t start, loff_t len)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct page *page;

        if (!len)
                return 0;

        f2fs_balance_fs(sbi, true);

        f2fs_lock_op(sbi);
        page = f2fs_get_new_data_page(inode, NULL, index, false);
        f2fs_unlock_op(sbi);

        if (IS_ERR(page))
                return PTR_ERR(page);

        f2fs_wait_on_page_writeback(page, DATA, true, true);
        zero_user(page, start, len);
        set_page_dirty(page);
        f2fs_put_page(page, 1);
        return 0;
}

int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
        int err;

        while (pg_start < pg_end) {
                struct dnode_of_data dn;
                pgoff_t end_offset, count;

                set_new_dnode(&dn, inode, NULL, NULL, 0);
                err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
                if (err) {
                        if (err == -ENOENT) {
                                pg_start = f2fs_get_next_page_offset(&dn,
                                                                pg_start);
                                continue;
                        }
                        return err;
                }

                end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
                count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);

                f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);

                f2fs_truncate_data_blocks_range(&dn, count);
                f2fs_put_dnode(&dn);

                pg_start += count;
        }
        return 0;
}

static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
        pgoff_t pg_start, pg_end;
        loff_t off_start, off_end;
        int ret;

        ret = f2fs_convert_inline_inode(inode);
        if (ret)
                return ret;

        pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
        pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

        off_start = offset & (PAGE_SIZE - 1);
        off_end = (offset + len) & (PAGE_SIZE - 1);

        if (pg_start == pg_end) {
                ret = fill_zero(inode, pg_start, off_start,
                                                off_end - off_start);
                if (ret)
                        return ret;
        } else {
                if (off_start) {
                        ret = fill_zero(inode, pg_start++, off_start,
                                                PAGE_SIZE - off_start);
                        if (ret)
                                return ret;
                }
                if (off_end) {
                        ret = fill_zero(inode, pg_end, 0, off_end);
                        if (ret)
                                return ret;
                }

                if (pg_start < pg_end) {
                        struct address_space *mapping = inode->i_mapping;
                        loff_t blk_start, blk_end;
                        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

                        f2fs_balance_fs(sbi, true);

                        blk_start = (loff_t)pg_start << PAGE_SHIFT;
                        blk_end = (loff_t)pg_end << PAGE_SHIFT;

                        down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
                        down_write(&F2FS_I(inode)->i_mmap_sem);

                        truncate_inode_pages_range(mapping, blk_start,
                                        blk_end - 1);

                        f2fs_lock_op(sbi);
                        ret = f2fs_truncate_hole(inode, pg_start, pg_end);
                        f2fs_unlock_op(sbi);

                        up_write(&F2FS_I(inode)->i_mmap_sem);
                        up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
                }
        }

        return ret;
}
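
/*
 * Illustrative userspace trigger (hypothetical): punch_hole() backs this
 * fallocate mode; partial pages at either edge are zeroed in place via
 * fill_zero(), and the whole pages in between are deallocated:
 *
 *      fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *                offset, len);
 */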

static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
                                int *do_replace, pgoff_t off, pgoff_t len)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct dnode_of_data dn;
        int ret, done, i;

next_dnode:
        set_new_dnode(&dn, inode, NULL, NULL, 0);
        ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
        if (ret && ret != -ENOENT) {
                return ret;
        } else if (ret == -ENOENT) {
                if (dn.max_level == 0)
                        return -ENOENT;
                done = min((pgoff_t)ADDRS_PER_BLOCK(inode) -
                                                dn.ofs_in_node, len);
                blkaddr += done;
                do_replace += done;
                goto next;
        }

        done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
                                                        dn.ofs_in_node, len);
        for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
                *blkaddr = f2fs_data_blkaddr(&dn);

                if (__is_valid_data_blkaddr(*blkaddr) &&
                        !f2fs_is_valid_blkaddr(sbi, *blkaddr,
                                        DATA_GENERIC_ENHANCE)) {
                        f2fs_put_dnode(&dn);
                        return -EFSCORRUPTED;
                }

                if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {

                        if (f2fs_lfs_mode(sbi)) {
                                f2fs_put_dnode(&dn);
                                return -EOPNOTSUPP;
                        }

                        /* do not invalidate this block address */
                        f2fs_update_data_blkaddr(&dn, NULL_ADDR);
                        *do_replace = 1;
                }
        }
        f2fs_put_dnode(&dn);
next:
        len -= done;
        off += done;
        if (len)
                goto next_dnode;
        return 0;
}

static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
                                int *do_replace, pgoff_t off, int len)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct dnode_of_data dn;
        int ret, i;

        for (i = 0; i < len; i++, do_replace++, blkaddr++) {
                if (*do_replace == 0)
                        continue;

                set_new_dnode(&dn, inode, NULL, NULL, 0);
                ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
                if (ret) {
                        dec_valid_block_count(sbi, inode, 1);
                        f2fs_invalidate_blocks(sbi, *blkaddr);
                } else {
                        f2fs_update_data_blkaddr(&dn, *blkaddr);
                }
                f2fs_put_dnode(&dn);
        }
        return 0;
}

static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
                        block_t *blkaddr, int *do_replace,
                        pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
        pgoff_t i = 0;
        int ret;

        while (i < len) {
                if (blkaddr[i] == NULL_ADDR && !full) {
                        i++;
                        continue;
                }

                if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
                        struct dnode_of_data dn;
                        struct node_info ni;
                        size_t new_size;
                        pgoff_t ilen;

                        set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
                        ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
                        if (ret)
                                return ret;

                        ret = f2fs_get_node_info(sbi, dn.nid, &ni);
                        if (ret) {
                                f2fs_put_dnode(&dn);
                                return ret;
                        }

                        ilen = min((pgoff_t)
                                ADDRS_PER_PAGE(dn.node_page, dst_inode) -
                                                dn.ofs_in_node, len - i);
                        do {
                                dn.data_blkaddr = f2fs_data_blkaddr(&dn);
                                f2fs_truncate_data_blocks_range(&dn, 1);

                                if (do_replace[i]) {
                                        f2fs_i_blocks_write(src_inode,
                                                        1, false, false);
                                        f2fs_i_blocks_write(dst_inode,
                                                        1, true, false);
                                        f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
                                        blkaddr[i], ni.version, true, false);

                                        do_replace[i] = 0;
                                }
                                dn.ofs_in_node++;
                                i++;
                                new_size = (loff_t)(dst + i) << PAGE_SHIFT;
                                if (dst_inode->i_size < new_size)
                                        f2fs_i_size_write(dst_inode, new_size);
                        } while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));

                        f2fs_put_dnode(&dn);
                } else {
                        struct page *psrc, *pdst;

                        psrc = f2fs_get_lock_data_page(src_inode,
                                                        src + i, true);
                        if (IS_ERR(psrc))
                                return PTR_ERR(psrc);
                        pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i,
                                                                true);
                        if (IS_ERR(pdst)) {
                                f2fs_put_page(psrc, 1);
                                return PTR_ERR(pdst);
                        }
                        f2fs_copy_page(psrc, pdst);
                        set_page_dirty(pdst);
                        f2fs_put_page(pdst, 1);
                        f2fs_put_page(psrc, 1);

                        ret = f2fs_truncate_hole(src_inode,
                                                src + i, src + i + 1);
                        if (ret)
                                return ret;
                        i++;
                }
        }
        return 0;
}

static int __exchange_data_block(struct inode *src_inode,
                        struct inode *dst_inode, pgoff_t src, pgoff_t dst,
                        pgoff_t len, bool full)
{
        block_t *src_blkaddr;
        int *do_replace;
        pgoff_t olen;
        int ret;

        while (len) {
                olen = min((pgoff_t)4 * ADDRS_PER_BLOCK(src_inode), len);

                src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
                                        array_size(olen, sizeof(block_t)),
                                        GFP_NOFS);
                if (!src_blkaddr)
                        return -ENOMEM;

                do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
                                        array_size(olen, sizeof(int)),
                                        GFP_NOFS);
                if (!do_replace) {
                        kvfree(src_blkaddr);
                        return -ENOMEM;
                }

                ret = __read_out_blkaddrs(src_inode, src_blkaddr,
                                        do_replace, src, olen);
                if (ret)
                        goto roll_back;

                ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
                                        do_replace, src, dst, olen, full);
                if (ret)
                        goto roll_back;

                src += olen;
                dst += olen;
                len -= olen;

                kvfree(src_blkaddr);
                kvfree(do_replace);
        }
        return 0;

roll_back:
        __roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen);
        kvfree(src_blkaddr);
        kvfree(do_replace);
        return ret;
}
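
/*
 * Design note (summary of the code above, not normative): the exchange
 * runs in chunks of at most 4 * ADDRS_PER_BLOCK() pages per pass. Each
 * pass snapshots the source block addresses (__read_out_blkaddrs), moves
 * or copies them into the destination (__clone_blkaddrs), and on failure
 * restores the saved addresses (__roll_back_blkaddrs), bounding both the
 * allocation size and the window that has to be rolled back.
 */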

static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
        pgoff_t start = offset >> PAGE_SHIFT;
        pgoff_t end = (offset + len) >> PAGE_SHIFT;
        int ret;

        f2fs_balance_fs(sbi, true);

        /* avoid gc operation during block exchange */
        down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
        down_write(&F2FS_I(inode)->i_mmap_sem);

        f2fs_lock_op(sbi);
        f2fs_drop_extent_tree(inode);
        truncate_pagecache(inode, offset);
        ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
        f2fs_unlock_op(sbi);

        up_write(&F2FS_I(inode)->i_mmap_sem);
        up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
        return ret;
}

static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
        loff_t new_size;
        int ret;

        if (offset + len >= i_size_read(inode))
                return -EINVAL;

        /* collapse range should be aligned to block size of f2fs. */
        if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
                return -EINVAL;

        ret = f2fs_convert_inline_inode(inode);
        if (ret)
                return ret;

        /* write out all dirty pages from offset */
        ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
        if (ret)
                return ret;

        ret = f2fs_do_collapse(inode, offset, len);
        if (ret)
                return ret;

        /* write out all moved pages, if possible */
        down_write(&F2FS_I(inode)->i_mmap_sem);
        filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
        truncate_pagecache(inode, offset);

        new_size = i_size_read(inode) - len;
        ret = f2fs_truncate_blocks(inode, new_size, true);
        up_write(&F2FS_I(inode)->i_mmap_sem);
        if (!ret)
                f2fs_i_size_write(inode, new_size);
        return ret;
}
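
/*
 * Illustrative only (hypothetical userspace call): collapsing shifts the
 * blocks after offset + len left by len bytes and shrinks i_size, which
 * is why both offset and len must be F2FS_BLKSIZE aligned:
 *
 *      fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, offset, len);
 */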

static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
                                                                pgoff_t end)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
        pgoff_t index = start;
        unsigned int ofs_in_node = dn->ofs_in_node;
        blkcnt_t count = 0;
        int ret;

        for (; index < end; index++, dn->ofs_in_node++) {
                if (f2fs_data_blkaddr(dn) == NULL_ADDR)
                        count++;
        }

        dn->ofs_in_node = ofs_in_node;
        ret = f2fs_reserve_new_blocks(dn, count);
        if (ret)
                return ret;

        dn->ofs_in_node = ofs_in_node;
        for (index = start; index < end; index++, dn->ofs_in_node++) {
                dn->data_blkaddr = f2fs_data_blkaddr(dn);
                /*
                 * f2fs_reserve_new_blocks() does not guarantee that the
                 * entire range gets allocated.
                 */
                if (dn->data_blkaddr == NULL_ADDR) {
                        ret = -ENOSPC;
                        break;
                }
                if (dn->data_blkaddr != NEW_ADDR) {
                        f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
                        dn->data_blkaddr = NEW_ADDR;
                        f2fs_set_data_blkaddr(dn);
                }
        }

        f2fs_update_extent_cache_range(dn, start, 0, index - start);

        return ret;
}

static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
                                                                int mode)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct address_space *mapping = inode->i_mapping;
        pgoff_t index, pg_start, pg_end;
        loff_t new_size = i_size_read(inode);
        loff_t off_start, off_end;
        int ret = 0;

        ret = inode_newsize_ok(inode, (len + offset));
        if (ret)
                return ret;

        ret = f2fs_convert_inline_inode(inode);
        if (ret)
                return ret;

        ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
        if (ret)
                return ret;

        pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
        pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

        off_start = offset & (PAGE_SIZE - 1);
        off_end = (offset + len) & (PAGE_SIZE - 1);

        if (pg_start == pg_end) {
                ret = fill_zero(inode, pg_start, off_start,
                                                off_end - off_start);
                if (ret)
                        return ret;

                new_size = max_t(loff_t, new_size, offset + len);
        } else {
                if (off_start) {
                        ret = fill_zero(inode, pg_start++, off_start,
                                                PAGE_SIZE - off_start);
                        if (ret)
                                return ret;

                        new_size = max_t(loff_t, new_size,
                                        (loff_t)pg_start << PAGE_SHIFT);
                }

                for (index = pg_start; index < pg_end;) {
                        struct dnode_of_data dn;
                        unsigned int end_offset;
                        pgoff_t end;

                        down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
                        down_write(&F2FS_I(inode)->i_mmap_sem);

                        truncate_pagecache_range(inode,
                                (loff_t)index << PAGE_SHIFT,
                                ((loff_t)pg_end << PAGE_SHIFT) - 1);

                        f2fs_lock_op(sbi);

                        set_new_dnode(&dn, inode, NULL, NULL, 0);
                        ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
                        if (ret) {
                                f2fs_unlock_op(sbi);
                                up_write(&F2FS_I(inode)->i_mmap_sem);
                                up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
                                goto out;
                        }

                        end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
                        end = min(pg_end, end_offset - dn.ofs_in_node + index);

                        ret = f2fs_do_zero_range(&dn, index, end);
                        f2fs_put_dnode(&dn);

                        f2fs_unlock_op(sbi);
                        up_write(&F2FS_I(inode)->i_mmap_sem);
                        up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

                        f2fs_balance_fs(sbi, dn.node_changed);

                        if (ret)
                                goto out;

                        index = end;
                        new_size = max_t(loff_t, new_size,
                                        (loff_t)index << PAGE_SHIFT);
                }

                if (off_end) {
                        ret = fill_zero(inode, pg_end, 0, off_end);
                        if (ret)
                                goto out;

                        new_size = max_t(loff_t, new_size, offset + len);
                }
        }

out:
        if (new_size > i_size_read(inode)) {
                if (mode & FALLOC_FL_KEEP_SIZE)
                        file_set_keep_isize(inode);
                else
                        f2fs_i_size_write(inode, new_size);
        }
        return ret;
}
1539
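/*
 * Handle FALLOC_FL_INSERT_RANGE: shift every block from @offset toward the
 * end of file by @len bytes, leaving a block-aligned hole at @offset.  The
 * exchange loop below walks from the file tail backwards so that no source
 * block is overwritten before it has been moved.
 */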
1540 static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
1541 {
1542         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1543         pgoff_t nr, pg_start, pg_end, delta, idx;
1544         loff_t new_size;
1545         int ret = 0;
1546
1547         new_size = i_size_read(inode) + len;
1548         ret = inode_newsize_ok(inode, new_size);
1549         if (ret)
1550                 return ret;
1551
1552         if (offset >= i_size_read(inode))
1553                 return -EINVAL;
1554
1555         /* the insert range must be aligned to the f2fs block size */
1556         if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
1557                 return -EINVAL;
1558
1559         ret = f2fs_convert_inline_inode(inode);
1560         if (ret)
1561                 return ret;
1562
1563         f2fs_balance_fs(sbi, true);
1564
1565         down_write(&F2FS_I(inode)->i_mmap_sem);
1566         ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
1567         up_write(&F2FS_I(inode)->i_mmap_sem);
1568         if (ret)
1569                 return ret;
1570
1571         /* write out all dirty pages from offset */
1572         ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1573         if (ret)
1574                 return ret;
1575
1576         pg_start = offset >> PAGE_SHIFT;
1577         pg_end = (offset + len) >> PAGE_SHIFT;
1578         delta = pg_end - pg_start;
1579         idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1580
1581         /* avoid gc operation during block exchange */
1582         down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1583         down_write(&F2FS_I(inode)->i_mmap_sem);
1584         truncate_pagecache(inode, offset);
1585
1586         while (!ret && idx > pg_start) {
1587                 nr = idx - pg_start;
1588                 if (nr > delta)
1589                         nr = delta;
1590                 idx -= nr;
1591
1592                 f2fs_lock_op(sbi);
1593                 f2fs_drop_extent_tree(inode);
1594
1595                 ret = __exchange_data_block(inode, inode, idx,
1596                                         idx + delta, nr, false);
1597                 f2fs_unlock_op(sbi);
1598         }
1599         up_write(&F2FS_I(inode)->i_mmap_sem);
1600         up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1601
1602         /* write out all moved pages, if possible */
1603         down_write(&F2FS_I(inode)->i_mmap_sem);
1604         filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1605         truncate_pagecache(inode, offset);
1606         up_write(&F2FS_I(inode)->i_mmap_sem);
1607
1608         if (!ret)
1609                 f2fs_i_size_write(inode, new_size);
1610         return ret;
1611 }
1612
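/*
 * Preallocate blocks for [offset, offset + len) on behalf of fallocate().
 * Pinned files are special-cased: their blocks must not move afterwards,
 * so space is carved out one segment at a time from CURSEG_COLD_DATA_PINNED,
 * running foreground GC first whenever free sections run low.
 */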
1613 static int expand_inode_data(struct inode *inode, loff_t offset,
1614                                         loff_t len, int mode)
1615 {
1616         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1617         struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
1618                         .m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
1619                         .m_may_create = true };
1620         pgoff_t pg_end;
1621         loff_t new_size = i_size_read(inode);
1622         loff_t off_end;
1623         int err;
1624
1625         err = inode_newsize_ok(inode, (len + offset));
1626         if (err)
1627                 return err;
1628
1629         err = f2fs_convert_inline_inode(inode);
1630         if (err)
1631                 return err;
1632
1633         f2fs_balance_fs(sbi, true);
1634
1635         pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
1636         off_end = (offset + len) & (PAGE_SIZE - 1);
1637
1638         map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT;
1639         map.m_len = pg_end - map.m_lblk;
1640         if (off_end)
1641                 map.m_len++;
1642
1643         if (!map.m_len)
1644                 return 0;
1645
1646         if (f2fs_is_pinned_file(inode)) {
1647                 block_t len = (map.m_len >> sbi->log_blocks_per_seg) <<
1648                                         sbi->log_blocks_per_seg;
1649                 block_t done = 0;
1650
1651                 if (map.m_len % sbi->blocks_per_seg)
1652                         len += sbi->blocks_per_seg;
1653
1654                 map.m_len = sbi->blocks_per_seg;
1655 next_alloc:
1656                 if (has_not_enough_free_secs(sbi, 0,
1657                         GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
1658                         down_write(&sbi->gc_lock);
1659                         err = f2fs_gc(sbi, true, false, NULL_SEGNO);
1660                         if (err && err != -ENODATA && err != -EAGAIN)
1661                                 goto out_err;
1662                 }
1663
1664                 down_write(&sbi->pin_sem);
1665
1666                 f2fs_lock_op(sbi);
1667                 f2fs_allocate_new_segment(sbi, CURSEG_COLD_DATA_PINNED);
1668                 f2fs_unlock_op(sbi);
1669
1670                 map.m_seg_type = CURSEG_COLD_DATA_PINNED;
1671                 err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
1672
1673                 up_write(&sbi->pin_sem);
1674
1675                 done += map.m_len;
1676                 len -= map.m_len;
1677                 map.m_lblk += map.m_len;
1678                 if (!err && len)
1679                         goto next_alloc;
1680
1681                 map.m_len = done;
1682         } else {
1683                 err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
1684         }
1685 out_err:
1686         if (err) {
1687                 pgoff_t last_off;
1688
1689                 if (!map.m_len)
1690                         return err;
1691
1692                 last_off = map.m_lblk + map.m_len - 1;
1693
1694                 /* update new size to the failed position */
1695                 new_size = (last_off == pg_end) ? offset + len :
1696                                         (loff_t)(last_off + 1) << PAGE_SHIFT;
1697         } else {
1698                 new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
1699         }
1700
1701         if (new_size > i_size_read(inode)) {
1702                 if (mode & FALLOC_FL_KEEP_SIZE)
1703                         file_set_keep_isize(inode);
1704                 else
1705                         f2fs_i_size_write(inode, new_size);
1706         }
1707
1708         return err;
1709 }
1710
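/*
 * Dispatch the fallocate() modes supported by f2fs.  As an illustrative
 * userspace sketch (not part of this file), the zero-range path above is
 * reached like this, assuming the file lives on an f2fs mount:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *
 *	int zero_first_mib(int fd)
 *	{
 *		// zero the first MiB in place; OR in FALLOC_FL_KEEP_SIZE
 *		// if i_size must not grow past the current EOF
 *		return fallocate(fd, FALLOC_FL_ZERO_RANGE, 0, 1 << 20);
 *	}
 */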
1711 static long f2fs_fallocate(struct file *file, int mode,
1712                                 loff_t offset, loff_t len)
1713 {
1714         struct inode *inode = file_inode(file);
1715         long ret = 0;
1716
1717         if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
1718                 return -EIO;
1719         if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
1720                 return -ENOSPC;
1721         if (!f2fs_is_compress_backend_ready(inode))
1722                 return -EOPNOTSUPP;
1723
1724         /* f2fs only supports ->fallocate for regular files */
1725         if (!S_ISREG(inode->i_mode))
1726                 return -EINVAL;
1727
1728         if (IS_ENCRYPTED(inode) &&
1729                 (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
1730                 return -EOPNOTSUPP;
1731
1732         if (f2fs_compressed_file(inode) &&
1733                 (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
1734                         FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE)))
1735                 return -EOPNOTSUPP;
1736
1737         if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
1738                         FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
1739                         FALLOC_FL_INSERT_RANGE))
1740                 return -EOPNOTSUPP;
1741
1742         inode_lock(inode);
1743
1744         if (mode & FALLOC_FL_PUNCH_HOLE) {
1745                 if (offset >= inode->i_size)
1746                         goto out;
1747
1748                 ret = punch_hole(inode, offset, len);
1749         } else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
1750                 ret = f2fs_collapse_range(inode, offset, len);
1751         } else if (mode & FALLOC_FL_ZERO_RANGE) {
1752                 ret = f2fs_zero_range(inode, offset, len, mode);
1753         } else if (mode & FALLOC_FL_INSERT_RANGE) {
1754                 ret = f2fs_insert_range(inode, offset, len);
1755         } else {
1756                 ret = expand_inode_data(inode, offset, len, mode);
1757         }
1758
1759         if (!ret) {
1760                 inode->i_mtime = inode->i_ctime = current_time(inode);
1761                 f2fs_mark_inode_dirty_sync(inode, false);
1762                 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
1763         }
1764
1765 out:
1766         inode_unlock(inode);
1767
1768         trace_f2fs_fallocate(inode, mode, offset, len, ret);
1769         return ret;
1770 }
1771
1772 static int f2fs_release_file(struct inode *inode, struct file *filp)
1773 {
1774         /*
1775          * f2fs_release_file() is called on every close, so we must not
1776          * drop any in-memory pages on a close issued by another process.
1777          */
1778         if (!(filp->f_mode & FMODE_WRITE) ||
1779                         atomic_read(&inode->i_writecount) != 1)
1780                 return 0;
1781
1782         /* any remaining atomic pages should be discarded */
1783         if (f2fs_is_atomic_file(inode))
1784                 f2fs_drop_inmem_pages(inode);
1785         if (f2fs_is_volatile_file(inode)) {
1786                 set_inode_flag(inode, FI_DROP_CACHE);
1787                 filemap_fdatawrite(inode->i_mapping);
1788                 clear_inode_flag(inode, FI_DROP_CACHE);
1789                 clear_inode_flag(inode, FI_VOLATILE_FILE);
1790                 stat_dec_volatile_write(inode);
1791         }
1792         return 0;
1793 }
1794
1795 static int f2fs_file_flush(struct file *file, fl_owner_t id)
1796 {
1797         struct inode *inode = file_inode(file);
1798
1799         /*
1800          * If the process doing a transaction crashes, we must roll back;
1801          * otherwise, other readers/writers could see a corrupted database
1802          * until all writers close the file. Since this must happen before
1803          * the file lock is dropped, it has to be done in ->flush.
1804          */
1805         if (f2fs_is_atomic_file(inode) &&
1806                         F2FS_I(inode)->inmem_task == current)
1807                 f2fs_drop_inmem_pages(inode);
1808         return 0;
1809 }
1810
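/*
 * Apply the i_flags in @iflags (restricted to @mask) to @inode, enforcing
 * the cross-flag constraints: casefold needs superblock support and an
 * empty directory, while F2FS_COMPR_FL and F2FS_NOCOMP_FL are mutually
 * exclusive and may only be toggled while it is still safe to do so.
 */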
1811 static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
1812 {
1813         struct f2fs_inode_info *fi = F2FS_I(inode);
1814         u32 masked_flags = fi->i_flags & mask;
1815
1816         f2fs_bug_on(F2FS_I_SB(inode), (iflags & ~mask));
1817
1818         /* Is it a quota file? Do not allow the user to mess with it */
1819         if (IS_NOQUOTA(inode))
1820                 return -EPERM;
1821
1822         if ((iflags ^ masked_flags) & F2FS_CASEFOLD_FL) {
1823                 if (!f2fs_sb_has_casefold(F2FS_I_SB(inode)))
1824                         return -EOPNOTSUPP;
1825                 if (!f2fs_empty_dir(inode))
1826                         return -ENOTEMPTY;
1827         }
1828
1829         if (iflags & (F2FS_COMPR_FL | F2FS_NOCOMP_FL)) {
1830                 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
1831                         return -EOPNOTSUPP;
1832                 if ((iflags & F2FS_COMPR_FL) && (iflags & F2FS_NOCOMP_FL))
1833                         return -EINVAL;
1834         }
1835
1836         if ((iflags ^ masked_flags) & F2FS_COMPR_FL) {
1837                 if (masked_flags & F2FS_COMPR_FL) {
1838                         if (!f2fs_disable_compressed_file(inode))
1839                                 return -EINVAL;
1840                 }
1841                 if (iflags & F2FS_NOCOMP_FL)
1842                         return -EINVAL;
1843                 if (iflags & F2FS_COMPR_FL) {
1844                         if (!f2fs_may_compress(inode))
1845                                 return -EINVAL;
1846                         if (S_ISREG(inode->i_mode) && inode->i_size)
1847                                 return -EINVAL;
1848
1849                         set_compress_context(inode);
1850                 }
1851         }
1852         if ((iflags ^ masked_flags) & F2FS_NOCOMP_FL) {
1853                 if (masked_flags & F2FS_COMPR_FL)
1854                         return -EINVAL;
1855         }
1856
1857         fi->i_flags = iflags | (fi->i_flags & ~mask);
1858         f2fs_bug_on(F2FS_I_SB(inode), (fi->i_flags & F2FS_COMPR_FL) &&
1859                                         (fi->i_flags & F2FS_NOCOMP_FL));
1860
1861         if (fi->i_flags & F2FS_PROJINHERIT_FL)
1862                 set_inode_flag(inode, FI_PROJ_INHERIT);
1863         else
1864                 clear_inode_flag(inode, FI_PROJ_INHERIT);
1865
1866         inode->i_ctime = current_time(inode);
1867         f2fs_set_inode_flags(inode);
1868         f2fs_mark_inode_dirty_sync(inode, true);
1869         return 0;
1870 }
1871
1872 /* FS_IOC_GETFLAGS and FS_IOC_SETFLAGS support */
1873
1874 /*
1875  * To make a new on-disk f2fs i_flag gettable via FS_IOC_GETFLAGS, add an entry
1876  * for it to f2fs_fsflags_map[], and add its FS_*_FL equivalent to
1877  * F2FS_GETTABLE_FS_FL.  To also make it settable via FS_IOC_SETFLAGS, also add
1878  * F2FS_GETTABLE_FS_FL.  To make it settable via FS_IOC_SETFLAGS as well, add
1879  */
1880
1881 static const struct {
1882         u32 iflag;
1883         u32 fsflag;
1884 } f2fs_fsflags_map[] = {
1885         { F2FS_COMPR_FL,        FS_COMPR_FL },
1886         { F2FS_SYNC_FL,         FS_SYNC_FL },
1887         { F2FS_IMMUTABLE_FL,    FS_IMMUTABLE_FL },
1888         { F2FS_APPEND_FL,       FS_APPEND_FL },
1889         { F2FS_NODUMP_FL,       FS_NODUMP_FL },
1890         { F2FS_NOATIME_FL,      FS_NOATIME_FL },
1891         { F2FS_NOCOMP_FL,       FS_NOCOMP_FL },
1892         { F2FS_INDEX_FL,        FS_INDEX_FL },
1893         { F2FS_DIRSYNC_FL,      FS_DIRSYNC_FL },
1894         { F2FS_PROJINHERIT_FL,  FS_PROJINHERIT_FL },
1895         { F2FS_CASEFOLD_FL,     FS_CASEFOLD_FL },
1896 };
1897
1898 #define F2FS_GETTABLE_FS_FL (           \
1899                 FS_COMPR_FL |           \
1900                 FS_SYNC_FL |            \
1901                 FS_IMMUTABLE_FL |       \
1902                 FS_APPEND_FL |          \
1903                 FS_NODUMP_FL |          \
1904                 FS_NOATIME_FL |         \
1905                 FS_NOCOMP_FL |          \
1906                 FS_INDEX_FL |           \
1907                 FS_DIRSYNC_FL |         \
1908                 FS_PROJINHERIT_FL |     \
1909                 FS_ENCRYPT_FL |         \
1910                 FS_INLINE_DATA_FL |     \
1911                 FS_NOCOW_FL |           \
1912                 FS_VERITY_FL |          \
1913                 FS_CASEFOLD_FL)
1914
1915 #define F2FS_SETTABLE_FS_FL (           \
1916                 FS_COMPR_FL |           \
1917                 FS_SYNC_FL |            \
1918                 FS_IMMUTABLE_FL |       \
1919                 FS_APPEND_FL |          \
1920                 FS_NODUMP_FL |          \
1921                 FS_NOATIME_FL |         \
1922                 FS_NOCOMP_FL |          \
1923                 FS_DIRSYNC_FL |         \
1924                 FS_PROJINHERIT_FL |     \
1925                 FS_CASEFOLD_FL)
1926
1927 /* Convert f2fs on-disk i_flags to FS_IOC_{GET,SET}FLAGS flags */
1928 static inline u32 f2fs_iflags_to_fsflags(u32 iflags)
1929 {
1930         u32 fsflags = 0;
1931         int i;
1932
1933         for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
1934                 if (iflags & f2fs_fsflags_map[i].iflag)
1935                         fsflags |= f2fs_fsflags_map[i].fsflag;
1936
1937         return fsflags;
1938 }
1939
1940 /* Convert FS_IOC_{GET,SET}FLAGS flags to f2fs on-disk i_flags */
1941 static inline u32 f2fs_fsflags_to_iflags(u32 fsflags)
1942 {
1943         u32 iflags = 0;
1944         int i;
1945
1946         for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
1947                 if (fsflags & f2fs_fsflags_map[i].fsflag)
1948                         iflags |= f2fs_fsflags_map[i].iflag;
1949
1950         return iflags;
1951 }
1952
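/*
 * FS_IOC_GETFLAGS: report the generic FS_*_FL view of this inode's flags.
 * A hedged userspace sketch of the usual read-modify-write pattern for
 * this ioctl pair (standard VFS API, not specific to this file):
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *
 *	int set_noatime(int fd)
 *	{
 *		int flags;
 *
 *		if (ioctl(fd, FS_IOC_GETFLAGS, &flags))
 *			return -1;
 *		flags |= FS_NOATIME_FL;	// preserve the other flags
 *		return ioctl(fd, FS_IOC_SETFLAGS, &flags);
 *	}
 */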
1953 static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
1954 {
1955         struct inode *inode = file_inode(filp);
1956         struct f2fs_inode_info *fi = F2FS_I(inode);
1957         u32 fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
1958
1959         if (IS_ENCRYPTED(inode))
1960                 fsflags |= FS_ENCRYPT_FL;
1961         if (IS_VERITY(inode))
1962                 fsflags |= FS_VERITY_FL;
1963         if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
1964                 fsflags |= FS_INLINE_DATA_FL;
1965         if (is_inode_flag_set(inode, FI_PIN_FILE))
1966                 fsflags |= FS_NOCOW_FL;
1967
1968         fsflags &= F2FS_GETTABLE_FS_FL;
1969
1970         return put_user(fsflags, (int __user *)arg);
1971 }
1972
1973 static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
1974 {
1975         struct inode *inode = file_inode(filp);
1976         struct f2fs_inode_info *fi = F2FS_I(inode);
1977         u32 fsflags, old_fsflags;
1978         u32 iflags;
1979         int ret;
1980
1981         if (!inode_owner_or_capable(inode))
1982                 return -EACCES;
1983
1984         if (get_user(fsflags, (int __user *)arg))
1985                 return -EFAULT;
1986
1987         if (fsflags & ~F2FS_GETTABLE_FS_FL)
1988                 return -EOPNOTSUPP;
1989         fsflags &= F2FS_SETTABLE_FS_FL;
1990
1991         iflags = f2fs_fsflags_to_iflags(fsflags);
1992         if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
1993                 return -EOPNOTSUPP;
1994
1995         ret = mnt_want_write_file(filp);
1996         if (ret)
1997                 return ret;
1998
1999         inode_lock(inode);
2000
2001         old_fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
2002         ret = vfs_ioc_setflags_prepare(inode, old_fsflags, fsflags);
2003         if (ret)
2004                 goto out;
2005
2006         ret = f2fs_setflags_common(inode, iflags,
2007                         f2fs_fsflags_to_iflags(F2FS_SETTABLE_FS_FL));
2008 out:
2009         inode_unlock(inode);
2010         mnt_drop_write_file(filp);
2011         return ret;
2012 }
2013
2014 static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
2015 {
2016         struct inode *inode = file_inode(filp);
2017
2018         return put_user(inode->i_generation, (int __user *)arg);
2019 }
2020
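/*
 * F2FS_IOC_START_ATOMIC_WRITE: start buffering this file's dirty pages in
 * memory so that a later F2FS_IOC_COMMIT_ATOMIC_WRITE lands them as a
 * single atomic unit (the mechanism behind SQLite's atomic-write mode on
 * Android).  A hedged sketch of the expected userspace sequence, using the
 * ioctl numbers from <linux/f2fs.h>:
 *
 *	ioctl(fd, F2FS_IOC_START_ATOMIC_WRITE);
 *	// ... pwrite() the transaction's pages ...
 *	ioctl(fd, F2FS_IOC_COMMIT_ATOMIC_WRITE);	// or abort on failure
 */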
2021 static int f2fs_ioc_start_atomic_write(struct file *filp)
2022 {
2023         struct inode *inode = file_inode(filp);
2024         struct f2fs_inode_info *fi = F2FS_I(inode);
2025         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2026         int ret;
2027
2028         if (!inode_owner_or_capable(inode))
2029                 return -EACCES;
2030
2031         if (!S_ISREG(inode->i_mode))
2032                 return -EINVAL;
2033
2034         if (filp->f_flags & O_DIRECT)
2035                 return -EINVAL;
2036
2037         ret = mnt_want_write_file(filp);
2038         if (ret)
2039                 return ret;
2040
2041         inode_lock(inode);
2042
2043         f2fs_disable_compressed_file(inode);
2044
2045         if (f2fs_is_atomic_file(inode)) {
2046                 if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))
2047                         ret = -EINVAL;
2048                 goto out;
2049         }
2050
2051         ret = f2fs_convert_inline_inode(inode);
2052         if (ret)
2053                 goto out;
2054
2055         down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2056
2057         /*
2058          * Wait for end_io here so that F2FS_WB_CP_DATA is counted
2059          * correctly once f2fs_is_atomic_file() returns true.
2060          */
2061         if (get_dirty_pages(inode))
2062                 f2fs_warn(F2FS_I_SB(inode), "Unexpected flush for atomic writes: ino=%lu, npages=%u",
2063                           inode->i_ino, get_dirty_pages(inode));
2064         ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
2065         if (ret) {
2066                 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2067                 goto out;
2068         }
2069
2070         spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
2071         if (list_empty(&fi->inmem_ilist))
2072                 list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]);
2073         sbi->atomic_files++;
2074         spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
2075
2076         /* add the inode to inmem_ilist first, then set FI_ATOMIC_FILE */
2077         set_inode_flag(inode, FI_ATOMIC_FILE);
2078         clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2079         up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2080
2081         f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2082         F2FS_I(inode)->inmem_task = current;
2083         stat_update_max_atomic_write(inode);
2084 out:
2085         inode_unlock(inode);
2086         mnt_drop_write_file(filp);
2087         return ret;
2088 }
2089
2090 static int f2fs_ioc_commit_atomic_write(struct file *filp)
2091 {
2092         struct inode *inode = file_inode(filp);
2093         int ret;
2094
2095         if (!inode_owner_or_capable(inode))
2096                 return -EACCES;
2097
2098         ret = mnt_want_write_file(filp);
2099         if (ret)
2100                 return ret;
2101
2102         f2fs_balance_fs(F2FS_I_SB(inode), true);
2103
2104         inode_lock(inode);
2105
2106         if (f2fs_is_volatile_file(inode)) {
2107                 ret = -EINVAL;
2108                 goto err_out;
2109         }
2110
2111         if (f2fs_is_atomic_file(inode)) {
2112                 ret = f2fs_commit_inmem_pages(inode);
2113                 if (ret)
2114                         goto err_out;
2115
2116                 ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
2117                 if (!ret)
2118                         f2fs_drop_inmem_pages(inode);
2119         } else {
2120                 ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
2121         }
2122 err_out:
2123         if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
2124                 clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2125                 ret = -EINVAL;
2126         }
2127         inode_unlock(inode);
2128         mnt_drop_write_file(filp);
2129         return ret;
2130 }
2131
2132 static int f2fs_ioc_start_volatile_write(struct file *filp)
2133 {
2134         struct inode *inode = file_inode(filp);
2135         int ret;
2136
2137         if (!inode_owner_or_capable(inode))
2138                 return -EACCES;
2139
2140         if (!S_ISREG(inode->i_mode))
2141                 return -EINVAL;
2142
2143         ret = mnt_want_write_file(filp);
2144         if (ret)
2145                 return ret;
2146
2147         inode_lock(inode);
2148
2149         if (f2fs_is_volatile_file(inode))
2150                 goto out;
2151
2152         ret = f2fs_convert_inline_inode(inode);
2153         if (ret)
2154                 goto out;
2155
2156         stat_inc_volatile_write(inode);
2157         stat_update_max_volatile_write(inode);
2158
2159         set_inode_flag(inode, FI_VOLATILE_FILE);
2160         f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2161 out:
2162         inode_unlock(inode);
2163         mnt_drop_write_file(filp);
2164         return ret;
2165 }
2166
2167 static int f2fs_ioc_release_volatile_write(struct file *filp)
2168 {
2169         struct inode *inode = file_inode(filp);
2170         int ret;
2171
2172         if (!inode_owner_or_capable(inode))
2173                 return -EACCES;
2174
2175         ret = mnt_want_write_file(filp);
2176         if (ret)
2177                 return ret;
2178
2179         inode_lock(inode);
2180
2181         if (!f2fs_is_volatile_file(inode))
2182                 goto out;
2183
2184         if (!f2fs_is_first_block_written(inode)) {
2185                 ret = truncate_partial_data_page(inode, 0, true);
2186                 goto out;
2187         }
2188
2189         ret = punch_hole(inode, 0, F2FS_BLKSIZE);
2190 out:
2191         inode_unlock(inode);
2192         mnt_drop_write_file(filp);
2193         return ret;
2194 }
2195
2196 static int f2fs_ioc_abort_volatile_write(struct file *filp)
2197 {
2198         struct inode *inode = file_inode(filp);
2199         int ret;
2200
2201         if (!inode_owner_or_capable(inode))
2202                 return -EACCES;
2203
2204         ret = mnt_want_write_file(filp);
2205         if (ret)
2206                 return ret;
2207
2208         inode_lock(inode);
2209
2210         if (f2fs_is_atomic_file(inode))
2211                 f2fs_drop_inmem_pages(inode);
2212         if (f2fs_is_volatile_file(inode)) {
2213                 clear_inode_flag(inode, FI_VOLATILE_FILE);
2214                 stat_dec_volatile_write(inode);
2215                 ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
2216         }
2217
2218         clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2219
2220         inode_unlock(inode);
2221
2222         mnt_drop_write_file(filp);
2223         f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2224         return ret;
2225 }
2226
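/*
 * F2FS_IOC_SHUTDOWN: take the filesystem down in one of several modes,
 * from "freeze and checkpoint everything" (FULLSYNC) to "stop dead with no
 * sync at all" (NOSYNC).  A hedged userspace sketch, assuming the mode
 * constants from <linux/f2fs.h>:
 *
 *	__u32 mode = F2FS_GOING_DOWN_METASYNC;	// checkpoint, then stop
 *	ioctl(fd, F2FS_IOC_SHUTDOWN, &mode);
 */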
2227 static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
2228 {
2229         struct inode *inode = file_inode(filp);
2230         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2231         struct super_block *sb = sbi->sb;
2232         __u32 in;
2233         int ret = 0;
2234
2235         if (!capable(CAP_SYS_ADMIN))
2236                 return -EPERM;
2237
2238         if (get_user(in, (__u32 __user *)arg))
2239                 return -EFAULT;
2240
2241         if (in != F2FS_GOING_DOWN_FULLSYNC) {
2242                 ret = mnt_want_write_file(filp);
2243                 if (ret) {
2244                         if (ret == -EROFS) {
2245                                 ret = 0;
2246                                 f2fs_stop_checkpoint(sbi, false);
2247                                 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2248                                 trace_f2fs_shutdown(sbi, in, ret);
2249                         }
2250                         return ret;
2251                 }
2252         }
2253
2254         switch (in) {
2255         case F2FS_GOING_DOWN_FULLSYNC:
2256                 ret = freeze_bdev(sb->s_bdev);
2257                 if (ret)
2258                         goto out;
2259                 f2fs_stop_checkpoint(sbi, false);
2260                 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2261                 thaw_bdev(sb->s_bdev);
2262                 break;
2263         case F2FS_GOING_DOWN_METASYNC:
2264                 /* do checkpoint only */
2265                 ret = f2fs_sync_fs(sb, 1);
2266                 if (ret)
2267                         goto out;
2268                 f2fs_stop_checkpoint(sbi, false);
2269                 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2270                 break;
2271         case F2FS_GOING_DOWN_NOSYNC:
2272                 f2fs_stop_checkpoint(sbi, false);
2273                 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2274                 break;
2275         case F2FS_GOING_DOWN_METAFLUSH:
2276                 f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
2277                 f2fs_stop_checkpoint(sbi, false);
2278                 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2279                 break;
2280         case F2FS_GOING_DOWN_NEED_FSCK:
2281                 set_sbi_flag(sbi, SBI_NEED_FSCK);
2282                 set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
2283                 set_sbi_flag(sbi, SBI_IS_DIRTY);
2284                 /* do checkpoint only */
2285                 ret = f2fs_sync_fs(sb, 1);
2286                 goto out;
2287         default:
2288                 ret = -EINVAL;
2289                 goto out;
2290         }
2291
2292         f2fs_stop_gc_thread(sbi);
2293         f2fs_stop_discard_thread(sbi);
2294
2295         f2fs_drop_discard_cmd(sbi);
2296         clear_opt(sbi, DISCARD);
2297
2298         f2fs_update_time(sbi, REQ_TIME);
2299 out:
2300         if (in != F2FS_GOING_DOWN_FULLSYNC)
2301                 mnt_drop_write_file(filp);
2302
2303         trace_f2fs_shutdown(sbi, in, ret);
2304
2305         return ret;
2306 }
2307
2308 static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
2309 {
2310         struct inode *inode = file_inode(filp);
2311         struct super_block *sb = inode->i_sb;
2312         struct request_queue *q = bdev_get_queue(sb->s_bdev);
2313         struct fstrim_range range;
2314         int ret;
2315
2316         if (!capable(CAP_SYS_ADMIN))
2317                 return -EPERM;
2318
2319         if (!f2fs_hw_support_discard(F2FS_SB(sb)))
2320                 return -EOPNOTSUPP;
2321
2322         if (copy_from_user(&range, (struct fstrim_range __user *)arg,
2323                                 sizeof(range)))
2324                 return -EFAULT;
2325
2326         ret = mnt_want_write_file(filp);
2327         if (ret)
2328                 return ret;
2329
2330         range.minlen = max((unsigned int)range.minlen,
2331                                 q->limits.discard_granularity);
2332         ret = f2fs_trim_fs(F2FS_SB(sb), &range);
2333         mnt_drop_write_file(filp);
2334         if (ret < 0)
2335                 return ret;
2336
2337         if (copy_to_user((struct fstrim_range __user *)arg, &range,
2338                                 sizeof(range)))
2339                 return -EFAULT;
2340         f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2341         return 0;
2342 }
2343
2344 static bool uuid_is_nonzero(__u8 u[16])
2345 {
2346         int i;
2347
2348         for (i = 0; i < 16; i++)
2349                 if (u[i])
2350                         return true;
2351         return false;
2352 }
2353
2354 static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
2355 {
2356         struct inode *inode = file_inode(filp);
2357
2358         if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode)))
2359                 return -EOPNOTSUPP;
2360
2361         f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2362
2363         return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
2364 }
2365
2366 static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
2367 {
2368         if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2369                 return -EOPNOTSUPP;
2370         return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
2371 }
2372
2373 static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
2374 {
2375         struct inode *inode = file_inode(filp);
2376         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2377         int err;
2378
2379         if (!f2fs_sb_has_encrypt(sbi))
2380                 return -EOPNOTSUPP;
2381
2382         err = mnt_want_write_file(filp);
2383         if (err)
2384                 return err;
2385
2386         down_write(&sbi->sb_lock);
2387
2388         if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
2389                 goto got_it;
2390
2391         /* update superblock with uuid */
2392         generate_random_uuid(sbi->raw_super->encrypt_pw_salt);
2393
2394         err = f2fs_commit_super(sbi, false);
2395         if (err) {
2396                 /* undo new data */
2397                 memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
2398                 goto out_err;
2399         }
2400 got_it:
2401         if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
2402                                                                         16))
2403                 err = -EFAULT;
2404 out_err:
2405         up_write(&sbi->sb_lock);
2406         mnt_drop_write_file(filp);
2407         return err;
2408 }
2409
2410 static int f2fs_ioc_get_encryption_policy_ex(struct file *filp,
2411                                              unsigned long arg)
2412 {
2413         if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2414                 return -EOPNOTSUPP;
2415
2416         return fscrypt_ioctl_get_policy_ex(filp, (void __user *)arg);
2417 }
2418
2419 static int f2fs_ioc_add_encryption_key(struct file *filp, unsigned long arg)
2420 {
2421         if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2422                 return -EOPNOTSUPP;
2423
2424         return fscrypt_ioctl_add_key(filp, (void __user *)arg);
2425 }
2426
2427 static int f2fs_ioc_remove_encryption_key(struct file *filp, unsigned long arg)
2428 {
2429         if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2430                 return -EOPNOTSUPP;
2431
2432         return fscrypt_ioctl_remove_key(filp, (void __user *)arg);
2433 }
2434
2435 static int f2fs_ioc_remove_encryption_key_all_users(struct file *filp,
2436                                                     unsigned long arg)
2437 {
2438         if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2439                 return -EOPNOTSUPP;
2440
2441         return fscrypt_ioctl_remove_key_all_users(filp, (void __user *)arg);
2442 }
2443
2444 static int f2fs_ioc_get_encryption_key_status(struct file *filp,
2445                                               unsigned long arg)
2446 {
2447         if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2448                 return -EOPNOTSUPP;
2449
2450         return fscrypt_ioctl_get_key_status(filp, (void __user *)arg);
2451 }
2452
2453 static int f2fs_ioc_get_encryption_nonce(struct file *filp, unsigned long arg)
2454 {
2455         if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2456                 return -EOPNOTSUPP;
2457
2458         return fscrypt_ioctl_get_nonce(filp, (void __user *)arg);
2459 }
2460
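/*
 * Handler for F2FS_IOC_GARBAGE_COLLECT: run one pass of foreground GC.
 * The __u32 argument selects sync (block on gc_lock) versus best-effort
 * behaviour, where a contended lock returns -EBUSY instead of waiting.
 */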
2461 static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
2462 {
2463         struct inode *inode = file_inode(filp);
2464         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2465         __u32 sync;
2466         int ret;
2467
2468         if (!capable(CAP_SYS_ADMIN))
2469                 return -EPERM;
2470
2471         if (get_user(sync, (__u32 __user *)arg))
2472                 return -EFAULT;
2473
2474         if (f2fs_readonly(sbi->sb))
2475                 return -EROFS;
2476
2477         ret = mnt_want_write_file(filp);
2478         if (ret)
2479                 return ret;
2480
2481         if (!sync) {
2482                 if (!down_write_trylock(&sbi->gc_lock)) {
2483                         ret = -EBUSY;
2484                         goto out;
2485                 }
2486         } else {
2487                 down_write(&sbi->gc_lock);
2488         }
2489
2490         ret = f2fs_gc(sbi, sync, true, NULL_SEGNO);
2491 out:
2492         mnt_drop_write_file(filp);
2493         return ret;
2494 }
2495
2496 static int __f2fs_ioc_gc_range(struct file *filp, struct f2fs_gc_range *range)
2497 {
2498         struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
2499         u64 end;
2500         int ret;
2501
2502         if (!capable(CAP_SYS_ADMIN))
2503                 return -EPERM;
2504         if (f2fs_readonly(sbi->sb))
2505                 return -EROFS;
2506
2507         end = range->start + range->len;
2508         if (end < range->start || range->start < MAIN_BLKADDR(sbi) ||
2509                                         end >= MAX_BLKADDR(sbi))
2510                 return -EINVAL;
2511
2512         ret = mnt_want_write_file(filp);
2513         if (ret)
2514                 return ret;
2515
2516 do_more:
2517         if (!range->sync) {
2518                 if (!down_write_trylock(&sbi->gc_lock)) {
2519                         ret = -EBUSY;
2520                         goto out;
2521                 }
2522         } else {
2523                 down_write(&sbi->gc_lock);
2524         }
2525
2526         ret = f2fs_gc(sbi, range->sync, true, GET_SEGNO(sbi, range->start));
2527         if (ret) {
2528                 if (ret == -EBUSY)
2529                         ret = -EAGAIN;
2530                 goto out;
2531         }
2532         range->start += BLKS_PER_SEC(sbi);
2533         if (range->start <= end)
2534                 goto do_more;
2535 out:
2536         mnt_drop_write_file(filp);
2537         return ret;
2538 }
2539
2540 static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
2541 {
2542         struct f2fs_gc_range range;
2543
2544         if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
2545                                                         sizeof(range)))
2546                 return -EFAULT;
2547         return __f2fs_ioc_gc_range(filp, &range);
2548 }
2549
2550 static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
2551 {
2552         struct inode *inode = file_inode(filp);
2553         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2554         int ret;
2555
2556         if (!capable(CAP_SYS_ADMIN))
2557                 return -EPERM;
2558
2559         if (f2fs_readonly(sbi->sb))
2560                 return -EROFS;
2561
2562         if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2563                 f2fs_info(sbi, "Skipping Checkpoint. Checkpoints currently disabled.");
2564                 return -EINVAL;
2565         }
2566
2567         ret = mnt_want_write_file(filp);
2568         if (ret)
2569                 return ret;
2570
2571         ret = f2fs_sync_fs(sbi->sb, 1);
2572
2573         mnt_drop_write_file(filp);
2574         return ret;
2575 }
2576
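/*
 * Defragment [range->start, range->start + range->len): first scan the
 * mapping to decide whether the range is fragmented at all, then redirty
 * the affected pages one segment's worth at a time so that writeback
 * reallocates them contiguously.  On success, range->len is rewritten to
 * the number of bytes actually moved.
 */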
2577 static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
2578                                         struct file *filp,
2579                                         struct f2fs_defragment *range)
2580 {
2581         struct inode *inode = file_inode(filp);
2582         struct f2fs_map_blocks map = { .m_next_extent = NULL,
2583                                         .m_seg_type = NO_CHECK_TYPE,
2584                                         .m_may_create = false };
2585         struct extent_info ei = {0, 0, 0};
2586         pgoff_t pg_start, pg_end, next_pgofs;
2587         unsigned int blk_per_seg = sbi->blocks_per_seg;
2588         unsigned int total = 0, sec_num;
2589         block_t blk_end = 0;
2590         bool fragmented = false;
2591         int err;
2592
2593         /* if in-place-update policy is enabled, don't waste time here */
2594         if (f2fs_should_update_inplace(inode, NULL))
2595                 return -EINVAL;
2596
2597         pg_start = range->start >> PAGE_SHIFT;
2598         pg_end = (range->start + range->len) >> PAGE_SHIFT;
2599
2600         f2fs_balance_fs(sbi, true);
2601
2602         inode_lock(inode);
2603
2604         /* writeback all dirty pages in the range */
2605         err = filemap_write_and_wait_range(inode->i_mapping, range->start,
2606                                                 range->start + range->len - 1);
2607         if (err)
2608                 goto out;
2609
2610         /*
2611          * look up mapping info in the extent cache and skip defragmenting
2612          * if the physical block addresses are contiguous.
2613          */
2614         if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
2615                 if (ei.fofs + ei.len >= pg_end)
2616                         goto out;
2617         }
2618
2619         map.m_lblk = pg_start;
2620         map.m_next_pgofs = &next_pgofs;
2621
2622         /*
2623          * look up mapping info in the dnode page cache and skip defragmenting
2624          * if all physical block addresses are contiguous, even when there are
2625          * holes in the logical block range.
2626          */
2627         while (map.m_lblk < pg_end) {
2628                 map.m_len = pg_end - map.m_lblk;
2629                 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
2630                 if (err)
2631                         goto out;
2632
2633                 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
2634                         map.m_lblk = next_pgofs;
2635                         continue;
2636                 }
2637
2638                 if (blk_end && blk_end != map.m_pblk)
2639                         fragmented = true;
2640
2641                 /* record the total count of blocks that we are going to move */
2642                 total += map.m_len;
2643
2644                 blk_end = map.m_pblk + map.m_len;
2645
2646                 map.m_lblk += map.m_len;
2647         }
2648
2649         if (!fragmented) {
2650                 total = 0;
2651                 goto out;
2652         }
2653
2654         sec_num = DIV_ROUND_UP(total, BLKS_PER_SEC(sbi));
2655
2656         /*
2657          * make sure there are enough free sections for LFS allocation; this
2658          * avoids running defragmentation in SSR mode when free sections are
2659          * being consumed intensively
2660          */
2661         if (has_not_enough_free_secs(sbi, 0, sec_num)) {
2662                 err = -EAGAIN;
2663                 goto out;
2664         }
2665
2666         map.m_lblk = pg_start;
2667         map.m_len = pg_end - pg_start;
2668         total = 0;
2669
2670         while (map.m_lblk < pg_end) {
2671                 pgoff_t idx;
2672                 int cnt = 0;
2673
2674 do_map:
2675                 map.m_len = pg_end - map.m_lblk;
2676                 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
2677                 if (err)
2678                         goto clear_out;
2679
2680                 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
2681                         map.m_lblk = next_pgofs;
2682                         goto check;
2683                 }
2684
2685                 set_inode_flag(inode, FI_DO_DEFRAG);
2686
2687                 idx = map.m_lblk;
2688                 while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
2689                         struct page *page;
2690
2691                         page = f2fs_get_lock_data_page(inode, idx, true);
2692                         if (IS_ERR(page)) {
2693                                 err = PTR_ERR(page);
2694                                 goto clear_out;
2695                         }
2696
2697                         set_page_dirty(page);
2698                         f2fs_put_page(page, 1);
2699
2700                         idx++;
2701                         cnt++;
2702                         total++;
2703                 }
2704
2705                 map.m_lblk = idx;
2706 check:
2707                 if (map.m_lblk < pg_end && cnt < blk_per_seg)
2708                         goto do_map;
2709
2710                 clear_inode_flag(inode, FI_DO_DEFRAG);
2711
2712                 err = filemap_fdatawrite(inode->i_mapping);
2713                 if (err)
2714                         goto out;
2715         }
2716 clear_out:
2717         clear_inode_flag(inode, FI_DO_DEFRAG);
2718 out:
2719         inode_unlock(inode);
2720         if (!err)
2721                 range->len = (u64)total << PAGE_SHIFT;
2722         return err;
2723 }
2724
2725 static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
2726 {
2727         struct inode *inode = file_inode(filp);
2728         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2729         struct f2fs_defragment range;
2730         int err;
2731
2732         if (!capable(CAP_SYS_ADMIN))
2733                 return -EPERM;
2734
2735         if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
2736                 return -EINVAL;
2737
2738         if (f2fs_readonly(sbi->sb))
2739                 return -EROFS;
2740
2741         if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
2742                                                         sizeof(range)))
2743                 return -EFAULT;
2744
2745         /* verify alignment of offset & size */
2746         if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
2747                 return -EINVAL;
2748
2749         if (unlikely((range.start + range.len) >> PAGE_SHIFT >
2750                                         max_file_blocks(inode)))
2751                 return -EINVAL;
2752
2753         err = mnt_want_write_file(filp);
2754         if (err)
2755                 return err;
2756
2757         err = f2fs_defragment_range(sbi, filp, &range);
2758         mnt_drop_write_file(filp);
2759
2760         f2fs_update_time(sbi, REQ_TIME);
2761         if (err < 0)
2762                 return err;
2763
2764         if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
2765                                                         sizeof(range)))
2766                 return -EFAULT;
2767
2768         return 0;
2769 }
2770
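/*
 * Back end for F2FS_IOC_MOVE_RANGE: move block-aligned data from
 * @file_in/@pos_in to @file_out/@pos_out by exchanging the underlying
 * blocks rather than copying them, growing the destination's i_size when
 * the moved range extends past it.
 */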
2771 static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
2772                         struct file *file_out, loff_t pos_out, size_t len)
2773 {
2774         struct inode *src = file_inode(file_in);
2775         struct inode *dst = file_inode(file_out);
2776         struct f2fs_sb_info *sbi = F2FS_I_SB(src);
2777         size_t olen = len, dst_max_i_size = 0;
2778         size_t dst_osize;
2779         int ret;
2780
2781         if (file_in->f_path.mnt != file_out->f_path.mnt ||
2782                                 src->i_sb != dst->i_sb)
2783                 return -EXDEV;
2784
2785         if (unlikely(f2fs_readonly(src->i_sb)))
2786                 return -EROFS;
2787
2788         if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
2789                 return -EINVAL;
2790
2791         if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst))
2792                 return -EOPNOTSUPP;
2793
2794         if (pos_out < 0 || pos_in < 0)
2795                 return -EINVAL;
2796
2797         if (src == dst) {
2798                 if (pos_in == pos_out)
2799                         return 0;
2800                 if (pos_out > pos_in && pos_out < pos_in + len)
2801                         return -EINVAL;
2802         }
2803
2804         inode_lock(src);
2805         if (src != dst) {
2806                 ret = -EBUSY;
2807                 if (!inode_trylock(dst))
2808                         goto out;
2809         }
2810
2811         ret = -EINVAL;
2812         if (pos_in + len > src->i_size || pos_in + len < pos_in)
2813                 goto out_unlock;
2814         if (len == 0)
2815                 olen = len = src->i_size - pos_in;
2816         if (pos_in + len == src->i_size)
2817                 len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
2818         if (len == 0) {
2819                 ret = 0;
2820                 goto out_unlock;
2821         }
2822
2823         dst_osize = dst->i_size;
2824         if (pos_out + olen > dst->i_size)
2825                 dst_max_i_size = pos_out + olen;
2826
2827         /* verify the end result is block aligned */
2828         if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
2829                         !IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
2830                         !IS_ALIGNED(pos_out, F2FS_BLKSIZE))
2831                 goto out_unlock;
2832
2833         ret = f2fs_convert_inline_inode(src);
2834         if (ret)
2835                 goto out_unlock;
2836
2837         ret = f2fs_convert_inline_inode(dst);
2838         if (ret)
2839                 goto out_unlock;
2840
2841         /* write out all dirty pages in the source and destination ranges */
2842         ret = filemap_write_and_wait_range(src->i_mapping,
2843                                         pos_in, pos_in + len);
2844         if (ret)
2845                 goto out_unlock;
2846
2847         ret = filemap_write_and_wait_range(dst->i_mapping,
2848                                         pos_out, pos_out + len);
2849         if (ret)
2850                 goto out_unlock;
2851
2852         f2fs_balance_fs(sbi, true);
2853
2854         down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2855         if (src != dst) {
2856                 ret = -EBUSY;
2857                 if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
2858                         goto out_src;
2859         }
2860
2861         f2fs_lock_op(sbi);
2862         ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
2863                                 pos_out >> F2FS_BLKSIZE_BITS,
2864                                 len >> F2FS_BLKSIZE_BITS, false);
2865
2866         if (!ret) {
2867                 if (dst_max_i_size)
2868                         f2fs_i_size_write(dst, dst_max_i_size);
2869                 else if (dst_osize != dst->i_size)
2870                         f2fs_i_size_write(dst, dst_osize);
2871         }
2872         f2fs_unlock_op(sbi);
2873
2874         if (src != dst)
2875                 up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
2876 out_src:
2877         up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2878 out_unlock:
2879         if (src != dst)
2880                 inode_unlock(dst);
2881 out:
2882         inode_unlock(src);
2883         return ret;
2884 }
2885
2886 static int __f2fs_ioc_move_range(struct file *filp,
2887                                 struct f2fs_move_range *range)
2888 {
2889         struct fd dst;
2890         int err;
2891
2892         if (!(filp->f_mode & FMODE_READ) ||
2893                         !(filp->f_mode & FMODE_WRITE))
2894                 return -EBADF;
2895
2896         dst = fdget(range->dst_fd);
2897         if (!dst.file)
2898                 return -EBADF;
2899
2900         if (!(dst.file->f_mode & FMODE_WRITE)) {
2901                 err = -EBADF;
2902                 goto err_out;
2903         }
2904
2905         err = mnt_want_write_file(filp);
2906         if (err)
2907                 goto err_out;
2908
2909         err = f2fs_move_file_range(filp, range->pos_in, dst.file,
2910                                         range->pos_out, range->len);
2911
2912         mnt_drop_write_file(filp);
2913 err_out:
2914         fdput(dst);
2915         return err;
2916 }
2917
2918 static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
2919 {
2920         struct f2fs_move_range range;
2921
2922         if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
2923                                                         sizeof(range)))
2924                 return -EFAULT;
2925         return __f2fs_ioc_move_range(filp, &range);
2926 }
2927
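/*
 * F2FS_IOC_FLUSH_DEVICE: on a multi-device volume, migrate up to
 * range.segments segments off device range.dev_num via foreground GC,
 * resuming from the last victim so that repeated calls walk the device.
 */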
2928 static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
2929 {
2930         struct inode *inode = file_inode(filp);
2931         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2932         struct sit_info *sm = SIT_I(sbi);
2933         unsigned int start_segno = 0, end_segno = 0;
2934         unsigned int dev_start_segno = 0, dev_end_segno = 0;
2935         struct f2fs_flush_device range;
2936         int ret;
2937
2938         if (!capable(CAP_SYS_ADMIN))
2939                 return -EPERM;
2940
2941         if (f2fs_readonly(sbi->sb))
2942                 return -EROFS;
2943
2944         if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2945                 return -EINVAL;
2946
2947         if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
2948                                                         sizeof(range)))
2949                 return -EFAULT;
2950
2951         if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num ||
2952                         __is_large_section(sbi)) {
2953                 f2fs_warn(sbi, "Can't flush %u in %d for segs_per_sec %u != 1",
2954                           range.dev_num, sbi->s_ndevs, sbi->segs_per_sec);
2955                 return -EINVAL;
2956         }
2957
2958         ret = mnt_want_write_file(filp);
2959         if (ret)
2960                 return ret;
2961
2962         if (range.dev_num != 0)
2963                 dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
2964         dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);
2965
2966         start_segno = sm->last_victim[FLUSH_DEVICE];
2967         if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
2968                 start_segno = dev_start_segno;
2969         end_segno = min(start_segno + range.segments, dev_end_segno);
2970
2971         while (start_segno < end_segno) {
2972                 if (!down_write_trylock(&sbi->gc_lock)) {
2973                         ret = -EBUSY;
2974                         goto out;
2975                 }
2976                 sm->last_victim[GC_CB] = end_segno + 1;
2977                 sm->last_victim[GC_GREEDY] = end_segno + 1;
2978                 sm->last_victim[ALLOC_NEXT] = end_segno + 1;
2979                 ret = f2fs_gc(sbi, true, true, start_segno);
2980                 if (ret == -EAGAIN)
2981                         ret = 0;
2982                 else if (ret < 0)
2983                         break;
2984                 start_segno++;
2985         }
2986 out:
2987         mnt_drop_write_file(filp);
2988         return ret;
2989 }
2990
2991 static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
2992 {
2993         struct inode *inode = file_inode(filp);
2994         u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);
2995
2996         /* Always report atomic-write support, as Android's SQLite expects it. */
2997         sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;
2998
2999         return put_user(sb_feature, (u32 __user *)arg);
3000 }
3001
3002 #ifdef CONFIG_QUOTA
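/*
 * Charge @inode to the project quota identified by @kprojid, flagging the
 * superblock for quota repair if the transfer fails part-way through.
 */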
3003 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
3004 {
3005         struct dquot *transfer_to[MAXQUOTAS] = {};
3006         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3007         struct super_block *sb = sbi->sb;
3008         int err = 0;
3009
3010         transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
3011         if (!IS_ERR(transfer_to[PRJQUOTA])) {
3012                 err = __dquot_transfer(inode, transfer_to);
3013                 if (err)
3014                         set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
3015                 dqput(transfer_to[PRJQUOTA]);
3016         }
3017         return err;
3018 }
3019
3020 static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
3021 {
3022         struct inode *inode = file_inode(filp);
3023         struct f2fs_inode_info *fi = F2FS_I(inode);
3024         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3025         struct page *ipage;
3026         kprojid_t kprojid;
3027         int err;
3028
3029         if (!f2fs_sb_has_project_quota(sbi)) {
3030                 if (projid != F2FS_DEF_PROJID)
3031                         return -EOPNOTSUPP;
3032                 else
3033                         return 0;
3034         }
3035
3036         if (!f2fs_has_extra_attr(inode))
3037                 return -EOPNOTSUPP;
3038
3039         kprojid = make_kprojid(&init_user_ns, (projid_t)projid);
3040
3041         if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
3042                 return 0;
3043
3044         err = -EPERM;
3045         /* Is it a quota file? Do not allow the user to mess with it. */
3046         if (IS_NOQUOTA(inode))
3047                 return err;
3048
3049         ipage = f2fs_get_node_page(sbi, inode->i_ino);
3050         if (IS_ERR(ipage))
3051                 return PTR_ERR(ipage);
3052
3053         if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize,
3054                                                                 i_projid)) {
3055                 err = -EOVERFLOW;
3056                 f2fs_put_page(ipage, 1);
3057                 return err;
3058         }
3059         f2fs_put_page(ipage, 1);
3060
3061         err = dquot_initialize(inode);
3062         if (err)
3063                 return err;
3064
3065         f2fs_lock_op(sbi);
3066         err = f2fs_transfer_project_quota(inode, kprojid);
3067         if (err)
3068                 goto out_unlock;
3069
3070         F2FS_I(inode)->i_projid = kprojid;
3071         inode->i_ctime = current_time(inode);
3072         f2fs_mark_inode_dirty_sync(inode, true);
3073 out_unlock:
3074         f2fs_unlock_op(sbi);
3075         return err;
3076 }
3077 #else
3078 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
3079 {
3080         return 0;
3081 }
3082
3083 static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
3084 {
3085         if (projid != F2FS_DEF_PROJID)
3086                 return -EOPNOTSUPP;
3087         return 0;
3088 }
3089 #endif
3090
3091 /* FS_IOC_FSGETXATTR and FS_IOC_FSSETXATTR support */
3092
3093 /*
3094  * To make a new on-disk f2fs i_flag gettable via FS_IOC_FSGETXATTR and settable
3095  * via FS_IOC_FSSETXATTR, add an entry for it to f2fs_xflags_map[], and add its
3096  * FS_XFLAG_* equivalent to F2FS_SUPPORTED_XFLAGS.
3097  */
3098
3099 static const struct {
3100         u32 iflag;
3101         u32 xflag;
3102 } f2fs_xflags_map[] = {
3103         { F2FS_SYNC_FL,         FS_XFLAG_SYNC },
3104         { F2FS_IMMUTABLE_FL,    FS_XFLAG_IMMUTABLE },
3105         { F2FS_APPEND_FL,       FS_XFLAG_APPEND },
3106         { F2FS_NODUMP_FL,       FS_XFLAG_NODUMP },
3107         { F2FS_NOATIME_FL,      FS_XFLAG_NOATIME },
3108         { F2FS_PROJINHERIT_FL,  FS_XFLAG_PROJINHERIT },
3109 };
3110
3111 #define F2FS_SUPPORTED_XFLAGS (         \
3112                 FS_XFLAG_SYNC |         \
3113                 FS_XFLAG_IMMUTABLE |    \
3114                 FS_XFLAG_APPEND |       \
3115                 FS_XFLAG_NODUMP |       \
3116                 FS_XFLAG_NOATIME |      \
3117                 FS_XFLAG_PROJINHERIT)
3118
3119 /* Convert f2fs on-disk i_flags to FS_IOC_FS{GET,SET}XATTR flags */
3120 static inline u32 f2fs_iflags_to_xflags(u32 iflags)
3121 {
3122         u32 xflags = 0;
3123         int i;
3124
3125         for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++)
3126                 if (iflags & f2fs_xflags_map[i].iflag)
3127                         xflags |= f2fs_xflags_map[i].xflag;
3128
3129         return xflags;
3130 }
3131
3132 /* Convert FS_IOC_FS{GET,SET}XATTR flags to f2fs on-disk i_flags */
3133 static inline u32 f2fs_xflags_to_iflags(u32 xflags)
3134 {
3135         u32 iflags = 0;
3136         int i;
3137
3138         for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++)
3139                 if (xflags & f2fs_xflags_map[i].xflag)
3140                         iflags |= f2fs_xflags_map[i].iflag;
3141
3142         return iflags;
3143 }
3144
3145 static void f2fs_fill_fsxattr(struct inode *inode, struct fsxattr *fa)
3146 {
3147         struct f2fs_inode_info *fi = F2FS_I(inode);
3148
3149         simple_fill_fsxattr(fa, f2fs_iflags_to_xflags(fi->i_flags));
3150
3151         if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
3152                 fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid);
3153 }
3154
3155 static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
3156 {
3157         struct inode *inode = file_inode(filp);
3158         struct fsxattr fa;
3159
3160         f2fs_fill_fsxattr(inode, &fa);
3161
3162         if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa)))
3163                 return -EFAULT;
3164         return 0;
3165 }
3166
3167 static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
3168 {
3169         struct inode *inode = file_inode(filp);
3170         struct fsxattr fa, old_fa;
3171         u32 iflags;
3172         int err;
3173
3174         if (copy_from_user(&fa, (struct fsxattr __user *)arg, sizeof(fa)))
3175                 return -EFAULT;
3176
3177         /* Make sure caller has proper permission */
3178         if (!inode_owner_or_capable(inode))
3179                 return -EACCES;
3180
3181         if (fa.fsx_xflags & ~F2FS_SUPPORTED_XFLAGS)
3182                 return -EOPNOTSUPP;
3183
3184         iflags = f2fs_xflags_to_iflags(fa.fsx_xflags);
3185         if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
3186                 return -EOPNOTSUPP;
3187
3188         err = mnt_want_write_file(filp);
3189         if (err)
3190                 return err;
3191
3192         inode_lock(inode);
3193
3194         f2fs_fill_fsxattr(inode, &old_fa);
3195         err = vfs_ioc_fssetxattr_check(inode, &old_fa, &fa);
3196         if (err)
3197                 goto out;
3198
3199         err = f2fs_setflags_common(inode, iflags,
3200                         f2fs_xflags_to_iflags(F2FS_SUPPORTED_XFLAGS));
3201         if (err)
3202                 goto out;
3203
3204         err = f2fs_ioc_setproject(filp, fa.fsx_projid);
3205 out:
3206         inode_unlock(inode);
3207         mnt_drop_write_file(filp);
3208         return err;
3209 }
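
/*
 * Usage sketch for the fsxattr pair above: a hypothetical userspace
 * caller, not part of this file. FS_IOC_FSGETXATTR/FS_IOC_FSSETXATTR and
 * struct fsxattr come from <linux/fs.h>; the read-modify-write keeps
 * unrelated xflags intact, mirroring how old_fa is used for validation:
 *
 *	struct fsxattr fa;
 *
 *	ioctl(fd, FS_IOC_FSGETXATTR, &fa);
 *	fa.fsx_xflags |= FS_XFLAG_PROJINHERIT;
 *	fa.fsx_projid = 1000;
 *	ioctl(fd, FS_IOC_FSSETXATTR, &fa);
 */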
3210
3211 int f2fs_pin_file_control(struct inode *inode, bool inc)
3212 {
3213         struct f2fs_inode_info *fi = F2FS_I(inode);
3214         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3215
3216         /* Use i_gc_failures of a regular file as a risk signal for unpinning. */
3217         if (inc)
3218                 f2fs_i_gc_failures_write(inode,
3219                                 fi->i_gc_failures[GC_FAILURE_PIN] + 1);
3220
3221         if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
3222                 f2fs_warn(sbi, "%s: Enable GC = ino %lx after %x GC trials",
3223                           __func__, inode->i_ino,
3224                           fi->i_gc_failures[GC_FAILURE_PIN]);
3225                 clear_inode_flag(inode, FI_PIN_FILE);
3226                 return -EAGAIN;
3227         }
3228         return 0;
3229 }
3230
3231 static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
3232 {
3233         struct inode *inode = file_inode(filp);
3234         __u32 pin;
3235         int ret = 0;
3236
3237         if (get_user(pin, (__u32 __user *)arg))
3238                 return -EFAULT;
3239
3240         if (!S_ISREG(inode->i_mode))
3241                 return -EINVAL;
3242
3243         if (f2fs_readonly(F2FS_I_SB(inode)->sb))
3244                 return -EROFS;
3245
3246         ret = mnt_want_write_file(filp);
3247         if (ret)
3248                 return ret;
3249
3250         inode_lock(inode);
3251
3252         if (f2fs_should_update_outplace(inode, NULL)) {
3253                 ret = -EINVAL;
3254                 goto out;
3255         }
3256
3257         if (!pin) {
3258                 clear_inode_flag(inode, FI_PIN_FILE);
3259                 f2fs_i_gc_failures_write(inode, 0);
3260                 goto done;
3261         }
3262
3263         if (f2fs_pin_file_control(inode, false)) {
3264                 ret = -EAGAIN;
3265                 goto out;
3266         }
3267
3268         ret = f2fs_convert_inline_inode(inode);
3269         if (ret)
3270                 goto out;
3271
3272         if (!f2fs_disable_compressed_file(inode)) {
3273                 ret = -EOPNOTSUPP;
3274                 goto out;
3275         }
3276
3277         set_inode_flag(inode, FI_PIN_FILE);
3278         ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3279 done:
3280         f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3281 out:
3282         inode_unlock(inode);
3283         mnt_drop_write_file(filp);
3284         return ret;
3285 }
3286
3287 static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
3288 {
3289         struct inode *inode = file_inode(filp);
3290         __u32 pin = 0;
3291
3292         if (is_inode_flag_set(inode, FI_PIN_FILE))
3293                 pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3294         return put_user(pin, (u32 __user *)arg);
3295 }
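
/*
 * Usage sketch for the pin-file ioctls above: a hypothetical userspace
 * caller, not part of this file. A nonzero __u32 pins, zero unpins;
 * F2FS_IOC_GET_PIN_FILE reports the GC-failure count of a pinned file
 * and 0 otherwise:
 *
 *	__u32 pin = 1;
 *
 *	if (!ioctl(fd, F2FS_IOC_SET_PIN_FILE, &pin))
 *		ioctl(fd, F2FS_IOC_GET_PIN_FILE, &pin);
 */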
3296
3297 int f2fs_precache_extents(struct inode *inode)
3298 {
3299         struct f2fs_inode_info *fi = F2FS_I(inode);
3300         struct f2fs_map_blocks map;
3301         pgoff_t m_next_extent;
3302         loff_t end;
3303         int err;
3304
3305         if (is_inode_flag_set(inode, FI_NO_EXTENT))
3306                 return -EOPNOTSUPP;
3307
3308         map.m_lblk = 0;
3309         map.m_next_pgofs = NULL;
3310         map.m_next_extent = &m_next_extent;
3311         map.m_seg_type = NO_CHECK_TYPE;
3312         map.m_may_create = false;
3313         end = max_file_blocks(inode);
3314
3315         while (map.m_lblk < end) {
3316                 map.m_len = end - map.m_lblk;
3317
3318                 down_write(&fi->i_gc_rwsem[WRITE]);
3319                 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
3320                 up_write(&fi->i_gc_rwsem[WRITE]);
3321                 if (err)
3322                         return err;
3323
3324                 map.m_lblk = m_next_extent;
3325         }
3326
3327         return err;
3328 }
3329
3330 static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
3331 {
3332         return f2fs_precache_extents(file_inode(filp));
3333 }
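
/*
 * Usage sketch for F2FS_IOC_PRECACHE_EXTENTS above: a hypothetical
 * userspace caller, not part of this file. The command takes no
 * argument and walks the whole file once with F2FS_GET_BLOCK_PRECACHE
 * so that later lookups can be served from the extent cache:
 *
 *	ioctl(fd, F2FS_IOC_PRECACHE_EXTENTS);
 */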
3334
3335 static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
3336 {
3337         struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
3338         __u64 block_count;
3339
3340         if (!capable(CAP_SYS_ADMIN))
3341                 return -EPERM;
3342
3343         if (f2fs_readonly(sbi->sb))
3344                 return -EROFS;
3345
3346         if (copy_from_user(&block_count, (void __user *)arg,
3347                            sizeof(block_count)))
3348                 return -EFAULT;
3349
3350         return f2fs_resize_fs(sbi, block_count);
3351 }
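
/*
 * Usage sketch for F2FS_IOC_RESIZE_FS above: a hypothetical userspace
 * caller, not part of this file. The argument is the new total block
 * count in 4KiB blocks; CAP_SYS_ADMIN is required and the fs must be
 * writable:
 *
 *	__u64 blocks = new_size_bytes >> 12;
 *
 *	ioctl(fd, F2FS_IOC_RESIZE_FS, &blocks);
 */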
3352
3353 static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg)
3354 {
3355         struct inode *inode = file_inode(filp);
3356
3357         f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3358
3359         if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) {
3360                 f2fs_warn(F2FS_I_SB(inode),
3361                           "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem",
3362                           inode->i_ino);
3363                 return -EOPNOTSUPP;
3364         }
3365
3366         return fsverity_ioctl_enable(filp, (const void __user *)arg);
3367 }
3368
3369 static int f2fs_ioc_measure_verity(struct file *filp, unsigned long arg)
3370 {
3371         if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
3372                 return -EOPNOTSUPP;
3373
3374         return fsverity_ioctl_measure(filp, (void __user *)arg);
3375 }
3376
3377 static int f2fs_ioc_read_verity_metadata(struct file *filp, unsigned long arg)
3378 {
3379         if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
3380                 return -EOPNOTSUPP;
3381
3382         return fsverity_ioctl_read_metadata(filp, (const void __user *)arg);
3383 }
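
/*
 * Usage sketch for the fs-verity ioctls above: a hypothetical userspace
 * caller, not part of this file. struct fsverity_enable_arg and the
 * FS_VERITY_* constants come from <linux/fsverity.h>; the file must not
 * be open for writing, and the fs needs the verity feature from mkfs:
 *
 *	struct fsverity_enable_arg arg = {
 *		.version = 1,
 *		.hash_algorithm = FS_VERITY_HASH_ALG_SHA256,
 *		.block_size = 4096,
 *	};
 *
 *	ioctl(fd, FS_IOC_ENABLE_VERITY, &arg);
 */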
3384
3385 static int f2fs_ioc_getfslabel(struct file *filp, unsigned long arg)
3386 {
3387         struct inode *inode = file_inode(filp);
3388         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3389         char *vbuf;
3390         int count;
3391         int err = 0;
3392
3393         vbuf = f2fs_kzalloc(sbi, MAX_VOLUME_NAME, GFP_KERNEL);
3394         if (!vbuf)
3395                 return -ENOMEM;
3396
3397         down_read(&sbi->sb_lock);
3398         count = utf16s_to_utf8s(sbi->raw_super->volume_name,
3399                         ARRAY_SIZE(sbi->raw_super->volume_name),
3400                         UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME);
3401         up_read(&sbi->sb_lock);
3402
3403         if (copy_to_user((char __user *)arg, vbuf,
3404                                 min(FSLABEL_MAX, count)))
3405                 err = -EFAULT;
3406
3407         kfree(vbuf);
3408         return err;
3409 }
3410
3411 static int f2fs_ioc_setfslabel(struct file *filp, unsigned long arg)
3412 {
3413         struct inode *inode = file_inode(filp);
3414         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3415         char *vbuf;
3416         int err = 0;
3417
3418         if (!capable(CAP_SYS_ADMIN))
3419                 return -EPERM;
3420
3421         vbuf = strndup_user((const char __user *)arg, FSLABEL_MAX);
3422         if (IS_ERR(vbuf))
3423                 return PTR_ERR(vbuf);
3424
3425         err = mnt_want_write_file(filp);
3426         if (err)
3427                 goto out;
3428
3429         down_write(&sbi->sb_lock);
3430
3431         memset(sbi->raw_super->volume_name, 0,
3432                         sizeof(sbi->raw_super->volume_name));
3433         utf8s_to_utf16s(vbuf, strlen(vbuf), UTF16_LITTLE_ENDIAN,
3434                         sbi->raw_super->volume_name,
3435                         ARRAY_SIZE(sbi->raw_super->volume_name));
3436
3437         err = f2fs_commit_super(sbi, false);
3438
3439         up_write(&sbi->sb_lock);
3440
3441         mnt_drop_write_file(filp);
3442 out:
3443         kfree(vbuf);
3444         return err;
3445 }
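
/*
 * Usage sketch for the label ioctls above: a hypothetical userspace
 * caller, not part of this file. FSLABEL_MAX comes from <linux/fs.h>;
 * on disk the label is UTF-16LE, which is why both handlers round-trip
 * through utf16s_to_utf8s()/utf8s_to_utf16s():
 *
 *	char label[FSLABEL_MAX] = "";
 *
 *	ioctl(fd, FS_IOC_GETFSLABEL, label);
 *	ioctl(fd, FS_IOC_SETFSLABEL, "scratch");
 */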
3446
3447 static int f2fs_get_compress_blocks(struct file *filp, unsigned long arg)
3448 {
3449         struct inode *inode = file_inode(filp);
3450         __u64 blocks;
3451
3452         if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3453                 return -EOPNOTSUPP;
3454
3455         if (!f2fs_compressed_file(inode))
3456                 return -EINVAL;
3457
3458         blocks = atomic_read(&F2FS_I(inode)->i_compr_blocks);
3459         return put_user(blocks, (u64 __user *)arg);
3460 }
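
/*
 * Usage sketch for F2FS_IOC_GET_COMPRESS_BLOCKS above: a hypothetical
 * userspace caller, not part of this file. The result is the file's
 * i_compr_blocks counter, i.e. roughly the on-disk blocks backing its
 * compressed clusters:
 *
 *	__u64 blocks;
 *
 *	ioctl(fd, F2FS_IOC_GET_COMPRESS_BLOCKS, &blocks);
 */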
3461
3462 static int release_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
3463 {
3464         struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3465         unsigned int released_blocks = 0;
3466         int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3467         block_t blkaddr;
3468         int i;
3469
3470         for (i = 0; i < count; i++) {
3471                 blkaddr = data_blkaddr(dn->inode, dn->node_page,
3472                                                 dn->ofs_in_node + i);
3473
3474                 if (!__is_valid_data_blkaddr(blkaddr))
3475                         continue;
3476                 if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
3477                                         DATA_GENERIC_ENHANCE)))
3478                         return -EFSCORRUPTED;
3479         }
3480
3481         while (count) {
3482                 int compr_blocks = 0;
3483
3484                 for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
3485                         blkaddr = f2fs_data_blkaddr(dn);
3486
3487                         if (i == 0) {
3488                                 if (blkaddr == COMPRESS_ADDR)
3489                                         continue;
3490                                 dn->ofs_in_node += cluster_size;
3491                                 goto next;
3492                         }
3493
3494                         if (__is_valid_data_blkaddr(blkaddr))
3495                                 compr_blocks++;
3496
3497                         if (blkaddr != NEW_ADDR)
3498                                 continue;
3499
3500                         dn->data_blkaddr = NULL_ADDR;
3501                         f2fs_set_data_blkaddr(dn);
3502                 }
3503
3504                 f2fs_i_compr_blocks_update(dn->inode, compr_blocks, false);
3505                 dec_valid_block_count(sbi, dn->inode,
3506                                         cluster_size - compr_blocks);
3507
3508                 released_blocks += cluster_size - compr_blocks;
3509 next:
3510                 count -= cluster_size;
3511         }
3512
3513         return released_blocks;
3514 }
3515
3516 static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
3517 {
3518         struct inode *inode = file_inode(filp);
3519         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3520         pgoff_t page_idx = 0, last_idx;
3521         unsigned int released_blocks = 0;
3522         int ret;
3523         int writecount;
3524
3525         if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3526                 return -EOPNOTSUPP;
3527
3528         if (!f2fs_compressed_file(inode))
3529                 return -EINVAL;
3530
3531         if (f2fs_readonly(sbi->sb))
3532                 return -EROFS;
3533
3534         ret = mnt_want_write_file(filp);
3535         if (ret)
3536                 return ret;
3537
3538         f2fs_balance_fs(F2FS_I_SB(inode), true);
3539
3540         inode_lock(inode);
3541
3542         writecount = atomic_read(&inode->i_writecount);
3543         if ((filp->f_mode & FMODE_WRITE && writecount != 1) ||
3544                         (!(filp->f_mode & FMODE_WRITE) && writecount)) {
3545                 ret = -EBUSY;
3546                 goto out;
3547         }
3548
3549         if (IS_IMMUTABLE(inode)) {
3550                 ret = -EINVAL;
3551                 goto out;
3552         }
3553
3554         ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
3555         if (ret)
3556                 goto out;
3557
3558         F2FS_I(inode)->i_flags |= F2FS_IMMUTABLE_FL;
3559         f2fs_set_inode_flags(inode);
3560         inode->i_ctime = current_time(inode);
3561         f2fs_mark_inode_dirty_sync(inode, true);
3562
3563         if (!atomic_read(&F2FS_I(inode)->i_compr_blocks))
3564                 goto out;
3565
3566         down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3567         down_write(&F2FS_I(inode)->i_mmap_sem);
3568
3569         last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3570
3571         while (page_idx < last_idx) {
3572                 struct dnode_of_data dn;
3573                 pgoff_t end_offset, count;
3574
3575                 set_new_dnode(&dn, inode, NULL, NULL, 0);
3576                 ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
3577                 if (ret) {
3578                         if (ret == -ENOENT) {
3579                                 page_idx = f2fs_get_next_page_offset(&dn,
3580                                                                 page_idx);
3581                                 ret = 0;
3582                                 continue;
3583                         }
3584                         break;
3585                 }
3586
3587                 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3588                 count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
3589                 count = round_up(count, F2FS_I(inode)->i_cluster_size);
3590
3591                 ret = release_compress_blocks(&dn, count);
3592
3593                 f2fs_put_dnode(&dn);
3594
3595                 if (ret < 0)
3596                         break;
3597
3598                 page_idx += count;
3599                 released_blocks += ret;
3600         }
3601
3602         up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3603         up_write(&F2FS_I(inode)->i_mmap_sem);
3604 out:
3605         inode_unlock(inode);
3606
3607         mnt_drop_write_file(filp);
3608
3609         if (ret >= 0) {
3610                 ret = put_user(released_blocks, (u64 __user *)arg);
3611         } else if (released_blocks &&
3612                         atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
3613                 set_sbi_flag(sbi, SBI_NEED_FSCK);
3614                 f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
3615                         "iblocks=%llu, released=%u, compr_blocks=%u, "
3616                         "run fsck to fix.",
3617                         __func__, inode->i_ino, inode->i_blocks,
3618                         released_blocks,
3619                         atomic_read(&F2FS_I(inode)->i_compr_blocks));
3620         }
3621
3622         return ret;
3623 }
3624
3625 static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
3626 {
3627         struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3628         unsigned int reserved_blocks = 0;
3629         int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3630         block_t blkaddr;
3631         int i;
3632
3633         for (i = 0; i < count; i++) {
3634                 blkaddr = data_blkaddr(dn->inode, dn->node_page,
3635                                                 dn->ofs_in_node + i);
3636
3637                 if (!__is_valid_data_blkaddr(blkaddr))
3638                         continue;
3639                 if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
3640                                         DATA_GENERIC_ENHANCE)))
3641                         return -EFSCORRUPTED;
3642         }
3643
3644         while (count) {
3645                 int compr_blocks = 0;
3646                 blkcnt_t reserved;
3647                 int ret;
3648
3649                 for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
3650                         blkaddr = f2fs_data_blkaddr(dn);
3651
3652                         if (i == 0) {
3653                                 if (blkaddr == COMPRESS_ADDR)
3654                                         continue;
3655                                 dn->ofs_in_node += cluster_size;
3656                                 goto next;
3657                         }
3658
3659                         if (__is_valid_data_blkaddr(blkaddr)) {
3660                                 compr_blocks++;
3661                                 continue;
3662                         }
3663
3664                         dn->data_blkaddr = NEW_ADDR;
3665                         f2fs_set_data_blkaddr(dn);
3666                 }
3667
3668                 reserved = cluster_size - compr_blocks;
3669                 ret = inc_valid_block_count(sbi, dn->inode, &reserved);
3670                 if (ret)
3671                         return ret;
3672
3673                 if (reserved != cluster_size - compr_blocks)
3674                         return -ENOSPC;
3675
3676                 f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true);
3677
3678                 reserved_blocks += reserved;
3679 next:
3680                 count -= cluster_size;
3681         }
3682
3683         return reserved_blocks;
3684 }
3685
3686 static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
3687 {
3688         struct inode *inode = file_inode(filp);
3689         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3690         pgoff_t page_idx = 0, last_idx;
3691         unsigned int reserved_blocks = 0;
3692         int ret;
3693
3694         if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3695                 return -EOPNOTSUPP;
3696
3697         if (!f2fs_compressed_file(inode))
3698                 return -EINVAL;
3699
3700         if (f2fs_readonly(sbi->sb))
3701                 return -EROFS;
3702
3703         ret = mnt_want_write_file(filp);
3704         if (ret)
3705                 return ret;
3706
3707         if (atomic_read(&F2FS_I(inode)->i_compr_blocks))
3708                 goto out;
3709
3710         f2fs_balance_fs(F2FS_I_SB(inode), true);
3711
3712         inode_lock(inode);
3713
3714         if (!IS_IMMUTABLE(inode)) {
3715                 ret = -EINVAL;
3716                 goto unlock_inode;
3717         }
3718
3719         down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3720         down_write(&F2FS_I(inode)->i_mmap_sem);
3721
3722         last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3723
3724         while (page_idx < last_idx) {
3725                 struct dnode_of_data dn;
3726                 pgoff_t end_offset, count;
3727
3728                 set_new_dnode(&dn, inode, NULL, NULL, 0);
3729                 ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
3730                 if (ret) {
3731                         if (ret == -ENOENT) {
3732                                 page_idx = f2fs_get_next_page_offset(&dn,
3733                                                                 page_idx);
3734                                 ret = 0;
3735                                 continue;
3736                         }
3737                         break;
3738                 }
3739
3740                 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3741                 count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
3742                 count = round_up(count, F2FS_I(inode)->i_cluster_size);
3743
3744                 ret = reserve_compress_blocks(&dn, count);
3745
3746                 f2fs_put_dnode(&dn);
3747
3748                 if (ret < 0)
3749                         break;
3750
3751                 page_idx += count;
3752                 reserved_blocks += ret;
3753         }
3754
3755         up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3756         up_write(&F2FS_I(inode)->i_mmap_sem);
3757
3758         if (ret >= 0) {
3759                 F2FS_I(inode)->i_flags &= ~F2FS_IMMUTABLE_FL;
3760                 f2fs_set_inode_flags(inode);
3761                 inode->i_ctime = current_time(inode);
3762                 f2fs_mark_inode_dirty_sync(inode, true);
3763         }
3764 unlock_inode:
3765         inode_unlock(inode);
3766 out:
3767         mnt_drop_write_file(filp);
3768
3769         if (ret >= 0) {
3770                 ret = put_user(reserved_blocks, (u64 __user *)arg);
3771         } else if (reserved_blocks &&
3772                         atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
3773                 set_sbi_flag(sbi, SBI_NEED_FSCK);
3774         f2fs_warn(sbi, "%s: partial blocks were reserved i_ino=%lx "
3775                         "iblocks=%llu, reserved=%u, compr_blocks=%u, "
3776                         "run fsck to fix.",
3777                         __func__, inode->i_ino, inode->i_blocks,
3778                         reserved_blocks,
3779                         atomic_read(&F2FS_I(inode)->i_compr_blocks));
3780         }
3781
3782         return ret;
3783 }
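
/*
 * Usage sketch for the release/reserve pair above: a hypothetical
 * userspace caller, not part of this file. Releasing returns the blocks
 * saved by compression to the free pool and marks the file immutable so
 * its mapping cannot change in the meantime; reserving re-reserves them
 * and clears the immutable flag again:
 *
 *	__u64 cnt;
 *
 *	ioctl(fd, F2FS_IOC_RELEASE_COMPRESS_BLOCKS, &cnt);
 *	...
 *	ioctl(fd, F2FS_IOC_RESERVE_COMPRESS_BLOCKS, &cnt);
 */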
3784
3785 static int f2fs_secure_erase(struct block_device *bdev, struct inode *inode,
3786                 pgoff_t off, block_t block, block_t len, u32 flags)
3787 {
3788         struct request_queue *q = bdev_get_queue(bdev);
3789         sector_t sector = SECTOR_FROM_BLOCK(block);
3790         sector_t nr_sects = SECTOR_FROM_BLOCK(len);
3791         int ret = 0;
3792
3793         if (!q)
3794                 return -ENXIO;
3795
3796         if (flags & F2FS_TRIM_FILE_DISCARD)
3797                 ret = blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOFS,
3798                                                 blk_queue_secure_erase(q) ?
3799                                                 BLKDEV_DISCARD_SECURE : 0);
3800
3801         if (!ret && (flags & F2FS_TRIM_FILE_ZEROOUT)) {
3802                 if (IS_ENCRYPTED(inode))
3803                         ret = fscrypt_zeroout_range(inode, off, block, len);
3804                 else
3805                         ret = blkdev_issue_zeroout(bdev, sector, nr_sects,
3806                                         GFP_NOFS, 0);
3807         }
3808
3809         return ret;
3810 }
3811
3812 static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
3813 {
3814         struct inode *inode = file_inode(filp);
3815         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3816         struct address_space *mapping = inode->i_mapping;
3817         struct block_device *prev_bdev = NULL;
3818         struct f2fs_sectrim_range range;
3819         pgoff_t index, pg_end, prev_index = 0;
3820         block_t prev_block = 0, len = 0;
3821         loff_t end_addr;
3822         bool to_end = false;
3823         int ret = 0;
3824
3825         if (!(filp->f_mode & FMODE_WRITE))
3826                 return -EBADF;
3827
3828         if (copy_from_user(&range, (struct f2fs_sectrim_range __user *)arg,
3829                                 sizeof(range)))
3830                 return -EFAULT;
3831
3832         if (range.flags == 0 || (range.flags & ~F2FS_TRIM_FILE_MASK) ||
3833                         !S_ISREG(inode->i_mode))
3834                 return -EINVAL;
3835
3836         if (((range.flags & F2FS_TRIM_FILE_DISCARD) &&
3837                         !f2fs_hw_support_discard(sbi)) ||
3838                         ((range.flags & F2FS_TRIM_FILE_ZEROOUT) &&
3839                          IS_ENCRYPTED(inode) && f2fs_is_multi_device(sbi)))
3840                 return -EOPNOTSUPP;
3841
3842         file_start_write(filp);
3843         inode_lock(inode);
3844
3845         if (f2fs_is_atomic_file(inode) || f2fs_compressed_file(inode) ||
3846                         range.start >= inode->i_size) {
3847                 ret = -EINVAL;
3848                 goto err;
3849         }
3850
3851         if (range.len == 0)
3852                 goto err;
3853
3854         if (inode->i_size - range.start > range.len) {
3855                 end_addr = range.start + range.len;
3856         } else {
3857                 end_addr = range.len == (u64)-1 ?
3858                         sbi->sb->s_maxbytes : inode->i_size;
3859                 to_end = true;
3860         }
3861
3862         if (!IS_ALIGNED(range.start, F2FS_BLKSIZE) ||
3863                         (!to_end && !IS_ALIGNED(end_addr, F2FS_BLKSIZE))) {
3864                 ret = -EINVAL;
3865                 goto err;
3866         }
3867
3868         index = F2FS_BYTES_TO_BLK(range.start);
3869         pg_end = DIV_ROUND_UP(end_addr, F2FS_BLKSIZE);
3870
3871         ret = f2fs_convert_inline_inode(inode);
3872         if (ret)
3873                 goto err;
3874
3875         down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3876         down_write(&F2FS_I(inode)->i_mmap_sem);
3877
3878         ret = filemap_write_and_wait_range(mapping, range.start,
3879                         to_end ? LLONG_MAX : end_addr - 1);
3880         if (ret)
3881                 goto out;
3882
3883         truncate_inode_pages_range(mapping, range.start,
3884                         to_end ? -1 : end_addr - 1);
3885
3886         while (index < pg_end) {
3887                 struct dnode_of_data dn;
3888                 pgoff_t end_offset, count;
3889                 int i;
3890
3891                 set_new_dnode(&dn, inode, NULL, NULL, 0);
3892                 ret = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
3893                 if (ret) {
3894                         if (ret == -ENOENT) {
3895                                 index = f2fs_get_next_page_offset(&dn, index);
3896                                 continue;
3897                         }
3898                         goto out;
3899                 }
3900
3901                 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3902                 count = min(end_offset - dn.ofs_in_node, pg_end - index);
3903                 for (i = 0; i < count; i++, index++, dn.ofs_in_node++) {
3904                         struct block_device *cur_bdev;
3905                         block_t blkaddr = f2fs_data_blkaddr(&dn);
3906
3907                         if (!__is_valid_data_blkaddr(blkaddr))
3908                                 continue;
3909
3910                         if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
3911                                                 DATA_GENERIC_ENHANCE)) {
3912                                 ret = -EFSCORRUPTED;
3913                                 f2fs_put_dnode(&dn);
3914                                 goto out;
3915                         }
3916
3917                         cur_bdev = f2fs_target_device(sbi, blkaddr, NULL);
3918                         if (f2fs_is_multi_device(sbi)) {
3919                                 int di = f2fs_target_device_index(sbi, blkaddr);
3920
3921                                 blkaddr -= FDEV(di).start_blk;
3922                         }
3923
3924                         if (len) {
3925                                 if (prev_bdev == cur_bdev &&
3926                                                 index == prev_index + len &&
3927                                                 blkaddr == prev_block + len) {
3928                                         len++;
3929                                 } else {
3930                                         ret = f2fs_secure_erase(prev_bdev,
3931                                                 inode, prev_index, prev_block,
3932                                                 len, range.flags);
3933                                         if (ret) {
3934                                                 f2fs_put_dnode(&dn);
3935                                                 goto out;
3936                                         }
3937
3938                                         len = 0;
3939                                 }
3940                         }
3941
3942                         if (!len) {
3943                                 prev_bdev = cur_bdev;
3944                                 prev_index = index;
3945                                 prev_block = blkaddr;
3946                                 len = 1;
3947                         }
3948                 }
3949
3950                 f2fs_put_dnode(&dn);
3951
3952                 if (fatal_signal_pending(current)) {
3953                         ret = -EINTR;
3954                         goto out;
3955                 }
3956                 cond_resched();
3957         }
3958
3959         if (len)
3960                 ret = f2fs_secure_erase(prev_bdev, inode, prev_index,
3961                                 prev_block, len, range.flags);
3962 out:
3963         up_write(&F2FS_I(inode)->i_mmap_sem);
3964         up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3965 err:
3966         inode_unlock(inode);
3967         file_end_write(filp);
3968
3969         return ret;
3970 }
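
/*
 * Usage sketch for F2FS_IOC_SEC_TRIM_FILE above: a hypothetical
 * userspace caller, not part of this file. struct f2fs_sectrim_range
 * and the F2FS_TRIM_FILE_* flags are uapi; start/len must be block
 * aligned unless len reaches EOF, per the IS_ALIGNED() checks above:
 *
 *	struct f2fs_sectrim_range sr = {
 *		.start = 0,
 *		.len = (__u64)-1,
 *		.flags = F2FS_TRIM_FILE_DISCARD | F2FS_TRIM_FILE_ZEROOUT,
 *	};
 *
 *	ioctl(fd, F2FS_IOC_SEC_TRIM_FILE, &sr);
 */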
3971
3972 static int f2fs_ioc_get_compress_option(struct file *filp, unsigned long arg)
3973 {
3974         struct inode *inode = file_inode(filp);
3975         struct f2fs_comp_option option;
3976
3977         if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3978                 return -EOPNOTSUPP;
3979
3980         inode_lock_shared(inode);
3981
3982         if (!f2fs_compressed_file(inode)) {
3983                 inode_unlock_shared(inode);
3984                 return -ENODATA;
3985         }
3986
3987         option.algorithm = F2FS_I(inode)->i_compress_algorithm;
3988         option.log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
3989
3990         inode_unlock_shared(inode);
3991
3992         if (copy_to_user((struct f2fs_comp_option __user *)arg, &option,
3993                                 sizeof(option)))
3994                 return -EFAULT;
3995
3996         return 0;
3997 }
3998
3999 static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
4000 {
4001         struct inode *inode = file_inode(filp);
4002         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4003         struct f2fs_comp_option option;
4004         int ret = 0;
4005
4006         if (!f2fs_sb_has_compression(sbi))
4007                 return -EOPNOTSUPP;
4008
4009         if (!(filp->f_mode & FMODE_WRITE))
4010                 return -EBADF;
4011
4012         if (copy_from_user(&option, (struct f2fs_comp_option __user *)arg,
4013                                 sizeof(option)))
4014                 return -EFAULT;
4015
4016         if (!f2fs_compressed_file(inode) ||
4017                         option.log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
4018                         option.log_cluster_size > MAX_COMPRESS_LOG_SIZE ||
4019                         option.algorithm >= COMPRESS_MAX)
4020                 return -EINVAL;
4021
4022         file_start_write(filp);
4023         inode_lock(inode);
4024
4025         if (f2fs_is_mmap_file(inode) || get_dirty_pages(inode)) {
4026                 ret = -EBUSY;
4027                 goto out;
4028         }
4029
4030         if (inode->i_size != 0) {
4031                 ret = -EFBIG;
4032                 goto out;
4033         }
4034
4035         F2FS_I(inode)->i_compress_algorithm = option.algorithm;
4036         F2FS_I(inode)->i_log_cluster_size = option.log_cluster_size;
4037         F2FS_I(inode)->i_cluster_size = 1 << option.log_cluster_size;
4038         f2fs_mark_inode_dirty_sync(inode, true);
4039
4040         if (!f2fs_is_compress_backend_ready(inode))
4041                 f2fs_warn(sbi, "compression algorithm is successfully set, "
4042                         "but the current kernel doesn't support this algorithm.");
4043 out:
4044         inode_unlock(inode);
4045         file_end_write(filp);
4046
4047         return ret;
4048 }
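
/*
 * Usage sketch for the compress-option ioctls above: a hypothetical
 * userspace caller, not part of this file. struct f2fs_comp_option is
 * uapi, but the algorithm numbering follows the kernel's COMPRESS_*
 * enum (0 lzo, 1 lz4, 2 zstd, 3 lzo-rle), which is not exported in the
 * uapi header at this point. Options can only be changed while the file
 * is empty:
 *
 *	struct f2fs_comp_option opt = {
 *		.algorithm = 1,
 *		.log_cluster_size = 2,
 *	};
 *
 *	ioctl(fd, F2FS_IOC_SET_COMPRESS_OPTION, &opt);
 */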
4049
4050 static int redirty_blocks(struct inode *inode, pgoff_t page_idx, int len)
4051 {
4052         DEFINE_READAHEAD(ractl, NULL, inode->i_mapping, page_idx);
4053         struct address_space *mapping = inode->i_mapping;
4054         struct page *page;
4055         pgoff_t redirty_idx = page_idx;
4056         int i, page_len = 0, ret = 0;
4057
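	/*
	 * Two passes: first populate the page cache for the whole range
	 * (readahead primes it, read_cache_page() waits for each page),
	 * then lock and redirty every cached page so writeback re-encodes
	 * the clusters with the inode's current compression state.
	 */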
4058         page_cache_ra_unbounded(&ractl, len, 0);
4059
4060         for (i = 0; i < len; i++, page_idx++) {
4061                 page = read_cache_page(mapping, page_idx, NULL, NULL);
4062                 if (IS_ERR(page)) {
4063                         ret = PTR_ERR(page);
4064                         break;
4065                 }
4066                 page_len++;
4067         }
4068
4069         for (i = 0; i < page_len; i++, redirty_idx++) {
4070                 page = find_lock_page(mapping, redirty_idx);
4071                 if (!page) {
4072                         ret = -ENOMEM;
4073                         break;
4074                 }
4075                 set_page_dirty(page);
4076                 f2fs_put_page(page, 1);
4077                 f2fs_put_page(page, 0);
4078         }
4079
4080         return ret;
4081 }
4082
4083 static int f2fs_ioc_decompress_file(struct file *filp, unsigned long arg)
4084 {
4085         struct inode *inode = file_inode(filp);
4086         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4087         struct f2fs_inode_info *fi = F2FS_I(inode);
4088         pgoff_t page_idx = 0, last_idx;
4089         unsigned int blk_per_seg = sbi->blocks_per_seg;
4090         int cluster_size = F2FS_I(inode)->i_cluster_size;
4091         int count, ret;
4092
4093         if (!f2fs_sb_has_compression(sbi) ||
4094                         F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
4095                 return -EOPNOTSUPP;
4096
4097         if (!(filp->f_mode & FMODE_WRITE))
4098                 return -EBADF;
4099
4100         if (!f2fs_compressed_file(inode))
4101                 return -EINVAL;
4102
4103         f2fs_balance_fs(F2FS_I_SB(inode), true);
4104
4105         file_start_write(filp);
4106         inode_lock(inode);
4107
4108         if (!f2fs_is_compress_backend_ready(inode)) {
4109                 ret = -EOPNOTSUPP;
4110                 goto out;
4111         }
4112
4113         if (f2fs_is_mmap_file(inode)) {
4114                 ret = -EBUSY;
4115                 goto out;
4116         }
4117
4118         ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
4119         if (ret)
4120                 goto out;
4121
4122         if (!atomic_read(&fi->i_compr_blocks))
4123                 goto out;
4124
4125         last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
4126
4127         count = last_idx - page_idx;
4128         while (count) {
4129                 int len = min(cluster_size, count);
4130
4131                 ret = redirty_blocks(inode, page_idx, len);
4132                 if (ret < 0)
4133                         break;
4134
4135                 if (get_dirty_pages(inode) >= blk_per_seg)
4136                         filemap_fdatawrite(inode->i_mapping);
4137
4138                 count -= len;
4139                 page_idx += len;
4140         }
4141
4142         if (!ret)
4143                 ret = filemap_write_and_wait_range(inode->i_mapping, 0,
4144                                                         LLONG_MAX);
4145
4146         if (ret)
4147                 f2fs_warn(sbi, "%s: The file might be partially decompressed "
4148                                 "(errno=%d). Please delete the file.",
4149                                 __func__, ret);
4150 out:
4151         inode_unlock(inode);
4152         file_end_write(filp);
4153
4154         return ret;
4155 }
4156
4157 static int f2fs_ioc_compress_file(struct file *filp, unsigned long arg)
4158 {
4159         struct inode *inode = file_inode(filp);
4160         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4161         pgoff_t page_idx = 0, last_idx;
4162         unsigned int blk_per_seg = sbi->blocks_per_seg;
4163         int cluster_size = F2FS_I(inode)->i_cluster_size;
4164         int count, ret;
4165
4166         if (!f2fs_sb_has_compression(sbi) ||
4167                         F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
4168                 return -EOPNOTSUPP;
4169
4170         if (!(filp->f_mode & FMODE_WRITE))
4171                 return -EBADF;
4172
4173         if (!f2fs_compressed_file(inode))
4174                 return -EINVAL;
4175
4176         f2fs_balance_fs(F2FS_I_SB(inode), true);
4177
4178         file_start_write(filp);
4179         inode_lock(inode);
4180
4181         if (!f2fs_is_compress_backend_ready(inode)) {
4182                 ret = -EOPNOTSUPP;
4183                 goto out;
4184         }
4185
4186         if (f2fs_is_mmap_file(inode)) {
4187                 ret = -EBUSY;
4188                 goto out;
4189         }
4190
4191         ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
4192         if (ret)
4193                 goto out;
4194
4195         set_inode_flag(inode, FI_ENABLE_COMPRESS);
4196
4197         last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
4198
4199         count = last_idx - page_idx;
4200         while (count) {
4201                 int len = min(cluster_size, count);
4202
4203                 ret = redirty_blocks(inode, page_idx, len);
4204                 if (ret < 0)
4205                         break;
4206
4207                 if (get_dirty_pages(inode) >= blk_per_seg)
4208                         filemap_fdatawrite(inode->i_mapping);
4209
4210                 count -= len;
4211                 page_idx += len;
4212         }
4213
4214         if (!ret)
4215                 ret = filemap_write_and_wait_range(inode->i_mapping, 0,
4216                                                         LLONG_MAX);
4217
4218         clear_inode_flag(inode, FI_ENABLE_COMPRESS);
4219
4220         if (ret)
4221                 f2fs_warn(sbi, "%s: The file might be partially compressed "
4222                                 "(errno=%d). Please delete the file.",
4223                                 __func__, ret);
4224 out:
4225         inode_unlock(inode);
4226         file_end_write(filp);
4227
4228         return ret;
4229 }
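
/*
 * Usage sketch for F2FS_IOC_DECOMPRESS_FILE/F2FS_IOC_COMPRESS_FILE above:
 * a hypothetical userspace caller, not part of this file. Both commands
 * take no argument and require compress_mode=user; they rewrite the file
 * in place cluster by cluster, so they can run long on large files:
 *
 *	ioctl(fd, F2FS_IOC_COMPRESS_FILE);
 *	ioctl(fd, F2FS_IOC_DECOMPRESS_FILE);
 */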
4230
4231 static long __f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4232 {
4233         switch (cmd) {
4234         case FS_IOC_GETFLAGS:
4235                 return f2fs_ioc_getflags(filp, arg);
4236         case FS_IOC_SETFLAGS:
4237                 return f2fs_ioc_setflags(filp, arg);
4238         case FS_IOC_GETVERSION:
4239                 return f2fs_ioc_getversion(filp, arg);
4240         case F2FS_IOC_START_ATOMIC_WRITE:
4241                 return f2fs_ioc_start_atomic_write(filp);
4242         case F2FS_IOC_COMMIT_ATOMIC_WRITE:
4243                 return f2fs_ioc_commit_atomic_write(filp);
4244         case F2FS_IOC_START_VOLATILE_WRITE:
4245                 return f2fs_ioc_start_volatile_write(filp);
4246         case F2FS_IOC_RELEASE_VOLATILE_WRITE:
4247                 return f2fs_ioc_release_volatile_write(filp);
4248         case F2FS_IOC_ABORT_VOLATILE_WRITE:
4249                 return f2fs_ioc_abort_volatile_write(filp);
4250         case F2FS_IOC_SHUTDOWN:
4251                 return f2fs_ioc_shutdown(filp, arg);
4252         case FITRIM:
4253                 return f2fs_ioc_fitrim(filp, arg);
4254         case FS_IOC_SET_ENCRYPTION_POLICY:
4255                 return f2fs_ioc_set_encryption_policy(filp, arg);
4256         case FS_IOC_GET_ENCRYPTION_POLICY:
4257                 return f2fs_ioc_get_encryption_policy(filp, arg);
4258         case FS_IOC_GET_ENCRYPTION_PWSALT:
4259                 return f2fs_ioc_get_encryption_pwsalt(filp, arg);
4260         case FS_IOC_GET_ENCRYPTION_POLICY_EX:
4261                 return f2fs_ioc_get_encryption_policy_ex(filp, arg);
4262         case FS_IOC_ADD_ENCRYPTION_KEY:
4263                 return f2fs_ioc_add_encryption_key(filp, arg);
4264         case FS_IOC_REMOVE_ENCRYPTION_KEY:
4265                 return f2fs_ioc_remove_encryption_key(filp, arg);
4266         case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
4267                 return f2fs_ioc_remove_encryption_key_all_users(filp, arg);
4268         case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
4269                 return f2fs_ioc_get_encryption_key_status(filp, arg);
4270         case FS_IOC_GET_ENCRYPTION_NONCE:
4271                 return f2fs_ioc_get_encryption_nonce(filp, arg);
4272         case F2FS_IOC_GARBAGE_COLLECT:
4273                 return f2fs_ioc_gc(filp, arg);
4274         case F2FS_IOC_GARBAGE_COLLECT_RANGE:
4275                 return f2fs_ioc_gc_range(filp, arg);
4276         case F2FS_IOC_WRITE_CHECKPOINT:
4277                 return f2fs_ioc_write_checkpoint(filp, arg);
4278         case F2FS_IOC_DEFRAGMENT:
4279                 return f2fs_ioc_defragment(filp, arg);
4280         case F2FS_IOC_MOVE_RANGE:
4281                 return f2fs_ioc_move_range(filp, arg);
4282         case F2FS_IOC_FLUSH_DEVICE:
4283                 return f2fs_ioc_flush_device(filp, arg);
4284         case F2FS_IOC_GET_FEATURES:
4285                 return f2fs_ioc_get_features(filp, arg);
4286         case FS_IOC_FSGETXATTR:
4287                 return f2fs_ioc_fsgetxattr(filp, arg);
4288         case FS_IOC_FSSETXATTR:
4289                 return f2fs_ioc_fssetxattr(filp, arg);
4290         case F2FS_IOC_GET_PIN_FILE:
4291                 return f2fs_ioc_get_pin_file(filp, arg);
4292         case F2FS_IOC_SET_PIN_FILE:
4293                 return f2fs_ioc_set_pin_file(filp, arg);
4294         case F2FS_IOC_PRECACHE_EXTENTS:
4295                 return f2fs_ioc_precache_extents(filp, arg);
4296         case F2FS_IOC_RESIZE_FS:
4297                 return f2fs_ioc_resize_fs(filp, arg);
4298         case FS_IOC_ENABLE_VERITY:
4299                 return f2fs_ioc_enable_verity(filp, arg);
4300         case FS_IOC_MEASURE_VERITY:
4301                 return f2fs_ioc_measure_verity(filp, arg);
4302         case FS_IOC_READ_VERITY_METADATA:
4303                 return f2fs_ioc_read_verity_metadata(filp, arg);
4304         case FS_IOC_GETFSLABEL:
4305                 return f2fs_ioc_getfslabel(filp, arg);
4306         case FS_IOC_SETFSLABEL:
4307                 return f2fs_ioc_setfslabel(filp, arg);
4308         case F2FS_IOC_GET_COMPRESS_BLOCKS:
4309                 return f2fs_get_compress_blocks(filp, arg);
4310         case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
4311                 return f2fs_release_compress_blocks(filp, arg);
4312         case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
4313                 return f2fs_reserve_compress_blocks(filp, arg);
4314         case F2FS_IOC_SEC_TRIM_FILE:
4315                 return f2fs_sec_trim_file(filp, arg);
4316         case F2FS_IOC_GET_COMPRESS_OPTION:
4317                 return f2fs_ioc_get_compress_option(filp, arg);
4318         case F2FS_IOC_SET_COMPRESS_OPTION:
4319                 return f2fs_ioc_set_compress_option(filp, arg);
4320         case F2FS_IOC_DECOMPRESS_FILE:
4321                 return f2fs_ioc_decompress_file(filp, arg);
4322         case F2FS_IOC_COMPRESS_FILE:
4323                 return f2fs_ioc_compress_file(filp, arg);
4324         default:
4325                 return -ENOTTY;
4326         }
4327 }
4328
4329 long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4330 {
4331         if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
4332                 return -EIO;
4333         if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp))))
4334                 return -ENOSPC;
4335
4336         return __f2fs_ioctl(filp, cmd, arg);
4337 }
4338
4339 static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
4340 {
4341         struct file *file = iocb->ki_filp;
4342         struct inode *inode = file_inode(file);
4343         int ret;
4344
4345         if (!f2fs_is_compress_backend_ready(inode))
4346                 return -EOPNOTSUPP;
4347
4348         ret = generic_file_read_iter(iocb, iter);
4349
4350         if (ret > 0)
4351                 f2fs_update_iostat(F2FS_I_SB(inode), APP_READ_IO, ret);
4352
4353         return ret;
4354 }
4355
4356 static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
4357 {
4358         struct file *file = iocb->ki_filp;
4359         struct inode *inode = file_inode(file);
4360         ssize_t ret;
4361
4362         if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
4363                 ret = -EIO;
4364                 goto out;
4365         }
4366
4367         if (!f2fs_is_compress_backend_ready(inode)) {
4368                 ret = -EOPNOTSUPP;
4369                 goto out;
4370         }
4371
4372         if (iocb->ki_flags & IOCB_NOWAIT) {
4373                 if (!inode_trylock(inode)) {
4374                         ret = -EAGAIN;
4375                         goto out;
4376                 }
4377         } else {
4378                 inode_lock(inode);
4379         }
4380
4381         if (unlikely(IS_IMMUTABLE(inode))) {
4382                 ret = -EPERM;
4383                 goto unlock;
4384         }
4385
4386         ret = generic_write_checks(iocb, from);
4387         if (ret > 0) {
4388                 bool preallocated = false;
4389                 size_t target_size = 0;
4390                 int err;
4391
4392                 if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
4393                         set_inode_flag(inode, FI_NO_PREALLOC);
4394
4395                 if ((iocb->ki_flags & IOCB_NOWAIT)) {
4396                         if (!f2fs_overwrite_io(inode, iocb->ki_pos,
4397                                                 iov_iter_count(from)) ||
4398                                 f2fs_has_inline_data(inode) ||
4399                                 f2fs_force_buffered_io(inode, iocb, from)) {
4400                                 clear_inode_flag(inode, FI_NO_PREALLOC);
4401                                 inode_unlock(inode);
4402                                 ret = -EAGAIN;
4403                                 goto out;
4404                         }
4405                         goto write;
4406                 }
4407
4408                 if (is_inode_flag_set(inode, FI_NO_PREALLOC))
4409                         goto write;
4410
4411                 if (iocb->ki_flags & IOCB_DIRECT) {
4412                         /*
4413                          * Convert inline data for Direct I/O before entering
4414                          * f2fs_direct_IO().
4415                          */
4416                         err = f2fs_convert_inline_inode(inode);
4417                         if (err)
4418                                 goto out_err;
4419                         /*
4420                          * If f2fs_force_buffered_io() is true, we have to allocate
4421                          * blocks all the time, since f2fs_direct_IO will fall
4422                          * back to buffered IO.
4423                          */
4424                         if (!f2fs_force_buffered_io(inode, iocb, from) &&
4425                                         allow_outplace_dio(inode, iocb, from))
4426                                 goto write;
4427                 }
4428                 preallocated = true;
4429                 target_size = iocb->ki_pos + iov_iter_count(from);
4430
4431                 err = f2fs_preallocate_blocks(iocb, from);
4432                 if (err) {
4433 out_err:
4434                         clear_inode_flag(inode, FI_NO_PREALLOC);
4435                         inode_unlock(inode);
4436                         ret = err;
4437                         goto out;
4438                 }
4439 write:
4440                 ret = __generic_file_write_iter(iocb, from);
4441                 clear_inode_flag(inode, FI_NO_PREALLOC);
4442
4443                 /* if we couldn't write data, we should deallocate blocks. */
4444                 if (preallocated && i_size_read(inode) < target_size)
4445                         f2fs_truncate(inode);
4446
4447                 if (ret > 0)
4448                         f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
4449         }
4450 unlock:
4451         inode_unlock(inode);
4452 out:
4453         trace_f2fs_file_write_iter(inode, iocb->ki_pos,
4454                                         iov_iter_count(from), ret);
4455         if (ret > 0)
4456                 ret = generic_write_sync(iocb, ret);
4457         return ret;
4458 }
4459
4460 #ifdef CONFIG_COMPAT
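/*
 * On 32-bit userspace, u64 only has 4-byte alignment, so these structs
 * have a different layout (and thus different ioctl numbers) than their
 * native 64-bit counterparts; the compat_u64 mirrors below reproduce
 * the 32-bit ABI so 32-bit callers keep working on a 64-bit kernel.
 */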
4461 struct compat_f2fs_gc_range {
4462         u32 sync;
4463         compat_u64 start;
4464         compat_u64 len;
4465 };
4466 #define F2FS_IOC32_GARBAGE_COLLECT_RANGE        _IOW(F2FS_IOCTL_MAGIC, 11,\
4467                                                 struct compat_f2fs_gc_range)
4468
4469 static int f2fs_compat_ioc_gc_range(struct file *file, unsigned long arg)
4470 {
4471         struct compat_f2fs_gc_range __user *urange;
4472         struct f2fs_gc_range range;
4473         int err;
4474
4475         urange = compat_ptr(arg);
4476         err = get_user(range.sync, &urange->sync);
4477         err |= get_user(range.start, &urange->start);
4478         err |= get_user(range.len, &urange->len);
4479         if (err)
4480                 return -EFAULT;
4481
4482         return __f2fs_ioc_gc_range(file, &range);
4483 }
4484
4485 struct compat_f2fs_move_range {
4486         u32 dst_fd;
4487         compat_u64 pos_in;
4488         compat_u64 pos_out;
4489         compat_u64 len;
4490 };
4491 #define F2FS_IOC32_MOVE_RANGE           _IOWR(F2FS_IOCTL_MAGIC, 9,      \
4492                                         struct compat_f2fs_move_range)
4493
4494 static int f2fs_compat_ioc_move_range(struct file *file, unsigned long arg)
4495 {
4496         struct compat_f2fs_move_range __user *urange;
4497         struct f2fs_move_range range;
4498         int err;
4499
4500         urange = compat_ptr(arg);
4501         err = get_user(range.dst_fd, &urange->dst_fd);
4502         err |= get_user(range.pos_in, &urange->pos_in);
4503         err |= get_user(range.pos_out, &urange->pos_out);
4504         err |= get_user(range.len, &urange->len);
4505         if (err)
4506                 return -EFAULT;
4507
4508         return __f2fs_ioc_move_range(file, &range);
4509 }
4510
4511 long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
4512 {
4513         if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
4514                 return -EIO;
4515         if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(file))))
4516                 return -ENOSPC;
4517
4518         switch (cmd) {
4519         case FS_IOC32_GETFLAGS:
4520                 cmd = FS_IOC_GETFLAGS;
4521                 break;
4522         case FS_IOC32_SETFLAGS:
4523                 cmd = FS_IOC_SETFLAGS;
4524                 break;
4525         case FS_IOC32_GETVERSION:
4526                 cmd = FS_IOC_GETVERSION;
4527                 break;
4528         case F2FS_IOC32_GARBAGE_COLLECT_RANGE:
4529                 return f2fs_compat_ioc_gc_range(file, arg);
4530         case F2FS_IOC32_MOVE_RANGE:
4531                 return f2fs_compat_ioc_move_range(file, arg);
4532         case F2FS_IOC_START_ATOMIC_WRITE:
4533         case F2FS_IOC_COMMIT_ATOMIC_WRITE:
4534         case F2FS_IOC_START_VOLATILE_WRITE:
4535         case F2FS_IOC_RELEASE_VOLATILE_WRITE:
4536         case F2FS_IOC_ABORT_VOLATILE_WRITE:
4537         case F2FS_IOC_SHUTDOWN:
4538         case FITRIM:
4539         case FS_IOC_SET_ENCRYPTION_POLICY:
4540         case FS_IOC_GET_ENCRYPTION_PWSALT:
4541         case FS_IOC_GET_ENCRYPTION_POLICY:
4542         case FS_IOC_GET_ENCRYPTION_POLICY_EX:
4543         case FS_IOC_ADD_ENCRYPTION_KEY:
4544         case FS_IOC_REMOVE_ENCRYPTION_KEY:
4545         case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
4546         case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
4547         case FS_IOC_GET_ENCRYPTION_NONCE:
4548         case F2FS_IOC_GARBAGE_COLLECT:
4549         case F2FS_IOC_WRITE_CHECKPOINT:
4550         case F2FS_IOC_DEFRAGMENT:
4551         case F2FS_IOC_FLUSH_DEVICE:
4552         case F2FS_IOC_GET_FEATURES:
4553         case FS_IOC_FSGETXATTR:
4554         case FS_IOC_FSSETXATTR:
4555         case F2FS_IOC_GET_PIN_FILE:
4556         case F2FS_IOC_SET_PIN_FILE:
4557         case F2FS_IOC_PRECACHE_EXTENTS:
4558         case F2FS_IOC_RESIZE_FS:
4559         case FS_IOC_ENABLE_VERITY:
4560         case FS_IOC_MEASURE_VERITY:
4561         case FS_IOC_READ_VERITY_METADATA:
4562         case FS_IOC_GETFSLABEL:
4563         case FS_IOC_SETFSLABEL:
4564         case F2FS_IOC_GET_COMPRESS_BLOCKS:
4565         case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
4566         case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
4567         case F2FS_IOC_SEC_TRIM_FILE:
4568         case F2FS_IOC_GET_COMPRESS_OPTION:
4569         case F2FS_IOC_SET_COMPRESS_OPTION:
4570         case F2FS_IOC_DECOMPRESS_FILE:
4571         case F2FS_IOC_COMPRESS_FILE:
4572                 break;
4573         default:
4574                 return -ENOIOCTLCMD;
4575         }
4576         return __f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
4577 }
4578 #endif
4579
4580 const struct file_operations f2fs_file_operations = {
4581         .llseek         = f2fs_llseek,
4582         .read_iter      = f2fs_file_read_iter,
4583         .write_iter     = f2fs_file_write_iter,
4584         .open           = f2fs_file_open,
4585         .release        = f2fs_release_file,
4586         .mmap           = f2fs_file_mmap,
4587         .flush          = f2fs_file_flush,
4588         .fsync          = f2fs_sync_file,
4589         .fallocate      = f2fs_fallocate,
4590         .unlocked_ioctl = f2fs_ioctl,
4591 #ifdef CONFIG_COMPAT
4592         .compat_ioctl   = f2fs_compat_ioctl,
4593 #endif
4594         .splice_read    = generic_file_splice_read,
4595         .splice_write   = iter_file_splice_write,
4596 };