f2fs: should put a page beyond EOF when preparing a write
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * fs/f2fs/file.c
4  *
5  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6  *             http://www.samsung.com/
7  */
8 #include <linux/fs.h>
9 #include <linux/f2fs_fs.h>
10 #include <linux/stat.h>
11 #include <linux/buffer_head.h>
12 #include <linux/writeback.h>
13 #include <linux/blkdev.h>
14 #include <linux/falloc.h>
15 #include <linux/types.h>
16 #include <linux/compat.h>
17 #include <linux/uaccess.h>
18 #include <linux/mount.h>
19 #include <linux/pagevec.h>
20 #include <linux/uio.h>
21 #include <linux/uuid.h>
22 #include <linux/file.h>
23 #include <linux/nls.h>
24 #include <linux/sched/signal.h>
25 #include <linux/fileattr.h>
26 #include <linux/fadvise.h>
27
28 #include "f2fs.h"
29 #include "node.h"
30 #include "segment.h"
31 #include "xattr.h"
32 #include "acl.h"
33 #include "gc.h"
34 #include "iostat.h"
35 #include <trace/events/f2fs.h>
36 #include <uapi/linux/f2fs.h>
37
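/*
 * Read fault handler: take i_mmap_sem shared so the fault cannot race
 * with truncate/hole-punch paths (which take it exclusively), then
 * defer to the generic filemap_fault().
 */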
38 static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
39 {
40         struct inode *inode = file_inode(vmf->vma->vm_file);
41         vm_fault_t ret;
42
43         down_read(&F2FS_I(inode)->i_mmap_sem);
44         ret = filemap_fault(vmf);
45         up_read(&F2FS_I(inode)->i_mmap_sem);
46
47         if (!ret)
48                 f2fs_update_iostat(F2FS_I_SB(inode), APP_MAPPED_READ_IO,
49                                                         F2FS_BLKSIZE);
50
51         trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret);
52
53         return ret;
54 }
55
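/*
 * Write fault handler, in order:
 *  - reject immutable and released-compress inodes;
 *  - convert inline data, since a mapped page cannot stay inline;
 *  - allocate (or, for a compressed cluster, just look up) the block;
 *  - revalidate the page against the mapping and i_size under the lock;
 *  - zero the part of the last page beyond EOF before dirtying it.
 */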
56 static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
57 {
58         struct page *page = vmf->page;
59         struct inode *inode = file_inode(vmf->vma->vm_file);
60         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
61         struct dnode_of_data dn;
62         bool need_alloc = true;
63         int err = 0;
64
65         if (unlikely(IS_IMMUTABLE(inode)))
66                 return VM_FAULT_SIGBUS;
67
68         if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
69                 return VM_FAULT_SIGBUS;
70
71         if (unlikely(f2fs_cp_error(sbi))) {
72                 err = -EIO;
73                 goto err;
74         }
75
76         if (!f2fs_is_checkpoint_ready(sbi)) {
77                 err = -ENOSPC;
78                 goto err;
79         }
80
81         err = f2fs_convert_inline_inode(inode);
82         if (err)
83                 goto err;
84
85 #ifdef CONFIG_F2FS_FS_COMPRESSION
86         if (f2fs_compressed_file(inode)) {
87                 int ret = f2fs_is_compressed_cluster(inode, page->index);
88
89                 if (ret < 0) {
90                         err = ret;
91                         goto err;
92                 } else if (ret) {
93                         need_alloc = false;
94                 }
95         }
96 #endif
97         /* should be done outside of any locked page */
98         if (need_alloc)
99                 f2fs_balance_fs(sbi, true);
100
101         sb_start_pagefault(inode->i_sb);
102
103         f2fs_bug_on(sbi, f2fs_has_inline_data(inode));
104
105         file_update_time(vmf->vma->vm_file);
106         down_read(&F2FS_I(inode)->i_mmap_sem);
107         lock_page(page);
108         if (unlikely(page->mapping != inode->i_mapping ||
109                         page_offset(page) > i_size_read(inode) ||
110                         !PageUptodate(page))) {
111                 unlock_page(page);
112                 err = -EFAULT;
113                 goto out_sem;
114         }
115
116         if (need_alloc) {
117                 /* block allocation */
118                 f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
119                 set_new_dnode(&dn, inode, NULL, NULL, 0);
120                 err = f2fs_get_block(&dn, page->index);
121                 f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
122         }
123
124 #ifdef CONFIG_F2FS_FS_COMPRESSION
125         if (!need_alloc) {
126                 set_new_dnode(&dn, inode, NULL, NULL, 0);
127                 err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
128                 f2fs_put_dnode(&dn);
129         }
130 #endif
131         if (err) {
132                 unlock_page(page);
133                 goto out_sem;
134         }
135
136         f2fs_wait_on_page_writeback(page, DATA, false, true);
137
138         /* wait for GCed page writeback via META_MAPPING */
139         f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
140
141         /*
142          * check to see if the page is mapped already (no holes)
143          */
144         if (PageMappedToDisk(page))
145                 goto out_sem;
146
147         /* page is wholly or partially inside EOF */
148         if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
149                                                 i_size_read(inode)) {
150                 loff_t offset;
151
152                 offset = i_size_read(inode) & ~PAGE_MASK;
153                 zero_user_segment(page, offset, PAGE_SIZE);
154         }
155         set_page_dirty(page);
156         if (!PageUptodate(page))
157                 SetPageUptodate(page);
158
159         f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);
160         f2fs_update_time(sbi, REQ_TIME);
161
162         trace_f2fs_vm_page_mkwrite(page, DATA);
163 out_sem:
164         up_read(&F2FS_I(inode)->i_mmap_sem);
165
166         sb_end_pagefault(inode->i_sb);
167 err:
168         return block_page_mkwrite_return(err);
169 }
170
171 static const struct vm_operations_struct f2fs_file_vm_ops = {
172         .fault          = f2fs_filemap_fault,
173         .map_pages      = filemap_map_pages,
174         .page_mkwrite   = f2fs_vm_page_mkwrite,
175 };
176
177 static int get_parent_ino(struct inode *inode, nid_t *pino)
178 {
179         struct dentry *dentry;
180
181         /*
182          * Make sure to get the non-deleted alias.  The alias associated with
183          * the open file descriptor being fsync()'ed may be deleted already.
184          */
185         dentry = d_find_alias(inode);
186         if (!dentry)
187                 return 0;
188
189         *pino = parent_ino(dentry);
190         dput(dentry);
191         return 1;
192 }
193
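/*
 * Decide whether this fsync must trigger a full checkpoint instead of
 * relying on roll-forward recovery; returns the reason, or CP_NO_NEEDED
 * when writing the node chain is enough.
 */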
194 static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
195 {
196         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
197         enum cp_reason_type cp_reason = CP_NO_NEEDED;
198
199         if (!S_ISREG(inode->i_mode))
200                 cp_reason = CP_NON_REGULAR;
201         else if (f2fs_compressed_file(inode))
202                 cp_reason = CP_COMPRESSED;
203         else if (inode->i_nlink != 1)
204                 cp_reason = CP_HARDLINK;
205         else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
206                 cp_reason = CP_SB_NEED_CP;
207         else if (file_wrong_pino(inode))
208                 cp_reason = CP_WRONG_PINO;
209         else if (!f2fs_space_for_roll_forward(sbi))
210                 cp_reason = CP_NO_SPC_ROLL;
211         else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
212                 cp_reason = CP_NODE_NEED_CP;
213         else if (test_opt(sbi, FASTBOOT))
214                 cp_reason = CP_FASTBOOT_MODE;
215         else if (F2FS_OPTION(sbi).active_logs == 2)
216                 cp_reason = CP_SPEC_LOG_NUM;
217         else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
218                 f2fs_need_dentry_mark(sbi, inode->i_ino) &&
219                 f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
220                                                         TRANS_DIR_INO))
221                 cp_reason = CP_RECOVER_DIR;
222
223         return cp_reason;
224 }
225
226 static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
227 {
228         struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
229         bool ret = false;
230         /* but we need to detect whether any inode updates are pending */
231         if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino))
232                 ret = true;
233         f2fs_put_page(i, 0);
234         return ret;
235 }
236
237 static void try_to_fix_pino(struct inode *inode)
238 {
239         struct f2fs_inode_info *fi = F2FS_I(inode);
240         nid_t pino;
241
242         down_write(&fi->i_sem);
243         if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
244                         get_parent_ino(inode, &pino)) {
245                 f2fs_i_pino_write(inode, pino);
246                 file_got_pino(inode);
247         }
248         up_write(&fi->i_sem);
249 }
250
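/*
 * Core fsync path: write back dirty data first, then either issue a
 * full checkpoint (when need_do_checkpoint() says roll-forward recovery
 * cannot cover this inode) or persist the fsync node chain and, if
 * required, a flush command.
 */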
251 static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
252                                                 int datasync, bool atomic)
253 {
254         struct inode *inode = file->f_mapping->host;
255         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
256         nid_t ino = inode->i_ino;
257         int ret = 0;
258         enum cp_reason_type cp_reason = 0;
259         struct writeback_control wbc = {
260                 .sync_mode = WB_SYNC_ALL,
261                 .nr_to_write = LONG_MAX,
262                 .for_reclaim = 0,
263         };
264         unsigned int seq_id = 0;
265
266         if (unlikely(f2fs_readonly(inode->i_sb)))
267                 return 0;
268
269         trace_f2fs_sync_file_enter(inode);
270
271         if (S_ISDIR(inode->i_mode))
272                 goto go_write;
273
274         /* if fdatasync is triggered, let's do in-place-update */
275         if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
276                 set_inode_flag(inode, FI_NEED_IPU);
277         ret = file_write_and_wait_range(file, start, end);
278         clear_inode_flag(inode, FI_NEED_IPU);
279
280         if (ret || is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
281                 trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
282                 return ret;
283         }
284
285         /* if the inode is dirty, make it recoverable all the time */
286         if (!f2fs_skip_inode_update(inode, datasync)) {
287                 f2fs_write_inode(inode, NULL);
288                 goto go_write;
289         }
290
291         /*
292          * if there is no written data, don't waste time writing recovery info.
293          */
294         if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
295                         !f2fs_exist_written_data(sbi, ino, APPEND_INO)) {
296
297                 /* it may call write_inode just prior to fsync */
298                 if (need_inode_page_update(sbi, ino))
299                         goto go_write;
300
301                 if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
302                                 f2fs_exist_written_data(sbi, ino, UPDATE_INO))
303                         goto flush_out;
304                 goto out;
305         } else {
306                 /*
307                  * In the OPU case, during fsync(), the node can be persisted
308                  * before the data when the lower device doesn't support write
309                  * barriers, resulting in data corruption after SPO.
310                  * So for strict fsync mode, force atomic write semantics to
311                  * keep the write order between data/node and the last node,
312                  * avoiding potential data corruption.
313                  */
314                 if (F2FS_OPTION(sbi).fsync_mode ==
315                                 FSYNC_MODE_STRICT && !atomic)
316                         atomic = true;
317         }
318 go_write:
319         /*
320          * Both fdatasync() and fsync() must be recoverable after a
321          * sudden power-off.
322          */
323         down_read(&F2FS_I(inode)->i_sem);
324         cp_reason = need_do_checkpoint(inode);
325         up_read(&F2FS_I(inode)->i_sem);
326
327         if (cp_reason) {
328                 /* all the dirty node pages should be flushed for POR */
329                 ret = f2fs_sync_fs(inode->i_sb, 1);
330
331                 /*
332                  * We've secured consistency through sync_fs. The following pino
333                  * will be used only for fsynced inodes after checkpoint.
334                  */
335                 try_to_fix_pino(inode);
336                 clear_inode_flag(inode, FI_APPEND_WRITE);
337                 clear_inode_flag(inode, FI_UPDATE_WRITE);
338                 goto out;
339         }
340 sync_nodes:
341         atomic_inc(&sbi->wb_sync_req[NODE]);
342         ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
343         atomic_dec(&sbi->wb_sync_req[NODE]);
344         if (ret)
345                 goto out;
346
347         /* if cp_error was set, avoid an infinite loop */
348         if (unlikely(f2fs_cp_error(sbi))) {
349                 ret = -EIO;
350                 goto out;
351         }
352
353         if (f2fs_need_inode_block_update(sbi, ino)) {
354                 f2fs_mark_inode_dirty_sync(inode, true);
355                 f2fs_write_inode(inode, NULL);
356                 goto sync_nodes;
357         }
358
359         /*
360          * If it's an atomic write, write ordering is already guaranteed, so
361          * here we don't need to wait for node write completion: the node
362          * chain we use serializes node blocks. If any node write is
363          * reordered, we simply see a broken chain and stop roll-forward
364          * recovery, meaning we recover either all or none of the node
365          * blocks up to the fsync mark.
366          */
367         if (!atomic) {
368                 ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
369                 if (ret)
370                         goto out;
371         }
372
373         /* once recovery info is written, we don't need to track this */
374         f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
375         clear_inode_flag(inode, FI_APPEND_WRITE);
376 flush_out:
377         if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
378                 ret = f2fs_issue_flush(sbi, inode->i_ino);
379         if (!ret) {
380                 f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
381                 clear_inode_flag(inode, FI_UPDATE_WRITE);
382                 f2fs_remove_ino_entry(sbi, ino, FLUSH_INO);
383         }
384         f2fs_update_time(sbi, REQ_TIME);
385 out:
386         trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
387         return ret;
388 }
389
390 int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
391 {
392         if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
393                 return -EIO;
394         return f2fs_do_sync_file(file, start, end, datasync, false);
395 }
396
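/*
 * Helper for SEEK_DATA/SEEK_HOLE: decide whether the block at @index
 * terminates the scan.  A preallocated block (NEW_ADDR) counts as data
 * only if its page cache page is dirty.
 */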
397 static bool __found_offset(struct address_space *mapping, block_t blkaddr,
398                                 pgoff_t index, int whence)
399 {
400         switch (whence) {
401         case SEEK_DATA:
402                 if (__is_valid_data_blkaddr(blkaddr))
403                         return true;
404                 if (blkaddr == NEW_ADDR &&
405                     xa_get_mark(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY))
406                         return true;
407                 break;
408         case SEEK_HOLE:
409                 if (blkaddr == NULL_ADDR)
410                         return true;
411                 break;
412         }
413         return false;
414 }
415
416 static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
417 {
418         struct inode *inode = file->f_mapping->host;
419         loff_t maxbytes = inode->i_sb->s_maxbytes;
420         struct dnode_of_data dn;
421         pgoff_t pgofs, end_offset;
422         loff_t data_ofs = offset;
423         loff_t isize;
424         int err = 0;
425
426         inode_lock(inode);
427
428         isize = i_size_read(inode);
429         if (offset >= isize)
430                 goto fail;
431
432         /* handle inline data case */
433         if (f2fs_has_inline_data(inode)) {
434                 if (whence == SEEK_HOLE) {
435                         data_ofs = isize;
436                         goto found;
437                 } else if (whence == SEEK_DATA) {
438                         data_ofs = offset;
439                         goto found;
440                 }
441         }
442
443         pgofs = (pgoff_t)(offset >> PAGE_SHIFT);
444
445         for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
446                 set_new_dnode(&dn, inode, NULL, NULL, 0);
447                 err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
448                 if (err && err != -ENOENT) {
449                         goto fail;
450                 } else if (err == -ENOENT) {
451                         /* direct node does not exist */
452                         if (whence == SEEK_DATA) {
453                                 pgofs = f2fs_get_next_page_offset(&dn, pgofs);
454                                 continue;
455                         } else {
456                                 goto found;
457                         }
458                 }
459
460                 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
461
462                 /* find data/hole in dnode block */
463                 for (; dn.ofs_in_node < end_offset;
464                                 dn.ofs_in_node++, pgofs++,
465                                 data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
466                         block_t blkaddr;
467
468                         blkaddr = f2fs_data_blkaddr(&dn);
469
470                         if (__is_valid_data_blkaddr(blkaddr) &&
471                                 !f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
472                                         blkaddr, DATA_GENERIC_ENHANCE)) {
473                                 f2fs_put_dnode(&dn);
474                                 goto fail;
475                         }
476
477                         if (__found_offset(file->f_mapping, blkaddr,
478                                                         pgofs, whence)) {
479                                 f2fs_put_dnode(&dn);
480                                 goto found;
481                         }
482                 }
483                 f2fs_put_dnode(&dn);
484         }
485
486         if (whence == SEEK_DATA)
487                 goto fail;
488 found:
489         if (whence == SEEK_HOLE && data_ofs > isize)
490                 data_ofs = isize;
491         inode_unlock(inode);
492         return vfs_setpos(file, data_ofs, maxbytes);
493 fail:
494         inode_unlock(inode);
495         return -ENXIO;
496 }
497
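/*
 * llseek: compressed inodes have their own size limit, so recompute
 * maxbytes from max_file_blocks(); SEEK_DATA/SEEK_HOLE are resolved by
 * walking dnode blocks in f2fs_seek_block().
 */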
498 static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
499 {
500         struct inode *inode = file->f_mapping->host;
501         loff_t maxbytes = inode->i_sb->s_maxbytes;
502
503         if (f2fs_compressed_file(inode))
504                 maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;
505
506         switch (whence) {
507         case SEEK_SET:
508         case SEEK_CUR:
509         case SEEK_END:
510                 return generic_file_llseek_size(file, offset, whence,
511                                                 maxbytes, i_size_read(inode));
512         case SEEK_DATA:
513         case SEEK_HOLE:
514                 if (offset < 0)
515                         return -ENXIO;
516                 return f2fs_seek_block(file, offset, whence);
517         }
518
519         return -EINVAL;
520 }
521
522 static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
523 {
524         struct inode *inode = file_inode(file);
525
526         if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
527                 return -EIO;
528
529         if (!f2fs_is_compress_backend_ready(inode))
530                 return -EOPNOTSUPP;
531
532         file_accessed(file);
533         vma->vm_ops = &f2fs_file_vm_ops;
534         set_inode_flag(inode, FI_MMAP_FILE);
535         return 0;
536 }
537
538 static int f2fs_file_open(struct inode *inode, struct file *filp)
539 {
540         int err = fscrypt_file_open(inode, filp);
541
542         if (err)
543                 return err;
544
545         if (!f2fs_is_compress_backend_ready(inode))
546                 return -EOPNOTSUPP;
547
548         err = fsverity_file_open(inode, filp);
549         if (err)
550                 return err;
551
552         filp->f_mode |= FMODE_NOWAIT;
553
554         return dquot_file_open(inode, filp);
555 }
556
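/*
 * Free @count block addresses in the dnode starting at dn->ofs_in_node:
 * clear each valid blkaddr and invalidate its block, keeping per-cluster
 * accounting so i_compr_blocks stays consistent for compressed files.
 * COMPRESS_ADDR slots of an already-released file do not count toward
 * the freed-block total.
 */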
557 void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
558 {
559         struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
560         struct f2fs_node *raw_node;
561         int nr_free = 0, ofs = dn->ofs_in_node, len = count;
562         __le32 *addr;
563         int base = 0;
564         bool compressed_cluster = false;
565         int cluster_index = 0, valid_blocks = 0;
566         int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
567         bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);
568
569         if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
570                 base = get_extra_isize(dn->inode);
571
572         raw_node = F2FS_NODE(dn->node_page);
573         addr = blkaddr_in_node(raw_node) + base + ofs;
574
575         /* Assumption: truncation starts at a cluster boundary */
576         for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
577                 block_t blkaddr = le32_to_cpu(*addr);
578
579                 if (f2fs_compressed_file(dn->inode) &&
580                                         !(cluster_index & (cluster_size - 1))) {
581                         if (compressed_cluster)
582                                 f2fs_i_compr_blocks_update(dn->inode,
583                                                         valid_blocks, false);
584                         compressed_cluster = (blkaddr == COMPRESS_ADDR);
585                         valid_blocks = 0;
586                 }
587
588                 if (blkaddr == NULL_ADDR)
589                         continue;
590
591                 dn->data_blkaddr = NULL_ADDR;
592                 f2fs_set_data_blkaddr(dn);
593
594                 if (__is_valid_data_blkaddr(blkaddr)) {
595                         if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
596                                         DATA_GENERIC_ENHANCE))
597                                 continue;
598                         if (compressed_cluster)
599                                 valid_blocks++;
600                 }
601
602                 if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
603                         clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
604
605                 f2fs_invalidate_blocks(sbi, blkaddr);
606
607                 if (!released || blkaddr != COMPRESS_ADDR)
608                         nr_free++;
609         }
610
611         if (compressed_cluster)
612                 f2fs_i_compr_blocks_update(dn->inode, valid_blocks, false);
613
614         if (nr_free) {
615                 pgoff_t fofs;
616                 /*
617                  * once we invalidate a valid blkaddr in [ofs, ofs + count],
618                  * we invalidate all blkaddrs in the whole range.
619                  */
620                 fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
621                                                         dn->inode) + ofs;
622                 f2fs_update_extent_cache_range(dn, fofs, 0, len);
623                 dec_valid_block_count(sbi, dn->inode, nr_free);
624         }
625         dn->ofs_in_node = ofs;
626
627         f2fs_update_time(sbi, REQ_TIME);
628         trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
629                                          dn->ofs_in_node, nr_free);
630 }
631
632 void f2fs_truncate_data_blocks(struct dnode_of_data *dn)
633 {
634         f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
635 }
636
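/*
 * Zero the tail of the page that now contains EOF.  With @cache_only
 * (set when inline data was just truncated in the inode page), only an
 * already-cached, uptodate page is zeroed; otherwise the page is read
 * in and marked dirty so the zeroed tail reaches disk.
 */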
637 static int truncate_partial_data_page(struct inode *inode, u64 from,
638                                                                 bool cache_only)
639 {
640         loff_t offset = from & (PAGE_SIZE - 1);
641         pgoff_t index = from >> PAGE_SHIFT;
642         struct address_space *mapping = inode->i_mapping;
643         struct page *page;
644
645         if (!offset && !cache_only)
646                 return 0;
647
648         if (cache_only) {
649                 page = find_lock_page(mapping, index);
650                 if (page && PageUptodate(page))
651                         goto truncate_out;
652                 f2fs_put_page(page, 1);
653                 return 0;
654         }
655
656         page = f2fs_get_lock_data_page(inode, index, true);
657         if (IS_ERR(page))
658                 return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
659 truncate_out:
660         f2fs_wait_on_page_writeback(page, DATA, true, true);
661         zero_user(page, offset, PAGE_SIZE - offset);
662
663         /* An encrypted inode should have a key and truncate the last page. */
664         f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));
665         if (!cache_only)
666                 set_page_dirty(page);
667         f2fs_put_page(page, 1);
668         return 0;
669 }
670
671 int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock)
672 {
673         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
674         struct dnode_of_data dn;
675         pgoff_t free_from;
676         int count = 0, err = 0;
677         struct page *ipage;
678         bool truncate_page = false;
679
680         trace_f2fs_truncate_blocks_enter(inode, from);
681
682         free_from = (pgoff_t)F2FS_BLK_ALIGN(from);
683
684         if (free_from >= max_file_blocks(inode))
685                 goto free_partial;
686
687         if (lock)
688                 f2fs_lock_op(sbi);
689
690         ipage = f2fs_get_node_page(sbi, inode->i_ino);
691         if (IS_ERR(ipage)) {
692                 err = PTR_ERR(ipage);
693                 goto out;
694         }
695
696         if (f2fs_has_inline_data(inode)) {
697                 f2fs_truncate_inline_inode(inode, ipage, from);
698                 f2fs_put_page(ipage, 1);
699                 truncate_page = true;
700                 goto out;
701         }
702
703         set_new_dnode(&dn, inode, ipage, NULL, 0);
704         err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
705         if (err) {
706                 if (err == -ENOENT)
707                         goto free_next;
708                 goto out;
709         }
710
711         count = ADDRS_PER_PAGE(dn.node_page, inode);
712
713         count -= dn.ofs_in_node;
714         f2fs_bug_on(sbi, count < 0);
715
716         if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
717                 f2fs_truncate_data_blocks_range(&dn, count);
718                 free_from += count;
719         }
720
721         f2fs_put_dnode(&dn);
722 free_next:
723         err = f2fs_truncate_inode_blocks(inode, free_from);
724 out:
725         if (lock)
726                 f2fs_unlock_op(sbi);
727 free_partial:
728         /* lastly zero out the first data page */
729         if (!err)
730                 err = truncate_partial_data_page(inode, from, truncate_page);
731
732         trace_f2fs_truncate_blocks_exit(inode, err);
733         return err;
734 }
735
736 int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
737 {
738         u64 free_from = from;
739         int err;
740
741 #ifdef CONFIG_F2FS_FS_COMPRESSION
742         /*
743          * for compressed files, only cluster-size-aligned
744          * truncation is supported.
745          */
746         if (f2fs_compressed_file(inode))
747                 free_from = round_up(from,
748                                 F2FS_I(inode)->i_cluster_size << PAGE_SHIFT);
749 #endif
750
751         err = f2fs_do_truncate_blocks(inode, free_from, lock);
752         if (err)
753                 return err;
754
755 #ifdef CONFIG_F2FS_FS_COMPRESSION
756         /*
757          * For a compressed file, don't allow direct writes after its compress
758          * blocks are released, but do allow them after truncating to zero.
759          */
760         if (f2fs_compressed_file(inode) && !free_from
761                         && is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
762                 clear_inode_flag(inode, FI_COMPRESS_RELEASED);
763
764         if (from != free_from) {
765                 err = f2fs_truncate_partial_cluster(inode, from, lock);
766                 if (err)
767                         return err;
768         }
769 #endif
770
771         return 0;
772 }
773
774 int f2fs_truncate(struct inode *inode)
775 {
776         int err;
777
778         if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
779                 return -EIO;
780
781         if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
782                                 S_ISLNK(inode->i_mode)))
783                 return 0;
784
785         trace_f2fs_truncate(inode);
786
787         if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
788                 f2fs_show_injection_info(F2FS_I_SB(inode), FAULT_TRUNCATE);
789                 return -EIO;
790         }
791
792         err = dquot_initialize(inode);
793         if (err)
794                 return err;
795
796         /* we should check inline_data size */
797         if (!f2fs_may_inline_data(inode)) {
798                 err = f2fs_convert_inline_inode(inode);
799                 if (err)
800                         return err;
801         }
802
803         err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
804         if (err)
805                 return err;
806
807         inode->i_mtime = inode->i_ctime = current_time(inode);
808         f2fs_mark_inode_dirty_sync(inode, false);
809         return 0;
810 }
811
812 int f2fs_getattr(struct user_namespace *mnt_userns, const struct path *path,
813                  struct kstat *stat, u32 request_mask, unsigned int query_flags)
814 {
815         struct inode *inode = d_inode(path->dentry);
816         struct f2fs_inode_info *fi = F2FS_I(inode);
817         struct f2fs_inode *ri;
818         unsigned int flags;
819
820         if (f2fs_has_extra_attr(inode) &&
821                         f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
822                         F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
823                 stat->result_mask |= STATX_BTIME;
824                 stat->btime.tv_sec = fi->i_crtime.tv_sec;
825                 stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
826         }
827
828         flags = fi->i_flags;
829         if (flags & F2FS_COMPR_FL)
830                 stat->attributes |= STATX_ATTR_COMPRESSED;
831         if (flags & F2FS_APPEND_FL)
832                 stat->attributes |= STATX_ATTR_APPEND;
833         if (IS_ENCRYPTED(inode))
834                 stat->attributes |= STATX_ATTR_ENCRYPTED;
835         if (flags & F2FS_IMMUTABLE_FL)
836                 stat->attributes |= STATX_ATTR_IMMUTABLE;
837         if (flags & F2FS_NODUMP_FL)
838                 stat->attributes |= STATX_ATTR_NODUMP;
839         if (IS_VERITY(inode))
840                 stat->attributes |= STATX_ATTR_VERITY;
841
842         stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
843                                   STATX_ATTR_APPEND |
844                                   STATX_ATTR_ENCRYPTED |
845                                   STATX_ATTR_IMMUTABLE |
846                                   STATX_ATTR_NODUMP |
847                                   STATX_ATTR_VERITY);
848
849         generic_fillattr(&init_user_ns, inode, stat);
850
851         /* we need to show initial sectors used for inline_data/dentries */
852         if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
853                                         f2fs_has_inline_dentry(inode))
854                 stat->blocks += (stat->size + 511) >> 9;
855
856         return 0;
857 }
858
859 #ifdef CONFIG_F2FS_FS_POSIX_ACL
860 static void __setattr_copy(struct user_namespace *mnt_userns,
861                            struct inode *inode, const struct iattr *attr)
862 {
863         unsigned int ia_valid = attr->ia_valid;
864
865         if (ia_valid & ATTR_UID)
866                 inode->i_uid = attr->ia_uid;
867         if (ia_valid & ATTR_GID)
868                 inode->i_gid = attr->ia_gid;
869         if (ia_valid & ATTR_ATIME)
870                 inode->i_atime = attr->ia_atime;
871         if (ia_valid & ATTR_MTIME)
872                 inode->i_mtime = attr->ia_mtime;
873         if (ia_valid & ATTR_CTIME)
874                 inode->i_ctime = attr->ia_ctime;
875         if (ia_valid & ATTR_MODE) {
876                 umode_t mode = attr->ia_mode;
877                 kgid_t kgid = i_gid_into_mnt(mnt_userns, inode);
878
879                 if (!in_group_p(kgid) && !capable_wrt_inode_uidgid(mnt_userns, inode, CAP_FSETID))
880                         mode &= ~S_ISGID;
881                 set_acl_inode(inode, mode);
882         }
883 }
884 #else
885 #define __setattr_copy setattr_copy
886 #endif
887
888 int f2fs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
889                  struct iattr *attr)
890 {
891         struct inode *inode = d_inode(dentry);
892         int err;
893
894         if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
895                 return -EIO;
896
897         if (unlikely(IS_IMMUTABLE(inode)))
898                 return -EPERM;
899
900         if (unlikely(IS_APPEND(inode) &&
901                         (attr->ia_valid & (ATTR_MODE | ATTR_UID |
902                                   ATTR_GID | ATTR_TIMES_SET))))
903                 return -EPERM;
904
905         if ((attr->ia_valid & ATTR_SIZE) &&
906                 !f2fs_is_compress_backend_ready(inode))
907                 return -EOPNOTSUPP;
908
909         err = setattr_prepare(&init_user_ns, dentry, attr);
910         if (err)
911                 return err;
912
913         err = fscrypt_prepare_setattr(dentry, attr);
914         if (err)
915                 return err;
916
917         err = fsverity_prepare_setattr(dentry, attr);
918         if (err)
919                 return err;
920
921         if (is_quota_modification(inode, attr)) {
922                 err = dquot_initialize(inode);
923                 if (err)
924                         return err;
925         }
926         if ((attr->ia_valid & ATTR_UID &&
927                 !uid_eq(attr->ia_uid, inode->i_uid)) ||
928                 (attr->ia_valid & ATTR_GID &&
929                 !gid_eq(attr->ia_gid, inode->i_gid))) {
930                 f2fs_lock_op(F2FS_I_SB(inode));
931                 err = dquot_transfer(inode, attr);
932                 if (err) {
933                         set_sbi_flag(F2FS_I_SB(inode),
934                                         SBI_QUOTA_NEED_REPAIR);
935                         f2fs_unlock_op(F2FS_I_SB(inode));
936                         return err;
937                 }
938                 /*
939                  * update uid/gid under lock_op(), so that dquot and inode can
940                  * be updated atomically.
941                  */
942                 if (attr->ia_valid & ATTR_UID)
943                         inode->i_uid = attr->ia_uid;
944                 if (attr->ia_valid & ATTR_GID)
945                         inode->i_gid = attr->ia_gid;
946                 f2fs_mark_inode_dirty_sync(inode, true);
947                 f2fs_unlock_op(F2FS_I_SB(inode));
948         }
949
950         if (attr->ia_valid & ATTR_SIZE) {
951                 loff_t old_size = i_size_read(inode);
952
953                 if (attr->ia_size > MAX_INLINE_DATA(inode)) {
954                         /*
955                          * convert the inline inode before i_size_write() so an
956                          * inode carrying the inline flag never exceeds the inline_data size.
957                          */
958                         err = f2fs_convert_inline_inode(inode);
959                         if (err)
960                                 return err;
961                 }
962
963                 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
964                 down_write(&F2FS_I(inode)->i_mmap_sem);
965
966                 truncate_setsize(inode, attr->ia_size);
967
968                 if (attr->ia_size <= old_size)
969                         err = f2fs_truncate(inode);
970                 /*
971                  * do not trim all blocks after i_size if target size is
972                  * larger than i_size.
973                  */
974                 up_write(&F2FS_I(inode)->i_mmap_sem);
975                 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
976                 if (err)
977                         return err;
978
979                 spin_lock(&F2FS_I(inode)->i_size_lock);
980                 inode->i_mtime = inode->i_ctime = current_time(inode);
981                 F2FS_I(inode)->last_disk_size = i_size_read(inode);
982                 spin_unlock(&F2FS_I(inode)->i_size_lock);
983         }
984
985         __setattr_copy(&init_user_ns, inode, attr);
986
987         if (attr->ia_valid & ATTR_MODE) {
988                 err = posix_acl_chmod(&init_user_ns, inode, f2fs_get_inode_mode(inode));
989
990                 if (is_inode_flag_set(inode, FI_ACL_MODE)) {
991                         if (!err)
992                                 inode->i_mode = F2FS_I(inode)->i_acl_mode;
993                         clear_inode_flag(inode, FI_ACL_MODE);
994                 }
995         }
996
997         /* file size may have changed here */
998         f2fs_mark_inode_dirty_sync(inode, true);
999
1000         /* inode change will produce dirty node pages flushed by checkpoint */
1001         f2fs_balance_fs(F2FS_I_SB(inode), true);
1002
1003         return err;
1004 }
1005
1006 const struct inode_operations f2fs_file_inode_operations = {
1007         .getattr        = f2fs_getattr,
1008         .setattr        = f2fs_setattr,
1009         .get_acl        = f2fs_get_acl,
1010         .set_acl        = f2fs_set_acl,
1011         .listxattr      = f2fs_listxattr,
1012         .fiemap         = f2fs_fiemap,
1013         .fileattr_get   = f2fs_fileattr_get,
1014         .fileattr_set   = f2fs_fileattr_set,
1015 };
1016
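/*
 * Zero @len bytes at @start within block @index: grab (allocating if
 * needed) the data page under f2fs_lock_op(), zero the range, and mark
 * the page dirty.  Used for the partial blocks of punch_hole/zero_range.
 */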
1017 static int fill_zero(struct inode *inode, pgoff_t index,
1018                                         loff_t start, loff_t len)
1019 {
1020         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1021         struct page *page;
1022
1023         if (!len)
1024                 return 0;
1025
1026         f2fs_balance_fs(sbi, true);
1027
1028         f2fs_lock_op(sbi);
1029         page = f2fs_get_new_data_page(inode, NULL, index, false);
1030         f2fs_unlock_op(sbi);
1031
1032         if (IS_ERR(page))
1033                 return PTR_ERR(page);
1034
1035         f2fs_wait_on_page_writeback(page, DATA, true, true);
1036         zero_user(page, start, len);
1037         set_page_dirty(page);
1038         f2fs_put_page(page, 1);
1039         return 0;
1040 }
1041
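/*
 * Free all blocks in [pg_start, pg_end): walk the dnodes covering the
 * range, truncating each in turn; -ENOENT just skips past a missing
 * dnode to the next one.
 */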
1042 int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
1043 {
1044         int err;
1045
1046         while (pg_start < pg_end) {
1047                 struct dnode_of_data dn;
1048                 pgoff_t end_offset, count;
1049
1050                 set_new_dnode(&dn, inode, NULL, NULL, 0);
1051                 err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
1052                 if (err) {
1053                         if (err == -ENOENT) {
1054                                 pg_start = f2fs_get_next_page_offset(&dn,
1055                                                                 pg_start);
1056                                 continue;
1057                         }
1058                         return err;
1059                 }
1060
1061                 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1062                 count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);
1063
1064                 f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);
1065
1066                 f2fs_truncate_data_blocks_range(&dn, count);
1067                 f2fs_put_dnode(&dn);
1068
1069                 pg_start += count;
1070         }
1071         return 0;
1072 }
1073
1074 static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
1075 {
1076         pgoff_t pg_start, pg_end;
1077         loff_t off_start, off_end;
1078         int ret;
1079
1080         ret = f2fs_convert_inline_inode(inode);
1081         if (ret)
1082                 return ret;
1083
1084         pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
1085         pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
1086
1087         off_start = offset & (PAGE_SIZE - 1);
1088         off_end = (offset + len) & (PAGE_SIZE - 1);
1089
1090         if (pg_start == pg_end) {
1091                 ret = fill_zero(inode, pg_start, off_start,
1092                                                 off_end - off_start);
1093                 if (ret)
1094                         return ret;
1095         } else {
1096                 if (off_start) {
1097                         ret = fill_zero(inode, pg_start++, off_start,
1098                                                 PAGE_SIZE - off_start);
1099                         if (ret)
1100                                 return ret;
1101                 }
1102                 if (off_end) {
1103                         ret = fill_zero(inode, pg_end, 0, off_end);
1104                         if (ret)
1105                                 return ret;
1106                 }
1107
1108                 if (pg_start < pg_end) {
1109                         loff_t blk_start, blk_end;
1110                         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1111
1112                         f2fs_balance_fs(sbi, true);
1113
1114                         blk_start = (loff_t)pg_start << PAGE_SHIFT;
1115                         blk_end = (loff_t)pg_end << PAGE_SHIFT;
1116
1117                         down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1118                         down_write(&F2FS_I(inode)->i_mmap_sem);
1119
1120                         truncate_pagecache_range(inode, blk_start, blk_end - 1);
1121
1122                         f2fs_lock_op(sbi);
1123                         ret = f2fs_truncate_hole(inode, pg_start, pg_end);
1124                         f2fs_unlock_op(sbi);
1125
1126                         up_write(&F2FS_I(inode)->i_mmap_sem);
1127                         up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1128                 }
1129         }
1130
1131         return ret;
1132 }
1133
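/*
 * First phase of a block exchange (used by collapse range): record the
 * block address of every block in [off, off + len) into @blkaddr.
 * Blocks that are not yet checkpointed are detached (set to NULL_ADDR)
 * and flagged in @do_replace so __clone_blkaddrs() can splice them into
 * the destination instead of copying, and __roll_back_blkaddrs() can
 * restore them on failure.
 */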
1134 static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
1135                                 int *do_replace, pgoff_t off, pgoff_t len)
1136 {
1137         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1138         struct dnode_of_data dn;
1139         int ret, done, i;
1140
1141 next_dnode:
1142         set_new_dnode(&dn, inode, NULL, NULL, 0);
1143         ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
1144         if (ret && ret != -ENOENT) {
1145                 return ret;
1146         } else if (ret == -ENOENT) {
1147                 if (dn.max_level == 0)
1148                         return -ENOENT;
1149                 done = min((pgoff_t)ADDRS_PER_BLOCK(inode) -
1150                                                 dn.ofs_in_node, len);
1151                 blkaddr += done;
1152                 do_replace += done;
1153                 goto next;
1154         }
1155
1156         done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
1157                                                         dn.ofs_in_node, len);
1158         for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
1159                 *blkaddr = f2fs_data_blkaddr(&dn);
1160
1161                 if (__is_valid_data_blkaddr(*blkaddr) &&
1162                         !f2fs_is_valid_blkaddr(sbi, *blkaddr,
1163                                         DATA_GENERIC_ENHANCE)) {
1164                         f2fs_put_dnode(&dn);
1165                         return -EFSCORRUPTED;
1166                 }
1167
1168                 if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {
1169
1170                         if (f2fs_lfs_mode(sbi)) {
1171                                 f2fs_put_dnode(&dn);
1172                                 return -EOPNOTSUPP;
1173                         }
1174
1175                         /* do not invalidate this block address */
1176                         f2fs_update_data_blkaddr(&dn, NULL_ADDR);
1177                         *do_replace = 1;
1178                 }
1179         }
1180         f2fs_put_dnode(&dn);
1181 next:
1182         len -= done;
1183         off += done;
1184         if (len)
1185                 goto next_dnode;
1186         return 0;
1187 }
1188
1189 static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
1190                                 int *do_replace, pgoff_t off, int len)
1191 {
1192         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1193         struct dnode_of_data dn;
1194         int ret, i;
1195
1196         for (i = 0; i < len; i++, do_replace++, blkaddr++) {
1197                 if (*do_replace == 0)
1198                         continue;
1199
1200                 set_new_dnode(&dn, inode, NULL, NULL, 0);
1201                 ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
1202                 if (ret) {
1203                         dec_valid_block_count(sbi, inode, 1);
1204                         f2fs_invalidate_blocks(sbi, *blkaddr);
1205                 } else {
1206                         f2fs_update_data_blkaddr(&dn, *blkaddr);
1207                 }
1208                 f2fs_put_dnode(&dn);
1209         }
1210         return 0;
1211 }
1212
1213 static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
1214                         block_t *blkaddr, int *do_replace,
1215                         pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
1216 {
1217         struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
1218         pgoff_t i = 0;
1219         int ret;
1220
1221         while (i < len) {
1222                 if (blkaddr[i] == NULL_ADDR && !full) {
1223                         i++;
1224                         continue;
1225                 }
1226
1227                 if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
1228                         struct dnode_of_data dn;
1229                         struct node_info ni;
1230                         size_t new_size;
1231                         pgoff_t ilen;
1232
1233                         set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
1234                         ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
1235                         if (ret)
1236                                 return ret;
1237
1238                         ret = f2fs_get_node_info(sbi, dn.nid, &ni);
1239                         if (ret) {
1240                                 f2fs_put_dnode(&dn);
1241                                 return ret;
1242                         }
1243
1244                         ilen = min((pgoff_t)
1245                                 ADDRS_PER_PAGE(dn.node_page, dst_inode) -
1246                                                 dn.ofs_in_node, len - i);
1247                         do {
1248                                 dn.data_blkaddr = f2fs_data_blkaddr(&dn);
1249                                 f2fs_truncate_data_blocks_range(&dn, 1);
1250
1251                                 if (do_replace[i]) {
1252                                         f2fs_i_blocks_write(src_inode,
1253                                                         1, false, false);
1254                                         f2fs_i_blocks_write(dst_inode,
1255                                                         1, true, false);
1256                                         f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
1257                                         blkaddr[i], ni.version, true, false);
1258
1259                                         do_replace[i] = 0;
1260                                 }
1261                                 dn.ofs_in_node++;
1262                                 i++;
1263                                 new_size = (loff_t)(dst + i) << PAGE_SHIFT;
1264                                 if (dst_inode->i_size < new_size)
1265                                         f2fs_i_size_write(dst_inode, new_size);
1266                         } while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));
1267
1268                         f2fs_put_dnode(&dn);
1269                 } else {
1270                         struct page *psrc, *pdst;
1271
1272                         psrc = f2fs_get_lock_data_page(src_inode,
1273                                                         src + i, true);
1274                         if (IS_ERR(psrc))
1275                                 return PTR_ERR(psrc);
1276                         pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i,
1277                                                                 true);
1278                         if (IS_ERR(pdst)) {
1279                                 f2fs_put_page(psrc, 1);
1280                                 return PTR_ERR(pdst);
1281                         }
1282                         f2fs_copy_page(psrc, pdst);
1283                         set_page_dirty(pdst);
1284                         f2fs_put_page(pdst, 1);
1285                         f2fs_put_page(psrc, 1);
1286
1287                         ret = f2fs_truncate_hole(src_inode,
1288                                                 src + i, src + i + 1);
1289                         if (ret)
1290                                 return ret;
1291                         i++;
1292                 }
1293         }
1294         return 0;
1295 }
1296
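/*
 * Move @len blocks from @src to @dst (the two inodes may be the same).
 * The work is chunked at 4 * ADDRS_PER_BLOCK addresses per pass to
 * bound the temporary blkaddr/do_replace arrays; on failure, blocks
 * already detached in the current chunk are rolled back.
 */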
1297 static int __exchange_data_block(struct inode *src_inode,
1298                         struct inode *dst_inode, pgoff_t src, pgoff_t dst,
1299                         pgoff_t len, bool full)
1300 {
1301         block_t *src_blkaddr;
1302         int *do_replace;
1303         pgoff_t olen;
1304         int ret;
1305
1306         while (len) {
1307                 olen = min((pgoff_t)4 * ADDRS_PER_BLOCK(src_inode), len);
1308
1309                 src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
1310                                         array_size(olen, sizeof(block_t)),
1311                                         GFP_NOFS);
1312                 if (!src_blkaddr)
1313                         return -ENOMEM;
1314
1315                 do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
1316                                         array_size(olen, sizeof(int)),
1317                                         GFP_NOFS);
1318                 if (!do_replace) {
1319                         kvfree(src_blkaddr);
1320                         return -ENOMEM;
1321                 }
1322
1323                 ret = __read_out_blkaddrs(src_inode, src_blkaddr,
1324                                         do_replace, src, olen);
1325                 if (ret)
1326                         goto roll_back;
1327
1328                 ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
1329                                         do_replace, src, dst, olen, full);
1330                 if (ret)
1331                         goto roll_back;
1332
1333                 src += olen;
1334                 dst += olen;
1335                 len -= olen;
1336
1337                 kvfree(src_blkaddr);
1338                 kvfree(do_replace);
1339         }
1340         return 0;
1341
1342 roll_back:
1343         __roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen);
1344         kvfree(src_blkaddr);
1345         kvfree(do_replace);
1346         return ret;
1347 }
1348
1349 static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
1350 {
1351         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1352         pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1353         pgoff_t start = offset >> PAGE_SHIFT;
1354         pgoff_t end = (offset + len) >> PAGE_SHIFT;
1355         int ret;
1356
1357         f2fs_balance_fs(sbi, true);
1358
1359         /* avoid gc operation during block exchange */
1360         down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1361         down_write(&F2FS_I(inode)->i_mmap_sem);
1362
1363         f2fs_lock_op(sbi);
1364         f2fs_drop_extent_tree(inode);
1365         truncate_pagecache(inode, offset);
1366         ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
1367         f2fs_unlock_op(sbi);
1368
1369         up_write(&F2FS_I(inode)->i_mmap_sem);
1370         up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1371         return ret;
1372 }
1373
1374 static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
1375 {
1376         loff_t new_size;
1377         int ret;
1378
1379         if (offset + len >= i_size_read(inode))
1380                 return -EINVAL;
1381
1382         /* the collapse range should be aligned to the f2fs block size. */
1383         if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
1384                 return -EINVAL;
1385
1386         ret = f2fs_convert_inline_inode(inode);
1387         if (ret)
1388                 return ret;
1389
1390         /* write out all dirty pages from offset */
1391         ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1392         if (ret)
1393                 return ret;
1394
1395         ret = f2fs_do_collapse(inode, offset, len);
1396         if (ret)
1397                 return ret;
1398
1399         /* write out all moved pages, if possible */
1400         down_write(&F2FS_I(inode)->i_mmap_sem);
1401         filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1402         truncate_pagecache(inode, offset);
1403
1404         new_size = i_size_read(inode) - len;
1405         ret = f2fs_truncate_blocks(inode, new_size, true);
1406         up_write(&F2FS_I(inode)->i_mmap_sem);
1407         if (!ret)
1408                 f2fs_i_size_write(inode, new_size);
1409         return ret;
1410 }
1411
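/*
 * Zero the blocks in [start, end) within one dnode: count the holes,
 * reserve that many new blocks, then convert every written block to a
 * preallocated NEW_ADDR (invalidating the old address) so the range
 * reads back as zeroes.
 */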
1412 static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
1413                                                                 pgoff_t end)
1414 {
1415         struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1416         pgoff_t index = start;
1417         unsigned int ofs_in_node = dn->ofs_in_node;
1418         blkcnt_t count = 0;
1419         int ret;
1420
1421         for (; index < end; index++, dn->ofs_in_node++) {
1422                 if (f2fs_data_blkaddr(dn) == NULL_ADDR)
1423                         count++;
1424         }
1425
1426         dn->ofs_in_node = ofs_in_node;
1427         ret = f2fs_reserve_new_blocks(dn, count);
1428         if (ret)
1429                 return ret;
1430
1431         dn->ofs_in_node = ofs_in_node;
1432         for (index = start; index < end; index++, dn->ofs_in_node++) {
1433                 dn->data_blkaddr = f2fs_data_blkaddr(dn);
1434                 /*
1435                  * f2fs_reserve_new_blocks() does not guarantee that every
1436                  * requested block gets allocated.
1437                  */
1438                 if (dn->data_blkaddr == NULL_ADDR) {
1439                         ret = -ENOSPC;
1440                         break;
1441                 }
1442                 if (dn->data_blkaddr != NEW_ADDR) {
1443                         f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
1444                         dn->data_blkaddr = NEW_ADDR;
1445                         f2fs_set_data_blkaddr(dn);
1446                 }
1447         }
1448
1449         f2fs_update_extent_cache_range(dn, start, 0, index - start);
1450
1451         return ret;
1452 }
1453
1454 static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
1455                                                                 int mode)
1456 {
1457         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1458         struct address_space *mapping = inode->i_mapping;
1459         pgoff_t index, pg_start, pg_end;
1460         loff_t new_size = i_size_read(inode);
1461         loff_t off_start, off_end;
1462         int ret = 0;
1463
1464         ret = inode_newsize_ok(inode, (len + offset));
1465         if (ret)
1466                 return ret;
1467
1468         ret = f2fs_convert_inline_inode(inode);
1469         if (ret)
1470                 return ret;
1471
1472         ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
1473         if (ret)
1474                 return ret;
1475
1476         pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
1477         pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
1478
1479         off_start = offset & (PAGE_SIZE - 1);
1480         off_end = (offset + len) & (PAGE_SIZE - 1);
1481
1482         if (pg_start == pg_end) {
1483                 ret = fill_zero(inode, pg_start, off_start,
1484                                                 off_end - off_start);
1485                 if (ret)
1486                         return ret;
1487
1488                 new_size = max_t(loff_t, new_size, offset + len);
1489         } else {
1490                 if (off_start) {
1491                         ret = fill_zero(inode, pg_start++, off_start,
1492                                                 PAGE_SIZE - off_start);
1493                         if (ret)
1494                                 return ret;
1495
1496                         new_size = max_t(loff_t, new_size,
1497                                         (loff_t)pg_start << PAGE_SHIFT);
1498                 }
1499
1500                 for (index = pg_start; index < pg_end;) {
1501                         struct dnode_of_data dn;
1502                         unsigned int end_offset;
1503                         pgoff_t end;
1504
1505                         down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1506                         down_write(&F2FS_I(inode)->i_mmap_sem);
1507
1508                         truncate_pagecache_range(inode,
1509                                 (loff_t)index << PAGE_SHIFT,
1510                                 ((loff_t)pg_end << PAGE_SHIFT) - 1);
1511
1512                         f2fs_lock_op(sbi);
1513
1514                         set_new_dnode(&dn, inode, NULL, NULL, 0);
1515                         ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
1516                         if (ret) {
1517                                 f2fs_unlock_op(sbi);
1518                                 up_write(&F2FS_I(inode)->i_mmap_sem);
1519                                 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1520                                 goto out;
1521                         }
1522
1523                         end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1524                         end = min(pg_end, end_offset - dn.ofs_in_node + index);
1525
1526                         ret = f2fs_do_zero_range(&dn, index, end);
1527                         f2fs_put_dnode(&dn);
1528
1529                         f2fs_unlock_op(sbi);
1530                         up_write(&F2FS_I(inode)->i_mmap_sem);
1531                         up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1532
1533                         f2fs_balance_fs(sbi, dn.node_changed);
1534
1535                         if (ret)
1536                                 goto out;
1537
1538                         index = end;
1539                         new_size = max_t(loff_t, new_size,
1540                                         (loff_t)index << PAGE_SHIFT);
1541                 }
1542
1543                 if (off_end) {
1544                         ret = fill_zero(inode, pg_end, 0, off_end);
1545                         if (ret)
1546                                 goto out;
1547
1548                         new_size = max_t(loff_t, new_size, offset + len);
1549                 }
1550         }
1551
1552 out:
1553         if (new_size > i_size_read(inode)) {
1554                 if (mode & FALLOC_FL_KEEP_SIZE)
1555                         file_set_keep_isize(inode);
1556                 else
1557                         f2fs_i_size_write(inode, new_size);
1558         }
1559         return ret;
1560 }
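
/*
 * Example (userspace, illustrative sketch): zeroing a region through the
 * FALLOC_FL_ZERO_RANGE path above.  Adding FALLOC_FL_KEEP_SIZE keeps
 * i_size unchanged even when the range reaches past EOF, which is the
 * file_set_keep_isize() case handled at the end of f2fs_zero_range().
 * The helper name is an assumption for the sketch.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *
 *	int zero_region(int fd, off_t off, off_t len)
 *	{
 *		return fallocate(fd, FALLOC_FL_ZERO_RANGE |
 *				     FALLOC_FL_KEEP_SIZE, off, len);
 *	}
 */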
1561
1562 static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
1563 {
1564         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1565         pgoff_t nr, pg_start, pg_end, delta, idx;
1566         loff_t new_size;
1567         int ret = 0;
1568
1569         new_size = i_size_read(inode) + len;
1570         ret = inode_newsize_ok(inode, new_size);
1571         if (ret)
1572                 return ret;
1573
1574         if (offset >= i_size_read(inode))
1575                 return -EINVAL;
1576
1577         /* insert range should be aligned to the f2fs block size. */
1578         if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
1579                 return -EINVAL;
1580
1581         ret = f2fs_convert_inline_inode(inode);
1582         if (ret)
1583                 return ret;
1584
1585         f2fs_balance_fs(sbi, true);
1586
1587         down_write(&F2FS_I(inode)->i_mmap_sem);
1588         ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
1589         up_write(&F2FS_I(inode)->i_mmap_sem);
1590         if (ret)
1591                 return ret;
1592
1593         /* write out all dirty pages from offset */
1594         ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1595         if (ret)
1596                 return ret;
1597
1598         pg_start = offset >> PAGE_SHIFT;
1599         pg_end = (offset + len) >> PAGE_SHIFT;
1600         delta = pg_end - pg_start;
1601         idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1602
1603         /* avoid gc operation during block exchange */
1604         down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1605         down_write(&F2FS_I(inode)->i_mmap_sem);
1606         truncate_pagecache(inode, offset);
1607
1608         while (!ret && idx > pg_start) {
1609                 nr = idx - pg_start;
1610                 if (nr > delta)
1611                         nr = delta;
1612                 idx -= nr;
1613
1614                 f2fs_lock_op(sbi);
1615                 f2fs_drop_extent_tree(inode);
1616
1617                 ret = __exchange_data_block(inode, inode, idx,
1618                                         idx + delta, nr, false);
1619                 f2fs_unlock_op(sbi);
1620         }
1621         up_write(&F2FS_I(inode)->i_mmap_sem);
1622         up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1623
1624         /* write out all moved pages, if possible */
1625         down_write(&F2FS_I(inode)->i_mmap_sem);
1626         filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1627         truncate_pagecache(inode, offset);
1628         up_write(&F2FS_I(inode)->i_mmap_sem);
1629
1630         if (!ret)
1631                 f2fs_i_size_write(inode, new_size);
1632         return ret;
1633 }
1634
1635 static int expand_inode_data(struct inode *inode, loff_t offset,
1636                                         loff_t len, int mode)
1637 {
1638         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1639         struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
1640                         .m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
1641                         .m_may_create = true };
1642         pgoff_t pg_start, pg_end;
1643         loff_t new_size = i_size_read(inode);
1644         loff_t off_end;
1645         block_t expanded = 0;
1646         int err;
1647
1648         err = inode_newsize_ok(inode, (len + offset));
1649         if (err)
1650                 return err;
1651
1652         err = f2fs_convert_inline_inode(inode);
1653         if (err)
1654                 return err;
1655
1656         f2fs_balance_fs(sbi, true);
1657
1658         pg_start = ((unsigned long long)offset) >> PAGE_SHIFT;
1659         pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
1660         off_end = (offset + len) & (PAGE_SIZE - 1);
1661
1662         map.m_lblk = pg_start;
1663         map.m_len = pg_end - pg_start;
1664         if (off_end)
1665                 map.m_len++;
1666
1667         if (!map.m_len)
1668                 return 0;
1669
1670         if (f2fs_is_pinned_file(inode)) {
1671                 block_t sec_blks = BLKS_PER_SEC(sbi);
1672                 block_t sec_len = roundup(map.m_len, sec_blks);
1673
1674                 map.m_len = sec_blks;
1675 next_alloc:
1676                 if (has_not_enough_free_secs(sbi, 0,
1677                         GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
1678                         down_write(&sbi->gc_lock);
1679                         err = f2fs_gc(sbi, true, false, false, NULL_SEGNO);
1680                         if (err && err != -ENODATA && err != -EAGAIN)
1681                                 goto out_err;
1682                 }
1683
1684                 down_write(&sbi->pin_sem);
1685
1686                 f2fs_lock_op(sbi);
1687                 f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
1688                 f2fs_unlock_op(sbi);
1689
1690                 map.m_seg_type = CURSEG_COLD_DATA_PINNED;
1691                 err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
1692
1693                 up_write(&sbi->pin_sem);
1694
1695                 expanded += map.m_len;
1696                 sec_len -= map.m_len;
1697                 map.m_lblk += map.m_len;
1698                 if (!err && sec_len)
1699                         goto next_alloc;
1700
1701                 map.m_len = expanded;
1702         } else {
1703                 err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
1704                 expanded = map.m_len;
1705         }
1706 out_err:
1707         if (err) {
1708                 pgoff_t last_off;
1709
1710                 if (!expanded)
1711                         return err;
1712
1713                 last_off = pg_start + expanded - 1;
1714
1715                 /* update new size to the failed position */
1716                 new_size = (last_off == pg_end) ? offset + len :
1717                                         (loff_t)(last_off + 1) << PAGE_SHIFT;
1718         } else {
1719                 new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
1720         }
1721
1722         if (new_size > i_size_read(inode)) {
1723                 if (mode & FALLOC_FL_KEEP_SIZE)
1724                         file_set_keep_isize(inode);
1725                 else
1726                         f2fs_i_size_write(inode, new_size);
1727         }
1728
1729         return err;
1730 }
1731
1732 static long f2fs_fallocate(struct file *file, int mode,
1733                                 loff_t offset, loff_t len)
1734 {
1735         struct inode *inode = file_inode(file);
1736         long ret = 0;
1737
1738         if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
1739                 return -EIO;
1740         if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
1741                 return -ENOSPC;
1742         if (!f2fs_is_compress_backend_ready(inode))
1743                 return -EOPNOTSUPP;
1744
1745         /* f2fs only supports ->fallocate for regular files */
1746         if (!S_ISREG(inode->i_mode))
1747                 return -EINVAL;
1748
1749         if (IS_ENCRYPTED(inode) &&
1750                 (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
1751                 return -EOPNOTSUPP;
1752
1753         if (f2fs_compressed_file(inode) &&
1754                 (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
1755                         FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE)))
1756                 return -EOPNOTSUPP;
1757
1758         if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
1759                         FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
1760                         FALLOC_FL_INSERT_RANGE))
1761                 return -EOPNOTSUPP;
1762
1763         inode_lock(inode);
1764
1765         if (mode & FALLOC_FL_PUNCH_HOLE) {
1766                 if (offset >= inode->i_size)
1767                         goto out;
1768
1769                 ret = punch_hole(inode, offset, len);
1770         } else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
1771                 ret = f2fs_collapse_range(inode, offset, len);
1772         } else if (mode & FALLOC_FL_ZERO_RANGE) {
1773                 ret = f2fs_zero_range(inode, offset, len, mode);
1774         } else if (mode & FALLOC_FL_INSERT_RANGE) {
1775                 ret = f2fs_insert_range(inode, offset, len);
1776         } else {
1777                 ret = expand_inode_data(inode, offset, len, mode);
1778         }
1779
1780         if (!ret) {
1781                 inode->i_mtime = inode->i_ctime = current_time(inode);
1782                 f2fs_mark_inode_dirty_sync(inode, false);
1783                 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
1784         }
1785
1786 out:
1787         inode_unlock(inode);
1788
1789         trace_f2fs_fallocate(inode, mode, offset, len, ret);
1790         return ret;
1791 }
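
/*
 * Example (userspace, illustrative sketch): preallocating space through
 * the default expand_inode_data() path above.  Without FALLOC_FL_KEEP_SIZE
 * the file grows to offset + len on success; with it, blocks are reserved
 * but i_size is left alone.  The helper name is an assumption.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *
 *	int reserve_space(int fd, off_t len)
 *	{
 *		return fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, len);
 *	}
 */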
1792
1793 static int f2fs_release_file(struct inode *inode, struct file *filp)
1794 {
1795         /*
1796          * f2fs_release_file() is called on every close. So we should not
1797          * drop any in-memory pages on a close issued by another process.
1798          */
1799         if (!(filp->f_mode & FMODE_WRITE) ||
1800                         atomic_read(&inode->i_writecount) != 1)
1801                 return 0;
1802
1803         /* any remaining atomic pages should be discarded */
1804         if (f2fs_is_atomic_file(inode))
1805                 f2fs_drop_inmem_pages(inode);
1806         if (f2fs_is_volatile_file(inode)) {
1807                 set_inode_flag(inode, FI_DROP_CACHE);
1808                 filemap_fdatawrite(inode->i_mapping);
1809                 clear_inode_flag(inode, FI_DROP_CACHE);
1810                 clear_inode_flag(inode, FI_VOLATILE_FILE);
1811                 stat_dec_volatile_write(inode);
1812         }
1813         return 0;
1814 }
1815
1816 static int f2fs_file_flush(struct file *file, fl_owner_t id)
1817 {
1818         struct inode *inode = file_inode(file);
1819
1820         /*
1821          * If the process doing a transaction crashes, we should roll it
1822          * back. Otherwise, other readers/writers can see a corrupted
1823          * database until all the writers close their files. Since this
1824          * must be done before dropping the file lock, do it in ->flush.
1825          */
1826         if (f2fs_is_atomic_file(inode) &&
1827                         F2FS_I(inode)->inmem_task == current)
1828                 f2fs_drop_inmem_pages(inode);
1829         return 0;
1830 }
1831
1832 static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
1833 {
1834         struct f2fs_inode_info *fi = F2FS_I(inode);
1835         u32 masked_flags = fi->i_flags & mask;
1836
1837         /* mask can be shrunk by flags_valid selector */
1838         iflags &= mask;
1839
1840         /* Is it a quota file? Do not allow users to mess with it */
1841         if (IS_NOQUOTA(inode))
1842                 return -EPERM;
1843
1844         if ((iflags ^ masked_flags) & F2FS_CASEFOLD_FL) {
1845                 if (!f2fs_sb_has_casefold(F2FS_I_SB(inode)))
1846                         return -EOPNOTSUPP;
1847                 if (!f2fs_empty_dir(inode))
1848                         return -ENOTEMPTY;
1849         }
1850
1851         if (iflags & (F2FS_COMPR_FL | F2FS_NOCOMP_FL)) {
1852                 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
1853                         return -EOPNOTSUPP;
1854                 if ((iflags & F2FS_COMPR_FL) && (iflags & F2FS_NOCOMP_FL))
1855                         return -EINVAL;
1856         }
1857
1858         if ((iflags ^ masked_flags) & F2FS_COMPR_FL) {
1859                 if (masked_flags & F2FS_COMPR_FL) {
1860                         if (!f2fs_disable_compressed_file(inode))
1861                                 return -EINVAL;
1862                 }
1863                 if (iflags & F2FS_NOCOMP_FL)
1864                         return -EINVAL;
1865                 if (iflags & F2FS_COMPR_FL) {
1866                         if (!f2fs_may_compress(inode))
1867                                 return -EINVAL;
1868                         if (S_ISREG(inode->i_mode) && inode->i_size)
1869                                 return -EINVAL;
1870
1871                         set_compress_context(inode);
1872                 }
1873         }
1874         if ((iflags ^ masked_flags) & F2FS_NOCOMP_FL) {
1875                 if (masked_flags & F2FS_COMPR_FL)
1876                         return -EINVAL;
1877         }
1878
1879         fi->i_flags = iflags | (fi->i_flags & ~mask);
1880         f2fs_bug_on(F2FS_I_SB(inode), (fi->i_flags & F2FS_COMPR_FL) &&
1881                                         (fi->i_flags & F2FS_NOCOMP_FL));
1882
1883         if (fi->i_flags & F2FS_PROJINHERIT_FL)
1884                 set_inode_flag(inode, FI_PROJ_INHERIT);
1885         else
1886                 clear_inode_flag(inode, FI_PROJ_INHERIT);
1887
1888         inode->i_ctime = current_time(inode);
1889         f2fs_set_inode_flags(inode);
1890         f2fs_mark_inode_dirty_sync(inode, true);
1891         return 0;
1892 }
1893
1894 /* FS_IOC_[GS]ETFLAGS and FS_IOC_FS[GS]ETXATTR support */
1895
1896 /*
1897  * To make a new on-disk f2fs i_flag gettable via FS_IOC_GETFLAGS, add an entry
1898  * for it to f2fs_fsflags_map[], and add its FS_*_FL equivalent to
1899  * F2FS_GETTABLE_FS_FL.  To also make it settable via FS_IOC_SETFLAGS, also add
1900  * its FS_*_FL equivalent to F2FS_SETTABLE_FS_FL.
1901  *
1902  * Translating flags to fsx_flags value used by FS_IOC_FSGETXATTR and
1903  * FS_IOC_FSSETXATTR is done by the VFS.
1904  */
1905
1906 static const struct {
1907         u32 iflag;
1908         u32 fsflag;
1909 } f2fs_fsflags_map[] = {
1910         { F2FS_COMPR_FL,        FS_COMPR_FL },
1911         { F2FS_SYNC_FL,         FS_SYNC_FL },
1912         { F2FS_IMMUTABLE_FL,    FS_IMMUTABLE_FL },
1913         { F2FS_APPEND_FL,       FS_APPEND_FL },
1914         { F2FS_NODUMP_FL,       FS_NODUMP_FL },
1915         { F2FS_NOATIME_FL,      FS_NOATIME_FL },
1916         { F2FS_NOCOMP_FL,       FS_NOCOMP_FL },
1917         { F2FS_INDEX_FL,        FS_INDEX_FL },
1918         { F2FS_DIRSYNC_FL,      FS_DIRSYNC_FL },
1919         { F2FS_PROJINHERIT_FL,  FS_PROJINHERIT_FL },
1920         { F2FS_CASEFOLD_FL,     FS_CASEFOLD_FL },
1921 };
1922
1923 #define F2FS_GETTABLE_FS_FL (           \
1924                 FS_COMPR_FL |           \
1925                 FS_SYNC_FL |            \
1926                 FS_IMMUTABLE_FL |       \
1927                 FS_APPEND_FL |          \
1928                 FS_NODUMP_FL |          \
1929                 FS_NOATIME_FL |         \
1930                 FS_NOCOMP_FL |          \
1931                 FS_INDEX_FL |           \
1932                 FS_DIRSYNC_FL |         \
1933                 FS_PROJINHERIT_FL |     \
1934                 FS_ENCRYPT_FL |         \
1935                 FS_INLINE_DATA_FL |     \
1936                 FS_NOCOW_FL |           \
1937                 FS_VERITY_FL |          \
1938                 FS_CASEFOLD_FL)
1939
1940 #define F2FS_SETTABLE_FS_FL (           \
1941                 FS_COMPR_FL |           \
1942                 FS_SYNC_FL |            \
1943                 FS_IMMUTABLE_FL |       \
1944                 FS_APPEND_FL |          \
1945                 FS_NODUMP_FL |          \
1946                 FS_NOATIME_FL |         \
1947                 FS_NOCOMP_FL |          \
1948                 FS_DIRSYNC_FL |         \
1949                 FS_PROJINHERIT_FL |     \
1950                 FS_CASEFOLD_FL)
1951
1952 /* Convert f2fs on-disk i_flags to FS_IOC_{GET,SET}FLAGS flags */
1953 static inline u32 f2fs_iflags_to_fsflags(u32 iflags)
1954 {
1955         u32 fsflags = 0;
1956         int i;
1957
1958         for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
1959                 if (iflags & f2fs_fsflags_map[i].iflag)
1960                         fsflags |= f2fs_fsflags_map[i].fsflag;
1961
1962         return fsflags;
1963 }
1964
1965 /* Convert FS_IOC_{GET,SET}FLAGS flags to f2fs on-disk i_flags */
1966 static inline u32 f2fs_fsflags_to_iflags(u32 fsflags)
1967 {
1968         u32 iflags = 0;
1969         int i;
1970
1971         for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
1972                 if (fsflags & f2fs_fsflags_map[i].fsflag)
1973                         iflags |= f2fs_fsflags_map[i].iflag;
1974
1975         return iflags;
1976 }
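
/*
 * Example (userspace, illustrative sketch): toggling FS_NOATIME_FL through
 * FS_IOC_GETFLAGS/FS_IOC_SETFLAGS, which are translated by the helpers
 * above.  Only flags in F2FS_SETTABLE_FS_FL can be changed this way.  The
 * helper name is an assumption for the sketch.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *
 *	int set_noatime(int fd)
 *	{
 *		int flags;
 *
 *		if (ioctl(fd, FS_IOC_GETFLAGS, &flags))
 *			return -1;
 *		flags |= FS_NOATIME_FL;
 *		return ioctl(fd, FS_IOC_SETFLAGS, &flags);
 *	}
 */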
1977
1978 static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
1979 {
1980         struct inode *inode = file_inode(filp);
1981
1982         return put_user(inode->i_generation, (int __user *)arg);
1983 }
1984
1985 static int f2fs_ioc_start_atomic_write(struct file *filp)
1986 {
1987         struct inode *inode = file_inode(filp);
1988         struct f2fs_inode_info *fi = F2FS_I(inode);
1989         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1990         int ret;
1991
1992         if (!inode_owner_or_capable(&init_user_ns, inode))
1993                 return -EACCES;
1994
1995         if (!S_ISREG(inode->i_mode))
1996                 return -EINVAL;
1997
1998         if (filp->f_flags & O_DIRECT)
1999                 return -EINVAL;
2000
2001         ret = mnt_want_write_file(filp);
2002         if (ret)
2003                 return ret;
2004
2005         inode_lock(inode);
2006
2007         f2fs_disable_compressed_file(inode);
2008
2009         if (f2fs_is_atomic_file(inode)) {
2010                 if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))
2011                         ret = -EINVAL;
2012                 goto out;
2013         }
2014
2015         ret = f2fs_convert_inline_inode(inode);
2016         if (ret)
2017                 goto out;
2018
2019         down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2020
2021         /*
2022          * Should wait for end_io so that F2FS_WB_CP_DATA is counted
2023          * correctly by f2fs_is_atomic_file().
2024          */
2025         if (get_dirty_pages(inode))
2026                 f2fs_warn(F2FS_I_SB(inode), "Unexpected flush for atomic writes: ino=%lu, npages=%u",
2027                           inode->i_ino, get_dirty_pages(inode));
2028         ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
2029         if (ret) {
2030                 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2031                 goto out;
2032         }
2033
2034         spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
2035         if (list_empty(&fi->inmem_ilist))
2036                 list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]);
2037         sbi->atomic_files++;
2038         spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
2039
2040         /* add the inode to inmem_list first, then set FI_ATOMIC_FILE */
2041         set_inode_flag(inode, FI_ATOMIC_FILE);
2042         clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2043         up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2044
2045         f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2046         F2FS_I(inode)->inmem_task = current;
2047         stat_update_max_atomic_write(inode);
2048 out:
2049         inode_unlock(inode);
2050         mnt_drop_write_file(filp);
2051         return ret;
2052 }
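
/*
 * Example (userspace, illustrative sketch): the atomic-write sequence used
 * by databases such as SQLite.  Writes issued between
 * F2FS_IOC_START_ATOMIC_WRITE and F2FS_IOC_COMMIT_ATOMIC_WRITE are staged
 * in memory and become durable all-or-nothing at commit time.  The helper
 * name and error policy are assumptions for the sketch.
 *
 *	#include <sys/ioctl.h>
 *	#include <unistd.h>
 *	#include <linux/f2fs.h>
 *
 *	int atomic_update(int fd, const void *buf, size_t len, off_t off)
 *	{
 *		if (ioctl(fd, F2FS_IOC_START_ATOMIC_WRITE))
 *			return -1;
 *		if (pwrite(fd, buf, len, off) != (ssize_t)len) {
 *			ioctl(fd, F2FS_IOC_ABORT_VOLATILE_WRITE);
 *			return -1;
 *		}
 *		return ioctl(fd, F2FS_IOC_COMMIT_ATOMIC_WRITE);
 *	}
 */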
2053
2054 static int f2fs_ioc_commit_atomic_write(struct file *filp)
2055 {
2056         struct inode *inode = file_inode(filp);
2057         int ret;
2058
2059         if (!inode_owner_or_capable(&init_user_ns, inode))
2060                 return -EACCES;
2061
2062         ret = mnt_want_write_file(filp);
2063         if (ret)
2064                 return ret;
2065
2066         f2fs_balance_fs(F2FS_I_SB(inode), true);
2067
2068         inode_lock(inode);
2069
2070         if (f2fs_is_volatile_file(inode)) {
2071                 ret = -EINVAL;
2072                 goto err_out;
2073         }
2074
2075         if (f2fs_is_atomic_file(inode)) {
2076                 ret = f2fs_commit_inmem_pages(inode);
2077                 if (ret)
2078                         goto err_out;
2079
2080                 ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
2081                 if (!ret)
2082                         f2fs_drop_inmem_pages(inode);
2083         } else {
2084                 ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
2085         }
2086 err_out:
2087         if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
2088                 clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2089                 ret = -EINVAL;
2090         }
2091         inode_unlock(inode);
2092         mnt_drop_write_file(filp);
2093         return ret;
2094 }
2095
2096 static int f2fs_ioc_start_volatile_write(struct file *filp)
2097 {
2098         struct inode *inode = file_inode(filp);
2099         int ret;
2100
2101         if (!inode_owner_or_capable(&init_user_ns, inode))
2102                 return -EACCES;
2103
2104         if (!S_ISREG(inode->i_mode))
2105                 return -EINVAL;
2106
2107         ret = mnt_want_write_file(filp);
2108         if (ret)
2109                 return ret;
2110
2111         inode_lock(inode);
2112
2113         if (f2fs_is_volatile_file(inode))
2114                 goto out;
2115
2116         ret = f2fs_convert_inline_inode(inode);
2117         if (ret)
2118                 goto out;
2119
2120         stat_inc_volatile_write(inode);
2121         stat_update_max_volatile_write(inode);
2122
2123         set_inode_flag(inode, FI_VOLATILE_FILE);
2124         f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2125 out:
2126         inode_unlock(inode);
2127         mnt_drop_write_file(filp);
2128         return ret;
2129 }
2130
2131 static int f2fs_ioc_release_volatile_write(struct file *filp)
2132 {
2133         struct inode *inode = file_inode(filp);
2134         int ret;
2135
2136         if (!inode_owner_or_capable(&init_user_ns, inode))
2137                 return -EACCES;
2138
2139         ret = mnt_want_write_file(filp);
2140         if (ret)
2141                 return ret;
2142
2143         inode_lock(inode);
2144
2145         if (!f2fs_is_volatile_file(inode))
2146                 goto out;
2147
2148         if (!f2fs_is_first_block_written(inode)) {
2149                 ret = truncate_partial_data_page(inode, 0, true);
2150                 goto out;
2151         }
2152
2153         ret = punch_hole(inode, 0, F2FS_BLKSIZE);
2154 out:
2155         inode_unlock(inode);
2156         mnt_drop_write_file(filp);
2157         return ret;
2158 }
2159
2160 static int f2fs_ioc_abort_volatile_write(struct file *filp)
2161 {
2162         struct inode *inode = file_inode(filp);
2163         int ret;
2164
2165         if (!inode_owner_or_capable(&init_user_ns, inode))
2166                 return -EACCES;
2167
2168         ret = mnt_want_write_file(filp);
2169         if (ret)
2170                 return ret;
2171
2172         inode_lock(inode);
2173
2174         if (f2fs_is_atomic_file(inode))
2175                 f2fs_drop_inmem_pages(inode);
2176         if (f2fs_is_volatile_file(inode)) {
2177                 clear_inode_flag(inode, FI_VOLATILE_FILE);
2178                 stat_dec_volatile_write(inode);
2179                 ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
2180         }
2181
2182         clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2183
2184         inode_unlock(inode);
2185
2186         mnt_drop_write_file(filp);
2187         f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2188         return ret;
2189 }
2190
2191 static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
2192 {
2193         struct inode *inode = file_inode(filp);
2194         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2195         struct super_block *sb = sbi->sb;
2196         __u32 in;
2197         int ret = 0;
2198
2199         if (!capable(CAP_SYS_ADMIN))
2200                 return -EPERM;
2201
2202         if (get_user(in, (__u32 __user *)arg))
2203                 return -EFAULT;
2204
2205         if (in != F2FS_GOING_DOWN_FULLSYNC) {
2206                 ret = mnt_want_write_file(filp);
2207                 if (ret) {
2208                         if (ret == -EROFS) {
2209                                 ret = 0;
2210                                 f2fs_stop_checkpoint(sbi, false);
2211                                 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2212                                 trace_f2fs_shutdown(sbi, in, ret);
2213                         }
2214                         return ret;
2215                 }
2216         }
2217
2218         switch (in) {
2219         case F2FS_GOING_DOWN_FULLSYNC:
2220                 ret = freeze_bdev(sb->s_bdev);
2221                 if (ret)
2222                         goto out;
2223                 f2fs_stop_checkpoint(sbi, false);
2224                 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2225                 thaw_bdev(sb->s_bdev);
2226                 break;
2227         case F2FS_GOING_DOWN_METASYNC:
2228                 /* do checkpoint only */
2229                 ret = f2fs_sync_fs(sb, 1);
2230                 if (ret)
2231                         goto out;
2232                 f2fs_stop_checkpoint(sbi, false);
2233                 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2234                 break;
2235         case F2FS_GOING_DOWN_NOSYNC:
2236                 f2fs_stop_checkpoint(sbi, false);
2237                 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2238                 break;
2239         case F2FS_GOING_DOWN_METAFLUSH:
2240                 f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
2241                 f2fs_stop_checkpoint(sbi, false);
2242                 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2243                 break;
2244         case F2FS_GOING_DOWN_NEED_FSCK:
2245                 set_sbi_flag(sbi, SBI_NEED_FSCK);
2246                 set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
2247                 set_sbi_flag(sbi, SBI_IS_DIRTY);
2248                 /* do checkpoint only */
2249                 ret = f2fs_sync_fs(sb, 1);
2250                 goto out;
2251         default:
2252                 ret = -EINVAL;
2253                 goto out;
2254         }
2255
2256         f2fs_stop_gc_thread(sbi);
2257         f2fs_stop_discard_thread(sbi);
2258
2259         f2fs_drop_discard_cmd(sbi);
2260         clear_opt(sbi, DISCARD);
2261
2262         f2fs_update_time(sbi, REQ_TIME);
2263 out:
2264         if (in != F2FS_GOING_DOWN_FULLSYNC)
2265                 mnt_drop_write_file(filp);
2266
2267         trace_f2fs_shutdown(sbi, in, ret);
2268
2269         return ret;
2270 }
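
/*
 * Example (userspace, illustrative sketch): shutting the filesystem down
 * after writing a checkpoint (F2FS_GOING_DOWN_METASYNC).  The argument
 * selects one of the F2FS_GOING_DOWN_* policies handled in the switch
 * above; the caller needs CAP_SYS_ADMIN.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/f2fs.h>
 *
 *	int shutdown_fs(int fd)
 *	{
 *		__u32 how = F2FS_GOING_DOWN_METASYNC;
 *
 *		return ioctl(fd, F2FS_IOC_SHUTDOWN, &how);
 *	}
 */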
2271
2272 static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
2273 {
2274         struct inode *inode = file_inode(filp);
2275         struct super_block *sb = inode->i_sb;
2276         struct request_queue *q = bdev_get_queue(sb->s_bdev);
2277         struct fstrim_range range;
2278         int ret;
2279
2280         if (!capable(CAP_SYS_ADMIN))
2281                 return -EPERM;
2282
2283         if (!f2fs_hw_support_discard(F2FS_SB(sb)))
2284                 return -EOPNOTSUPP;
2285
2286         if (copy_from_user(&range, (struct fstrim_range __user *)arg,
2287                                 sizeof(range)))
2288                 return -EFAULT;
2289
2290         ret = mnt_want_write_file(filp);
2291         if (ret)
2292                 return ret;
2293
2294         range.minlen = max((unsigned int)range.minlen,
2295                                 q->limits.discard_granularity);
2296         ret = f2fs_trim_fs(F2FS_SB(sb), &range);
2297         mnt_drop_write_file(filp);
2298         if (ret < 0)
2299                 return ret;
2300
2301         if (copy_to_user((struct fstrim_range __user *)arg, &range,
2302                                 sizeof(range)))
2303                 return -EFAULT;
2304         f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2305         return 0;
2306 }
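
/*
 * Example (userspace, illustrative sketch): trimming the whole filesystem,
 * as fstrim(8) does.  range.minlen is raised to the device's discard
 * granularity by the handler above, and on return range.len holds the
 * number of bytes that were trimmed.
 *
 *	#include <sys/ioctl.h>
 *	#include <limits.h>
 *	#include <linux/fs.h>
 *
 *	int trim_all(int fd)
 *	{
 *		struct fstrim_range range = {
 *			.start = 0,
 *			.len = ULLONG_MAX,
 *			.minlen = 0,
 *		};
 *
 *		return ioctl(fd, FITRIM, &range);
 *	}
 */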
2307
2308 static bool uuid_is_nonzero(__u8 u[16])
2309 {
2310         int i;
2311
2312         for (i = 0; i < 16; i++)
2313                 if (u[i])
2314                         return true;
2315         return false;
2316 }
2317
2318 static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
2319 {
2320         struct inode *inode = file_inode(filp);
2321
2322         if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode)))
2323                 return -EOPNOTSUPP;
2324
2325         f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2326
2327         return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
2328 }
2329
2330 static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
2331 {
2332         if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2333                 return -EOPNOTSUPP;
2334         return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
2335 }
2336
2337 static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
2338 {
2339         struct inode *inode = file_inode(filp);
2340         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2341         int err;
2342
2343         if (!f2fs_sb_has_encrypt(sbi))
2344                 return -EOPNOTSUPP;
2345
2346         err = mnt_want_write_file(filp);
2347         if (err)
2348                 return err;
2349
2350         down_write(&sbi->sb_lock);
2351
2352         if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
2353                 goto got_it;
2354
2355         /* update superblock with uuid */
2356         generate_random_uuid(sbi->raw_super->encrypt_pw_salt);
2357
2358         err = f2fs_commit_super(sbi, false);
2359         if (err) {
2360                 /* undo new data */
2361                 memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
2362                 goto out_err;
2363         }
2364 got_it:
2365         if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
2366                                                                         16))
2367                 err = -EFAULT;
2368 out_err:
2369         up_write(&sbi->sb_lock);
2370         mnt_drop_write_file(filp);
2371         return err;
2372 }
2373
2374 static int f2fs_ioc_get_encryption_policy_ex(struct file *filp,
2375                                              unsigned long arg)
2376 {
2377         if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2378                 return -EOPNOTSUPP;
2379
2380         return fscrypt_ioctl_get_policy_ex(filp, (void __user *)arg);
2381 }
2382
2383 static int f2fs_ioc_add_encryption_key(struct file *filp, unsigned long arg)
2384 {
2385         if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2386                 return -EOPNOTSUPP;
2387
2388         return fscrypt_ioctl_add_key(filp, (void __user *)arg);
2389 }
2390
2391 static int f2fs_ioc_remove_encryption_key(struct file *filp, unsigned long arg)
2392 {
2393         if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2394                 return -EOPNOTSUPP;
2395
2396         return fscrypt_ioctl_remove_key(filp, (void __user *)arg);
2397 }
2398
2399 static int f2fs_ioc_remove_encryption_key_all_users(struct file *filp,
2400                                                     unsigned long arg)
2401 {
2402         if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2403                 return -EOPNOTSUPP;
2404
2405         return fscrypt_ioctl_remove_key_all_users(filp, (void __user *)arg);
2406 }
2407
2408 static int f2fs_ioc_get_encryption_key_status(struct file *filp,
2409                                               unsigned long arg)
2410 {
2411         if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2412                 return -EOPNOTSUPP;
2413
2414         return fscrypt_ioctl_get_key_status(filp, (void __user *)arg);
2415 }
2416
2417 static int f2fs_ioc_get_encryption_nonce(struct file *filp, unsigned long arg)
2418 {
2419         if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2420                 return -EOPNOTSUPP;
2421
2422         return fscrypt_ioctl_get_nonce(filp, (void __user *)arg);
2423 }
2424
2425 static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
2426 {
2427         struct inode *inode = file_inode(filp);
2428         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2429         __u32 sync;
2430         int ret;
2431
2432         if (!capable(CAP_SYS_ADMIN))
2433                 return -EPERM;
2434
2435         if (get_user(sync, (__u32 __user *)arg))
2436                 return -EFAULT;
2437
2438         if (f2fs_readonly(sbi->sb))
2439                 return -EROFS;
2440
2441         ret = mnt_want_write_file(filp);
2442         if (ret)
2443                 return ret;
2444
2445         if (!sync) {
2446                 if (!down_write_trylock(&sbi->gc_lock)) {
2447                         ret = -EBUSY;
2448                         goto out;
2449                 }
2450         } else {
2451                 down_write(&sbi->gc_lock);
2452         }
2453
2454         ret = f2fs_gc(sbi, sync, true, false, NULL_SEGNO);
2455 out:
2456         mnt_drop_write_file(filp);
2457         return ret;
2458 }
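
/*
 * Example (userspace, illustrative sketch): triggering one foreground GC
 * pass.  A non-zero argument makes the ioctl block on gc_lock instead of
 * returning -EBUSY; F2FS_IOC_GARBAGE_COLLECT_RANGE below provides the
 * same service for an explicit block range.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/f2fs.h>
 *
 *	int force_gc(int fd)
 *	{
 *		__u32 sync = 1;
 *
 *		return ioctl(fd, F2FS_IOC_GARBAGE_COLLECT, &sync);
 *	}
 */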
2459
2460 static int __f2fs_ioc_gc_range(struct file *filp, struct f2fs_gc_range *range)
2461 {
2462         struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
2463         u64 end;
2464         int ret;
2465
2466         if (!capable(CAP_SYS_ADMIN))
2467                 return -EPERM;
2468         if (f2fs_readonly(sbi->sb))
2469                 return -EROFS;
2470
2471         end = range->start + range->len;
2472         if (end < range->start || range->start < MAIN_BLKADDR(sbi) ||
2473                                         end >= MAX_BLKADDR(sbi))
2474                 return -EINVAL;
2475
2476         ret = mnt_want_write_file(filp);
2477         if (ret)
2478                 return ret;
2479
2480 do_more:
2481         if (!range->sync) {
2482                 if (!down_write_trylock(&sbi->gc_lock)) {
2483                         ret = -EBUSY;
2484                         goto out;
2485                 }
2486         } else {
2487                 down_write(&sbi->gc_lock);
2488         }
2489
2490         ret = f2fs_gc(sbi, range->sync, true, false,
2491                                 GET_SEGNO(sbi, range->start));
2492         if (ret) {
2493                 if (ret == -EBUSY)
2494                         ret = -EAGAIN;
2495                 goto out;
2496         }
2497         range->start += BLKS_PER_SEC(sbi);
2498         if (range->start <= end)
2499                 goto do_more;
2500 out:
2501         mnt_drop_write_file(filp);
2502         return ret;
2503 }
2504
2505 static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
2506 {
2507         struct f2fs_gc_range range;
2508
2509         if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
2510                                                         sizeof(range)))
2511                 return -EFAULT;
2512         return __f2fs_ioc_gc_range(filp, &range);
2513 }
2514
2515 static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
2516 {
2517         struct inode *inode = file_inode(filp);
2518         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2519         int ret;
2520
2521         if (!capable(CAP_SYS_ADMIN))
2522                 return -EPERM;
2523
2524         if (f2fs_readonly(sbi->sb))
2525                 return -EROFS;
2526
2527         if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2528                 f2fs_info(sbi, "Skipping Checkpoint. Checkpoints currently disabled.");
2529                 return -EINVAL;
2530         }
2531
2532         ret = mnt_want_write_file(filp);
2533         if (ret)
2534                 return ret;
2535
2536         ret = f2fs_sync_fs(sbi->sb, 1);
2537
2538         mnt_drop_write_file(filp);
2539         return ret;
2540 }
2541
2542 static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
2543                                         struct file *filp,
2544                                         struct f2fs_defragment *range)
2545 {
2546         struct inode *inode = file_inode(filp);
2547         struct f2fs_map_blocks map = { .m_next_extent = NULL,
2548                                         .m_seg_type = NO_CHECK_TYPE,
2549                                         .m_may_create = false };
2550         struct extent_info ei = {0, 0, 0};
2551         pgoff_t pg_start, pg_end, next_pgofs;
2552         unsigned int blk_per_seg = sbi->blocks_per_seg;
2553         unsigned int total = 0, sec_num;
2554         block_t blk_end = 0;
2555         bool fragmented = false;
2556         int err;
2557
2558         /* if in-place-update policy is enabled, don't waste time here */
2559         if (f2fs_should_update_inplace(inode, NULL))
2560                 return -EINVAL;
2561
2562         pg_start = range->start >> PAGE_SHIFT;
2563         pg_end = (range->start + range->len) >> PAGE_SHIFT;
2564
2565         f2fs_balance_fs(sbi, true);
2566
2567         inode_lock(inode);
2568
2569         /* write back all dirty pages in the range */
2570         err = filemap_write_and_wait_range(inode->i_mapping, range->start,
2571                                                 range->start + range->len - 1);
2572         if (err)
2573                 goto out;
2574
2575         /*
2576          * look up mapping info in the extent cache; skip defragmenting if
2577          * the physical block addresses are contiguous.
2578          */
2579         if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
2580                 if (ei.fofs + ei.len >= pg_end)
2581                         goto out;
2582         }
2583
2584         map.m_lblk = pg_start;
2585         map.m_next_pgofs = &next_pgofs;
2586
2587         /*
2588          * look up mapping info in the dnode page cache; skip defragmenting
2589          * if all physical block addresses are contiguous, even if there are
2590          * holes in the logical blocks.
2591          */
2592         while (map.m_lblk < pg_end) {
2593                 map.m_len = pg_end - map.m_lblk;
2594                 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
2595                 if (err)
2596                         goto out;
2597
2598                 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
2599                         map.m_lblk = next_pgofs;
2600                         continue;
2601                 }
2602
2603                 if (blk_end && blk_end != map.m_pblk)
2604                         fragmented = true;
2605
2606                 /* record the total count of blocks that we're going to move */
2607                 total += map.m_len;
2608
2609                 blk_end = map.m_pblk + map.m_len;
2610
2611                 map.m_lblk += map.m_len;
2612         }
2613
2614         if (!fragmented) {
2615                 total = 0;
2616                 goto out;
2617         }
2618
2619         sec_num = DIV_ROUND_UP(total, BLKS_PER_SEC(sbi));
2620
2621         /*
2622          * make sure there are enough free sections for LFS allocation; this
2623          * avoids defragmentation running in SSR mode when free sections are
2624          * being allocated intensively
2625          */
2626         if (has_not_enough_free_secs(sbi, 0, sec_num)) {
2627                 err = -EAGAIN;
2628                 goto out;
2629         }
2630
2631         map.m_lblk = pg_start;
2632         map.m_len = pg_end - pg_start;
2633         total = 0;
2634
2635         while (map.m_lblk < pg_end) {
2636                 pgoff_t idx;
2637                 int cnt = 0;
2638
2639 do_map:
2640                 map.m_len = pg_end - map.m_lblk;
2641                 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
2642                 if (err)
2643                         goto clear_out;
2644
2645                 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
2646                         map.m_lblk = next_pgofs;
2647                         goto check;
2648                 }
2649
2650                 set_inode_flag(inode, FI_DO_DEFRAG);
2651
2652                 idx = map.m_lblk;
2653                 while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
2654                         struct page *page;
2655
2656                         page = f2fs_get_lock_data_page(inode, idx, true);
2657                         if (IS_ERR(page)) {
2658                                 err = PTR_ERR(page);
2659                                 goto clear_out;
2660                         }
2661
2662                         set_page_dirty(page);
2663                         f2fs_put_page(page, 1);
2664
2665                         idx++;
2666                         cnt++;
2667                         total++;
2668                 }
2669
2670                 map.m_lblk = idx;
2671 check:
2672                 if (map.m_lblk < pg_end && cnt < blk_per_seg)
2673                         goto do_map;
2674
2675                 clear_inode_flag(inode, FI_DO_DEFRAG);
2676
2677                 err = filemap_fdatawrite(inode->i_mapping);
2678                 if (err)
2679                         goto out;
2680         }
2681 clear_out:
2682         clear_inode_flag(inode, FI_DO_DEFRAG);
2683 out:
2684         inode_unlock(inode);
2685         if (!err)
2686                 range->len = (u64)total << PAGE_SHIFT;
2687         return err;
2688 }
2689
2690 static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
2691 {
2692         struct inode *inode = file_inode(filp);
2693         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2694         struct f2fs_defragment range;
2695         int err;
2696
2697         if (!capable(CAP_SYS_ADMIN))
2698                 return -EPERM;
2699
2700         if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
2701                 return -EINVAL;
2702
2703         if (f2fs_readonly(sbi->sb))
2704                 return -EROFS;
2705
2706         if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
2707                                                         sizeof(range)))
2708                 return -EFAULT;
2709
2710         /* verify alignment of offset & size */
2711         if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
2712                 return -EINVAL;
2713
2714         if (unlikely((range.start + range.len) >> PAGE_SHIFT >
2715                                         max_file_blocks(inode)))
2716                 return -EINVAL;
2717
2718         err = mnt_want_write_file(filp);
2719         if (err)
2720                 return err;
2721
2722         err = f2fs_defragment_range(sbi, filp, &range);
2723         mnt_drop_write_file(filp);
2724
2725         f2fs_update_time(sbi, REQ_TIME);
2726         if (err < 0)
2727                 return err;
2728
2729         if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
2730                                                         sizeof(range)))
2731                 return -EFAULT;
2732
2733         return 0;
2734 }
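
/*
 * Example (userspace, illustrative sketch): defragmenting the first 64MB
 * of a file.  start and len must be F2FS_BLKSIZE-aligned, and on success
 * range.len is rewritten to the number of bytes queued for relocation.
 * The 64MB figure is an arbitrary choice for the sketch.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/f2fs.h>
 *
 *	int defrag_head(int fd)
 *	{
 *		struct f2fs_defragment range = {
 *			.start = 0,
 *			.len = 64ULL << 20,
 *		};
 *
 *		return ioctl(fd, F2FS_IOC_DEFRAGMENT, &range);
 *	}
 */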
2735
2736 static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
2737                         struct file *file_out, loff_t pos_out, size_t len)
2738 {
2739         struct inode *src = file_inode(file_in);
2740         struct inode *dst = file_inode(file_out);
2741         struct f2fs_sb_info *sbi = F2FS_I_SB(src);
2742         size_t olen = len, dst_max_i_size = 0;
2743         size_t dst_osize;
2744         int ret;
2745
2746         if (file_in->f_path.mnt != file_out->f_path.mnt ||
2747                                 src->i_sb != dst->i_sb)
2748                 return -EXDEV;
2749
2750         if (unlikely(f2fs_readonly(src->i_sb)))
2751                 return -EROFS;
2752
2753         if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
2754                 return -EINVAL;
2755
2756         if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst))
2757                 return -EOPNOTSUPP;
2758
2759         if (pos_out < 0 || pos_in < 0)
2760                 return -EINVAL;
2761
2762         if (src == dst) {
2763                 if (pos_in == pos_out)
2764                         return 0;
2765                 if (pos_out > pos_in && pos_out < pos_in + len)
2766                         return -EINVAL;
2767         }
2768
2769         inode_lock(src);
2770         if (src != dst) {
2771                 ret = -EBUSY;
2772                 if (!inode_trylock(dst))
2773                         goto out;
2774         }
2775
2776         ret = -EINVAL;
2777         if (pos_in + len > src->i_size || pos_in + len < pos_in)
2778                 goto out_unlock;
2779         if (len == 0)
2780                 olen = len = src->i_size - pos_in;
2781         if (pos_in + len == src->i_size)
2782                 len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
2783         if (len == 0) {
2784                 ret = 0;
2785                 goto out_unlock;
2786         }
2787
2788         dst_osize = dst->i_size;
2789         if (pos_out + olen > dst->i_size)
2790                 dst_max_i_size = pos_out + olen;
2791
2792         /* verify the end result is block aligned */
2793         if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
2794                         !IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
2795                         !IS_ALIGNED(pos_out, F2FS_BLKSIZE))
2796                 goto out_unlock;
2797
2798         ret = f2fs_convert_inline_inode(src);
2799         if (ret)
2800                 goto out_unlock;
2801
2802         ret = f2fs_convert_inline_inode(dst);
2803         if (ret)
2804                 goto out_unlock;
2805
2806         /* write out all dirty pages from offset */
2807         ret = filemap_write_and_wait_range(src->i_mapping,
2808                                         pos_in, pos_in + len);
2809         if (ret)
2810                 goto out_unlock;
2811
2812         ret = filemap_write_and_wait_range(dst->i_mapping,
2813                                         pos_out, pos_out + len);
2814         if (ret)
2815                 goto out_unlock;
2816
2817         f2fs_balance_fs(sbi, true);
2818
2819         down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2820         if (src != dst) {
2821                 ret = -EBUSY;
2822                 if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
2823                         goto out_src;
2824         }
2825
2826         f2fs_lock_op(sbi);
2827         ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
2828                                 pos_out >> F2FS_BLKSIZE_BITS,
2829                                 len >> F2FS_BLKSIZE_BITS, false);
2830
2831         if (!ret) {
2832                 if (dst_max_i_size)
2833                         f2fs_i_size_write(dst, dst_max_i_size);
2834                 else if (dst_osize != dst->i_size)
2835                         f2fs_i_size_write(dst, dst_osize);
2836         }
2837         f2fs_unlock_op(sbi);
2838
2839         if (src != dst)
2840                 up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
2841 out_src:
2842         up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2843 out_unlock:
2844         if (src != dst)
2845                 inode_unlock(dst);
2846 out:
2847         inode_unlock(src);
2848         return ret;
2849 }
2850
2851 static int __f2fs_ioc_move_range(struct file *filp,
2852                                 struct f2fs_move_range *range)
2853 {
2854         struct fd dst;
2855         int err;
2856
2857         if (!(filp->f_mode & FMODE_READ) ||
2858                         !(filp->f_mode & FMODE_WRITE))
2859                 return -EBADF;
2860
2861         dst = fdget(range->dst_fd);
2862         if (!dst.file)
2863                 return -EBADF;
2864
2865         if (!(dst.file->f_mode & FMODE_WRITE)) {
2866                 err = -EBADF;
2867                 goto err_out;
2868         }
2869
2870         err = mnt_want_write_file(filp);
2871         if (err)
2872                 goto err_out;
2873
2874         err = f2fs_move_file_range(filp, range->pos_in, dst.file,
2875                                         range->pos_out, range->len);
2876
2877         mnt_drop_write_file(filp);
2878 err_out:
2879         fdput(dst);
2880         return err;
2881 }
2882
2883 static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
2884 {
2885         struct f2fs_move_range range;
2886
2887         if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
2888                                                         sizeof(range)))
2889                 return -EFAULT;
2890         return __f2fs_ioc_move_range(filp, &range);
2891 }
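
/*
 * Example (userspace, illustrative sketch): moving len bytes from the head
 * of one f2fs file to the head of another by exchanging block pointers.
 * Positions and length must be block-aligned and the source fd must be
 * open for both read and write, as checked in __f2fs_ioc_move_range()
 * above.  The helper name is an assumption for the sketch.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/f2fs.h>
 *
 *	int move_blocks(int src_fd, int dst_fd, __u64 blk_aligned_len)
 *	{
 *		struct f2fs_move_range range = {
 *			.dst_fd = dst_fd,
 *			.pos_in = 0,
 *			.pos_out = 0,
 *			.len = blk_aligned_len,
 *		};
 *
 *		return ioctl(src_fd, F2FS_IOC_MOVE_RANGE, &range);
 *	}
 */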
2892
2893 static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
2894 {
2895         struct inode *inode = file_inode(filp);
2896         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2897         struct sit_info *sm = SIT_I(sbi);
2898         unsigned int start_segno = 0, end_segno = 0;
2899         unsigned int dev_start_segno = 0, dev_end_segno = 0;
2900         struct f2fs_flush_device range;
2901         int ret;
2902
2903         if (!capable(CAP_SYS_ADMIN))
2904                 return -EPERM;
2905
2906         if (f2fs_readonly(sbi->sb))
2907                 return -EROFS;
2908
2909         if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2910                 return -EINVAL;
2911
2912         if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
2913                                                         sizeof(range)))
2914                 return -EFAULT;
2915
2916         if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num ||
2917                         __is_large_section(sbi)) {
2918                 f2fs_warn(sbi, "Can't flush %u in %d for segs_per_sec %u != 1",
2919                           range.dev_num, sbi->s_ndevs, sbi->segs_per_sec);
2920                 return -EINVAL;
2921         }
2922
2923         ret = mnt_want_write_file(filp);
2924         if (ret)
2925                 return ret;
2926
2927         if (range.dev_num != 0)
2928                 dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
2929         dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);
2930
2931         start_segno = sm->last_victim[FLUSH_DEVICE];
2932         if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
2933                 start_segno = dev_start_segno;
2934         end_segno = min(start_segno + range.segments, dev_end_segno);
2935
2936         while (start_segno < end_segno) {
2937                 if (!down_write_trylock(&sbi->gc_lock)) {
2938                         ret = -EBUSY;
2939                         goto out;
2940                 }
2941                 sm->last_victim[GC_CB] = end_segno + 1;
2942                 sm->last_victim[GC_GREEDY] = end_segno + 1;
2943                 sm->last_victim[ALLOC_NEXT] = end_segno + 1;
2944                 ret = f2fs_gc(sbi, true, true, true, start_segno);
2945                 if (ret == -EAGAIN)
2946                         ret = 0;
2947                 else if (ret < 0)
2948                         break;
2949                 start_segno++;
2950         }
2951 out:
2952         mnt_drop_write_file(filp);
2953         return ret;
2954 }
2955
2956 static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
2957 {
2958         struct inode *inode = file_inode(filp);
2959         u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);
2960
2961         /* Always advertise atomic-write support, matching SQLite behavior on Android. */
2962         sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;
2963
2964         return put_user(sb_feature, (u32 __user *)arg);
2965 }
2966
2967 #ifdef CONFIG_QUOTA
2968 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
2969 {
2970         struct dquot *transfer_to[MAXQUOTAS] = {};
2971         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2972         struct super_block *sb = sbi->sb;
2973         int err = 0;
2974
2975         transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
2976         if (!IS_ERR(transfer_to[PRJQUOTA])) {
2977                 err = __dquot_transfer(inode, transfer_to);
2978                 if (err)
2979                         set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2980                 dqput(transfer_to[PRJQUOTA]);
2981         }
2982         return err;
2983 }
2984
2985 static int f2fs_ioc_setproject(struct inode *inode, __u32 projid)
2986 {
2987         struct f2fs_inode_info *fi = F2FS_I(inode);
2988         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2989         struct page *ipage;
2990         kprojid_t kprojid;
2991         int err;
2992
2993         if (!f2fs_sb_has_project_quota(sbi)) {
2994                 if (projid != F2FS_DEF_PROJID)
2995                         return -EOPNOTSUPP;
2996                 else
2997                         return 0;
2998         }
2999
3000         if (!f2fs_has_extra_attr(inode))
3001                 return -EOPNOTSUPP;
3002
3003         kprojid = make_kprojid(&init_user_ns, (projid_t)projid);
3004
3005         if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
3006                 return 0;
3007
3008         err = -EPERM;
3009         /* Is it a quota file? Do not allow users to mess with it */
3010         if (IS_NOQUOTA(inode))
3011                 return err;
3012
3013         ipage = f2fs_get_node_page(sbi, inode->i_ino);
3014         if (IS_ERR(ipage))
3015                 return PTR_ERR(ipage);
3016
3017         if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize,
3018                                                                 i_projid)) {
3019                 err = -EOVERFLOW;
3020                 f2fs_put_page(ipage, 1);
3021                 return err;
3022         }
3023         f2fs_put_page(ipage, 1);
3024
3025         err = dquot_initialize(inode);
3026         if (err)
3027                 return err;
3028
3029         f2fs_lock_op(sbi);
3030         err = f2fs_transfer_project_quota(inode, kprojid);
3031         if (err)
3032                 goto out_unlock;
3033
3034         F2FS_I(inode)->i_projid = kprojid;
3035         inode->i_ctime = current_time(inode);
3036         f2fs_mark_inode_dirty_sync(inode, true);
3037 out_unlock:
3038         f2fs_unlock_op(sbi);
3039         return err;
3040 }
3041 #else
3042 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
3043 {
3044         return 0;
3045 }
3046
3047 static int f2fs_ioc_setproject(struct inode *inode, __u32 projid)
3048 {
3049         if (projid != F2FS_DEF_PROJID)
3050                 return -EOPNOTSUPP;
3051         return 0;
3052 }
3053 #endif
3054
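/*
 * f2fs_ioc_setproject() is reached through f2fs_fileattr_set() below, i.e.
 * via the generic FS_IOC_FSSETXATTR path handled by the VFS. A minimal
 * userspace sketch (error handling omitted):
 *
 *        #include <linux/fs.h>
 *        #include <sys/ioctl.h>
 *
 *        struct fsxattr fa;
 *
 *        ioctl(fd, FS_IOC_FSGETXATTR, &fa);
 *        fa.fsx_projid = 42;
 *        ioctl(fd, FS_IOC_FSSETXATTR, &fa);
 */
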
3055 int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
3056 {
3057         struct inode *inode = d_inode(dentry);
3058         struct f2fs_inode_info *fi = F2FS_I(inode);
3059         u32 fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
3060
3061         if (IS_ENCRYPTED(inode))
3062                 fsflags |= FS_ENCRYPT_FL;
3063         if (IS_VERITY(inode))
3064                 fsflags |= FS_VERITY_FL;
3065         if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
3066                 fsflags |= FS_INLINE_DATA_FL;
3067         if (is_inode_flag_set(inode, FI_PIN_FILE))
3068                 fsflags |= FS_NOCOW_FL;
3069
3070         fileattr_fill_flags(fa, fsflags & F2FS_GETTABLE_FS_FL);
3071
3072         if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
3073                 fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid);
3074
3075         return 0;
3076 }
3077
3078 int f2fs_fileattr_set(struct user_namespace *mnt_userns,
3079                       struct dentry *dentry, struct fileattr *fa)
3080 {
3081         struct inode *inode = d_inode(dentry);
3082         u32 fsflags = fa->flags, mask = F2FS_SETTABLE_FS_FL;
3083         u32 iflags;
3084         int err;
3085
3086         if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
3087                 return -EIO;
3088         if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
3089                 return -ENOSPC;
3090         if (fsflags & ~F2FS_GETTABLE_FS_FL)
3091                 return -EOPNOTSUPP;
3092         fsflags &= F2FS_SETTABLE_FS_FL;
3093         if (!fa->flags_valid)
3094                 mask &= FS_COMMON_FL;
3095
3096         iflags = f2fs_fsflags_to_iflags(fsflags);
3097         if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
3098                 return -EOPNOTSUPP;
3099
3100         err = f2fs_setflags_common(inode, iflags, f2fs_fsflags_to_iflags(mask));
3101         if (!err)
3102                 err = f2fs_ioc_setproject(inode, fa->fsx_projid);
3103
3104         return err;
3105 }
3106
3107 int f2fs_pin_file_control(struct inode *inode, bool inc)
3108 {
3109         struct f2fs_inode_info *fi = F2FS_I(inode);
3110         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3111
3112         /* Use i_gc_failures of a regular file as a risk signal for unpinning. */
3113         if (inc)
3114                 f2fs_i_gc_failures_write(inode,
3115                                 fi->i_gc_failures[GC_FAILURE_PIN] + 1);
3116
3117         if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
3118                 f2fs_warn(sbi, "%s: Re-enable GC for ino %lx after %x GC trials",
3119                           __func__, inode->i_ino,
3120                           fi->i_gc_failures[GC_FAILURE_PIN]);
3121                 clear_inode_flag(inode, FI_PIN_FILE);
3122                 return -EAGAIN;
3123         }
3124         return 0;
3125 }
3126
3127 static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
3128 {
3129         struct inode *inode = file_inode(filp);
3130         __u32 pin;
3131         int ret = 0;
3132
3133         if (get_user(pin, (__u32 __user *)arg))
3134                 return -EFAULT;
3135
3136         if (!S_ISREG(inode->i_mode))
3137                 return -EINVAL;
3138
3139         if (f2fs_readonly(F2FS_I_SB(inode)->sb))
3140                 return -EROFS;
3141
3142         ret = mnt_want_write_file(filp);
3143         if (ret)
3144                 return ret;
3145
3146         inode_lock(inode);
3147
3148         if (f2fs_should_update_outplace(inode, NULL)) {
3149                 ret = -EINVAL;
3150                 goto out;
3151         }
3152
3153         if (!pin) {
3154                 clear_inode_flag(inode, FI_PIN_FILE);
3155                 f2fs_i_gc_failures_write(inode, 0);
3156                 goto done;
3157         }
3158
3159         if (f2fs_pin_file_control(inode, false)) {
3160                 ret = -EAGAIN;
3161                 goto out;
3162         }
3163
3164         ret = f2fs_convert_inline_inode(inode);
3165         if (ret)
3166                 goto out;
3167
3168         if (!f2fs_disable_compressed_file(inode)) {
3169                 ret = -EOPNOTSUPP;
3170                 goto out;
3171         }
3172
3173         set_inode_flag(inode, FI_PIN_FILE);
3174         ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3175 done:
3176         f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3177 out:
3178         inode_unlock(inode);
3179         mnt_drop_write_file(filp);
3180         return ret;
3181 }
3182
3183 static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
3184 {
3185         struct inode *inode = file_inode(filp);
3186         __u32 pin = 0;
3187
3188         if (is_inode_flag_set(inode, FI_PIN_FILE))
3189                 pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3190         return put_user(pin, (u32 __user *)arg);
3191 }
3192
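/*
 * Example: pinning a file so its blocks stay in place, then reading the pin
 * state back (a minimal sketch; when pinning, the ioctl returns the file's
 * current GC-failure count on success):
 *
 *        #include <linux/f2fs.h>
 *        #include <sys/ioctl.h>
 *
 *        __u32 pin = 1;
 *
 *        ioctl(fd, F2FS_IOC_SET_PIN_FILE, &pin);
 *        ioctl(fd, F2FS_IOC_GET_PIN_FILE, &pin);
 */
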
3193 int f2fs_precache_extents(struct inode *inode)
3194 {
3195         struct f2fs_inode_info *fi = F2FS_I(inode);
3196         struct f2fs_map_blocks map;
3197         pgoff_t m_next_extent;
3198         loff_t end;
3199         int err;
3200
3201         if (is_inode_flag_set(inode, FI_NO_EXTENT))
3202                 return -EOPNOTSUPP;
3203
3204         map.m_lblk = 0;
3205         map.m_next_pgofs = NULL;
3206         map.m_next_extent = &m_next_extent;
3207         map.m_seg_type = NO_CHECK_TYPE;
3208         map.m_may_create = false;
3209         end = max_file_blocks(inode);
3210
3211         while (map.m_lblk < end) {
3212                 map.m_len = end - map.m_lblk;
3213
3214                 down_write(&fi->i_gc_rwsem[WRITE]);
3215                 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
3216                 up_write(&fi->i_gc_rwsem[WRITE]);
3217                 if (err)
3218                         return err;
3219
3220                 map.m_lblk = m_next_extent;
3221         }
3222
3223         return 0;
3224 }
3225
3226 static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
3227 {
3228         return f2fs_precache_extents(file_inode(filp));
3229 }
3230
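/*
 * Example: populating the extent cache ahead of time so later lookups can be
 * served without walking node pages (a minimal sketch; the ioctl takes no
 * argument):
 *
 *        #include <linux/f2fs.h>
 *        #include <sys/ioctl.h>
 *
 *        ioctl(fd, F2FS_IOC_PRECACHE_EXTENTS);
 */
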
3231 static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
3232 {
3233         struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
3234         __u64 block_count;
3235
3236         if (!capable(CAP_SYS_ADMIN))
3237                 return -EPERM;
3238
3239         if (f2fs_readonly(sbi->sb))
3240                 return -EROFS;
3241
3242         if (copy_from_user(&block_count, (void __user *)arg,
3243                            sizeof(block_count)))
3244                 return -EFAULT;
3245
3246         return f2fs_resize_fs(sbi, block_count);
3247 }
3248
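/*
 * Example: resizing the filesystem to a new total block count (a minimal
 * sketch; requires CAP_SYS_ADMIN on a writable mount, and the count is in
 * F2FS blocks — the value below is purely illustrative):
 *
 *        #include <linux/f2fs.h>
 *        #include <sys/ioctl.h>
 *
 *        __u64 block_count = 4 * 1024 * 1024;
 *
 *        ioctl(fd, F2FS_IOC_RESIZE_FS, &block_count);
 */
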
3249 static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg)
3250 {
3251         struct inode *inode = file_inode(filp);
3252
3253         f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3254
3255         if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) {
3256                 f2fs_warn(F2FS_I_SB(inode),
3257                           "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem",
3258                           inode->i_ino);
3259                 return -EOPNOTSUPP;
3260         }
3261
3262         return fsverity_ioctl_enable(filp, (const void __user *)arg);
3263 }
3264
3265 static int f2fs_ioc_measure_verity(struct file *filp, unsigned long arg)
3266 {
3267         if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
3268                 return -EOPNOTSUPP;
3269
3270         return fsverity_ioctl_measure(filp, (void __user *)arg);
3271 }
3272
3273 static int f2fs_ioc_read_verity_metadata(struct file *filp, unsigned long arg)
3274 {
3275         if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
3276                 return -EOPNOTSUPP;
3277
3278         return fsverity_ioctl_read_metadata(filp, (const void __user *)arg);
3279 }
3280
3281 static int f2fs_ioc_getfslabel(struct file *filp, unsigned long arg)
3282 {
3283         struct inode *inode = file_inode(filp);
3284         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3285         char *vbuf;
3286         int count;
3287         int err = 0;
3288
3289         vbuf = f2fs_kzalloc(sbi, MAX_VOLUME_NAME, GFP_KERNEL);
3290         if (!vbuf)
3291                 return -ENOMEM;
3292
3293         down_read(&sbi->sb_lock);
3294         count = utf16s_to_utf8s(sbi->raw_super->volume_name,
3295                         ARRAY_SIZE(sbi->raw_super->volume_name),
3296                         UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME);
3297         up_read(&sbi->sb_lock);
3298
3299         if (copy_to_user((char __user *)arg, vbuf,
3300                                 min(FSLABEL_MAX, count)))
3301                 err = -EFAULT;
3302
3303         kfree(vbuf);
3304         return err;
3305 }
3306
3307 static int f2fs_ioc_setfslabel(struct file *filp, unsigned long arg)
3308 {
3309         struct inode *inode = file_inode(filp);
3310         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3311         char *vbuf;
3312         int err = 0;
3313
3314         if (!capable(CAP_SYS_ADMIN))
3315                 return -EPERM;
3316
3317         vbuf = strndup_user((const char __user *)arg, FSLABEL_MAX);
3318         if (IS_ERR(vbuf))
3319                 return PTR_ERR(vbuf);
3320
3321         err = mnt_want_write_file(filp);
3322         if (err)
3323                 goto out;
3324
3325         down_write(&sbi->sb_lock);
3326
3327         memset(sbi->raw_super->volume_name, 0,
3328                         sizeof(sbi->raw_super->volume_name));
3329         utf8s_to_utf16s(vbuf, strlen(vbuf), UTF16_LITTLE_ENDIAN,
3330                         sbi->raw_super->volume_name,
3331                         ARRAY_SIZE(sbi->raw_super->volume_name));
3332
3333         err = f2fs_commit_super(sbi, false);
3334
3335         up_write(&sbi->sb_lock);
3336
3337         mnt_drop_write_file(filp);
3338 out:
3339         kfree(vbuf);
3340         return err;
3341 }
3342
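/*
 * Example: reading and updating the volume label through the generic label
 * ioctls (a minimal sketch; FSLABEL_MAX comes from <linux/fs.h>):
 *
 *        #include <linux/fs.h>
 *        #include <sys/ioctl.h>
 *
 *        char label[FSLABEL_MAX] = "";
 *
 *        ioctl(fd, FS_IOC_GETFSLABEL, label);
 *        ioctl(fd, FS_IOC_SETFSLABEL, "backup0");
 */
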
3343 static int f2fs_get_compress_blocks(struct file *filp, unsigned long arg)
3344 {
3345         struct inode *inode = file_inode(filp);
3346         __u64 blocks;
3347
3348         if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3349                 return -EOPNOTSUPP;
3350
3351         if (!f2fs_compressed_file(inode))
3352                 return -EINVAL;
3353
3354         blocks = atomic_read(&F2FS_I(inode)->i_compr_blocks);
3355         return put_user(blocks, (u64 __user *)arg);
3356 }
3357
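/*
 * Example: reading the per-inode compressed block count (i_compr_blocks)
 * from userspace (a minimal sketch; the file must be a compressed file):
 *
 *        #include <linux/f2fs.h>
 *        #include <sys/ioctl.h>
 *
 *        __u64 blocks;
 *
 *        ioctl(fd, F2FS_IOC_GET_COMPRESS_BLOCKS, &blocks);
 */
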
3358 static int release_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
3359 {
3360         struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3361         unsigned int released_blocks = 0;
3362         int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3363         block_t blkaddr;
3364         int i;
3365
3366         for (i = 0; i < count; i++) {
3367                 blkaddr = data_blkaddr(dn->inode, dn->node_page,
3368                                                 dn->ofs_in_node + i);
3369
3370                 if (!__is_valid_data_blkaddr(blkaddr))
3371                         continue;
3372                 if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
3373                                         DATA_GENERIC_ENHANCE)))
3374                         return -EFSCORRUPTED;
3375         }
3376
3377         while (count) {
3378                 int compr_blocks = 0;
3379
3380                 for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
3381                         blkaddr = f2fs_data_blkaddr(dn);
3382
3383                         if (i == 0) {
3384                                 if (blkaddr == COMPRESS_ADDR)
3385                                         continue;
3386                                 dn->ofs_in_node += cluster_size;
3387                                 goto next;
3388                         }
3389
3390                         if (__is_valid_data_blkaddr(blkaddr))
3391                                 compr_blocks++;
3392
3393                         if (blkaddr != NEW_ADDR)
3394                                 continue;
3395
3396                         dn->data_blkaddr = NULL_ADDR;
3397                         f2fs_set_data_blkaddr(dn);
3398                 }
3399
3400                 f2fs_i_compr_blocks_update(dn->inode, compr_blocks, false);
3401                 dec_valid_block_count(sbi, dn->inode,
3402                                         cluster_size - compr_blocks);
3403
3404                 released_blocks += cluster_size - compr_blocks;
3405 next:
3406                 count -= cluster_size;
3407         }
3408
3409         return released_blocks;
3410 }
3411
3412 static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
3413 {
3414         struct inode *inode = file_inode(filp);
3415         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3416         pgoff_t page_idx = 0, last_idx;
3417         unsigned int released_blocks = 0;
3418         int ret;
3419         int writecount;
3420
3421         if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3422                 return -EOPNOTSUPP;
3423
3424         if (!f2fs_compressed_file(inode))
3425                 return -EINVAL;
3426
3427         if (f2fs_readonly(sbi->sb))
3428                 return -EROFS;
3429
3430         ret = mnt_want_write_file(filp);
3431         if (ret)
3432                 return ret;
3433
3434         f2fs_balance_fs(F2FS_I_SB(inode), true);
3435
3436         inode_lock(inode);
3437
3438         writecount = atomic_read(&inode->i_writecount);
3439         if ((filp->f_mode & FMODE_WRITE && writecount != 1) ||
3440                         (!(filp->f_mode & FMODE_WRITE) && writecount)) {
3441                 ret = -EBUSY;
3442                 goto out;
3443         }
3444
3445         if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
3446                 ret = -EINVAL;
3447                 goto out;
3448         }
3449
3450         ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
3451         if (ret)
3452                 goto out;
3453
3454         set_inode_flag(inode, FI_COMPRESS_RELEASED);
3455         inode->i_ctime = current_time(inode);
3456         f2fs_mark_inode_dirty_sync(inode, true);
3457
3458         if (!atomic_read(&F2FS_I(inode)->i_compr_blocks))
3459                 goto out;
3460
3461         down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3462         down_write(&F2FS_I(inode)->i_mmap_sem);
3463
3464         last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3465
3466         while (page_idx < last_idx) {
3467                 struct dnode_of_data dn;
3468                 pgoff_t end_offset, count;
3469
3470                 set_new_dnode(&dn, inode, NULL, NULL, 0);
3471                 ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
3472                 if (ret) {
3473                         if (ret == -ENOENT) {
3474                                 page_idx = f2fs_get_next_page_offset(&dn,
3475                                                                 page_idx);
3476                                 ret = 0;
3477                                 continue;
3478                         }
3479                         break;
3480                 }
3481
3482                 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3483                 count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
3484                 count = round_up(count, F2FS_I(inode)->i_cluster_size);
3485
3486                 ret = release_compress_blocks(&dn, count);
3487
3488                 f2fs_put_dnode(&dn);
3489
3490                 if (ret < 0)
3491                         break;
3492
3493                 page_idx += count;
3494                 released_blocks += ret;
3495         }
3496
3497         up_write(&F2FS_I(inode)->i_mmap_sem);
3498         up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3499 out:
3500         inode_unlock(inode);
3501
3502         mnt_drop_write_file(filp);
3503
3504         if (ret >= 0) {
3505                 ret = put_user(released_blocks, (u64 __user *)arg);
3506         } else if (released_blocks &&
3507                         atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
3508                 set_sbi_flag(sbi, SBI_NEED_FSCK);
3509                 f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
3510                         "iblocks=%llu, released=%u, compr_blocks=%u, "
3511                         "run fsck to fix.",
3512                         __func__, inode->i_ino, inode->i_blocks,
3513                         released_blocks,
3514                         atomic_read(&F2FS_I(inode)->i_compr_blocks));
3515         }
3516
3517         return ret;
3518 }
3519
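/*
 * Within a compressed cluster, slot 0 of the block-address array holds the
 * COMPRESS_ADDR marker and the remaining slots are either valid data blocks
 * or NEW_ADDR reservations; releasing converts those reservations to
 * NULL_ADDR and returns the blocks to the free space pool, after which the
 * file cannot be written until the blocks are reserved again. Example usage
 * (a minimal sketch; the released count comes back through the argument):
 *
 *        __u64 released;
 *
 *        ioctl(fd, F2FS_IOC_RELEASE_COMPRESS_BLOCKS, &released);
 */
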
3520 static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
3521 {
3522         struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3523         unsigned int reserved_blocks = 0;
3524         int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3525         block_t blkaddr;
3526         int i;
3527
3528         for (i = 0; i < count; i++) {
3529                 blkaddr = data_blkaddr(dn->inode, dn->node_page,
3530                                                 dn->ofs_in_node + i);
3531
3532                 if (!__is_valid_data_blkaddr(blkaddr))
3533                         continue;
3534                 if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
3535                                         DATA_GENERIC_ENHANCE)))
3536                         return -EFSCORRUPTED;
3537         }
3538
3539         while (count) {
3540                 int compr_blocks = 0;
3541                 blkcnt_t reserved;
3542                 int ret;
3543
3544                 for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
3545                         blkaddr = f2fs_data_blkaddr(dn);
3546
3547                         if (i == 0) {
3548                                 if (blkaddr == COMPRESS_ADDR)
3549                                         continue;
3550                                 dn->ofs_in_node += cluster_size;
3551                                 goto next;
3552                         }
3553
3554                         if (__is_valid_data_blkaddr(blkaddr)) {
3555                                 compr_blocks++;
3556                                 continue;
3557                         }
3558
3559                         dn->data_blkaddr = NEW_ADDR;
3560                         f2fs_set_data_blkaddr(dn);
3561                 }
3562
3563                 reserved = cluster_size - compr_blocks;
3564                 ret = inc_valid_block_count(sbi, dn->inode, &reserved);
3565                 if (ret)
3566                         return ret;
3567
3568                 if (reserved != cluster_size - compr_blocks)
3569                         return -ENOSPC;
3570
3571                 f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true);
3572
3573                 reserved_blocks += reserved;
3574 next:
3575                 count -= cluster_size;
3576         }
3577
3578         return reserved_blocks;
3579 }
3580
3581 static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
3582 {
3583         struct inode *inode = file_inode(filp);
3584         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3585         pgoff_t page_idx = 0, last_idx;
3586         unsigned int reserved_blocks = 0;
3587         int ret;
3588
3589         if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3590                 return -EOPNOTSUPP;
3591
3592         if (!f2fs_compressed_file(inode))
3593                 return -EINVAL;
3594
3595         if (f2fs_readonly(sbi->sb))
3596                 return -EROFS;
3597
3598         ret = mnt_want_write_file(filp);
3599         if (ret)
3600                 return ret;
3601
3602         if (atomic_read(&F2FS_I(inode)->i_compr_blocks))
3603                 goto out;
3604
3605         f2fs_balance_fs(F2FS_I_SB(inode), true);
3606
3607         inode_lock(inode);
3608
3609         if (!is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
3610                 ret = -EINVAL;
3611                 goto unlock_inode;
3612         }
3613
3614         down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3615         down_write(&F2FS_I(inode)->i_mmap_sem);
3616
3617         last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3618
3619         while (page_idx < last_idx) {
3620                 struct dnode_of_data dn;
3621                 pgoff_t end_offset, count;
3622
3623                 set_new_dnode(&dn, inode, NULL, NULL, 0);
3624                 ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
3625                 if (ret) {
3626                         if (ret == -ENOENT) {
3627                                 page_idx = f2fs_get_next_page_offset(&dn,
3628                                                                 page_idx);
3629                                 ret = 0;
3630                                 continue;
3631                         }
3632                         break;
3633                 }
3634
3635                 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3636                 count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
3637                 count = round_up(count, F2FS_I(inode)->i_cluster_size);
3638
3639                 ret = reserve_compress_blocks(&dn, count);
3640
3641                 f2fs_put_dnode(&dn);
3642
3643                 if (ret < 0)
3644                         break;
3645
3646                 page_idx += count;
3647                 reserved_blocks += ret;
3648         }
3649
3650         up_write(&F2FS_I(inode)->i_mmap_sem);
3651         up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3652
3653         if (ret >= 0) {
3654                 clear_inode_flag(inode, FI_COMPRESS_RELEASED);
3655                 inode->i_ctime = current_time(inode);
3656                 f2fs_mark_inode_dirty_sync(inode, true);
3657         }
3658 unlock_inode:
3659         inode_unlock(inode);
3660 out:
3661         mnt_drop_write_file(filp);
3662
3663         if (ret >= 0) {
3664                 ret = put_user(reserved_blocks, (u64 __user *)arg);
3665         } else if (reserved_blocks &&
3666                         atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
3667                 set_sbi_flag(sbi, SBI_NEED_FSCK);
3668         f2fs_warn(sbi, "%s: partial blocks were reserved i_ino=%lx "
3669                         "iblocks=%llu, reserved=%u, compr_blocks=%u, "
3670                         "run fsck to fix.",
3671                         __func__, inode->i_ino, inode->i_blocks,
3672                         reserved_blocks,
3673                         atomic_read(&F2FS_I(inode)->i_compr_blocks));
3674         }
3675
3676         return ret;
3677 }
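
/*
 * Example: the inverse of the release ioctl above, re-reserving space for a
 * file whose compressed blocks were released so it can be written again
 * (a minimal sketch):
 *
 *        __u64 reserved;
 *
 *        ioctl(fd, F2FS_IOC_RESERVE_COMPRESS_BLOCKS, &reserved);
 */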
3678
3679 static int f2fs_secure_erase(struct block_device *bdev, struct inode *inode,
3680                 pgoff_t off, block_t block, block_t len, u32 flags)
3681 {
3682         struct request_queue *q = bdev_get_queue(bdev);
3683         sector_t sector = SECTOR_FROM_BLOCK(block);
3684         sector_t nr_sects = SECTOR_FROM_BLOCK(len);
3685         int ret = 0;
3686
3687         if (!q)
3688                 return -ENXIO;
3689
3690         if (flags & F2FS_TRIM_FILE_DISCARD)
3691                 ret = blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOFS,
3692                                                 blk_queue_secure_erase(q) ?
3693                                                 BLKDEV_DISCARD_SECURE : 0);
3694
3695         if (!ret && (flags & F2FS_TRIM_FILE_ZEROOUT)) {
3696                 if (IS_ENCRYPTED(inode))
3697                         ret = fscrypt_zeroout_range(inode, off, block, len);
3698                 else
3699                         ret = blkdev_issue_zeroout(bdev, sector, nr_sects,
3700                                         GFP_NOFS, 0);
3701         }
3702
3703         return ret;
3704 }
3705
3706 static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
3707 {
3708         struct inode *inode = file_inode(filp);
3709         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3710         struct address_space *mapping = inode->i_mapping;
3711         struct block_device *prev_bdev = NULL;
3712         struct f2fs_sectrim_range range;
3713         pgoff_t index, pg_end, prev_index = 0;
3714         block_t prev_block = 0, len = 0;
3715         loff_t end_addr;
3716         bool to_end = false;
3717         int ret = 0;
3718
3719         if (!(filp->f_mode & FMODE_WRITE))
3720                 return -EBADF;
3721
3722         if (copy_from_user(&range, (struct f2fs_sectrim_range __user *)arg,
3723                                 sizeof(range)))
3724                 return -EFAULT;
3725
3726         if (range.flags == 0 || (range.flags & ~F2FS_TRIM_FILE_MASK) ||
3727                         !S_ISREG(inode->i_mode))
3728                 return -EINVAL;
3729
3730         if (((range.flags & F2FS_TRIM_FILE_DISCARD) &&
3731                         !f2fs_hw_support_discard(sbi)) ||
3732                         ((range.flags & F2FS_TRIM_FILE_ZEROOUT) &&
3733                          IS_ENCRYPTED(inode) && f2fs_is_multi_device(sbi)))
3734                 return -EOPNOTSUPP;
3735
3736         file_start_write(filp);
3737         inode_lock(inode);
3738
3739         if (f2fs_is_atomic_file(inode) || f2fs_compressed_file(inode) ||
3740                         range.start >= inode->i_size) {
3741                 ret = -EINVAL;
3742                 goto err;
3743         }
3744
3745         if (range.len == 0)
3746                 goto err;
3747
3748         if (inode->i_size - range.start > range.len) {
3749                 end_addr = range.start + range.len;
3750         } else {
3751                 end_addr = range.len == (u64)-1 ?
3752                         sbi->sb->s_maxbytes : inode->i_size;
3753                 to_end = true;
3754         }
3755
3756         if (!IS_ALIGNED(range.start, F2FS_BLKSIZE) ||
3757                         (!to_end && !IS_ALIGNED(end_addr, F2FS_BLKSIZE))) {
3758                 ret = -EINVAL;
3759                 goto err;
3760         }
3761
3762         index = F2FS_BYTES_TO_BLK(range.start);
3763         pg_end = DIV_ROUND_UP(end_addr, F2FS_BLKSIZE);
3764
3765         ret = f2fs_convert_inline_inode(inode);
3766         if (ret)
3767                 goto err;
3768
3769         down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3770         down_write(&F2FS_I(inode)->i_mmap_sem);
3771
3772         ret = filemap_write_and_wait_range(mapping, range.start,
3773                         to_end ? LLONG_MAX : end_addr - 1);
3774         if (ret)
3775                 goto out;
3776
3777         truncate_inode_pages_range(mapping, range.start,
3778                         to_end ? -1 : end_addr - 1);
3779
3780         while (index < pg_end) {
3781                 struct dnode_of_data dn;
3782                 pgoff_t end_offset, count;
3783                 int i;
3784
3785                 set_new_dnode(&dn, inode, NULL, NULL, 0);
3786                 ret = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
3787                 if (ret) {
3788                         if (ret == -ENOENT) {
3789                                 index = f2fs_get_next_page_offset(&dn, index);
3790                                 continue;
3791                         }
3792                         goto out;
3793                 }
3794
3795                 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3796                 count = min(end_offset - dn.ofs_in_node, pg_end - index);
3797                 for (i = 0; i < count; i++, index++, dn.ofs_in_node++) {
3798                         struct block_device *cur_bdev;
3799                         block_t blkaddr = f2fs_data_blkaddr(&dn);
3800
3801                         if (!__is_valid_data_blkaddr(blkaddr))
3802                                 continue;
3803
3804                         if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
3805                                                 DATA_GENERIC_ENHANCE)) {
3806                                 ret = -EFSCORRUPTED;
3807                                 f2fs_put_dnode(&dn);
3808                                 goto out;
3809                         }
3810
3811                         cur_bdev = f2fs_target_device(sbi, blkaddr, NULL);
3812                         if (f2fs_is_multi_device(sbi)) {
3813                                 int di = f2fs_target_device_index(sbi, blkaddr);
3814
3815                                 blkaddr -= FDEV(di).start_blk;
3816                         }
3817
3818                         if (len) {
3819                                 if (prev_bdev == cur_bdev &&
3820                                                 index == prev_index + len &&
3821                                                 blkaddr == prev_block + len) {
3822                                         len++;
3823                                 } else {
3824                                         ret = f2fs_secure_erase(prev_bdev,
3825                                                 inode, prev_index, prev_block,
3826                                                 len, range.flags);
3827                                         if (ret) {
3828                                                 f2fs_put_dnode(&dn);
3829                                                 goto out;
3830                                         }
3831
3832                                         len = 0;
3833                                 }
3834                         }
3835
3836                         if (!len) {
3837                                 prev_bdev = cur_bdev;
3838                                 prev_index = index;
3839                                 prev_block = blkaddr;
3840                                 len = 1;
3841                         }
3842                 }
3843
3844                 f2fs_put_dnode(&dn);
3845
3846                 if (fatal_signal_pending(current)) {
3847                         ret = -EINTR;
3848                         goto out;
3849                 }
3850                 cond_resched();
3851         }
3852
3853         if (len)
3854                 ret = f2fs_secure_erase(prev_bdev, inode, prev_index,
3855                                 prev_block, len, range.flags);
3856 out:
3857         up_write(&F2FS_I(inode)->i_mmap_sem);
3858         up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3859 err:
3860         inode_unlock(inode);
3861         file_end_write(filp);
3862
3863         return ret;
3864 }
3865
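/*
 * Example: securely erasing the first megabyte of a file (a minimal sketch;
 * start/len must be F2FS block aligned, and the flags may combine
 * F2FS_TRIM_FILE_DISCARD with F2FS_TRIM_FILE_ZEROOUT):
 *
 *        #include <linux/f2fs.h>
 *        #include <sys/ioctl.h>
 *
 *        struct f2fs_sectrim_range range = {
 *                .start = 0,
 *                .len = 1024 * 1024,
 *                .flags = F2FS_TRIM_FILE_DISCARD | F2FS_TRIM_FILE_ZEROOUT,
 *        };
 *
 *        ioctl(fd, F2FS_IOC_SEC_TRIM_FILE, &range);
 */
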
3866 static int f2fs_ioc_get_compress_option(struct file *filp, unsigned long arg)
3867 {
3868         struct inode *inode = file_inode(filp);
3869         struct f2fs_comp_option option;
3870
3871         if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3872                 return -EOPNOTSUPP;
3873
3874         inode_lock_shared(inode);
3875
3876         if (!f2fs_compressed_file(inode)) {
3877                 inode_unlock_shared(inode);
3878                 return -ENODATA;
3879         }
3880
3881         option.algorithm = F2FS_I(inode)->i_compress_algorithm;
3882         option.log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
3883
3884         inode_unlock_shared(inode);
3885
3886         if (copy_to_user((struct f2fs_comp_option __user *)arg, &option,
3887                                 sizeof(option)))
3888                 return -EFAULT;
3889
3890         return 0;
3891 }
3892
3893 static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
3894 {
3895         struct inode *inode = file_inode(filp);
3896         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3897         struct f2fs_comp_option option;
3898         int ret = 0;
3899
3900         if (!f2fs_sb_has_compression(sbi))
3901                 return -EOPNOTSUPP;
3902
3903         if (!(filp->f_mode & FMODE_WRITE))
3904                 return -EBADF;
3905
3906         if (copy_from_user(&option, (struct f2fs_comp_option __user *)arg,
3907                                 sizeof(option)))
3908                 return -EFAULT;
3909
3910         if (!f2fs_compressed_file(inode) ||
3911                         option.log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
3912                         option.log_cluster_size > MAX_COMPRESS_LOG_SIZE ||
3913                         option.algorithm >= COMPRESS_MAX)
3914                 return -EINVAL;
3915
3916         file_start_write(filp);
3917         inode_lock(inode);
3918
3919         if (f2fs_is_mmap_file(inode) || get_dirty_pages(inode)) {
3920                 ret = -EBUSY;
3921                 goto out;
3922         }
3923
3924         if (inode->i_size != 0) {
3925                 ret = -EFBIG;
3926                 goto out;
3927         }
3928
3929         F2FS_I(inode)->i_compress_algorithm = option.algorithm;
3930         F2FS_I(inode)->i_log_cluster_size = option.log_cluster_size;
3931         F2FS_I(inode)->i_cluster_size = 1 << option.log_cluster_size;
3932         f2fs_mark_inode_dirty_sync(inode, true);
3933
3934         if (!f2fs_is_compress_backend_ready(inode))
3935                 f2fs_warn(sbi, "compression algorithm is successfully set, "
3936                         "but the current kernel does not support this algorithm.");
3937 out:
3938         inode_unlock(inode);
3939         file_end_write(filp);
3940
3941         return ret;
3942 }
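
/*
 * Example: picking a different algorithm and cluster size for an empty
 * compressed file (a minimal sketch; algorithm numbering follows the
 * kernel's COMPRESS_* enum, where 2 selects zstd at the time of writing,
 * and log_cluster_size = 2 means 4-block clusters):
 *
 *        #include <linux/f2fs.h>
 *        #include <sys/ioctl.h>
 *
 *        struct f2fs_comp_option opt = {
 *                .algorithm = 2,
 *                .log_cluster_size = 2,
 *        };
 *
 *        ioctl(fd, F2FS_IOC_SET_COMPRESS_OPTION, &opt);
 */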
3943
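/*
 * Read the next @len pages of @inode into the page cache and mark them
 * dirty, so that subsequent writeback rewrites them according to the
 * inode's current compression state (used by the compress/decompress
 * ioctls below).
 */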
3944 static int redirty_blocks(struct inode *inode, pgoff_t page_idx, int len)
3945 {
3946         DEFINE_READAHEAD(ractl, NULL, NULL, inode->i_mapping, page_idx);
3947         struct address_space *mapping = inode->i_mapping;
3948         struct page *page;
3949         pgoff_t redirty_idx = page_idx;
3950         int i, page_len = 0, ret = 0;
3951
3952         page_cache_ra_unbounded(&ractl, len, 0);
3953
3954         for (i = 0; i < len; i++, page_idx++) {
3955                 page = read_cache_page(mapping, page_idx, NULL, NULL);
3956                 if (IS_ERR(page)) {
3957                         ret = PTR_ERR(page);
3958                         break;
3959                 }
3960                 page_len++;
3961         }
3962
3963         for (i = 0; i < page_len; i++, redirty_idx++) {
3964                 page = find_lock_page(mapping, redirty_idx);
3965                 if (!page) {
3966                         ret = -ENOMEM;
3967                         break;
3968                 }
3969                 set_page_dirty(page);
3970                 f2fs_put_page(page, 1);
3971                 f2fs_put_page(page, 0);
3972         }
3973
3974         return ret;
3975 }
3976
3977 static int f2fs_ioc_decompress_file(struct file *filp, unsigned long arg)
3978 {
3979         struct inode *inode = file_inode(filp);
3980         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3981         struct f2fs_inode_info *fi = F2FS_I(inode);
3982         pgoff_t page_idx = 0, last_idx;
3983         unsigned int blk_per_seg = sbi->blocks_per_seg;
3984         int cluster_size = F2FS_I(inode)->i_cluster_size;
3985         int count, ret;
3986
3987         if (!f2fs_sb_has_compression(sbi) ||
3988                         F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
3989                 return -EOPNOTSUPP;
3990
3991         if (!(filp->f_mode & FMODE_WRITE))
3992                 return -EBADF;
3993
3994         if (!f2fs_compressed_file(inode))
3995                 return -EINVAL;
3996
3997         f2fs_balance_fs(F2FS_I_SB(inode), true);
3998
3999         file_start_write(filp);
4000         inode_lock(inode);
4001
4002         if (!f2fs_is_compress_backend_ready(inode)) {
4003                 ret = -EOPNOTSUPP;
4004                 goto out;
4005         }
4006
4007         if (f2fs_is_mmap_file(inode)) {
4008                 ret = -EBUSY;
4009                 goto out;
4010         }
4011
4012         ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
4013         if (ret)
4014                 goto out;
4015
4016         if (!atomic_read(&fi->i_compr_blocks))
4017                 goto out;
4018
4019         last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
4020
4021         count = last_idx - page_idx;
4022         while (count) {
4023                 int len = min(cluster_size, count);
4024
4025                 ret = redirty_blocks(inode, page_idx, len);
4026                 if (ret < 0)
4027                         break;
4028
4029                 if (get_dirty_pages(inode) >= blk_per_seg)
4030                         filemap_fdatawrite(inode->i_mapping);
4031
4032                 count -= len;
4033                 page_idx += len;
4034         }
4035
4036         if (!ret)
4037                 ret = filemap_write_and_wait_range(inode->i_mapping, 0,
4038                                                         LLONG_MAX);
4039
4040         if (ret)
4041                 f2fs_warn(sbi, "%s: The file might be partially decompressed (errno=%d). Please delete the file.",
4042                           __func__, ret);
4043 out:
4044         inode_unlock(inode);
4045         file_end_write(filp);
4046
4047         return ret;
4048 }
4049
4050 static int f2fs_ioc_compress_file(struct file *filp, unsigned long arg)
4051 {
4052         struct inode *inode = file_inode(filp);
4053         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4054         pgoff_t page_idx = 0, last_idx;
4055         unsigned int blk_per_seg = sbi->blocks_per_seg;
4056         int cluster_size = F2FS_I(inode)->i_cluster_size;
4057         int count, ret;
4058
4059         if (!f2fs_sb_has_compression(sbi) ||
4060                         F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
4061                 return -EOPNOTSUPP;
4062
4063         if (!(filp->f_mode & FMODE_WRITE))
4064                 return -EBADF;
4065
4066         if (!f2fs_compressed_file(inode))
4067                 return -EINVAL;
4068
4069         f2fs_balance_fs(F2FS_I_SB(inode), true);
4070
4071         file_start_write(filp);
4072         inode_lock(inode);
4073
4074         if (!f2fs_is_compress_backend_ready(inode)) {
4075                 ret = -EOPNOTSUPP;
4076                 goto out;
4077         }
4078
4079         if (f2fs_is_mmap_file(inode)) {
4080                 ret = -EBUSY;
4081                 goto out;
4082         }
4083
4084         ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
4085         if (ret)
4086                 goto out;
4087
4088         set_inode_flag(inode, FI_ENABLE_COMPRESS);
4089
4090         last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
4091
4092         count = last_idx - page_idx;
4093         while (count) {
4094                 int len = min(cluster_size, count);
4095
4096                 ret = redirty_blocks(inode, page_idx, len);
4097                 if (ret < 0)
4098                         break;
4099
4100                 if (get_dirty_pages(inode) >= blk_per_seg)
4101                         filemap_fdatawrite(inode->i_mapping);
4102
4103                 count -= len;
4104                 page_idx += len;
4105         }
4106
4107         if (!ret)
4108                 ret = filemap_write_and_wait_range(inode->i_mapping, 0,
4109                                                         LLONG_MAX);
4110
4111         clear_inode_flag(inode, FI_ENABLE_COMPRESS);
4112
4113         if (ret)
4114                 f2fs_warn(sbi, "%s: The file might be partially compressed (errno=%d). Please delete the file.",
4115                           __func__, ret);
4116 out:
4117         inode_unlock(inode);
4118         file_end_write(filp);
4119
4120         return ret;
4121 }
4122
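/*
 * Example: with the filesystem mounted compress_mode=user, triggering
 * compression of a file's clusters by hand (a minimal sketch;
 * F2FS_IOC_DECOMPRESS_FILE is the inverse and neither ioctl takes an
 * argument):
 *
 *        #include <linux/f2fs.h>
 *        #include <sys/ioctl.h>
 *
 *        ioctl(fd, F2FS_IOC_COMPRESS_FILE);
 */
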
4123 static long __f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4124 {
4125         switch (cmd) {
4126         case FS_IOC_GETVERSION:
4127                 return f2fs_ioc_getversion(filp, arg);
4128         case F2FS_IOC_START_ATOMIC_WRITE:
4129                 return f2fs_ioc_start_atomic_write(filp);
4130         case F2FS_IOC_COMMIT_ATOMIC_WRITE:
4131                 return f2fs_ioc_commit_atomic_write(filp);
4132         case F2FS_IOC_START_VOLATILE_WRITE:
4133                 return f2fs_ioc_start_volatile_write(filp);
4134         case F2FS_IOC_RELEASE_VOLATILE_WRITE:
4135                 return f2fs_ioc_release_volatile_write(filp);
4136         case F2FS_IOC_ABORT_VOLATILE_WRITE:
4137                 return f2fs_ioc_abort_volatile_write(filp);
4138         case F2FS_IOC_SHUTDOWN:
4139                 return f2fs_ioc_shutdown(filp, arg);
4140         case FITRIM:
4141                 return f2fs_ioc_fitrim(filp, arg);
4142         case FS_IOC_SET_ENCRYPTION_POLICY:
4143                 return f2fs_ioc_set_encryption_policy(filp, arg);
4144         case FS_IOC_GET_ENCRYPTION_POLICY:
4145                 return f2fs_ioc_get_encryption_policy(filp, arg);
4146         case FS_IOC_GET_ENCRYPTION_PWSALT:
4147                 return f2fs_ioc_get_encryption_pwsalt(filp, arg);
4148         case FS_IOC_GET_ENCRYPTION_POLICY_EX:
4149                 return f2fs_ioc_get_encryption_policy_ex(filp, arg);
4150         case FS_IOC_ADD_ENCRYPTION_KEY:
4151                 return f2fs_ioc_add_encryption_key(filp, arg);
4152         case FS_IOC_REMOVE_ENCRYPTION_KEY:
4153                 return f2fs_ioc_remove_encryption_key(filp, arg);
4154         case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
4155                 return f2fs_ioc_remove_encryption_key_all_users(filp, arg);
4156         case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
4157                 return f2fs_ioc_get_encryption_key_status(filp, arg);
4158         case FS_IOC_GET_ENCRYPTION_NONCE:
4159                 return f2fs_ioc_get_encryption_nonce(filp, arg);
4160         case F2FS_IOC_GARBAGE_COLLECT:
4161                 return f2fs_ioc_gc(filp, arg);
4162         case F2FS_IOC_GARBAGE_COLLECT_RANGE:
4163                 return f2fs_ioc_gc_range(filp, arg);
4164         case F2FS_IOC_WRITE_CHECKPOINT:
4165                 return f2fs_ioc_write_checkpoint(filp, arg);
4166         case F2FS_IOC_DEFRAGMENT:
4167                 return f2fs_ioc_defragment(filp, arg);
4168         case F2FS_IOC_MOVE_RANGE:
4169                 return f2fs_ioc_move_range(filp, arg);
4170         case F2FS_IOC_FLUSH_DEVICE:
4171                 return f2fs_ioc_flush_device(filp, arg);
4172         case F2FS_IOC_GET_FEATURES:
4173                 return f2fs_ioc_get_features(filp, arg);
4174         case F2FS_IOC_GET_PIN_FILE:
4175                 return f2fs_ioc_get_pin_file(filp, arg);
4176         case F2FS_IOC_SET_PIN_FILE:
4177                 return f2fs_ioc_set_pin_file(filp, arg);
4178         case F2FS_IOC_PRECACHE_EXTENTS:
4179                 return f2fs_ioc_precache_extents(filp, arg);
4180         case F2FS_IOC_RESIZE_FS:
4181                 return f2fs_ioc_resize_fs(filp, arg);
4182         case FS_IOC_ENABLE_VERITY:
4183                 return f2fs_ioc_enable_verity(filp, arg);
4184         case FS_IOC_MEASURE_VERITY:
4185                 return f2fs_ioc_measure_verity(filp, arg);
4186         case FS_IOC_READ_VERITY_METADATA:
4187                 return f2fs_ioc_read_verity_metadata(filp, arg);
4188         case FS_IOC_GETFSLABEL:
4189                 return f2fs_ioc_getfslabel(filp, arg);
4190         case FS_IOC_SETFSLABEL:
4191                 return f2fs_ioc_setfslabel(filp, arg);
4192         case F2FS_IOC_GET_COMPRESS_BLOCKS:
4193                 return f2fs_get_compress_blocks(filp, arg);
4194         case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
4195                 return f2fs_release_compress_blocks(filp, arg);
4196         case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
4197                 return f2fs_reserve_compress_blocks(filp, arg);
4198         case F2FS_IOC_SEC_TRIM_FILE:
4199                 return f2fs_sec_trim_file(filp, arg);
4200         case F2FS_IOC_GET_COMPRESS_OPTION:
4201                 return f2fs_ioc_get_compress_option(filp, arg);
4202         case F2FS_IOC_SET_COMPRESS_OPTION:
4203                 return f2fs_ioc_set_compress_option(filp, arg);
4204         case F2FS_IOC_DECOMPRESS_FILE:
4205                 return f2fs_ioc_decompress_file(filp, arg);
4206         case F2FS_IOC_COMPRESS_FILE:
4207                 return f2fs_ioc_compress_file(filp, arg);
4208         default:
4209                 return -ENOTTY;
4210         }
4211 }
4212
4213 long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4214 {
4215         if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
4216                 return -EIO;
4217         if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp))))
4218                 return -ENOSPC;
4219
4220         return __f2fs_ioctl(filp, cmd, arg);
4221 }
4222
4223 static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
4224 {
4225         struct file *file = iocb->ki_filp;
4226         struct inode *inode = file_inode(file);
4227         int ret;
4228
4229         if (!f2fs_is_compress_backend_ready(inode))
4230                 return -EOPNOTSUPP;
4231
4232         ret = generic_file_read_iter(iocb, iter);
4233
4234         if (ret > 0)
4235                 f2fs_update_iostat(F2FS_I_SB(inode), APP_READ_IO, ret);
4236
4237         return ret;
4238 }
4239
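/*
 * Write entry point: after generic_write_checks(), blocks are normally
 * preallocated for the whole range before __generic_file_write_iter() runs;
 * if the write then comes up short, the preallocated tail beyond i_size is
 * trimmed again through f2fs_truncate().
 */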
4240 static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
4241 {
4242         struct file *file = iocb->ki_filp;
4243         struct inode *inode = file_inode(file);
4244         ssize_t ret;
4245
4246         if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
4247                 ret = -EIO;
4248                 goto out;
4249         }
4250
4251         if (!f2fs_is_compress_backend_ready(inode)) {
4252                 ret = -EOPNOTSUPP;
4253                 goto out;
4254         }
4255
4256         if (iocb->ki_flags & IOCB_NOWAIT) {
4257                 if (!inode_trylock(inode)) {
4258                         ret = -EAGAIN;
4259                         goto out;
4260                 }
4261         } else {
4262                 inode_lock(inode);
4263         }
4264
4265         if (unlikely(IS_IMMUTABLE(inode))) {
4266                 ret = -EPERM;
4267                 goto unlock;
4268         }
4269
4270         if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
4271                 ret = -EPERM;
4272                 goto unlock;
4273         }
4274
4275         ret = generic_write_checks(iocb, from);
4276         if (ret > 0) {
4277                 bool preallocated = false;
4278                 size_t target_size = 0;
4279                 int err;
4280
4281                 if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
4282                         set_inode_flag(inode, FI_NO_PREALLOC);
4283
4284                 if ((iocb->ki_flags & IOCB_NOWAIT)) {
4285                         if (!f2fs_overwrite_io(inode, iocb->ki_pos,
4286                                                 iov_iter_count(from)) ||
4287                                 f2fs_has_inline_data(inode) ||
4288                                 f2fs_force_buffered_io(inode, iocb, from)) {
4289                                 clear_inode_flag(inode, FI_NO_PREALLOC);
4290                                 inode_unlock(inode);
4291                                 ret = -EAGAIN;
4292                                 goto out;
4293                         }
4294                         goto write;
4295                 }
4296
4297                 if (is_inode_flag_set(inode, FI_NO_PREALLOC))
4298                         goto write;
4299
4300                 if (iocb->ki_flags & IOCB_DIRECT) {
4301                         /*
4302                          * Convert inline data for Direct I/O before entering
4303                          * f2fs_direct_IO().
4304                          */
4305                         err = f2fs_convert_inline_inode(inode);
4306                         if (err)
4307                                 goto out_err;
4308                         /*
4309                          * If f2fs_force_buffered_io() is true, we have to allocate
4310                          * blocks all the time, since f2fs_direct_IO will fall
4311                          * back to buffered IO.
4312                          */
4313                         if (!f2fs_force_buffered_io(inode, iocb, from) &&
4314                                         f2fs_lfs_mode(F2FS_I_SB(inode)))
4315                                 goto write;
4316                 }
4317                 preallocated = true;
4318                 target_size = iocb->ki_pos + iov_iter_count(from);
4319
4320                 err = f2fs_preallocate_blocks(iocb, from);
4321                 if (err) {
4322 out_err:
4323                         clear_inode_flag(inode, FI_NO_PREALLOC);
4324                         inode_unlock(inode);
4325                         ret = err;
4326                         goto out;
4327                 }
4328 write:
4329                 ret = __generic_file_write_iter(iocb, from);
4330                 clear_inode_flag(inode, FI_NO_PREALLOC);
4331
4332                 /* if we couldn't write data, we should deallocate blocks. */
4333                 if (preallocated && i_size_read(inode) < target_size) {
4334                         down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
4335                         down_write(&F2FS_I(inode)->i_mmap_sem);
4336                         f2fs_truncate(inode);
4337                         up_write(&F2FS_I(inode)->i_mmap_sem);
4338                         up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
4339                 }
4340
4341                 if (ret > 0)
4342                         f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
4343         }
4344 unlock:
4345         inode_unlock(inode);
4346 out:
4347         trace_f2fs_file_write_iter(inode, iocb->ki_pos,
4348                                         iov_iter_count(from), ret);
4349         if (ret > 0)
4350                 ret = generic_write_sync(iocb, ret);
4351         return ret;
4352 }
4353
4354 static int f2fs_file_fadvise(struct file *filp, loff_t offset, loff_t len,
4355                 int advice)
4356 {
4357         struct inode *inode;
4358         struct address_space *mapping;
4359         struct backing_dev_info *bdi;
4360
4361         if (advice == POSIX_FADV_SEQUENTIAL) {
4362                 inode = file_inode(filp);
4363                 if (S_ISFIFO(inode->i_mode))
4364                         return -ESPIPE;
4365
4366                 mapping = filp->f_mapping;
4367                 if (!mapping || len < 0)
4368                         return -EINVAL;
4369
4370                 bdi = inode_to_bdi(mapping->host);
4371                 filp->f_ra.ra_pages = bdi->ra_pages *
4372                         F2FS_I_SB(inode)->seq_file_ra_mul;
4373                 spin_lock(&filp->f_lock);
4374                 filp->f_mode &= ~FMODE_RANDOM;
4375                 spin_unlock(&filp->f_lock);
4376                 return 0;
4377         }
4378
4379         return generic_fadvise(filp, offset, len, advice);
4380 }
4381
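/*
 * Example: opting a file into the boosted sequential readahead from
 * userspace (a minimal sketch; the multiplier is controlled by the
 * seq_file_ra_mul sysfs knob):
 *
 *        #include <fcntl.h>
 *
 *        posix_fadvise(fd, 0, 0, POSIX_FADV_SEQUENTIAL);
 */
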
4382 #ifdef CONFIG_COMPAT
4383 struct compat_f2fs_gc_range {
4384         u32 sync;
4385         compat_u64 start;
4386         compat_u64 len;
4387 };
4388 #define F2FS_IOC32_GARBAGE_COLLECT_RANGE        _IOW(F2FS_IOCTL_MAGIC, 11,\
4389                                                 struct compat_f2fs_gc_range)
4390
4391 static int f2fs_compat_ioc_gc_range(struct file *file, unsigned long arg)
4392 {
4393         struct compat_f2fs_gc_range __user *urange;
4394         struct f2fs_gc_range range;
4395         int err;
4396
4397         urange = compat_ptr(arg);
4398         err = get_user(range.sync, &urange->sync);
4399         err |= get_user(range.start, &urange->start);
4400         err |= get_user(range.len, &urange->len);
4401         if (err)
4402                 return -EFAULT;
4403
4404         return __f2fs_ioc_gc_range(file, &range);
4405 }
4406
4407 struct compat_f2fs_move_range {
4408         u32 dst_fd;
4409         compat_u64 pos_in;
4410         compat_u64 pos_out;
4411         compat_u64 len;
4412 };
4413 #define F2FS_IOC32_MOVE_RANGE           _IOWR(F2FS_IOCTL_MAGIC, 9,      \
4414                                         struct compat_f2fs_move_range)
4415
4416 static int f2fs_compat_ioc_move_range(struct file *file, unsigned long arg)
4417 {
4418         struct compat_f2fs_move_range __user *urange;
4419         struct f2fs_move_range range;
4420         int err;
4421
4422         urange = compat_ptr(arg);
4423         err = get_user(range.dst_fd, &urange->dst_fd);
4424         err |= get_user(range.pos_in, &urange->pos_in);
4425         err |= get_user(range.pos_out, &urange->pos_out);
4426         err |= get_user(range.len, &urange->len);
4427         if (err)
4428                 return -EFAULT;
4429
4430         return __f2fs_ioc_move_range(file, &range);
4431 }
4432
4433 long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
4434 {
4435         if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
4436                 return -EIO;
4437         if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(file))))
4438                 return -ENOSPC;
4439
4440         switch (cmd) {
4441         case FS_IOC32_GETVERSION:
4442                 cmd = FS_IOC_GETVERSION;
4443                 break;
4444         case F2FS_IOC32_GARBAGE_COLLECT_RANGE:
4445                 return f2fs_compat_ioc_gc_range(file, arg);
4446         case F2FS_IOC32_MOVE_RANGE:
4447                 return f2fs_compat_ioc_move_range(file, arg);
4448         case F2FS_IOC_START_ATOMIC_WRITE:
4449         case F2FS_IOC_COMMIT_ATOMIC_WRITE:
4450         case F2FS_IOC_START_VOLATILE_WRITE:
4451         case F2FS_IOC_RELEASE_VOLATILE_WRITE:
4452         case F2FS_IOC_ABORT_VOLATILE_WRITE:
4453         case F2FS_IOC_SHUTDOWN:
4454         case FITRIM:
4455         case FS_IOC_SET_ENCRYPTION_POLICY:
4456         case FS_IOC_GET_ENCRYPTION_PWSALT:
4457         case FS_IOC_GET_ENCRYPTION_POLICY:
4458         case FS_IOC_GET_ENCRYPTION_POLICY_EX:
4459         case FS_IOC_ADD_ENCRYPTION_KEY:
4460         case FS_IOC_REMOVE_ENCRYPTION_KEY:
4461         case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
4462         case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
4463         case FS_IOC_GET_ENCRYPTION_NONCE:
4464         case F2FS_IOC_GARBAGE_COLLECT:
4465         case F2FS_IOC_WRITE_CHECKPOINT:
4466         case F2FS_IOC_DEFRAGMENT:
4467         case F2FS_IOC_FLUSH_DEVICE:
4468         case F2FS_IOC_GET_FEATURES:
4469         case F2FS_IOC_GET_PIN_FILE:
4470         case F2FS_IOC_SET_PIN_FILE:
4471         case F2FS_IOC_PRECACHE_EXTENTS:
4472         case F2FS_IOC_RESIZE_FS:
4473         case FS_IOC_ENABLE_VERITY:
4474         case FS_IOC_MEASURE_VERITY:
4475         case FS_IOC_READ_VERITY_METADATA:
4476         case FS_IOC_GETFSLABEL:
4477         case FS_IOC_SETFSLABEL:
4478         case F2FS_IOC_GET_COMPRESS_BLOCKS:
4479         case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
4480         case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
4481         case F2FS_IOC_SEC_TRIM_FILE:
4482         case F2FS_IOC_GET_COMPRESS_OPTION:
4483         case F2FS_IOC_SET_COMPRESS_OPTION:
4484         case F2FS_IOC_DECOMPRESS_FILE:
4485         case F2FS_IOC_COMPRESS_FILE:
4486                 break;
4487         default:
4488                 return -ENOIOCTLCMD;
4489         }
4490         return __f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
4491 }
4492 #endif
4493
4494 const struct file_operations f2fs_file_operations = {
4495         .llseek         = f2fs_llseek,
4496         .read_iter      = f2fs_file_read_iter,
4497         .write_iter     = f2fs_file_write_iter,
4498         .open           = f2fs_file_open,
4499         .release        = f2fs_release_file,
4500         .mmap           = f2fs_file_mmap,
4501         .flush          = f2fs_file_flush,
4502         .fsync          = f2fs_sync_file,
4503         .fallocate      = f2fs_fallocate,
4504         .unlocked_ioctl = f2fs_ioctl,
4505 #ifdef CONFIG_COMPAT
4506         .compat_ioctl   = f2fs_compat_ioctl,
4507 #endif
4508         .splice_read    = generic_file_splice_read,
4509         .splice_write   = iter_file_splice_write,
4510         .fadvise        = f2fs_file_fadvise,
4511 };