fs/ext4/migrate.c
// SPDX-License-Identifier: LGPL-2.1
/*
 * Copyright IBM Corporation, 2007
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 */

#include <linux/slab.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"

/*
 * Details of a run of contiguous blocks that can be
 * represented by a single extent.
 */
struct migrate_struct {
        ext4_lblk_t first_block, last_block, curr_block;
        ext4_fsblk_t first_pblock, last_pblock;
};

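/*
 * Insert the run of blocks accumulated in @lb as a single extent into
 * @inode's extent tree, making sure the handle has enough credits first.
 * A no-op if no range has been started yet.
 */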
static int finish_range(handle_t *handle, struct inode *inode,
                                struct migrate_struct *lb)
{
        int retval = 0, needed;
        struct ext4_extent newext;
        struct ext4_ext_path *path;
        if (lb->first_pblock == 0)
                return 0;

        /* Add the extent to the temp inode */
        newext.ee_block = cpu_to_le32(lb->first_block);
        newext.ee_len   = cpu_to_le16(lb->last_block - lb->first_block + 1);
        ext4_ext_store_pblock(&newext, lb->first_pblock);
        /* Locking only for convenience since we are operating on the temp inode */
        down_write(&EXT4_I(inode)->i_data_sem);
        path = ext4_find_extent(inode, lb->first_block, NULL, 0);
        if (IS_ERR(path)) {
                retval = PTR_ERR(path);
                path = NULL;
                goto err_out;
        }

        /*
         * Calculate the credits needed to insert this extent.
         * Since we are doing this in a loop we may accumulate extra
         * credits, but below we try not to accumulate too many of
         * them by restarting the journal.
         */
        needed = ext4_ext_calc_credits_for_single_extent(inode,
                    lb->last_block - lb->first_block + 1, path);

        retval = ext4_datasem_ensure_credits(handle, inode, needed, needed, 0);
        if (retval < 0)
                goto err_out;
        retval = ext4_ext_insert_extent(handle, inode, &path, &newext, 0);
err_out:
        up_write((&EXT4_I(inode)->i_data_sem));
        ext4_ext_drop_refs(path);
        kfree(path);
        lb->first_pblock = 0;
        return retval;
}

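/*
 * Account one data block (logical block lb->curr_block, physical block
 * @pblock) in @lb: extend the current range if the block is contiguous
 * with it, otherwise flush the old range via finish_range() and start a
 * new one.
 */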
static int update_extent_range(handle_t *handle, struct inode *inode,
                               ext4_fsblk_t pblock, struct migrate_struct *lb)
{
        int retval;
        /*
         * See if we can add on to the existing range (if it exists)
         */
        if (lb->first_pblock &&
                (lb->last_pblock+1 == pblock) &&
                (lb->last_block+1 == lb->curr_block)) {
                lb->last_pblock = pblock;
                lb->last_block = lb->curr_block;
                lb->curr_block++;
                return 0;
        }
        /*
         * Start a new range.
         */
        retval = finish_range(handle, inode, lb);
        lb->first_pblock = lb->last_pblock = pblock;
        lb->first_block = lb->last_block = lb->curr_block;
        lb->curr_block++;
        return retval;
}

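/*
 * Walk the indirect block at @pblock and feed every data block it
 * references into update_extent_range(); holes only advance the logical
 * block counter.
 */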
static int update_ind_extent_range(handle_t *handle, struct inode *inode,
                                   ext4_fsblk_t pblock,
                                   struct migrate_struct *lb)
{
        struct buffer_head *bh;
        __le32 *i_data;
        int i, retval = 0;
        unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

        bh = ext4_sb_bread(inode->i_sb, pblock, 0);
        if (IS_ERR(bh))
                return PTR_ERR(bh);

        i_data = (__le32 *)bh->b_data;
        for (i = 0; i < max_entries; i++) {
                if (i_data[i]) {
                        retval = update_extent_range(handle, inode,
                                                le32_to_cpu(i_data[i]), lb);
                        if (retval)
                                break;
                } else {
                        lb->curr_block++;
                }
        }
        put_bh(bh);
        return retval;
}

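/*
 * Walk the double indirect block at @pblock, handing each referenced
 * indirect block to update_ind_extent_range(); a missing entry skips a
 * whole indirect block's worth of logical blocks.
 */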
static int update_dind_extent_range(handle_t *handle, struct inode *inode,
                                    ext4_fsblk_t pblock,
                                    struct migrate_struct *lb)
{
        struct buffer_head *bh;
        __le32 *i_data;
        int i, retval = 0;
        unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

        bh = ext4_sb_bread(inode->i_sb, pblock, 0);
        if (IS_ERR(bh))
                return PTR_ERR(bh);

        i_data = (__le32 *)bh->b_data;
        for (i = 0; i < max_entries; i++) {
                if (i_data[i]) {
                        retval = update_ind_extent_range(handle, inode,
                                                le32_to_cpu(i_data[i]), lb);
                        if (retval)
                                break;
                } else {
                        /* Only update the file block number */
                        lb->curr_block += max_entries;
                }
        }
        put_bh(bh);
        return retval;
}

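/*
 * Walk the triple indirect block at @pblock, handing each referenced
 * double indirect block to update_dind_extent_range(); a missing entry
 * skips max_entries * max_entries logical blocks.
 */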
static int update_tind_extent_range(handle_t *handle, struct inode *inode,
                                    ext4_fsblk_t pblock,
                                    struct migrate_struct *lb)
{
        struct buffer_head *bh;
        __le32 *i_data;
        int i, retval = 0;
        unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

        bh = ext4_sb_bread(inode->i_sb, pblock, 0);
        if (IS_ERR(bh))
                return PTR_ERR(bh);

        i_data = (__le32 *)bh->b_data;
        for (i = 0; i < max_entries; i++) {
                if (i_data[i]) {
                        retval = update_dind_extent_range(handle, inode,
                                                le32_to_cpu(i_data[i]), lb);
                        if (retval)
                                break;
                } else {
                        /* Only update the file block number */
                        lb->curr_block += max_entries * max_entries;
                }
        }
        put_bh(bh);
        return retval;
}

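/*
 * Free the double indirect block at @i_data and all the indirect blocks
 * it points to. The data blocks themselves are left alone; they are now
 * referenced by the new extent tree.
 */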
static int free_dind_blocks(handle_t *handle,
                                struct inode *inode, __le32 i_data)
{
        int i;
        __le32 *tmp_idata;
        struct buffer_head *bh;
        struct super_block *sb = inode->i_sb;
        unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
        int err;

        bh = ext4_sb_bread(sb, le32_to_cpu(i_data), 0);
        if (IS_ERR(bh))
                return PTR_ERR(bh);

        tmp_idata = (__le32 *)bh->b_data;
        for (i = 0; i < max_entries; i++) {
                if (tmp_idata[i]) {
                        err = ext4_journal_ensure_credits(handle,
                                EXT4_RESERVE_TRANS_BLOCKS,
                                ext4_free_metadata_revoke_credits(sb, 1));
                        if (err < 0) {
                                put_bh(bh);
                                return err;
                        }
                        ext4_free_blocks(handle, inode, NULL,
                                         le32_to_cpu(tmp_idata[i]), 1,
                                         EXT4_FREE_BLOCKS_METADATA |
                                         EXT4_FREE_BLOCKS_FORGET);
                }
        }
        put_bh(bh);
        err = ext4_journal_ensure_credits(handle, EXT4_RESERVE_TRANS_BLOCKS,
                                ext4_free_metadata_revoke_credits(sb, 1));
        if (err < 0)
                return err;
        ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1,
                         EXT4_FREE_BLOCKS_METADATA |
                         EXT4_FREE_BLOCKS_FORGET);
        return 0;
}

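/*
 * Free the triple indirect block at @i_data: free every double indirect
 * tree hanging off it via free_dind_blocks(), then free the triple
 * indirect block itself.
 */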
static int free_tind_blocks(handle_t *handle,
                                struct inode *inode, __le32 i_data)
{
        int i, retval = 0;
        __le32 *tmp_idata;
        struct buffer_head *bh;
        unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

        bh = ext4_sb_bread(inode->i_sb, le32_to_cpu(i_data), 0);
        if (IS_ERR(bh))
                return PTR_ERR(bh);

        tmp_idata = (__le32 *)bh->b_data;
        for (i = 0; i < max_entries; i++) {
                if (tmp_idata[i]) {
                        retval = free_dind_blocks(handle,
                                        inode, tmp_idata[i]);
                        if (retval) {
                                put_bh(bh);
                                return retval;
                        }
                }
        }
        put_bh(bh);
        retval = ext4_journal_ensure_credits(handle, EXT4_RESERVE_TRANS_BLOCKS,
                        ext4_free_metadata_revoke_credits(inode->i_sb, 1));
        if (retval < 0)
                return retval;
        ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1,
                         EXT4_FREE_BLOCKS_METADATA |
                         EXT4_FREE_BLOCKS_FORGET);
        return 0;
}

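/*
 * Free the indirect, double indirect and triple indirect metadata blocks
 * recorded in @i_data, a copy of the original inode's top-level block
 * pointers taken before the i_data swap.
 */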
static int free_ind_block(handle_t *handle, struct inode *inode, __le32 *i_data)
{
        int retval;

        /* ei->i_data[EXT4_IND_BLOCK] */
        if (i_data[0]) {
                retval = ext4_journal_ensure_credits(handle,
                        EXT4_RESERVE_TRANS_BLOCKS,
                        ext4_free_metadata_revoke_credits(inode->i_sb, 1));
                if (retval < 0)
                        return retval;
                ext4_free_blocks(handle, inode, NULL,
                                le32_to_cpu(i_data[0]), 1,
                                 EXT4_FREE_BLOCKS_METADATA |
                                 EXT4_FREE_BLOCKS_FORGET);
        }

        /* ei->i_data[EXT4_DIND_BLOCK] */
        if (i_data[1]) {
                retval = free_dind_blocks(handle, inode, i_data[1]);
                if (retval)
                        return retval;
        }

        /* ei->i_data[EXT4_TIND_BLOCK] */
        if (i_data[2]) {
                retval = free_tind_blocks(handle, inode, i_data[2]);
                if (retval)
                        return retval;
        }
        return 0;
}

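/*
 * Switch the original inode over to the extent tree built in @tmp_inode:
 * verify that no block allocation raced with the migration, copy
 * tmp_inode's i_data into the original inode, fold the newly allocated
 * extent index blocks into i_blocks, and finally free the old indirect
 * metadata blocks.
 */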
static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode,
                                                struct inode *tmp_inode)
{
        int retval, retval2 = 0;
        __le32  i_data[3];
        struct ext4_inode_info *ei = EXT4_I(inode);
        struct ext4_inode_info *tmp_ei = EXT4_I(tmp_inode);

        /*
         * One credit accounted for writing the
         * i_data field of the original inode
         */
        retval = ext4_journal_ensure_credits(handle, 1, 0);
        if (retval < 0)
                goto err_out;

        i_data[0] = ei->i_data[EXT4_IND_BLOCK];
        i_data[1] = ei->i_data[EXT4_DIND_BLOCK];
        i_data[2] = ei->i_data[EXT4_TIND_BLOCK];

        down_write(&EXT4_I(inode)->i_data_sem);
        /*
         * If EXT4_STATE_EXT_MIGRATE has been cleared, a block allocation
         * happened after we started the migration. We need to fail the
         * migration.
         */
        if (!ext4_test_inode_state(inode, EXT4_STATE_EXT_MIGRATE)) {
                retval = -EAGAIN;
                up_write(&EXT4_I(inode)->i_data_sem);
                goto err_out;
        } else
                ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
        /*
         * We have the extent map built in the tmp inode.
         * Now copy the i_data across.
         */
        ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
        memcpy(ei->i_data, tmp_ei->i_data, sizeof(ei->i_data));

        /*
         * Update i_blocks with the new blocks that got
         * allocated while adding extents for extent index
         * blocks.
         *
         * While converting to extents we need not
         * update the original inode i_blocks for extent blocks
         * via quota APIs. The quota update happened via tmp_inode already.
         */
        spin_lock(&inode->i_lock);
        inode->i_blocks += tmp_inode->i_blocks;
        spin_unlock(&inode->i_lock);
        up_write(&EXT4_I(inode)->i_data_sem);

        /*
         * We mark the inode dirty afterwards, because we decrement
         * i_blocks when freeing the indirect metadata blocks.
         */
        retval = free_ind_block(handle, inode, i_data);
        retval2 = ext4_mark_inode_dirty(handle, inode);
        if (unlikely(retval2 && !retval))
                retval = retval2;

err_out:
        return retval;
}

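/*
 * Recursively free the extent tree block referenced by @ix, including any
 * lower-level index and leaf blocks below it. The data blocks that the
 * extents point to are not freed.
 */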
static int free_ext_idx(handle_t *handle, struct inode *inode,
                                        struct ext4_extent_idx *ix)
{
        int i, retval = 0;
        ext4_fsblk_t block;
        struct buffer_head *bh;
        struct ext4_extent_header *eh;

        block = ext4_idx_pblock(ix);
        bh = ext4_sb_bread(inode->i_sb, block, 0);
        if (IS_ERR(bh))
                return PTR_ERR(bh);

        eh = (struct ext4_extent_header *)bh->b_data;
        if (eh->eh_depth != 0) {
                ix = EXT_FIRST_INDEX(eh);
                for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ix++) {
                        retval = free_ext_idx(handle, inode, ix);
                        if (retval) {
                                put_bh(bh);
                                return retval;
                        }
                }
        }
        put_bh(bh);
        retval = ext4_journal_ensure_credits(handle, EXT4_RESERVE_TRANS_BLOCKS,
                        ext4_free_metadata_revoke_credits(inode->i_sb, 1));
        if (retval < 0)
                return retval;
        ext4_free_blocks(handle, inode, NULL, block, 1,
                         EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
        return 0;
}

/*
 * Free the extent metadata blocks only
 */
static int free_ext_block(handle_t *handle, struct inode *inode)
{
        int i, retval = 0;
        struct ext4_inode_info *ei = EXT4_I(inode);
        struct ext4_extent_header *eh = (struct ext4_extent_header *)ei->i_data;
        struct ext4_extent_idx *ix;
        if (eh->eh_depth == 0)
                /*
                 * No extra blocks allocated for extent metadata
                 */
                return 0;
        ix = EXT_FIRST_INDEX(eh);
        for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ix++) {
                retval = free_ext_idx(handle, inode, ix);
                if (retval)
                        return retval;
        }
        return retval;
}

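/*
 * Migrate an indirect-block-mapped inode to the extent format. The extent
 * tree is first built in a temporary orphan inode by walking the old block
 * map; the i_data of the two inodes is then swapped and the original
 * indirect metadata blocks are freed.
 */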
int ext4_ext_migrate(struct inode *inode)
{
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        handle_t *handle;
        int retval = 0, i;
        __le32 *i_data;
        struct ext4_inode_info *ei;
        struct inode *tmp_inode = NULL;
        struct migrate_struct lb;
        unsigned long max_entries;
        __u32 goal;
        uid_t owner[2];

        /*
         * If the filesystem does not support extents, or the inode
         * already is extent-based, error out.
         */
        if (!ext4_has_feature_extents(inode->i_sb) ||
            (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
                return -EINVAL;

        if (S_ISLNK(inode->i_mode) && inode->i_blocks == 0)
                /*
                 * Don't migrate fast symlinks.
                 */
                return retval;

        percpu_down_write(&sbi->s_writepages_rwsem);

        /*
         * Worst case we can touch the allocation bitmaps, a bgd
         * block, and a block to link in the orphan list.  We do need
         * to worry about credits for modifying the quota inode.
         */
        handle = ext4_journal_start(inode, EXT4_HT_MIGRATE,
                4 + EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb));

        if (IS_ERR(handle)) {
                retval = PTR_ERR(handle);
                goto out_unlock;
        }
        goal = (((inode->i_ino - 1) / EXT4_INODES_PER_GROUP(inode->i_sb)) *
                EXT4_INODES_PER_GROUP(inode->i_sb)) + 1;
        owner[0] = i_uid_read(inode);
        owner[1] = i_gid_read(inode);
        tmp_inode = ext4_new_inode(handle, d_inode(inode->i_sb->s_root),
                                   S_IFREG, NULL, goal, owner, 0);
        if (IS_ERR(tmp_inode)) {
                retval = PTR_ERR(tmp_inode);
                ext4_journal_stop(handle);
                goto out_unlock;
        }
        i_size_write(tmp_inode, i_size_read(inode));
        /*
         * Set the i_nlink to zero so it will be deleted later
         * when we drop the inode reference.
         */
        clear_nlink(tmp_inode);

        ext4_ext_tree_init(handle, tmp_inode);
        ext4_orphan_add(handle, tmp_inode);
        ext4_journal_stop(handle);

        /*
         * Start with one credit accounted for
         * superblock modification.
         *
         * For the tmp_inode we have already committed the
         * transaction that created the inode. Later, as and
         * when we add extents, we extend the journal.
         */
        /*
         * Even though we take i_mutex we can still cause block
         * allocation via mmap write to holes. If we have allocated
         * new blocks we fail the migration.  New block allocation will
         * clear the EXT4_STATE_EXT_MIGRATE flag.  The flag is updated
         * with i_data_sem held to prevent racing with block
         * allocation.
         */
        down_read(&EXT4_I(inode)->i_data_sem);
        ext4_set_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
        up_read((&EXT4_I(inode)->i_data_sem));

        handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
        if (IS_ERR(handle)) {
                /*
                 * It is impossible to update on-disk structures without
                 * a handle, so just roll back in-core changes and leave
                 * the rest of the work to orphan_list_cleanup().
                 */
                ext4_orphan_del(NULL, tmp_inode);
                retval = PTR_ERR(handle);
                goto out_tmp_inode;
        }

        ei = EXT4_I(inode);
        i_data = ei->i_data;
        memset(&lb, 0, sizeof(lb));

        /* 32-bit block addresses, 4 bytes each */
        max_entries = inode->i_sb->s_blocksize >> 2;
        for (i = 0; i < EXT4_NDIR_BLOCKS; i++) {
                if (i_data[i]) {
                        retval = update_extent_range(handle, tmp_inode,
                                                le32_to_cpu(i_data[i]), &lb);
                        if (retval)
                                goto err_out;
                } else
                        lb.curr_block++;
        }
        if (i_data[EXT4_IND_BLOCK]) {
                retval = update_ind_extent_range(handle, tmp_inode,
                                le32_to_cpu(i_data[EXT4_IND_BLOCK]), &lb);
                if (retval)
                        goto err_out;
        } else
                lb.curr_block += max_entries;
        if (i_data[EXT4_DIND_BLOCK]) {
                retval = update_dind_extent_range(handle, tmp_inode,
                                le32_to_cpu(i_data[EXT4_DIND_BLOCK]), &lb);
                if (retval)
                        goto err_out;
        } else
                lb.curr_block += max_entries * max_entries;
        if (i_data[EXT4_TIND_BLOCK]) {
                retval = update_tind_extent_range(handle, tmp_inode,
                                le32_to_cpu(i_data[EXT4_TIND_BLOCK]), &lb);
                if (retval)
                        goto err_out;
        }
        /*
         * Build the last extent
         */
        retval = finish_range(handle, tmp_inode, &lb);
err_out:
        if (retval)
                /*
                 * On failure, delete the extent information built in
                 * the tmp_inode.
                 */
                free_ext_block(handle, tmp_inode);
        else {
                retval = ext4_ext_swap_inode_data(handle, inode, tmp_inode);
                if (retval)
                        /*
                         * If we fail to swap the inode data, free the
                         * extent details of the tmp inode.
                         */
                        free_ext_block(handle, tmp_inode);
        }

        /* We mark the tmp_inode dirty via ext4_ext_tree_init. */
        retval = ext4_journal_ensure_credits(handle, 1, 0);
        if (retval < 0)
                goto out_stop;
        /*
         * Mark the tmp_inode as having size zero.
         */
        i_size_write(tmp_inode, 0);

        /*
         * Set the i_blocks count to zero
         * so that ext4_evict_inode() does the
         * right thing.
         *
         * We don't need to take the i_lock because
         * the inode is not visible to user space.
         */
        tmp_inode->i_blocks = 0;

        /* Reset the extent details */
        ext4_ext_tree_init(handle, tmp_inode);
out_stop:
        ext4_journal_stop(handle);
out_tmp_inode:
        unlock_new_inode(tmp_inode);
        iput(tmp_inode);
out_unlock:
        percpu_up_write(&sbi->s_writepages_rwsem);
        return retval;
}

/*
 * Migrate a simple extent-based inode to use the i_blocks[] array
 */
int ext4_ind_migrate(struct inode *inode)
{
        struct ext4_extent_header       *eh;
        struct ext4_sb_info             *sbi = EXT4_SB(inode->i_sb);
        struct ext4_super_block         *es = sbi->s_es;
        struct ext4_inode_info          *ei = EXT4_I(inode);
        struct ext4_extent              *ex;
        unsigned int                    i, len;
        ext4_lblk_t                     start, end;
        ext4_fsblk_t                    blk;
        handle_t                        *handle;
        int                             ret, ret2 = 0;

        if (!ext4_has_feature_extents(inode->i_sb) ||
            (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
                return -EINVAL;

        if (ext4_has_feature_bigalloc(inode->i_sb))
                return -EOPNOTSUPP;

        /*
         * In order to get correct extent info, force all delayed allocation
         * blocks to be allocated; otherwise delayed-allocation blocks may
         * not be reflected in the extent tree and could bypass the checks
         * on the extent header.
         */
        if (test_opt(inode->i_sb, DELALLOC))
                ext4_alloc_da_blocks(inode);

        percpu_down_write(&sbi->s_writepages_rwsem);

        handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                goto out_unlock;
        }

        down_write(&EXT4_I(inode)->i_data_sem);
        ret = ext4_ext_check_inode(inode);
        if (ret)
                goto errout;

        eh = ext_inode_hdr(inode);
        ex  = EXT_FIRST_EXTENT(eh);
        if (ext4_blocks_count(es) > EXT4_MAX_BLOCK_FILE_PHYS ||
            eh->eh_depth != 0 || le16_to_cpu(eh->eh_entries) > 1) {
                ret = -EOPNOTSUPP;
                goto errout;
        }
        if (eh->eh_entries == 0)
                blk = len = start = end = 0;
        else {
                len = le16_to_cpu(ex->ee_len);
                blk = ext4_ext_pblock(ex);
                start = le32_to_cpu(ex->ee_block);
                end = start + len - 1;
                if (end >= EXT4_NDIR_BLOCKS) {
                        ret = -EOPNOTSUPP;
                        goto errout;
                }
        }

        ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
        memset(ei->i_data, 0, sizeof(ei->i_data));
        for (i = start; i <= end; i++)
                ei->i_data[i] = cpu_to_le32(blk++);
        ret2 = ext4_mark_inode_dirty(handle, inode);
        if (unlikely(ret2 && !ret))
                ret = ret2;
errout:
        ext4_journal_stop(handle);
        up_write(&EXT4_I(inode)->i_data_sem);
out_unlock:
        percpu_up_write(&sbi->s_writepages_rwsem);
        return ret;
}