fs/nilfs2/segment.c
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * NILFS segment constructor.
4  *
5  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
6  *
7  * Written by Ryusuke Konishi.
8  *
9  */
10
11 #include <linux/pagemap.h>
12 #include <linux/buffer_head.h>
13 #include <linux/writeback.h>
14 #include <linux/bitops.h>
15 #include <linux/bio.h>
16 #include <linux/completion.h>
17 #include <linux/blkdev.h>
18 #include <linux/backing-dev.h>
19 #include <linux/freezer.h>
20 #include <linux/kthread.h>
21 #include <linux/crc32.h>
22 #include <linux/pagevec.h>
23 #include <linux/slab.h>
24 #include <linux/sched/signal.h>
25
26 #include "nilfs.h"
27 #include "btnode.h"
28 #include "page.h"
29 #include "segment.h"
30 #include "sufile.h"
31 #include "cpfile.h"
32 #include "ifile.h"
33 #include "segbuf.h"
34
35
36 /*
37  * Segment constructor
38  */
39 #define SC_N_INODEVEC   16   /* Size of locally allocated inode vector */
40
41 #define SC_MAX_SEGDELTA 64   /*
42                               * Upper limit of the number of segments
43                               * appended in collection retry loop
44                               */
45
46 /* Construction mode */
47 enum {
48         SC_LSEG_SR = 1, /* Make a logical segment having a super root */
49         SC_LSEG_DSYNC,  /*
50                          * Flush data blocks of a given file and make
51                          * a logical segment without a super root.
52                          */
53         SC_FLUSH_FILE,  /*
54                          * Flush data files; this leads to segment writes without
55                          * creating a checkpoint.
56                          */
57         SC_FLUSH_DAT,   /*
58                          * Flush DAT file.  This also creates segments
59                          * without a checkpoint.
60                          */
61 };
62
63 /* Stage numbers of dirty block collection */
64 enum {
65         NILFS_ST_INIT = 0,
66         NILFS_ST_GC,            /* Collecting dirty blocks for GC */
67         NILFS_ST_FILE,
68         NILFS_ST_IFILE,
69         NILFS_ST_CPFILE,
70         NILFS_ST_SUFILE,
71         NILFS_ST_DAT,
72         NILFS_ST_SR,            /* Super root */
73         NILFS_ST_DSYNC,         /* Data sync blocks */
74         NILFS_ST_DONE,
75 };
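/*
 * Illustrative note (added sketch, not part of the original source): for a
 * full construction with a super root (SC_LSEG_SR), the stages above are
 * visited in enum order:
 *
 *      NILFS_ST_INIT -> NILFS_ST_GC -> NILFS_ST_FILE -> NILFS_ST_IFILE ->
 *      NILFS_ST_CPFILE -> NILFS_ST_SUFILE -> NILFS_ST_DAT -> NILFS_ST_SR ->
 *      NILFS_ST_DONE
 *
 * whereas SC_FLUSH_FILE stops after NILFS_ST_FILE, SC_FLUSH_DAT jumps
 * straight to NILFS_ST_DAT, and SC_LSEG_DSYNC jumps to NILFS_ST_DSYNC
 * (see nilfs_segctor_collect_blocks()).
 */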
76
77 #define CREATE_TRACE_POINTS
78 #include <trace/events/nilfs2.h>
79
80 /*
81  * nilfs_sc_cstage_inc(), nilfs_sc_cstage_set(), nilfs_sc_cstage_get() are
82  * wrapper functions for the stage count (nilfs_sc_info->sc_stage.scnt). Users
83  * of the variable must use them because every transition of the stage count
84  * must emit a trace event (trace_nilfs2_collection_stage_transition).
85  *
86  * nilfs_sc_cstage_get() isn't required for the above purpose because it doesn't
87  * produce tracepoint events. It is provided just to make the intention
88  * clear.
89  */
90 static inline void nilfs_sc_cstage_inc(struct nilfs_sc_info *sci)
91 {
92         sci->sc_stage.scnt++;
93         trace_nilfs2_collection_stage_transition(sci);
94 }
95
96 static inline void nilfs_sc_cstage_set(struct nilfs_sc_info *sci, int next_scnt)
97 {
98         sci->sc_stage.scnt = next_scnt;
99         trace_nilfs2_collection_stage_transition(sci);
100 }
101
102 static inline int nilfs_sc_cstage_get(struct nilfs_sc_info *sci)
103 {
104         return sci->sc_stage.scnt;
105 }
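/*
 * Usage sketch (added example, not part of the original source): callers
 * change the collection stage only through the wrappers above, never by
 * assigning to sc_stage.scnt directly, e.g.:
 *
 *      nilfs_sc_cstage_set(sci, NILFS_ST_INIT);
 *      ...
 *      if (nilfs_sc_cstage_get(sci) == NILFS_ST_FILE)
 *              nilfs_sc_cstage_inc(sci);       <-- emits the stage tracepoint
 */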
106
107 /* State flags of collection */
108 #define NILFS_CF_NODE           0x0001  /* Collecting node blocks */
109 #define NILFS_CF_IFILE_STARTED  0x0002  /* IFILE stage has started */
110 #define NILFS_CF_SUFREED        0x0004  /* segment usages have been freed */
111 #define NILFS_CF_HISTORY_MASK   (NILFS_CF_IFILE_STARTED | NILFS_CF_SUFREED)
112
113 /* Operations depending on the construction mode and file type */
114 struct nilfs_sc_operations {
115         int (*collect_data)(struct nilfs_sc_info *, struct buffer_head *,
116                             struct inode *);
117         int (*collect_node)(struct nilfs_sc_info *, struct buffer_head *,
118                             struct inode *);
119         int (*collect_bmap)(struct nilfs_sc_info *, struct buffer_head *,
120                             struct inode *);
121         void (*write_data_binfo)(struct nilfs_sc_info *,
122                                  struct nilfs_segsum_pointer *,
123                                  union nilfs_binfo *);
124         void (*write_node_binfo)(struct nilfs_sc_info *,
125                                  struct nilfs_segsum_pointer *,
126                                  union nilfs_binfo *);
127 };
128
129 /*
130  * Other definitions
131  */
132 static void nilfs_segctor_start_timer(struct nilfs_sc_info *);
133 static void nilfs_segctor_do_flush(struct nilfs_sc_info *, int);
134 static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *);
135 static void nilfs_dispose_list(struct the_nilfs *, struct list_head *, int);
136
137 #define nilfs_cnt32_ge(a, b)   \
138         (typecheck(__u32, a) && typecheck(__u32, b) && \
139          ((__s32)(a) - (__s32)(b) >= 0))
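/*
 * Illustrative note (added example, not part of the original source): the
 * signed subtraction above makes the comparison robust against 32-bit
 * wraparound of sequence counters.  For example,
 *
 *      nilfs_cnt32_ge(1, 0xfffffffe)
 *
 * is true because (__s32)(1 - 0xfffffffe) == 3 >= 0, whereas a plain
 * unsigned "a >= b" would be false here.
 */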
140
141 static int nilfs_prepare_segment_lock(struct super_block *sb,
142                                       struct nilfs_transaction_info *ti)
143 {
144         struct nilfs_transaction_info *cur_ti = current->journal_info;
145         void *save = NULL;
146
147         if (cur_ti) {
148                 if (cur_ti->ti_magic == NILFS_TI_MAGIC)
149                         return ++cur_ti->ti_count;
150
151                 /*
152                  * If the journal_info field is occupied by another FS,
153                  * it is saved and will be restored on
154                  * nilfs_transaction_commit().
155                  */
156                 nilfs_warn(sb, "journal info from a different FS");
157                 save = current->journal_info;
158         }
159         if (!ti) {
160                 ti = kmem_cache_alloc(nilfs_transaction_cachep, GFP_NOFS);
161                 if (!ti)
162                         return -ENOMEM;
163                 ti->ti_flags = NILFS_TI_DYNAMIC_ALLOC;
164         } else {
165                 ti->ti_flags = 0;
166         }
167         ti->ti_count = 0;
168         ti->ti_save = save;
169         ti->ti_magic = NILFS_TI_MAGIC;
170         current->journal_info = ti;
171         return 0;
172 }
173
174 /**
175  * nilfs_transaction_begin - start indivisible file operations.
176  * @sb: super block
177  * @ti: nilfs_transaction_info
178  * @vacancy_check: flags for vacancy rate checks
179  *
180  * nilfs_transaction_begin() acquires a reader/writer semaphore, called
181  * the segment semaphore, to make segment construction and write tasks
182  * exclusive.  The function is used with nilfs_transaction_commit() in pairs.
183  * The region enclosed by these two functions can be nested.  To avoid a
184  * deadlock, the semaphore is only acquired or released in the outermost call.
185  *
186  * This function allocates a nilfs_transaction_info struct to keep context
187  * information on it.  It is initialized and hooked onto the current task in
188  * the outermost call.  If a pre-allocated struct is given to @ti, it is used
189  * instead; otherwise a new struct is assigned from a slab.
190  *
191  * When the @vacancy_check flag is set, this function checks the amount of
192  * free space and waits for the GC to reclaim disk space if capacity is low.
193  *
194  * Return Value: On success, 0 is returned. On error, one of the following
195  * negative error codes is returned.
196  *
197  * %-ENOMEM - Insufficient memory available.
198  *
199  * %-ENOSPC - No space left on device
200  */
201 int nilfs_transaction_begin(struct super_block *sb,
202                             struct nilfs_transaction_info *ti,
203                             int vacancy_check)
204 {
205         struct the_nilfs *nilfs;
206         int ret = nilfs_prepare_segment_lock(sb, ti);
207         struct nilfs_transaction_info *trace_ti;
208
209         if (unlikely(ret < 0))
210                 return ret;
211         if (ret > 0) {
212                 trace_ti = current->journal_info;
213
214                 trace_nilfs2_transaction_transition(sb, trace_ti,
215                                     trace_ti->ti_count, trace_ti->ti_flags,
216                                     TRACE_NILFS2_TRANSACTION_BEGIN);
217                 return 0;
218         }
219
220         sb_start_intwrite(sb);
221
222         nilfs = sb->s_fs_info;
223         down_read(&nilfs->ns_segctor_sem);
224         if (vacancy_check && nilfs_near_disk_full(nilfs)) {
225                 up_read(&nilfs->ns_segctor_sem);
226                 ret = -ENOSPC;
227                 goto failed;
228         }
229
230         trace_ti = current->journal_info;
231         trace_nilfs2_transaction_transition(sb, trace_ti, trace_ti->ti_count,
232                                             trace_ti->ti_flags,
233                                             TRACE_NILFS2_TRANSACTION_BEGIN);
234         return 0;
235
236  failed:
237         ti = current->journal_info;
238         current->journal_info = ti->ti_save;
239         if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
240                 kmem_cache_free(nilfs_transaction_cachep, ti);
241         sb_end_intwrite(sb);
242         return ret;
243 }
244
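/*
 * Usage sketch (added example, not part of the original source): a typical
 * caller brackets its updates with the transaction helpers, committing on
 * success and aborting on failure:
 *
 *      struct nilfs_transaction_info ti;
 *      int err;
 *
 *      err = nilfs_transaction_begin(sb, &ti, 1);
 *      if (err)
 *              return err;
 *      err = do_some_update(inode);    <-- hypothetical helper
 *      if (unlikely(err)) {
 *              nilfs_transaction_abort(sb);
 *              return err;
 *      }
 *      return nilfs_transaction_commit(sb);
 */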
245 /**
246  * nilfs_transaction_commit - commit indivisible file operations.
247  * @sb: super block
248  *
249  * nilfs_transaction_commit() releases the read semaphore which is
250  * acquired by nilfs_transaction_begin(). This is only performed
251  * in the outermost call of this function.  If a commit flag is set,
252  * nilfs_transaction_commit() sets a timer to start the segment
253  * constructor.  If a sync flag is set, it starts construction
254  * directly.
255  */
256 int nilfs_transaction_commit(struct super_block *sb)
257 {
258         struct nilfs_transaction_info *ti = current->journal_info;
259         struct the_nilfs *nilfs = sb->s_fs_info;
260         int err = 0;
261
262         BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
263         ti->ti_flags |= NILFS_TI_COMMIT;
264         if (ti->ti_count > 0) {
265                 ti->ti_count--;
266                 trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
267                             ti->ti_flags, TRACE_NILFS2_TRANSACTION_COMMIT);
268                 return 0;
269         }
270         if (nilfs->ns_writer) {
271                 struct nilfs_sc_info *sci = nilfs->ns_writer;
272
273                 if (ti->ti_flags & NILFS_TI_COMMIT)
274                         nilfs_segctor_start_timer(sci);
275                 if (atomic_read(&nilfs->ns_ndirtyblks) > sci->sc_watermark)
276                         nilfs_segctor_do_flush(sci, 0);
277         }
278         up_read(&nilfs->ns_segctor_sem);
279         trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
280                             ti->ti_flags, TRACE_NILFS2_TRANSACTION_COMMIT);
281
282         current->journal_info = ti->ti_save;
283
284         if (ti->ti_flags & NILFS_TI_SYNC)
285                 err = nilfs_construct_segment(sb);
286         if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
287                 kmem_cache_free(nilfs_transaction_cachep, ti);
288         sb_end_intwrite(sb);
289         return err;
290 }
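/*
 * Nesting sketch (added example, not part of the original source): when the
 * begin/commit pair nests, only the outermost pair touches the segment
 * semaphore; inner calls just adjust ti_count:
 *
 *      nilfs_transaction_begin(sb, &ti, 0);    ti.ti_count == 0, sem taken
 *        nilfs_transaction_begin(sb, &ti2, 0); ti.ti_count == 1, ti2 unused
 *        nilfs_transaction_commit(sb);         ti.ti_count back to 0
 *      nilfs_transaction_commit(sb);           sem released, timer may start
 */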
291
292 void nilfs_transaction_abort(struct super_block *sb)
293 {
294         struct nilfs_transaction_info *ti = current->journal_info;
295         struct the_nilfs *nilfs = sb->s_fs_info;
296
297         BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
298         if (ti->ti_count > 0) {
299                 ti->ti_count--;
300                 trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
301                             ti->ti_flags, TRACE_NILFS2_TRANSACTION_ABORT);
302                 return;
303         }
304         up_read(&nilfs->ns_segctor_sem);
305
306         trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
307                     ti->ti_flags, TRACE_NILFS2_TRANSACTION_ABORT);
308
309         current->journal_info = ti->ti_save;
310         if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
311                 kmem_cache_free(nilfs_transaction_cachep, ti);
312         sb_end_intwrite(sb);
313 }
314
315 void nilfs_relax_pressure_in_lock(struct super_block *sb)
316 {
317         struct the_nilfs *nilfs = sb->s_fs_info;
318         struct nilfs_sc_info *sci = nilfs->ns_writer;
319
320         if (sb_rdonly(sb) || unlikely(!sci) || !sci->sc_flush_request)
321                 return;
322
323         set_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
324         up_read(&nilfs->ns_segctor_sem);
325
326         down_write(&nilfs->ns_segctor_sem);
327         if (sci->sc_flush_request &&
328             test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags)) {
329                 struct nilfs_transaction_info *ti = current->journal_info;
330
331                 ti->ti_flags |= NILFS_TI_WRITER;
332                 nilfs_segctor_do_immediate_flush(sci);
333                 ti->ti_flags &= ~NILFS_TI_WRITER;
334         }
335         downgrade_write(&nilfs->ns_segctor_sem);
336 }
337
338 static void nilfs_transaction_lock(struct super_block *sb,
339                                    struct nilfs_transaction_info *ti,
340                                    int gcflag)
341 {
342         struct nilfs_transaction_info *cur_ti = current->journal_info;
343         struct the_nilfs *nilfs = sb->s_fs_info;
344         struct nilfs_sc_info *sci = nilfs->ns_writer;
345
346         WARN_ON(cur_ti);
347         ti->ti_flags = NILFS_TI_WRITER;
348         ti->ti_count = 0;
349         ti->ti_save = cur_ti;
350         ti->ti_magic = NILFS_TI_MAGIC;
351         current->journal_info = ti;
352
353         for (;;) {
354                 trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
355                             ti->ti_flags, TRACE_NILFS2_TRANSACTION_TRYLOCK);
356
357                 down_write(&nilfs->ns_segctor_sem);
358                 if (!test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags))
359                         break;
360
361                 nilfs_segctor_do_immediate_flush(sci);
362
363                 up_write(&nilfs->ns_segctor_sem);
364                 cond_resched();
365         }
366         if (gcflag)
367                 ti->ti_flags |= NILFS_TI_GC;
368
369         trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
370                             ti->ti_flags, TRACE_NILFS2_TRANSACTION_LOCK);
371 }
372
373 static void nilfs_transaction_unlock(struct super_block *sb)
374 {
375         struct nilfs_transaction_info *ti = current->journal_info;
376         struct the_nilfs *nilfs = sb->s_fs_info;
377
378         BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
379         BUG_ON(ti->ti_count > 0);
380
381         up_write(&nilfs->ns_segctor_sem);
382         current->journal_info = ti->ti_save;
383
384         trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
385                             ti->ti_flags, TRACE_NILFS2_TRANSACTION_UNLOCK);
386 }
387
388 static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci,
389                                             struct nilfs_segsum_pointer *ssp,
390                                             unsigned int bytes)
391 {
392         struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
393         unsigned int blocksize = sci->sc_super->s_blocksize;
394         void *p;
395
396         if (unlikely(ssp->offset + bytes > blocksize)) {
397                 ssp->offset = 0;
398                 BUG_ON(NILFS_SEGBUF_BH_IS_LAST(ssp->bh,
399                                                &segbuf->sb_segsum_buffers));
400                 ssp->bh = NILFS_SEGBUF_NEXT_BH(ssp->bh);
401         }
402         p = ssp->bh->b_data + ssp->offset;
403         ssp->offset += bytes;
404         return p;
405 }
406
407 /**
408  * nilfs_segctor_reset_segment_buffer - reset the current segment buffer
409  * @sci: nilfs_sc_info
410  */
411 static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci)
412 {
413         struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
414         struct buffer_head *sumbh;
415         unsigned int sumbytes;
416         unsigned int flags = 0;
417         int err;
418
419         if (nilfs_doing_gc())
420                 flags = NILFS_SS_GC;
421         err = nilfs_segbuf_reset(segbuf, flags, sci->sc_seg_ctime, sci->sc_cno);
422         if (unlikely(err))
423                 return err;
424
425         sumbh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
426         sumbytes = segbuf->sb_sum.sumbytes;
427         sci->sc_finfo_ptr.bh = sumbh;  sci->sc_finfo_ptr.offset = sumbytes;
428         sci->sc_binfo_ptr.bh = sumbh;  sci->sc_binfo_ptr.offset = sumbytes;
429         sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
430         return 0;
431 }
432
433 /**
434  * nilfs_segctor_zeropad_segsum - zero pad the rest of the segment summary area
435  * @sci: segment constructor object
436  *
437  * nilfs_segctor_zeropad_segsum() zero-fills unallocated space at the end of
438  * the current segment summary block.
439  */
440 static void nilfs_segctor_zeropad_segsum(struct nilfs_sc_info *sci)
441 {
442         struct nilfs_segsum_pointer *ssp;
443
444         ssp = sci->sc_blk_cnt > 0 ? &sci->sc_binfo_ptr : &sci->sc_finfo_ptr;
445         if (ssp->offset < ssp->bh->b_size)
446                 memset(ssp->bh->b_data + ssp->offset, 0,
447                        ssp->bh->b_size - ssp->offset);
448 }
449
450 static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
451 {
452         sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
453         if (NILFS_SEGBUF_IS_LAST(sci->sc_curseg, &sci->sc_segbufs))
454                 return -E2BIG; /*
455                                 * The current segment is filled up
456                                 * (internal code)
457                                 */
458         nilfs_segctor_zeropad_segsum(sci);
459         sci->sc_curseg = NILFS_NEXT_SEGBUF(sci->sc_curseg);
460         return nilfs_segctor_reset_segment_buffer(sci);
461 }
462
463 static int nilfs_segctor_add_super_root(struct nilfs_sc_info *sci)
464 {
465         struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
466         int err;
467
468         if (segbuf->sb_sum.nblocks >= segbuf->sb_rest_blocks) {
469                 err = nilfs_segctor_feed_segment(sci);
470                 if (err)
471                         return err;
472                 segbuf = sci->sc_curseg;
473         }
474         err = nilfs_segbuf_extend_payload(segbuf, &segbuf->sb_super_root);
475         if (likely(!err))
476                 segbuf->sb_sum.flags |= NILFS_SS_SR;
477         return err;
478 }
479
480 /*
481  * Functions for making segment summary and payloads
482  */
483 static int nilfs_segctor_segsum_block_required(
484         struct nilfs_sc_info *sci, const struct nilfs_segsum_pointer *ssp,
485         unsigned int binfo_size)
486 {
487         unsigned int blocksize = sci->sc_super->s_blocksize;
488         /* Sizes of finfo and binfo are small enough compared to blocksize */
489
490         return ssp->offset + binfo_size +
491                 (!sci->sc_blk_cnt ? sizeof(struct nilfs_finfo) : 0) >
492                 blocksize;
493 }
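/*
 * Worked example (added note, not part of the original source): with a
 * 4096-byte block size, a summary offset of 4080 and a binfo_size of 24,
 * 4080 + 24 = 4104 > 4096, so a new segment summary block is required.
 * When this is the first block of the file (sc_blk_cnt == 0),
 * sizeof(struct nilfs_finfo) is added to the sum before the comparison.
 */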
494
495 static void nilfs_segctor_begin_finfo(struct nilfs_sc_info *sci,
496                                       struct inode *inode)
497 {
498         sci->sc_curseg->sb_sum.nfinfo++;
499         sci->sc_binfo_ptr = sci->sc_finfo_ptr;
500         nilfs_segctor_map_segsum_entry(
501                 sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo));
502
503         if (NILFS_I(inode)->i_root &&
504             !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
505                 set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
506         /* skip finfo */
507 }
508
509 static void nilfs_segctor_end_finfo(struct nilfs_sc_info *sci,
510                                     struct inode *inode)
511 {
512         struct nilfs_finfo *finfo;
513         struct nilfs_inode_info *ii;
514         struct nilfs_segment_buffer *segbuf;
515         __u64 cno;
516
517         if (sci->sc_blk_cnt == 0)
518                 return;
519
520         ii = NILFS_I(inode);
521
522         if (test_bit(NILFS_I_GCINODE, &ii->i_state))
523                 cno = ii->i_cno;
524         else if (NILFS_ROOT_METADATA_FILE(inode->i_ino))
525                 cno = 0;
526         else
527                 cno = sci->sc_cno;
528
529         finfo = nilfs_segctor_map_segsum_entry(sci, &sci->sc_finfo_ptr,
530                                                  sizeof(*finfo));
531         finfo->fi_ino = cpu_to_le64(inode->i_ino);
532         finfo->fi_nblocks = cpu_to_le32(sci->sc_blk_cnt);
533         finfo->fi_ndatablk = cpu_to_le32(sci->sc_datablk_cnt);
534         finfo->fi_cno = cpu_to_le64(cno);
535
536         segbuf = sci->sc_curseg;
537         segbuf->sb_sum.sumbytes = sci->sc_binfo_ptr.offset +
538                 sci->sc_super->s_blocksize * (segbuf->sb_sum.nsumblk - 1);
539         sci->sc_finfo_ptr = sci->sc_binfo_ptr;
540         sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
541 }
542
543 static int nilfs_segctor_add_file_block(struct nilfs_sc_info *sci,
544                                         struct buffer_head *bh,
545                                         struct inode *inode,
546                                         unsigned int binfo_size)
547 {
548         struct nilfs_segment_buffer *segbuf;
549         int required, err = 0;
550
551  retry:
552         segbuf = sci->sc_curseg;
553         required = nilfs_segctor_segsum_block_required(
554                 sci, &sci->sc_binfo_ptr, binfo_size);
555         if (segbuf->sb_sum.nblocks + required + 1 > segbuf->sb_rest_blocks) {
556                 nilfs_segctor_end_finfo(sci, inode);
557                 err = nilfs_segctor_feed_segment(sci);
558                 if (err)
559                         return err;
560                 goto retry;
561         }
562         if (unlikely(required)) {
563                 nilfs_segctor_zeropad_segsum(sci);
564                 err = nilfs_segbuf_extend_segsum(segbuf);
565                 if (unlikely(err))
566                         goto failed;
567         }
568         if (sci->sc_blk_cnt == 0)
569                 nilfs_segctor_begin_finfo(sci, inode);
570
571         nilfs_segctor_map_segsum_entry(sci, &sci->sc_binfo_ptr, binfo_size);
572         /* Substitution to vblocknr is delayed until update_blocknr() */
573         nilfs_segbuf_add_file_buffer(segbuf, bh);
574         sci->sc_blk_cnt++;
575  failed:
576         return err;
577 }
578
579 /*
580  * Callback functions that enumerate, mark, and collect dirty blocks
581  */
582 static int nilfs_collect_file_data(struct nilfs_sc_info *sci,
583                                    struct buffer_head *bh, struct inode *inode)
584 {
585         int err;
586
587         err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
588         if (err < 0)
589                 return err;
590
591         err = nilfs_segctor_add_file_block(sci, bh, inode,
592                                            sizeof(struct nilfs_binfo_v));
593         if (!err)
594                 sci->sc_datablk_cnt++;
595         return err;
596 }
597
598 static int nilfs_collect_file_node(struct nilfs_sc_info *sci,
599                                    struct buffer_head *bh,
600                                    struct inode *inode)
601 {
602         return nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
603 }
604
605 static int nilfs_collect_file_bmap(struct nilfs_sc_info *sci,
606                                    struct buffer_head *bh,
607                                    struct inode *inode)
608 {
609         WARN_ON(!buffer_dirty(bh));
610         return nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
611 }
612
613 static void nilfs_write_file_data_binfo(struct nilfs_sc_info *sci,
614                                         struct nilfs_segsum_pointer *ssp,
615                                         union nilfs_binfo *binfo)
616 {
617         struct nilfs_binfo_v *binfo_v = nilfs_segctor_map_segsum_entry(
618                 sci, ssp, sizeof(*binfo_v));
619         *binfo_v = binfo->bi_v;
620 }
621
622 static void nilfs_write_file_node_binfo(struct nilfs_sc_info *sci,
623                                         struct nilfs_segsum_pointer *ssp,
624                                         union nilfs_binfo *binfo)
625 {
626         __le64 *vblocknr = nilfs_segctor_map_segsum_entry(
627                 sci, ssp, sizeof(*vblocknr));
628         *vblocknr = binfo->bi_v.bi_vblocknr;
629 }
630
631 static const struct nilfs_sc_operations nilfs_sc_file_ops = {
632         .collect_data = nilfs_collect_file_data,
633         .collect_node = nilfs_collect_file_node,
634         .collect_bmap = nilfs_collect_file_bmap,
635         .write_data_binfo = nilfs_write_file_data_binfo,
636         .write_node_binfo = nilfs_write_file_node_binfo,
637 };
638
639 static int nilfs_collect_dat_data(struct nilfs_sc_info *sci,
640                                   struct buffer_head *bh, struct inode *inode)
641 {
642         int err;
643
644         err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
645         if (err < 0)
646                 return err;
647
648         err = nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
649         if (!err)
650                 sci->sc_datablk_cnt++;
651         return err;
652 }
653
654 static int nilfs_collect_dat_bmap(struct nilfs_sc_info *sci,
655                                   struct buffer_head *bh, struct inode *inode)
656 {
657         WARN_ON(!buffer_dirty(bh));
658         return nilfs_segctor_add_file_block(sci, bh, inode,
659                                             sizeof(struct nilfs_binfo_dat));
660 }
661
662 static void nilfs_write_dat_data_binfo(struct nilfs_sc_info *sci,
663                                        struct nilfs_segsum_pointer *ssp,
664                                        union nilfs_binfo *binfo)
665 {
666         __le64 *blkoff = nilfs_segctor_map_segsum_entry(sci, ssp,
667                                                           sizeof(*blkoff));
668         *blkoff = binfo->bi_dat.bi_blkoff;
669 }
670
671 static void nilfs_write_dat_node_binfo(struct nilfs_sc_info *sci,
672                                        struct nilfs_segsum_pointer *ssp,
673                                        union nilfs_binfo *binfo)
674 {
675         struct nilfs_binfo_dat *binfo_dat =
676                 nilfs_segctor_map_segsum_entry(sci, ssp, sizeof(*binfo_dat));
677         *binfo_dat = binfo->bi_dat;
678 }
679
680 static const struct nilfs_sc_operations nilfs_sc_dat_ops = {
681         .collect_data = nilfs_collect_dat_data,
682         .collect_node = nilfs_collect_file_node,
683         .collect_bmap = nilfs_collect_dat_bmap,
684         .write_data_binfo = nilfs_write_dat_data_binfo,
685         .write_node_binfo = nilfs_write_dat_node_binfo,
686 };
687
688 static const struct nilfs_sc_operations nilfs_sc_dsync_ops = {
689         .collect_data = nilfs_collect_file_data,
690         .collect_node = NULL,
691         .collect_bmap = NULL,
692         .write_data_binfo = nilfs_write_file_data_binfo,
693         .write_node_binfo = NULL,
694 };
695
696 static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
697                                               struct list_head *listp,
698                                               size_t nlimit,
699                                               loff_t start, loff_t end)
700 {
701         struct address_space *mapping = inode->i_mapping;
702         struct folio_batch fbatch;
703         pgoff_t index = 0, last = ULONG_MAX;
704         size_t ndirties = 0;
705         int i;
706
707         if (unlikely(start != 0 || end != LLONG_MAX)) {
708                 /*
709                  * A valid range is given for syncing data pages. The
710                  * range is rounded to page boundaries; extra dirty buffers
711                  * may be included if blocksize < pagesize.
712                  */
713                 index = start >> PAGE_SHIFT;
714                 last = end >> PAGE_SHIFT;
715         }
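        /*
         * Worked example (added note, not part of the original source):
         * assuming 4 KiB pages (PAGE_SHIFT == 12), start = 5000 and
         * end = 20000 give index = 1 and last = 4, so pages 1 through 4
         * are scanned even though the byte range only partially covers
         * pages 1 and 4.
         */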
716         folio_batch_init(&fbatch);
717  repeat:
718         if (unlikely(index > last) ||
719               !filemap_get_folios_tag(mapping, &index, last,
720                       PAGECACHE_TAG_DIRTY, &fbatch))
721                 return ndirties;
722
723         for (i = 0; i < folio_batch_count(&fbatch); i++) {
724                 struct buffer_head *bh, *head;
725                 struct folio *folio = fbatch.folios[i];
726
727                 folio_lock(folio);
728                 head = folio_buffers(folio);
729                 if (!head) {
730                         create_empty_buffers(&folio->page, i_blocksize(inode), 0);
731                         head = folio_buffers(folio);
732                 }
733                 folio_unlock(folio);
734
735                 bh = head;
736                 do {
737                         if (!buffer_dirty(bh) || buffer_async_write(bh))
738                                 continue;
739                         get_bh(bh);
740                         list_add_tail(&bh->b_assoc_buffers, listp);
741                         ndirties++;
742                         if (unlikely(ndirties >= nlimit)) {
743                                 folio_batch_release(&fbatch);
744                                 cond_resched();
745                                 return ndirties;
746                         }
747                 } while (bh = bh->b_this_page, bh != head);
748         }
749         folio_batch_release(&fbatch);
750         cond_resched();
751         goto repeat;
752 }
753
754 static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
755                                             struct list_head *listp)
756 {
757         struct nilfs_inode_info *ii = NILFS_I(inode);
758         struct inode *btnc_inode = ii->i_assoc_inode;
759         struct folio_batch fbatch;
760         struct buffer_head *bh, *head;
761         unsigned int i;
762         pgoff_t index = 0;
763
764         if (!btnc_inode)
765                 return;
766         folio_batch_init(&fbatch);
767
768         while (filemap_get_folios_tag(btnc_inode->i_mapping, &index,
769                                 (pgoff_t)-1, PAGECACHE_TAG_DIRTY, &fbatch)) {
770                 for (i = 0; i < folio_batch_count(&fbatch); i++) {
771                         bh = head = folio_buffers(fbatch.folios[i]);
772                         do {
773                                 if (buffer_dirty(bh) &&
774                                                 !buffer_async_write(bh)) {
775                                         get_bh(bh);
776                                         list_add_tail(&bh->b_assoc_buffers,
777                                                       listp);
778                                 }
779                                 bh = bh->b_this_page;
780                         } while (bh != head);
781                 }
782                 folio_batch_release(&fbatch);
783                 cond_resched();
784         }
785 }
786
787 static void nilfs_dispose_list(struct the_nilfs *nilfs,
788                                struct list_head *head, int force)
789 {
790         struct nilfs_inode_info *ii, *n;
791         struct nilfs_inode_info *ivec[SC_N_INODEVEC], **pii;
792         unsigned int nv = 0;
793
794         while (!list_empty(head)) {
795                 spin_lock(&nilfs->ns_inode_lock);
796                 list_for_each_entry_safe(ii, n, head, i_dirty) {
797                         list_del_init(&ii->i_dirty);
798                         if (force) {
799                                 if (unlikely(ii->i_bh)) {
800                                         brelse(ii->i_bh);
801                                         ii->i_bh = NULL;
802                                 }
803                         } else if (test_bit(NILFS_I_DIRTY, &ii->i_state)) {
804                                 set_bit(NILFS_I_QUEUED, &ii->i_state);
805                                 list_add_tail(&ii->i_dirty,
806                                               &nilfs->ns_dirty_files);
807                                 continue;
808                         }
809                         ivec[nv++] = ii;
810                         if (nv == SC_N_INODEVEC)
811                                 break;
812                 }
813                 spin_unlock(&nilfs->ns_inode_lock);
814
815                 for (pii = ivec; nv > 0; pii++, nv--)
816                         iput(&(*pii)->vfs_inode);
817         }
818 }
819
820 static void nilfs_iput_work_func(struct work_struct *work)
821 {
822         struct nilfs_sc_info *sci = container_of(work, struct nilfs_sc_info,
823                                                  sc_iput_work);
824         struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
825
826         nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 0);
827 }
828
829 static int nilfs_test_metadata_dirty(struct the_nilfs *nilfs,
830                                      struct nilfs_root *root)
831 {
832         int ret = 0;
833
834         if (nilfs_mdt_fetch_dirty(root->ifile))
835                 ret++;
836         if (nilfs_mdt_fetch_dirty(nilfs->ns_cpfile))
837                 ret++;
838         if (nilfs_mdt_fetch_dirty(nilfs->ns_sufile))
839                 ret++;
840         if ((ret || nilfs_doing_gc()) && nilfs_mdt_fetch_dirty(nilfs->ns_dat))
841                 ret++;
842         return ret;
843 }
844
845 static int nilfs_segctor_clean(struct nilfs_sc_info *sci)
846 {
847         return list_empty(&sci->sc_dirty_files) &&
848                 !test_bit(NILFS_SC_DIRTY, &sci->sc_flags) &&
849                 sci->sc_nfreesegs == 0 &&
850                 (!nilfs_doing_gc() || list_empty(&sci->sc_gc_inodes));
851 }
852
853 static int nilfs_segctor_confirm(struct nilfs_sc_info *sci)
854 {
855         struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
856         int ret = 0;
857
858         if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
859                 set_bit(NILFS_SC_DIRTY, &sci->sc_flags);
860
861         spin_lock(&nilfs->ns_inode_lock);
862         if (list_empty(&nilfs->ns_dirty_files) && nilfs_segctor_clean(sci))
863                 ret++;
864
865         spin_unlock(&nilfs->ns_inode_lock);
866         return ret;
867 }
868
869 static void nilfs_segctor_clear_metadata_dirty(struct nilfs_sc_info *sci)
870 {
871         struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
872
873         nilfs_mdt_clear_dirty(sci->sc_root->ifile);
874         nilfs_mdt_clear_dirty(nilfs->ns_cpfile);
875         nilfs_mdt_clear_dirty(nilfs->ns_sufile);
876         nilfs_mdt_clear_dirty(nilfs->ns_dat);
877 }
878
879 static int nilfs_segctor_create_checkpoint(struct nilfs_sc_info *sci)
880 {
881         struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
882         struct buffer_head *bh_cp;
883         struct nilfs_checkpoint *raw_cp;
884         int err;
885
886         /* XXX: this interface will be changed */
887         err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 1,
888                                           &raw_cp, &bh_cp);
889         if (likely(!err)) {
890                 /*
891                  * The following code is duplicated with cpfile.  But, it is
892                  * needed to collect the checkpoint even if it was not newly
893                  * created.
894                  */
895                 mark_buffer_dirty(bh_cp);
896                 nilfs_mdt_mark_dirty(nilfs->ns_cpfile);
897                 nilfs_cpfile_put_checkpoint(
898                         nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
899         } else if (err == -EINVAL || err == -ENOENT) {
900                 nilfs_error(sci->sc_super,
901                             "checkpoint creation failed due to metadata corruption.");
902                 err = -EIO;
903         }
904         return err;
905 }
906
907 static int nilfs_segctor_fill_in_checkpoint(struct nilfs_sc_info *sci)
908 {
909         struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
910         struct buffer_head *bh_cp;
911         struct nilfs_checkpoint *raw_cp;
912         int err;
913
914         err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 0,
915                                           &raw_cp, &bh_cp);
916         if (unlikely(err)) {
917                 if (err == -EINVAL || err == -ENOENT) {
918                         nilfs_error(sci->sc_super,
919                                     "checkpoint finalization failed due to metadata corruption.");
920                         err = -EIO;
921                 }
922                 goto failed_ibh;
923         }
924         raw_cp->cp_snapshot_list.ssl_next = 0;
925         raw_cp->cp_snapshot_list.ssl_prev = 0;
926         raw_cp->cp_inodes_count =
927                 cpu_to_le64(atomic64_read(&sci->sc_root->inodes_count));
928         raw_cp->cp_blocks_count =
929                 cpu_to_le64(atomic64_read(&sci->sc_root->blocks_count));
930         raw_cp->cp_nblk_inc =
931                 cpu_to_le64(sci->sc_nblk_inc + sci->sc_nblk_this_inc);
932         raw_cp->cp_create = cpu_to_le64(sci->sc_seg_ctime);
933         raw_cp->cp_cno = cpu_to_le64(nilfs->ns_cno);
934
935         if (test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
936                 nilfs_checkpoint_clear_minor(raw_cp);
937         else
938                 nilfs_checkpoint_set_minor(raw_cp);
939
940         nilfs_write_inode_common(sci->sc_root->ifile,
941                                  &raw_cp->cp_ifile_inode, 1);
942         nilfs_cpfile_put_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
943         return 0;
944
945  failed_ibh:
946         return err;
947 }
948
949 static void nilfs_fill_in_file_bmap(struct inode *ifile,
950                                     struct nilfs_inode_info *ii)
951
952 {
953         struct buffer_head *ibh;
954         struct nilfs_inode *raw_inode;
955
956         if (test_bit(NILFS_I_BMAP, &ii->i_state)) {
957                 ibh = ii->i_bh;
958                 BUG_ON(!ibh);
959                 raw_inode = nilfs_ifile_map_inode(ifile, ii->vfs_inode.i_ino,
960                                                   ibh);
961                 nilfs_bmap_write(ii->i_bmap, raw_inode);
962                 nilfs_ifile_unmap_inode(ifile, ii->vfs_inode.i_ino, ibh);
963         }
964 }
965
966 static void nilfs_segctor_fill_in_file_bmap(struct nilfs_sc_info *sci)
967 {
968         struct nilfs_inode_info *ii;
969
970         list_for_each_entry(ii, &sci->sc_dirty_files, i_dirty) {
971                 nilfs_fill_in_file_bmap(sci->sc_root->ifile, ii);
972                 set_bit(NILFS_I_COLLECTED, &ii->i_state);
973         }
974 }
975
976 static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci,
977                                              struct the_nilfs *nilfs)
978 {
979         struct buffer_head *bh_sr;
980         struct nilfs_super_root *raw_sr;
981         unsigned int isz, srsz;
982
983         bh_sr = NILFS_LAST_SEGBUF(&sci->sc_segbufs)->sb_super_root;
984
985         lock_buffer(bh_sr);
986         raw_sr = (struct nilfs_super_root *)bh_sr->b_data;
987         isz = nilfs->ns_inode_size;
988         srsz = NILFS_SR_BYTES(isz);
989
990         raw_sr->sr_sum = 0;  /* Ensure initialization within this update */
991         raw_sr->sr_bytes = cpu_to_le16(srsz);
992         raw_sr->sr_nongc_ctime
993                 = cpu_to_le64(nilfs_doing_gc() ?
994                               nilfs->ns_nongc_ctime : sci->sc_seg_ctime);
995         raw_sr->sr_flags = 0;
996
997         nilfs_write_inode_common(nilfs->ns_dat, (void *)raw_sr +
998                                  NILFS_SR_DAT_OFFSET(isz), 1);
999         nilfs_write_inode_common(nilfs->ns_cpfile, (void *)raw_sr +
1000                                  NILFS_SR_CPFILE_OFFSET(isz), 1);
1001         nilfs_write_inode_common(nilfs->ns_sufile, (void *)raw_sr +
1002                                  NILFS_SR_SUFILE_OFFSET(isz), 1);
1003         memset((void *)raw_sr + srsz, 0, nilfs->ns_blocksize - srsz);
1004         set_buffer_uptodate(bh_sr);
1005         unlock_buffer(bh_sr);
1006 }
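/*
 * Layout sketch (added note, not part of the original source): the super
 * root block filled in above is laid out roughly as
 *
 *      offset 0                        struct nilfs_super_root header
 *      NILFS_SR_DAT_OFFSET(isz)        DAT inode image
 *      NILFS_SR_CPFILE_OFFSET(isz)     cpfile inode image
 *      NILFS_SR_SUFILE_OFFSET(isz)     sufile inode image
 *      NILFS_SR_BYTES(isz) onward      zero padding to the end of the block
 */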
1007
1008 static void nilfs_redirty_inodes(struct list_head *head)
1009 {
1010         struct nilfs_inode_info *ii;
1011
1012         list_for_each_entry(ii, head, i_dirty) {
1013                 if (test_bit(NILFS_I_COLLECTED, &ii->i_state))
1014                         clear_bit(NILFS_I_COLLECTED, &ii->i_state);
1015         }
1016 }
1017
1018 static void nilfs_drop_collected_inodes(struct list_head *head)
1019 {
1020         struct nilfs_inode_info *ii;
1021
1022         list_for_each_entry(ii, head, i_dirty) {
1023                 if (!test_and_clear_bit(NILFS_I_COLLECTED, &ii->i_state))
1024                         continue;
1025
1026                 clear_bit(NILFS_I_INODE_SYNC, &ii->i_state);
1027                 set_bit(NILFS_I_UPDATED, &ii->i_state);
1028         }
1029 }
1030
1031 static int nilfs_segctor_apply_buffers(struct nilfs_sc_info *sci,
1032                                        struct inode *inode,
1033                                        struct list_head *listp,
1034                                        int (*collect)(struct nilfs_sc_info *,
1035                                                       struct buffer_head *,
1036                                                       struct inode *))
1037 {
1038         struct buffer_head *bh, *n;
1039         int err = 0;
1040
1041         if (collect) {
1042                 list_for_each_entry_safe(bh, n, listp, b_assoc_buffers) {
1043                         list_del_init(&bh->b_assoc_buffers);
1044                         err = collect(sci, bh, inode);
1045                         brelse(bh);
1046                         if (unlikely(err))
1047                                 goto dispose_buffers;
1048                 }
1049                 return 0;
1050         }
1051
1052  dispose_buffers:
1053         while (!list_empty(listp)) {
1054                 bh = list_first_entry(listp, struct buffer_head,
1055                                       b_assoc_buffers);
1056                 list_del_init(&bh->b_assoc_buffers);
1057                 brelse(bh);
1058         }
1059         return err;
1060 }
1061
1062 static size_t nilfs_segctor_buffer_rest(struct nilfs_sc_info *sci)
1063 {
1064         /* Remaining number of blocks within segment buffer */
1065         return sci->sc_segbuf_nblocks -
1066                 (sci->sc_nblk_this_inc + sci->sc_curseg->sb_sum.nblocks);
1067 }
1068
1069 static int nilfs_segctor_scan_file(struct nilfs_sc_info *sci,
1070                                    struct inode *inode,
1071                                    const struct nilfs_sc_operations *sc_ops)
1072 {
1073         LIST_HEAD(data_buffers);
1074         LIST_HEAD(node_buffers);
1075         int err;
1076
1077         if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
1078                 size_t n, rest = nilfs_segctor_buffer_rest(sci);
1079
1080                 n = nilfs_lookup_dirty_data_buffers(
1081                         inode, &data_buffers, rest + 1, 0, LLONG_MAX);
1082                 if (n > rest) {
1083                         err = nilfs_segctor_apply_buffers(
1084                                 sci, inode, &data_buffers,
1085                                 sc_ops->collect_data);
1086                         BUG_ON(!err); /* always receive -E2BIG or true error */
1087                         goto break_or_fail;
1088                 }
1089         }
1090         nilfs_lookup_dirty_node_buffers(inode, &node_buffers);
1091
1092         if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
1093                 err = nilfs_segctor_apply_buffers(
1094                         sci, inode, &data_buffers, sc_ops->collect_data);
1095                 if (unlikely(err)) {
1096                         /* dispose node list */
1097                         nilfs_segctor_apply_buffers(
1098                                 sci, inode, &node_buffers, NULL);
1099                         goto break_or_fail;
1100                 }
1101                 sci->sc_stage.flags |= NILFS_CF_NODE;
1102         }
1103         /* Collect node */
1104         err = nilfs_segctor_apply_buffers(
1105                 sci, inode, &node_buffers, sc_ops->collect_node);
1106         if (unlikely(err))
1107                 goto break_or_fail;
1108
1109         nilfs_bmap_lookup_dirty_buffers(NILFS_I(inode)->i_bmap, &node_buffers);
1110         err = nilfs_segctor_apply_buffers(
1111                 sci, inode, &node_buffers, sc_ops->collect_bmap);
1112         if (unlikely(err))
1113                 goto break_or_fail;
1114
1115         nilfs_segctor_end_finfo(sci, inode);
1116         sci->sc_stage.flags &= ~NILFS_CF_NODE;
1117
1118  break_or_fail:
1119         return err;
1120 }
1121
1122 static int nilfs_segctor_scan_file_dsync(struct nilfs_sc_info *sci,
1123                                          struct inode *inode)
1124 {
1125         LIST_HEAD(data_buffers);
1126         size_t n, rest = nilfs_segctor_buffer_rest(sci);
1127         int err;
1128
1129         n = nilfs_lookup_dirty_data_buffers(inode, &data_buffers, rest + 1,
1130                                             sci->sc_dsync_start,
1131                                             sci->sc_dsync_end);
1132
1133         err = nilfs_segctor_apply_buffers(sci, inode, &data_buffers,
1134                                           nilfs_collect_file_data);
1135         if (!err) {
1136                 nilfs_segctor_end_finfo(sci, inode);
1137                 BUG_ON(n > rest);
1138                 /* always receive -E2BIG or true error if n > rest */
1139         }
1140         return err;
1141 }
1142
1143 static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
1144 {
1145         struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
1146         struct list_head *head;
1147         struct nilfs_inode_info *ii;
1148         size_t ndone;
1149         int err = 0;
1150
1151         switch (nilfs_sc_cstage_get(sci)) {
1152         case NILFS_ST_INIT:
1153                 /* Pre-processes */
1154                 sci->sc_stage.flags = 0;
1155
1156                 if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags)) {
1157                         sci->sc_nblk_inc = 0;
1158                         sci->sc_curseg->sb_sum.flags = NILFS_SS_LOGBGN;
1159                         if (mode == SC_LSEG_DSYNC) {
1160                                 nilfs_sc_cstage_set(sci, NILFS_ST_DSYNC);
1161                                 goto dsync_mode;
1162                         }
1163                 }
1164
1165                 sci->sc_stage.dirty_file_ptr = NULL;
1166                 sci->sc_stage.gc_inode_ptr = NULL;
1167                 if (mode == SC_FLUSH_DAT) {
1168                         nilfs_sc_cstage_set(sci, NILFS_ST_DAT);
1169                         goto dat_stage;
1170                 }
1171                 nilfs_sc_cstage_inc(sci);
1172                 fallthrough;
1173         case NILFS_ST_GC:
1174                 if (nilfs_doing_gc()) {
1175                         head = &sci->sc_gc_inodes;
1176                         ii = list_prepare_entry(sci->sc_stage.gc_inode_ptr,
1177                                                 head, i_dirty);
1178                         list_for_each_entry_continue(ii, head, i_dirty) {
1179                                 err = nilfs_segctor_scan_file(
1180                                         sci, &ii->vfs_inode,
1181                                         &nilfs_sc_file_ops);
1182                                 if (unlikely(err)) {
1183                                         sci->sc_stage.gc_inode_ptr = list_entry(
1184                                                 ii->i_dirty.prev,
1185                                                 struct nilfs_inode_info,
1186                                                 i_dirty);
1187                                         goto break_or_fail;
1188                                 }
1189                                 set_bit(NILFS_I_COLLECTED, &ii->i_state);
1190                         }
1191                         sci->sc_stage.gc_inode_ptr = NULL;
1192                 }
1193                 nilfs_sc_cstage_inc(sci);
1194                 fallthrough;
1195         case NILFS_ST_FILE:
1196                 head = &sci->sc_dirty_files;
1197                 ii = list_prepare_entry(sci->sc_stage.dirty_file_ptr, head,
1198                                         i_dirty);
1199                 list_for_each_entry_continue(ii, head, i_dirty) {
1200                         clear_bit(NILFS_I_DIRTY, &ii->i_state);
1201
1202                         err = nilfs_segctor_scan_file(sci, &ii->vfs_inode,
1203                                                       &nilfs_sc_file_ops);
1204                         if (unlikely(err)) {
1205                                 sci->sc_stage.dirty_file_ptr =
1206                                         list_entry(ii->i_dirty.prev,
1207                                                    struct nilfs_inode_info,
1208                                                    i_dirty);
1209                                 goto break_or_fail;
1210                         }
1211                         /* sci->sc_stage.dirty_file_ptr = NILFS_I(inode); */
1212                         /* XXX: required ? */
1213                 }
1214                 sci->sc_stage.dirty_file_ptr = NULL;
1215                 if (mode == SC_FLUSH_FILE) {
1216                         nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
1217                         return 0;
1218                 }
1219                 nilfs_sc_cstage_inc(sci);
1220                 sci->sc_stage.flags |= NILFS_CF_IFILE_STARTED;
1221                 fallthrough;
1222         case NILFS_ST_IFILE:
1223                 err = nilfs_segctor_scan_file(sci, sci->sc_root->ifile,
1224                                               &nilfs_sc_file_ops);
1225                 if (unlikely(err))
1226                         break;
1227                 nilfs_sc_cstage_inc(sci);
1228                 /* Creating a checkpoint */
1229                 err = nilfs_segctor_create_checkpoint(sci);
1230                 if (unlikely(err))
1231                         break;
1232                 fallthrough;
1233         case NILFS_ST_CPFILE:
1234                 err = nilfs_segctor_scan_file(sci, nilfs->ns_cpfile,
1235                                               &nilfs_sc_file_ops);
1236                 if (unlikely(err))
1237                         break;
1238                 nilfs_sc_cstage_inc(sci);
1239                 fallthrough;
1240         case NILFS_ST_SUFILE:
1241                 err = nilfs_sufile_freev(nilfs->ns_sufile, sci->sc_freesegs,
1242                                          sci->sc_nfreesegs, &ndone);
1243                 if (unlikely(err)) {
1244                         nilfs_sufile_cancel_freev(nilfs->ns_sufile,
1245                                                   sci->sc_freesegs, ndone,
1246                                                   NULL);
1247                         break;
1248                 }
1249                 sci->sc_stage.flags |= NILFS_CF_SUFREED;
1250
1251                 err = nilfs_segctor_scan_file(sci, nilfs->ns_sufile,
1252                                               &nilfs_sc_file_ops);
1253                 if (unlikely(err))
1254                         break;
1255                 nilfs_sc_cstage_inc(sci);
1256                 fallthrough;
1257         case NILFS_ST_DAT:
1258  dat_stage:
1259                 err = nilfs_segctor_scan_file(sci, nilfs->ns_dat,
1260                                               &nilfs_sc_dat_ops);
1261                 if (unlikely(err))
1262                         break;
1263                 if (mode == SC_FLUSH_DAT) {
1264                         nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
1265                         return 0;
1266                 }
1267                 nilfs_sc_cstage_inc(sci);
1268                 fallthrough;
1269         case NILFS_ST_SR:
1270                 if (mode == SC_LSEG_SR) {
1271                         /* Appending a super root */
1272                         err = nilfs_segctor_add_super_root(sci);
1273                         if (unlikely(err))
1274                                 break;
1275                 }
1276                 /* End of a logical segment */
1277                 sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
1278                 nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
1279                 return 0;
1280         case NILFS_ST_DSYNC:
1281  dsync_mode:
1282                 sci->sc_curseg->sb_sum.flags |= NILFS_SS_SYNDT;
1283                 ii = sci->sc_dsync_inode;
1284                 if (!test_bit(NILFS_I_BUSY, &ii->i_state))
1285                         break;
1286
1287                 err = nilfs_segctor_scan_file_dsync(sci, &ii->vfs_inode);
1288                 if (unlikely(err))
1289                         break;
1290                 sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
1291                 nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
1292                 return 0;
1293         case NILFS_ST_DONE:
1294                 return 0;
1295         default:
1296                 BUG();
1297         }
1298
1299  break_or_fail:
1300         return err;
1301 }
1302
1303 /**
1304  * nilfs_segctor_begin_construction - set up segment buffer to make a new log
1305  * @sci: nilfs_sc_info
1306  * @nilfs: nilfs object
1307  */
1308 static int nilfs_segctor_begin_construction(struct nilfs_sc_info *sci,
1309                                             struct the_nilfs *nilfs)
1310 {
1311         struct nilfs_segment_buffer *segbuf, *prev;
1312         __u64 nextnum;
1313         int err, alloc = 0;
1314
1315         segbuf = nilfs_segbuf_new(sci->sc_super);
1316         if (unlikely(!segbuf))
1317                 return -ENOMEM;
1318
1319         if (list_empty(&sci->sc_write_logs)) {
1320                 nilfs_segbuf_map(segbuf, nilfs->ns_segnum,
1321                                  nilfs->ns_pseg_offset, nilfs);
1322                 if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
1323                         nilfs_shift_to_next_segment(nilfs);
1324                         nilfs_segbuf_map(segbuf, nilfs->ns_segnum, 0, nilfs);
1325                 }
1326
1327                 segbuf->sb_sum.seg_seq = nilfs->ns_seg_seq;
1328                 nextnum = nilfs->ns_nextnum;
1329
1330                 if (nilfs->ns_segnum == nilfs->ns_nextnum)
1331                         /* Start from the head of a new full segment */
1332                         alloc++;
1333         } else {
1334                 /* Continue logs */
1335                 prev = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
1336                 nilfs_segbuf_map_cont(segbuf, prev);
1337                 segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq;
1338                 nextnum = prev->sb_nextnum;
1339
1340                 if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
1341                         nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
1342                         segbuf->sb_sum.seg_seq++;
1343                         alloc++;
1344                 }
1345         }
1346
1347         err = nilfs_sufile_mark_dirty(nilfs->ns_sufile, segbuf->sb_segnum);
1348         if (err)
1349                 goto failed;
1350
1351         if (alloc) {
1352                 err = nilfs_sufile_alloc(nilfs->ns_sufile, &nextnum);
1353                 if (err)
1354                         goto failed;
1355         }
1356         nilfs_segbuf_set_next_segnum(segbuf, nextnum, nilfs);
1357
1358         BUG_ON(!list_empty(&sci->sc_segbufs));
1359         list_add_tail(&segbuf->sb_list, &sci->sc_segbufs);
1360         sci->sc_segbuf_nblocks = segbuf->sb_rest_blocks;
1361         return 0;
1362
1363  failed:
1364         nilfs_segbuf_free(segbuf);
1365         return err;
1366 }
1367
1368 static int nilfs_segctor_extend_segments(struct nilfs_sc_info *sci,
1369                                          struct the_nilfs *nilfs, int nadd)
1370 {
1371         struct nilfs_segment_buffer *segbuf, *prev;
1372         struct inode *sufile = nilfs->ns_sufile;
1373         __u64 nextnextnum;
1374         LIST_HEAD(list);
1375         int err, ret, i;
1376
1377         prev = NILFS_LAST_SEGBUF(&sci->sc_segbufs);
1378         /*
1379          * Since the segment specified with nextnum might be allocated during
1380          * the previous construction, the buffer including its segusage may
1381          * not be dirty.  The following call ensures that the buffer is dirty
1382          * and will pin the buffer in memory until the sufile is written.
1383          */
1384         err = nilfs_sufile_mark_dirty(sufile, prev->sb_nextnum);
1385         if (unlikely(err))
1386                 return err;
1387
1388         for (i = 0; i < nadd; i++) {
1389                 /* extend segment info */
1390                 err = -ENOMEM;
1391                 segbuf = nilfs_segbuf_new(sci->sc_super);
1392                 if (unlikely(!segbuf))
1393                         goto failed;
1394
1395                 /* map this buffer to region of segment on-disk */
1396                 nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
1397                 sci->sc_segbuf_nblocks += segbuf->sb_rest_blocks;
1398
1399                 /* allocate the next next full segment */
1400                 err = nilfs_sufile_alloc(sufile, &nextnextnum);
1401                 if (unlikely(err))
1402                         goto failed_segbuf;
1403
1404                 segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq + 1;
1405                 nilfs_segbuf_set_next_segnum(segbuf, nextnextnum, nilfs);
1406
1407                 list_add_tail(&segbuf->sb_list, &list);
1408                 prev = segbuf;
1409         }
1410         list_splice_tail(&list, &sci->sc_segbufs);
1411         return 0;
1412
1413  failed_segbuf:
1414         nilfs_segbuf_free(segbuf);
1415  failed:
1416         list_for_each_entry(segbuf, &list, sb_list) {
1417                 ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
1418                 WARN_ON(ret); /* never fails */
1419         }
1420         nilfs_destroy_logs(&list);
1421         return err;
1422 }
1423
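/**
 * nilfs_free_incomplete_logs - clean up segments of an aborted construction
 * @logs: list of segment buffers given up
 * @nilfs: nilfs object
 *
 * Returns to the sufile the successor segments that were allocated for
 * this construction.  If the first log failed, the partially written
 * segment is terminated (or the filesystem is marked discontinued when
 * the log started a fresh full segment); segments of later failed logs
 * are marked with an error in their segment usage.
 */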
1424 static void nilfs_free_incomplete_logs(struct list_head *logs,
1425                                        struct the_nilfs *nilfs)
1426 {
1427         struct nilfs_segment_buffer *segbuf, *prev;
1428         struct inode *sufile = nilfs->ns_sufile;
1429         int ret;
1430
1431         segbuf = NILFS_FIRST_SEGBUF(logs);
1432         if (nilfs->ns_nextnum != segbuf->sb_nextnum) {
1433                 ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
1434                 WARN_ON(ret); /* never fails */
1435         }
1436         if (atomic_read(&segbuf->sb_err)) {
1437                 /* Case 1: The first segment failed */
1438                 if (segbuf->sb_pseg_start != segbuf->sb_fseg_start)
1439                         /*
1440                          * Case 1a:  Partial segment appended into an existing
1441                          * segment
1442                          */
1443                         nilfs_terminate_segment(nilfs, segbuf->sb_fseg_start,
1444                                                 segbuf->sb_fseg_end);
1445                 else /* Case 1b:  New full segment */
1446                         set_nilfs_discontinued(nilfs);
1447         }
1448
1449         prev = segbuf;
1450         list_for_each_entry_continue(segbuf, logs, sb_list) {
1451                 if (prev->sb_nextnum != segbuf->sb_nextnum) {
1452                         ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
1453                         WARN_ON(ret); /* never fails */
1454                 }
1455                 if (atomic_read(&segbuf->sb_err) &&
1456                     segbuf->sb_segnum != nilfs->ns_nextnum)
1457                         /* Case 2: extended segment (!= next) failed */
1458                         nilfs_sufile_set_error(sufile, segbuf->sb_segnum);
1459                 prev = segbuf;
1460         }
1461 }
1462
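/**
 * nilfs_segctor_update_segusage - update usage of the written segments
 * @sci: segment constructor object
 * @sufile: sufile inode
 *
 * Records the number of live blocks and the write time in the segment
 * usage entry of every segment buffer queued for this construction.
 */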
1463 static void nilfs_segctor_update_segusage(struct nilfs_sc_info *sci,
1464                                           struct inode *sufile)
1465 {
1466         struct nilfs_segment_buffer *segbuf;
1467         unsigned long live_blocks;
1468         int ret;
1469
1470         list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
1471                 live_blocks = segbuf->sb_sum.nblocks +
1472                         (segbuf->sb_pseg_start - segbuf->sb_fseg_start);
1473                 ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
1474                                                      live_blocks,
1475                                                      sci->sc_seg_ctime);
1476                 WARN_ON(ret); /* always succeeds because the segusage is dirty */
1477         }
1478 }
1479
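/**
 * nilfs_cancel_segusage - revert segment usage updates of failed logs
 * @logs: list of segment buffers
 * @sufile: sufile inode
 *
 * Resets the segment usage of each listed segment: the first one keeps
 * only the blocks that preceded this construction, the rest are reset to
 * zero live blocks.
 */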
1480 static void nilfs_cancel_segusage(struct list_head *logs, struct inode *sufile)
1481 {
1482         struct nilfs_segment_buffer *segbuf;
1483         int ret;
1484
1485         segbuf = NILFS_FIRST_SEGBUF(logs);
1486         ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
1487                                              segbuf->sb_pseg_start -
1488                                              segbuf->sb_fseg_start, 0);
1489         WARN_ON(ret); /* always succeeds because the segusage is dirty */
1490
1491         list_for_each_entry_continue(segbuf, logs, sb_list) {
1492                 ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
1493                                                      0, 0);
1494                 WARN_ON(ret); /* always succeed */
1495                 WARN_ON(ret); /* always succeeds */
1496 }
1497
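/**
 * nilfs_segctor_truncate_segments - drop unused trailing segment buffers
 * @sci: segment constructor object
 * @last: last segment buffer actually used
 * @sufile: sufile inode
 *
 * Releases the segment buffers following @last and returns their
 * pre-allocated successor segments to the sufile.
 */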
1498 static void nilfs_segctor_truncate_segments(struct nilfs_sc_info *sci,
1499                                             struct nilfs_segment_buffer *last,
1500                                             struct inode *sufile)
1501 {
1502         struct nilfs_segment_buffer *segbuf = last;
1503         int ret;
1504
1505         list_for_each_entry_continue(segbuf, &sci->sc_segbufs, sb_list) {
1506                 sci->sc_segbuf_nblocks -= segbuf->sb_rest_blocks;
1507                 ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
1508                 WARN_ON(ret);
1509         }
1510         nilfs_truncate_logs(&sci->sc_segbufs, last);
1511 }
1512
1513
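/**
 * nilfs_segctor_collect - collect dirty blocks and fill segment buffers
 * @sci: segment constructor object
 * @nilfs: nilfs object
 * @mode: log forming mode
 *
 * Runs the dirty block collection.  If the segment buffers fill up
 * (-E2BIG) during a full construction, additional segment buffers are
 * appended and collection is retried from the saved stage; otherwise the
 * partially collected log is accepted as is.  Unused trailing buffers
 * are dropped before returning.
 *
 * Return: 0 on success, or a negative error code on failure.
 */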
1514 static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
1515                                  struct the_nilfs *nilfs, int mode)
1516 {
1517         struct nilfs_cstage prev_stage = sci->sc_stage;
1518         int err, nadd = 1;
1519
1520         /* Collection retry loop */
1521         for (;;) {
1522                 sci->sc_nblk_this_inc = 0;
1523                 sci->sc_curseg = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);
1524
1525                 err = nilfs_segctor_reset_segment_buffer(sci);
1526                 if (unlikely(err))
1527                         goto failed;
1528
1529                 err = nilfs_segctor_collect_blocks(sci, mode);
1530                 sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
1531                 if (!err)
1532                         break;
1533
1534                 if (unlikely(err != -E2BIG))
1535                         goto failed;
1536
1537                 /* The current segment is filled up */
1538                 if (mode != SC_LSEG_SR ||
1539                     nilfs_sc_cstage_get(sci) < NILFS_ST_CPFILE)
1540                         break;
1541
1542                 nilfs_clear_logs(&sci->sc_segbufs);
1543
1544                 if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
1545                         err = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
1546                                                         sci->sc_freesegs,
1547                                                         sci->sc_nfreesegs,
1548                                                         NULL);
1549                         WARN_ON(err); /* should not happen */
1550                         sci->sc_stage.flags &= ~NILFS_CF_SUFREED;
1551                 }
1552
1553                 err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
1554                 if (unlikely(err))
1555                         return err;
1556
1557                 nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
1558                 sci->sc_stage = prev_stage;
1559         }
1560         nilfs_segctor_zeropad_segsum(sci);
1561         nilfs_segctor_truncate_segments(sci, sci->sc_curseg, nilfs->ns_sufile);
1562         return 0;
1563
1564  failed:
1565         return err;
1566 }
1567
1568 static void nilfs_list_replace_buffer(struct buffer_head *old_bh,
1569                                       struct buffer_head *new_bh)
1570 {
1571         BUG_ON(!list_empty(&new_bh->b_assoc_buffers));
1572
1573         list_replace_init(&old_bh->b_assoc_buffers, &new_bh->b_assoc_buffers);
1574         /* The caller must release old_bh */
1575 }
1576
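/**
 * nilfs_segctor_update_payload_blocknr - assign block addresses to payload
 * @sci: segment constructor object
 * @segbuf: segment buffer
 * @mode: log forming mode
 *
 * Walks the payload buffers of @segbuf, assigns the final disk block
 * number of each one through the owning inode's bmap, and writes the
 * matching binfo entry (data or node) into the segment summary.
 *
 * Return: 0 on success, or a negative error code on failure.
 */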
1577 static int
1578 nilfs_segctor_update_payload_blocknr(struct nilfs_sc_info *sci,
1579                                      struct nilfs_segment_buffer *segbuf,
1580                                      int mode)
1581 {
1582         struct inode *inode = NULL;
1583         sector_t blocknr;
1584         unsigned long nfinfo = segbuf->sb_sum.nfinfo;
1585         unsigned long nblocks = 0, ndatablk = 0;
1586         const struct nilfs_sc_operations *sc_op = NULL;
1587         struct nilfs_segsum_pointer ssp;
1588         struct nilfs_finfo *finfo = NULL;
1589         union nilfs_binfo binfo;
1590         struct buffer_head *bh, *bh_org;
1591         ino_t ino = 0;
1592         int err = 0;
1593
1594         if (!nfinfo)
1595                 goto out;
1596
1597         blocknr = segbuf->sb_pseg_start + segbuf->sb_sum.nsumblk;
1598         ssp.bh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
1599         ssp.offset = sizeof(struct nilfs_segment_summary);
1600
1601         list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
1602                 if (bh == segbuf->sb_super_root)
1603                         break;
1604                 if (!finfo) {
1605                         finfo = nilfs_segctor_map_segsum_entry(
1606                                 sci, &ssp, sizeof(*finfo));
1607                         ino = le64_to_cpu(finfo->fi_ino);
1608                         nblocks = le32_to_cpu(finfo->fi_nblocks);
1609                         ndatablk = le32_to_cpu(finfo->fi_ndatablk);
1610
1611                         inode = bh->b_folio->mapping->host;
1612
1613                         if (mode == SC_LSEG_DSYNC)
1614                                 sc_op = &nilfs_sc_dsync_ops;
1615                         else if (ino == NILFS_DAT_INO)
1616                                 sc_op = &nilfs_sc_dat_ops;
1617                         else /* file blocks */
1618                                 sc_op = &nilfs_sc_file_ops;
1619                 }
1620                 bh_org = bh;
1621                 get_bh(bh_org);
1622                 err = nilfs_bmap_assign(NILFS_I(inode)->i_bmap, &bh, blocknr,
1623                                         &binfo);
1624                 if (bh != bh_org)
1625                         nilfs_list_replace_buffer(bh_org, bh);
1626                 brelse(bh_org);
1627                 if (unlikely(err))
1628                         goto failed_bmap;
1629
1630                 if (ndatablk > 0)
1631                         sc_op->write_data_binfo(sci, &ssp, &binfo);
1632                 else
1633                         sc_op->write_node_binfo(sci, &ssp, &binfo);
1634
1635                 blocknr++;
1636                 if (--nblocks == 0) {
1637                         finfo = NULL;
1638                         if (--nfinfo == 0)
1639                                 break;
1640                 } else if (ndatablk > 0)
1641                         ndatablk--;
1642         }
1643  out:
1644         return 0;
1645
1646  failed_bmap:
1647         return err;
1648 }
1649
1650 static int nilfs_segctor_assign(struct nilfs_sc_info *sci, int mode)
1651 {
1652         struct nilfs_segment_buffer *segbuf;
1653         int err;
1654
1655         list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
1656                 err = nilfs_segctor_update_payload_blocknr(sci, segbuf, mode);
1657                 if (unlikely(err))
1658                         return err;
1659                 nilfs_segbuf_fill_in_segsum(segbuf);
1660         }
1661         return 0;
1662 }
1663
1664 static void nilfs_begin_page_io(struct page *page)
1665 {
1666         if (!page || PageWriteback(page))
1667                 /*
1668                  * For split b-tree node pages, this function may be called
1669                  * twice.  The second and later calls are ignored by this check.
1670                  */
1671                 return;
1672
1673         lock_page(page);
1674         clear_page_dirty_for_io(page);
1675         set_page_writeback(page);
1676         unlock_page(page);
1677 }
1678
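/**
 * nilfs_segctor_prepare_write - mark pages for writeback before submission
 * @sci: segment constructor object
 *
 * Puts the pages holding segment summary, super root, and payload blocks
 * of all queued segment buffers under writeback, clearing their dirty
 * flags, and tags payload buffers as async writes.
 */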
1679 static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
1680 {
1681         struct nilfs_segment_buffer *segbuf;
1682         struct page *bd_page = NULL, *fs_page = NULL;
1683
1684         list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
1685                 struct buffer_head *bh;
1686
1687                 list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
1688                                     b_assoc_buffers) {
1689                         if (bh->b_page != bd_page) {
1690                                 if (bd_page) {
1691                                         lock_page(bd_page);
1692                                         clear_page_dirty_for_io(bd_page);
1693                                         set_page_writeback(bd_page);
1694                                         unlock_page(bd_page);
1695                                 }
1696                                 bd_page = bh->b_page;
1697                         }
1698                 }
1699
1700                 list_for_each_entry(bh, &segbuf->sb_payload_buffers,
1701                                     b_assoc_buffers) {
1702                         set_buffer_async_write(bh);
1703                         if (bh == segbuf->sb_super_root) {
1704                                 if (bh->b_page != bd_page) {
1705                                         lock_page(bd_page);
1706                                         clear_page_dirty_for_io(bd_page);
1707                                         set_page_writeback(bd_page);
1708                                         unlock_page(bd_page);
1709                                         bd_page = bh->b_page;
1710                                 }
1711                                 break;
1712                         }
1713                         if (bh->b_page != fs_page) {
1714                                 nilfs_begin_page_io(fs_page);
1715                                 fs_page = bh->b_page;
1716                         }
1717                 }
1718         }
1719         if (bd_page) {
1720                 lock_page(bd_page);
1721                 clear_page_dirty_for_io(bd_page);
1722                 set_page_writeback(bd_page);
1723                 unlock_page(bd_page);
1724         }
1725         nilfs_begin_page_io(fs_page);
1726 }
1727
1728 static int nilfs_segctor_write(struct nilfs_sc_info *sci,
1729                                struct the_nilfs *nilfs)
1730 {
1731         int ret;
1732
1733         ret = nilfs_write_logs(&sci->sc_segbufs, nilfs);
1734         list_splice_tail_init(&sci->sc_segbufs, &sci->sc_write_logs);
1735         return ret;
1736 }
1737
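/**
 * nilfs_end_page_io - finish writeback of a page written as part of a log
 * @page: page to complete (may be NULL)
 * @err: I/O error code, or 0 on success
 *
 * For regular file pages, redirties the page if some buffers remain dirty
 * or if the write failed, and ends writeback.  Pages holding b-tree node
 * buffers are handled specially because this function may be reached more
 * than once when their buffers were split across logs.
 */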
1738 static void nilfs_end_page_io(struct page *page, int err)
1739 {
1740         if (!page)
1741                 return;
1742
1743         if (buffer_nilfs_node(page_buffers(page)) && !PageWriteback(page)) {
1744                 /*
1745                  * For b-tree node pages, this function may be called twice
1746                  * or more because they might be split in a segment.
1747                  */
1748                 if (PageDirty(page)) {
1749                         /*
1750                          * For pages holding split b-tree node buffers, dirty
1751                          * flag on the buffers may be cleared discretely.
1752                          * In that case, the page is once redirtied for
1753                          * remaining buffers, and it must be cancelled if
1754                          * all the buffers get cleaned later.
1755                          */
1756                         lock_page(page);
1757                         if (nilfs_page_buffers_clean(page))
1758                                 __nilfs_clear_page_dirty(page);
1759                         unlock_page(page);
1760                 }
1761                 return;
1762         }
1763
1764         if (!err) {
1765                 if (!nilfs_page_buffers_clean(page))
1766                         __set_page_dirty_nobuffers(page);
1767                 ClearPageError(page);
1768         } else {
1769                 __set_page_dirty_nobuffers(page);
1770                 SetPageError(page);
1771         }
1772
1773         end_page_writeback(page);
1774 }
1775
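/**
 * nilfs_abort_logs - end writeback of logs that failed to be written
 * @logs: list of segment buffers
 * @err: error code to propagate to the affected pages
 *
 * Clears the uptodate flag of segment summary (and super root) buffers,
 * drops the async-write flag of payload buffers, and ends writeback of
 * the affected pages with @err.
 */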
1776 static void nilfs_abort_logs(struct list_head *logs, int err)
1777 {
1778         struct nilfs_segment_buffer *segbuf;
1779         struct page *bd_page = NULL, *fs_page = NULL;
1780         struct buffer_head *bh;
1781
1782         if (list_empty(logs))
1783                 return;
1784
1785         list_for_each_entry(segbuf, logs, sb_list) {
1786                 list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
1787                                     b_assoc_buffers) {
1788                         clear_buffer_uptodate(bh);
1789                         if (bh->b_page != bd_page) {
1790                                 if (bd_page)
1791                                         end_page_writeback(bd_page);
1792                                 bd_page = bh->b_page;
1793                         }
1794                 }
1795
1796                 list_for_each_entry(bh, &segbuf->sb_payload_buffers,
1797                                     b_assoc_buffers) {
1798                         clear_buffer_async_write(bh);
1799                         if (bh == segbuf->sb_super_root) {
1800                                 clear_buffer_uptodate(bh);
1801                                 if (bh->b_page != bd_page) {
1802                                         end_page_writeback(bd_page);
1803                                         bd_page = bh->b_page;
1804                                 }
1805                                 break;
1806                         }
1807                         if (bh->b_page != fs_page) {
1808                                 nilfs_end_page_io(fs_page, err);
1809                                 fs_page = bh->b_page;
1810                         }
1811                 }
1812         }
1813         if (bd_page)
1814                 end_page_writeback(bd_page);
1815
1816         nilfs_end_page_io(fs_page, err);
1817 }
1818
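/**
 * nilfs_segctor_abort_construction - cancel an unfinished log construction
 * @sci: segment constructor object
 * @nilfs: nilfs object
 * @err: error code to propagate to the aborted pages
 *
 * Waits for any in-flight log writes, aborts writeback of their pages,
 * reverts segment usage updates, returns unused segments to the sufile,
 * and cancels segment frees requested by the garbage collector.
 */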
1819 static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci,
1820                                              struct the_nilfs *nilfs, int err)
1821 {
1822         LIST_HEAD(logs);
1823         int ret;
1824
1825         list_splice_tail_init(&sci->sc_write_logs, &logs);
1826         ret = nilfs_wait_on_logs(&logs);
1827         nilfs_abort_logs(&logs, ret ? : err);
1828
1829         list_splice_tail_init(&sci->sc_segbufs, &logs);
1830         nilfs_cancel_segusage(&logs, nilfs->ns_sufile);
1831         nilfs_free_incomplete_logs(&logs, nilfs);
1832
1833         if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
1834                 ret = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
1835                                                 sci->sc_freesegs,
1836                                                 sci->sc_nfreesegs,
1837                                                 NULL);
1838                 WARN_ON(ret); /* should not happen */
1839         }
1840
1841         nilfs_destroy_logs(&logs);
1842 }
1843
1844 static void nilfs_set_next_segment(struct the_nilfs *nilfs,
1845                                    struct nilfs_segment_buffer *segbuf)
1846 {
1847         nilfs->ns_segnum = segbuf->sb_segnum;
1848         nilfs->ns_nextnum = segbuf->sb_nextnum;
1849         nilfs->ns_pseg_offset = segbuf->sb_pseg_start - segbuf->sb_fseg_start
1850                 + segbuf->sb_sum.nblocks;
1851         nilfs->ns_seg_seq = segbuf->sb_sum.seg_seq;
1852         nilfs->ns_ctime = segbuf->sb_sum.ctime;
1853 }
1854
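/**
 * nilfs_segctor_complete_write - finalize state after successful log writes
 * @sci: segment constructor object
 *
 * Cleans up buffer and page states of the written logs, drops collected
 * inodes, advances the write position recorded in the nilfs object, and,
 * when a super root was written, updates the last-segment pointers and
 * clears the metadata dirty state.
 */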
1855 static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
1856 {
1857         struct nilfs_segment_buffer *segbuf;
1858         struct page *bd_page = NULL, *fs_page = NULL;
1859         struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
1860         int update_sr = false;
1861
1862         list_for_each_entry(segbuf, &sci->sc_write_logs, sb_list) {
1863                 struct buffer_head *bh;
1864
1865                 list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
1866                                     b_assoc_buffers) {
1867                         set_buffer_uptodate(bh);
1868                         clear_buffer_dirty(bh);
1869                         if (bh->b_page != bd_page) {
1870                                 if (bd_page)
1871                                         end_page_writeback(bd_page);
1872                                 bd_page = bh->b_page;
1873                         }
1874                 }
1875                 /*
1876                  * We assume that buffers which belong to the same page
1877                  * are contiguous in the buffer list.
1878                  * Under this assumption, the last BH of each page is
1879                  * identifiable by the discontinuity of bh->b_page
1880                  * (page != fs_page).
1881                  *
1882                  * For B-tree node blocks, however, this assumption is not
1883                  * guaranteed.  The cleanup code of B-tree node pages needs
1884                  * special care.
1885                  */
1886                 list_for_each_entry(bh, &segbuf->sb_payload_buffers,
1887                                     b_assoc_buffers) {
1888                         const unsigned long set_bits = BIT(BH_Uptodate);
1889                         const unsigned long clear_bits =
1890                                 (BIT(BH_Dirty) | BIT(BH_Async_Write) |
1891                                  BIT(BH_Delay) | BIT(BH_NILFS_Volatile) |
1892                                  BIT(BH_NILFS_Redirected));
1893
1894                         set_mask_bits(&bh->b_state, clear_bits, set_bits);
1895                         if (bh == segbuf->sb_super_root) {
1896                                 if (bh->b_page != bd_page) {
1897                                         end_page_writeback(bd_page);
1898                                         bd_page = bh->b_page;
1899                                 }
1900                                 update_sr = true;
1901                                 break;
1902                         }
1903                         if (bh->b_page != fs_page) {
1904                                 nilfs_end_page_io(fs_page, 0);
1905                                 fs_page = bh->b_page;
1906                         }
1907                 }
1908
1909                 if (!nilfs_segbuf_simplex(segbuf)) {
1910                         if (segbuf->sb_sum.flags & NILFS_SS_LOGBGN) {
1911                                 set_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
1912                                 sci->sc_lseg_stime = jiffies;
1913                         }
1914                         if (segbuf->sb_sum.flags & NILFS_SS_LOGEND)
1915                                 clear_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
1916                 }
1917         }
1918         /*
1919          * Since pages may continue over multiple segment buffers,
1920          * end of the last page must be checked outside of the loop.
1921          * the end of the last page must be checked outside the loop.
1922         if (bd_page)
1923                 end_page_writeback(bd_page);
1924
1925         nilfs_end_page_io(fs_page, 0);
1926
1927         nilfs_drop_collected_inodes(&sci->sc_dirty_files);
1928
1929         if (nilfs_doing_gc())
1930                 nilfs_drop_collected_inodes(&sci->sc_gc_inodes);
1931         else
1932                 nilfs->ns_nongc_ctime = sci->sc_seg_ctime;
1933
1934         sci->sc_nblk_inc += sci->sc_nblk_this_inc;
1935
1936         segbuf = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
1937         nilfs_set_next_segment(nilfs, segbuf);
1938
1939         if (update_sr) {
1940                 nilfs->ns_flushed_device = 0;
1941                 nilfs_set_last_segment(nilfs, segbuf->sb_pseg_start,
1942                                        segbuf->sb_sum.seg_seq, nilfs->ns_cno++);
1943
1944                 clear_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
1945                 clear_bit(NILFS_SC_DIRTY, &sci->sc_flags);
1946                 set_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
1947                 nilfs_segctor_clear_metadata_dirty(sci);
1948         } else
1949                 clear_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
1950 }
1951
1952 static int nilfs_segctor_wait(struct nilfs_sc_info *sci)
1953 {
1954         int ret;
1955
1956         ret = nilfs_wait_on_logs(&sci->sc_write_logs);
1957         if (!ret) {
1958                 nilfs_segctor_complete_write(sci);
1959                 nilfs_destroy_logs(&sci->sc_write_logs);
1960         }
1961         return ret;
1962 }
1963
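/**
 * nilfs_segctor_collect_dirty_files - pick up dirty inodes for this run
 * @sci: segment constructor object
 * @nilfs: nilfs object
 *
 * Moves inodes from the filesystem's queued dirty list to the
 * constructor's own dirty-file list, loading and redirtying each inode's
 * ifile block so that it is written with this construction.
 *
 * Return: 0 on success, or a negative error code on failure.
 */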
1964 static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
1965                                              struct the_nilfs *nilfs)
1966 {
1967         struct nilfs_inode_info *ii, *n;
1968         struct inode *ifile = sci->sc_root->ifile;
1969
1970         spin_lock(&nilfs->ns_inode_lock);
1971  retry:
1972         list_for_each_entry_safe(ii, n, &nilfs->ns_dirty_files, i_dirty) {
1973                 if (!ii->i_bh) {
1974                         struct buffer_head *ibh;
1975                         int err;
1976
1977                         spin_unlock(&nilfs->ns_inode_lock);
1978                         err = nilfs_ifile_get_inode_block(
1979                                 ifile, ii->vfs_inode.i_ino, &ibh);
1980                         if (unlikely(err)) {
1981                                 nilfs_warn(sci->sc_super,
1982                                            "log writer: error %d getting inode block (ino=%lu)",
1983                                            err, ii->vfs_inode.i_ino);
1984                                 return err;
1985                         }
1986                         spin_lock(&nilfs->ns_inode_lock);
1987                         if (likely(!ii->i_bh))
1988                                 ii->i_bh = ibh;
1989                         else
1990                                 brelse(ibh);
1991                         goto retry;
1992                 }
1993
1994                 /* Always redirty the buffer to avoid a race condition */
1995                 mark_buffer_dirty(ii->i_bh);
1996                 nilfs_mdt_mark_dirty(ifile);
1997
1998                 clear_bit(NILFS_I_QUEUED, &ii->i_state);
1999                 set_bit(NILFS_I_BUSY, &ii->i_state);
2000                 list_move_tail(&ii->i_dirty, &sci->sc_dirty_files);
2001         }
2002         spin_unlock(&nilfs->ns_inode_lock);
2003
2004         return 0;
2005 }
2006
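/**
 * nilfs_segctor_drop_written_files - release inodes written by this run
 * @sci: segment constructor object
 * @nilfs: nilfs object
 *
 * Detaches inodes that were fully written (updated and no longer dirty)
 * from the constructor's dirty-file list and drops their references.
 * iput() is deferred to a work item for unlinked inodes or while the
 * mount is not yet finished, to avoid deadlocks.
 */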
2007 static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
2008                                              struct the_nilfs *nilfs)
2009 {
2010         struct nilfs_inode_info *ii, *n;
2011         int during_mount = !(sci->sc_super->s_flags & SB_ACTIVE);
2012         int defer_iput = false;
2013
2014         spin_lock(&nilfs->ns_inode_lock);
2015         list_for_each_entry_safe(ii, n, &sci->sc_dirty_files, i_dirty) {
2016                 if (!test_and_clear_bit(NILFS_I_UPDATED, &ii->i_state) ||
2017                     test_bit(NILFS_I_DIRTY, &ii->i_state))
2018                         continue;
2019
2020                 clear_bit(NILFS_I_BUSY, &ii->i_state);
2021                 brelse(ii->i_bh);
2022                 ii->i_bh = NULL;
2023                 list_del_init(&ii->i_dirty);
2024                 if (!ii->vfs_inode.i_nlink || during_mount) {
2025                         /*
2026                          * Defer calling iput() to avoid deadlocks if
2027                          * i_nlink == 0 or mount is not yet finished.
2028                          */
2029                         list_add_tail(&ii->i_dirty, &sci->sc_iput_queue);
2030                         defer_iput = true;
2031                 } else {
2032                         spin_unlock(&nilfs->ns_inode_lock);
2033                         iput(&ii->vfs_inode);
2034                         spin_lock(&nilfs->ns_inode_lock);
2035                 }
2036         }
2037         spin_unlock(&nilfs->ns_inode_lock);
2038
2039         if (defer_iput)
2040                 schedule_work(&sci->sc_iput_work);
2041 }
2042
2043 /*
2044  * Main procedure of segment constructor
2045  */
2046 static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
2047 {
2048         struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
2049         int err;
2050
2051         if (sb_rdonly(sci->sc_super))
2052                 return -EROFS;
2053
2054         nilfs_sc_cstage_set(sci, NILFS_ST_INIT);
2055         sci->sc_cno = nilfs->ns_cno;
2056
2057         err = nilfs_segctor_collect_dirty_files(sci, nilfs);
2058         if (unlikely(err))
2059                 goto out;
2060
2061         if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
2062                 set_bit(NILFS_SC_DIRTY, &sci->sc_flags);
2063
2064         if (nilfs_segctor_clean(sci))
2065                 goto out;
2066
2067         do {
2068                 sci->sc_stage.flags &= ~NILFS_CF_HISTORY_MASK;
2069
2070                 err = nilfs_segctor_begin_construction(sci, nilfs);
2071                 if (unlikely(err))
2072                         goto out;
2073
2074                 /* Update time stamp */
2075                 sci->sc_seg_ctime = ktime_get_real_seconds();
2076
2077                 err = nilfs_segctor_collect(sci, nilfs, mode);
2078                 if (unlikely(err))
2079                         goto failed;
2080
2081                 /* Avoid empty segment */
2082                 if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE &&
2083                     nilfs_segbuf_empty(sci->sc_curseg)) {
2084                         nilfs_segctor_abort_construction(sci, nilfs, 1);
2085                         goto out;
2086                 }
2087
2088                 err = nilfs_segctor_assign(sci, mode);
2089                 if (unlikely(err))
2090                         goto failed;
2091
2092                 if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
2093                         nilfs_segctor_fill_in_file_bmap(sci);
2094
2095                 if (mode == SC_LSEG_SR &&
2096                     nilfs_sc_cstage_get(sci) >= NILFS_ST_CPFILE) {
2097                         err = nilfs_segctor_fill_in_checkpoint(sci);
2098                         if (unlikely(err))
2099                                 goto failed_to_write;
2100
2101                         nilfs_segctor_fill_in_super_root(sci, nilfs);
2102                 }
2103                 nilfs_segctor_update_segusage(sci, nilfs->ns_sufile);
2104
2105                 /* Write partial segments */
2106                 nilfs_segctor_prepare_write(sci);
2107
2108                 nilfs_add_checksums_on_logs(&sci->sc_segbufs,
2109                                             nilfs->ns_crc_seed);
2110
2111                 err = nilfs_segctor_write(sci, nilfs);
2112                 if (unlikely(err))
2113                         goto failed_to_write;
2114
2115                 if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE ||
2116                     nilfs->ns_blocksize_bits != PAGE_SHIFT) {
2117                         /*
2118                          * At this point, we avoid double buffering
2119                          * for blocksize < pagesize because the page dirty
2120                          * flag is turned off during the write and dirty
2121                          * buffers are not properly collected for
2122                          * pages crossing over segments.
2123                          */
2124                         err = nilfs_segctor_wait(sci);
2125                         if (err)
2126                                 goto failed_to_write;
2127                 }
2128         } while (nilfs_sc_cstage_get(sci) != NILFS_ST_DONE);
2129
2130  out:
2131         nilfs_segctor_drop_written_files(sci, nilfs);
2132         return err;
2133
2134  failed_to_write:
2135         if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
2136                 nilfs_redirty_inodes(&sci->sc_dirty_files);
2137
2138  failed:
2139         if (nilfs_doing_gc())
2140                 nilfs_redirty_inodes(&sci->sc_gc_inodes);
2141         nilfs_segctor_abort_construction(sci, nilfs, err);
2142         goto out;
2143 }
2144
2145 /**
2146  * nilfs_segctor_start_timer - set timer of background write
2147  * @sci: nilfs_sc_info
2148  *
2149  * If the timer has already been set, it ignores the new request.
2150  * This function MUST be called within a section locking the segment
2151  * semaphore.
2152  */
2153 static void nilfs_segctor_start_timer(struct nilfs_sc_info *sci)
2154 {
2155         spin_lock(&sci->sc_state_lock);
2156         if (!(sci->sc_state & NILFS_SEGCTOR_COMMIT)) {
2157                 sci->sc_timer.expires = jiffies + sci->sc_interval;
2158                 add_timer(&sci->sc_timer);
2159                 sci->sc_state |= NILFS_SEGCTOR_COMMIT;
2160         }
2161         spin_unlock(&sci->sc_state_lock);
2162 }
2163
2164 static void nilfs_segctor_do_flush(struct nilfs_sc_info *sci, int bn)
2165 {
2166         spin_lock(&sci->sc_state_lock);
2167         if (!(sci->sc_flush_request & BIT(bn))) {
2168                 unsigned long prev_req = sci->sc_flush_request;
2169
2170                 sci->sc_flush_request |= BIT(bn);
2171                 if (!prev_req)
2172                         wake_up(&sci->sc_wait_daemon);
2173         }
2174         spin_unlock(&sci->sc_state_lock);
2175 }
2176
2177 /**
2178  * nilfs_flush_segment - trigger a segment construction for resource control
2179  * @sb: super block
2180  * @ino: inode number of the file to be flushed out.
2181  */
2182 void nilfs_flush_segment(struct super_block *sb, ino_t ino)
2183 {
2184         struct the_nilfs *nilfs = sb->s_fs_info;
2185         struct nilfs_sc_info *sci = nilfs->ns_writer;
2186
2187         if (!sci || nilfs_doing_construction())
2188                 return;
2189         nilfs_segctor_do_flush(sci, NILFS_MDT_INODE(sb, ino) ? ino : 0);
2190                                         /* assign bit 0 to data files */
2191 }
2192
2193 struct nilfs_segctor_wait_request {
2194         wait_queue_entry_t      wq;
2195         __u32           seq;
2196         int             err;
2197         atomic_t        done;
2198 };
2199
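/**
 * nilfs_segctor_sync - wait for the log writer to process a new request
 * @sci: segment constructor object
 *
 * Queues a synchronous construction request, wakes the segctord thread,
 * and sleeps until the request completes or a signal is received.
 *
 * Return: 0 on success, the error reported by the log writer, or
 * -ERESTARTSYS if interrupted by a signal.
 */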
2200 static int nilfs_segctor_sync(struct nilfs_sc_info *sci)
2201 {
2202         struct nilfs_segctor_wait_request wait_req;
2203         int err = 0;
2204
2205         spin_lock(&sci->sc_state_lock);
2206         init_wait(&wait_req.wq);
2207         wait_req.err = 0;
2208         atomic_set(&wait_req.done, 0);
2209         wait_req.seq = ++sci->sc_seq_request;
2210         spin_unlock(&sci->sc_state_lock);
2211
2212         init_waitqueue_entry(&wait_req.wq, current);
2213         add_wait_queue(&sci->sc_wait_request, &wait_req.wq);
2214         set_current_state(TASK_INTERRUPTIBLE);
2215         wake_up(&sci->sc_wait_daemon);
2216
2217         for (;;) {
2218                 if (atomic_read(&wait_req.done)) {
2219                         err = wait_req.err;
2220                         break;
2221                 }
2222                 if (!signal_pending(current)) {
2223                         schedule();
2224                         continue;
2225                 }
2226                 err = -ERESTARTSYS;
2227                 break;
2228         }
2229         finish_wait(&sci->sc_wait_request, &wait_req.wq);
2230         return err;
2231 }
2232
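/**
 * nilfs_segctor_wakeup - wake up waiters whose requests have completed
 * @sci: segment constructor object
 * @err: result to report to the completed requests
 *
 * Marks as done every queued wait request whose sequence number has been
 * reached by sc_seq_done, and wakes up its waiter.
 */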
2233 static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err)
2234 {
2235         struct nilfs_segctor_wait_request *wrq, *n;
2236         unsigned long flags;
2237
2238         spin_lock_irqsave(&sci->sc_wait_request.lock, flags);
2239         list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.head, wq.entry) {
2240                 if (!atomic_read(&wrq->done) &&
2241                     nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq)) {
2242                         wrq->err = err;
2243                         atomic_set(&wrq->done, 1);
2244                 }
2245                 if (atomic_read(&wrq->done)) {
2246                         wrq->wq.func(&wrq->wq,
2247                                      TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
2248                                      0, NULL);
2249                 }
2250         }
2251         spin_unlock_irqrestore(&sci->sc_wait_request.lock, flags);
2252 }
2253
2254 /**
2255  * nilfs_construct_segment - construct a logical segment
2256  * @sb: super block
2257  *
2258  * Return Value: On success, 0 is returned. On errors, one of the following
2259  * negative error codes is returned.
2260  *
2261  * %-EROFS - Read only filesystem.
2262  *
2263  * %-EIO - I/O error
2264  *
2265  * %-ENOSPC - No space left on device (only in a panic state).
2266  *
2267  * %-ERESTARTSYS - Interrupted.
2268  *
2269  * %-ENOMEM - Insufficient memory available.
2270  */
2271 int nilfs_construct_segment(struct super_block *sb)
2272 {
2273         struct the_nilfs *nilfs = sb->s_fs_info;
2274         struct nilfs_sc_info *sci = nilfs->ns_writer;
2275         struct nilfs_transaction_info *ti;
2276
2277         if (sb_rdonly(sb) || unlikely(!sci))
2278                 return -EROFS;
2279
2280         /* A call inside transactions causes a deadlock. */
2281         BUG_ON((ti = current->journal_info) && ti->ti_magic == NILFS_TI_MAGIC);
2282
2283         return nilfs_segctor_sync(sci);
2284 }
2285
2286 /**
2287  * nilfs_construct_dsync_segment - construct a data-only logical segment
2288  * @sb: super block
2289  * @inode: inode whose data blocks should be written out
2290  * @start: start byte offset
2291  * @end: end byte offset (inclusive)
2292  *
2293  * Return Value: On success, 0 is returned. On errors, one of the following
2294  * negative error codes is returned.
2295  *
2296  * %-EROFS - Read only filesystem.
2297  *
2298  * %-EIO - I/O error
2299  *
2300  * %-ENOSPC - No space left on device (only in a panic state).
2301  *
2302  * %-ERESTARTSYS - Interrupted.
2303  *
2304  * %-ENOMEM - Insufficient memory available.
2305  */
2306 int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
2307                                   loff_t start, loff_t end)
2308 {
2309         struct the_nilfs *nilfs = sb->s_fs_info;
2310         struct nilfs_sc_info *sci = nilfs->ns_writer;
2311         struct nilfs_inode_info *ii;
2312         struct nilfs_transaction_info ti;
2313         int err = 0;
2314
2315         if (sb_rdonly(sb) || unlikely(!sci))
2316                 return -EROFS;
2317
2318         nilfs_transaction_lock(sb, &ti, 0);
2319
2320         ii = NILFS_I(inode);
2321         if (test_bit(NILFS_I_INODE_SYNC, &ii->i_state) ||
2322             nilfs_test_opt(nilfs, STRICT_ORDER) ||
2323             test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
2324             nilfs_discontinued(nilfs)) {
2325                 nilfs_transaction_unlock(sb);
2326                 err = nilfs_segctor_sync(sci);
2327                 return err;
2328         }
2329
2330         spin_lock(&nilfs->ns_inode_lock);
2331         if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
2332             !test_bit(NILFS_I_BUSY, &ii->i_state)) {
2333                 spin_unlock(&nilfs->ns_inode_lock);
2334                 nilfs_transaction_unlock(sb);
2335                 return 0;
2336         }
2337         spin_unlock(&nilfs->ns_inode_lock);
2338         sci->sc_dsync_inode = ii;
2339         sci->sc_dsync_start = start;
2340         sci->sc_dsync_end = end;
2341
2342         err = nilfs_segctor_do_construct(sci, SC_LSEG_DSYNC);
2343         if (!err)
2344                 nilfs->ns_flushed_device = 0;
2345
2346         nilfs_transaction_unlock(sb);
2347         return err;
2348 }
2349
2350 #define FLUSH_FILE_BIT  (0x1) /* data file only */
2351 #define FLUSH_DAT_BIT   BIT(NILFS_DAT_INO) /* DAT only */
2352
2353 /**
2354  * nilfs_segctor_accept - record accepted sequence count of log-write requests
2355  * @sci: segment constructor object
2356  */
2357 static void nilfs_segctor_accept(struct nilfs_sc_info *sci)
2358 {
2359         spin_lock(&sci->sc_state_lock);
2360         sci->sc_seq_accepted = sci->sc_seq_request;
2361         spin_unlock(&sci->sc_state_lock);
2362         del_timer_sync(&sci->sc_timer);
2363 }
2364
2365 /**
2366  * nilfs_segctor_notify - notify the result of request to caller threads
2367  * @sci: segment constructor object
2368  * @mode: mode of log forming
2369  * @err: error code to be notified
2370  */
2371 static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
2372 {
2373         /* Clear requests (even when the construction failed) */
2374         spin_lock(&sci->sc_state_lock);
2375
2376         if (mode == SC_LSEG_SR) {
2377                 sci->sc_state &= ~NILFS_SEGCTOR_COMMIT;
2378                 sci->sc_seq_done = sci->sc_seq_accepted;
2379                 nilfs_segctor_wakeup(sci, err);
2380                 sci->sc_flush_request = 0;
2381         } else {
2382                 if (mode == SC_FLUSH_FILE)
2383                         sci->sc_flush_request &= ~FLUSH_FILE_BIT;
2384                 else if (mode == SC_FLUSH_DAT)
2385                         sci->sc_flush_request &= ~FLUSH_DAT_BIT;
2386
2387                 /* re-enable timer if checkpoint creation was not done */
2388                 if ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
2389                     time_before(jiffies, sci->sc_timer.expires))
2390                         add_timer(&sci->sc_timer);
2391         }
2392         spin_unlock(&sci->sc_state_lock);
2393 }
2394
2395 /**
2396  * nilfs_segctor_construct - form logs and write them to disk
2397  * @sci: segment constructor object
2398  * @mode: mode of log forming
2399  */
2400 static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode)
2401 {
2402         struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
2403         struct nilfs_super_block **sbp;
2404         int err = 0;
2405
2406         nilfs_segctor_accept(sci);
2407
2408         if (nilfs_discontinued(nilfs))
2409                 mode = SC_LSEG_SR;
2410         if (!nilfs_segctor_confirm(sci))
2411                 err = nilfs_segctor_do_construct(sci, mode);
2412
2413         if (likely(!err)) {
2414                 if (mode != SC_FLUSH_DAT)
2415                         atomic_set(&nilfs->ns_ndirtyblks, 0);
2416                 if (test_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags) &&
2417                     nilfs_discontinued(nilfs)) {
2418                         down_write(&nilfs->ns_sem);
2419                         err = -EIO;
2420                         sbp = nilfs_prepare_super(sci->sc_super,
2421                                                   nilfs_sb_will_flip(nilfs));
2422                         if (likely(sbp)) {
2423                                 nilfs_set_log_cursor(sbp[0], nilfs);
2424                                 err = nilfs_commit_super(sci->sc_super,
2425                                                          NILFS_SB_COMMIT);
2426                         }
2427                         up_write(&nilfs->ns_sem);
2428                 }
2429         }
2430
2431         nilfs_segctor_notify(sci, mode, err);
2432         return err;
2433 }
2434
2435 static void nilfs_construction_timeout(struct timer_list *t)
2436 {
2437         struct nilfs_sc_info *sci = from_timer(sci, t, sc_timer);
2438
2439         wake_up_process(sci->sc_timer_task);
2440 }
2441
2442 static void
2443 nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head)
2444 {
2445         struct nilfs_inode_info *ii, *n;
2446
2447         list_for_each_entry_safe(ii, n, head, i_dirty) {
2448                 if (!test_bit(NILFS_I_UPDATED, &ii->i_state))
2449                         continue;
2450                 list_del_init(&ii->i_dirty);
2451                 truncate_inode_pages(&ii->vfs_inode.i_data, 0);
2452                 nilfs_btnode_cache_clear(ii->i_assoc_inode->i_mapping);
2453                 iput(&ii->vfs_inode);
2454         }
2455 }
2456
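/**
 * nilfs_clean_segments - write logs on behalf of the garbage collector
 * @sb: super block instance
 * @argv: argument vectors describing the requested GC work
 * @kbufs: kernel buffers holding the vectors' payloads
 *
 * Saves the DAT to its shadow map, prepares the GC work described by
 * @argv and @kbufs, and repeats super-root constructions until one
 * succeeds, removing written GC inodes after each attempt.  Freed
 * segments are optionally discarded afterwards.
 *
 * Return: 0 on success, or a negative error code on failure.
 */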
2457 int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv,
2458                          void **kbufs)
2459 {
2460         struct the_nilfs *nilfs = sb->s_fs_info;
2461         struct nilfs_sc_info *sci = nilfs->ns_writer;
2462         struct nilfs_transaction_info ti;
2463         int err;
2464
2465         if (unlikely(!sci))
2466                 return -EROFS;
2467
2468         nilfs_transaction_lock(sb, &ti, 1);
2469
2470         err = nilfs_mdt_save_to_shadow_map(nilfs->ns_dat);
2471         if (unlikely(err))
2472                 goto out_unlock;
2473
2474         err = nilfs_ioctl_prepare_clean_segments(nilfs, argv, kbufs);
2475         if (unlikely(err)) {
2476                 nilfs_mdt_restore_from_shadow_map(nilfs->ns_dat);
2477                 goto out_unlock;
2478         }
2479
2480         sci->sc_freesegs = kbufs[4];
2481         sci->sc_nfreesegs = argv[4].v_nmembs;
2482         list_splice_tail_init(&nilfs->ns_gc_inodes, &sci->sc_gc_inodes);
2483
2484         for (;;) {
2485                 err = nilfs_segctor_construct(sci, SC_LSEG_SR);
2486                 nilfs_remove_written_gcinodes(nilfs, &sci->sc_gc_inodes);
2487
2488                 if (likely(!err))
2489                         break;
2490
2491                 nilfs_warn(sb, "error %d cleaning segments", err);
2492                 set_current_state(TASK_INTERRUPTIBLE);
2493                 schedule_timeout(sci->sc_interval);
2494         }
2495         if (nilfs_test_opt(nilfs, DISCARD)) {
2496                 int ret = nilfs_discard_segments(nilfs, sci->sc_freesegs,
2497                                                  sci->sc_nfreesegs);
2498                 if (ret) {
2499                         nilfs_warn(sb,
2500                                    "error %d on discard request, turning discards off for the device",
2501                                    ret);
2502                         nilfs_clear_opt(nilfs, DISCARD);
2503                 }
2504         }
2505
2506  out_unlock:
2507         sci->sc_freesegs = NULL;
2508         sci->sc_nfreesegs = 0;
2509         nilfs_mdt_clear_shadow_map(nilfs->ns_dat);
2510         nilfs_transaction_unlock(sb);
2511         return err;
2512 }
2513
2514 static void nilfs_segctor_thread_construct(struct nilfs_sc_info *sci, int mode)
2515 {
2516         struct nilfs_transaction_info ti;
2517
2518         nilfs_transaction_lock(sci->sc_super, &ti, 0);
2519         nilfs_segctor_construct(sci, mode);
2520
2521         /*
2522          * An unclosed segment should be retried.  We do this using sc_timer.
2523          * A timeout of sc_timer invokes a complete construction, which
2524          * closes the current logical segment.
2525          */
2526         if (test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags))
2527                 nilfs_segctor_start_timer(sci);
2528
2529         nilfs_transaction_unlock(sci->sc_super);
2530 }
2531
2532 static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *sci)
2533 {
2534         int mode = 0;
2535
2536         spin_lock(&sci->sc_state_lock);
2537         mode = (sci->sc_flush_request & FLUSH_DAT_BIT) ?
2538                 SC_FLUSH_DAT : SC_FLUSH_FILE;
2539         spin_unlock(&sci->sc_state_lock);
2540
2541         if (mode) {
2542                 nilfs_segctor_do_construct(sci, mode);
2543
2544                 spin_lock(&sci->sc_state_lock);
2545                 sci->sc_flush_request &= (mode == SC_FLUSH_FILE) ?
2546                         ~FLUSH_FILE_BIT : ~FLUSH_DAT_BIT;
2547                 spin_unlock(&sci->sc_state_lock);
2548         }
2549         clear_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
2550 }
2551
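/**
 * nilfs_segctor_flush_mode - decide how pending flush requests are served
 * @sci: segment constructor object
 *
 * If there is no unclosed logical segment in progress, or the one in
 * progress has not yet outlived the major checkpoint frequency, a pending
 * flush is served as SC_FLUSH_FILE or SC_FLUSH_DAT depending on which
 * flush bits are set; otherwise a full construction with a super root
 * (SC_LSEG_SR) is requested.
 */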
2552 static int nilfs_segctor_flush_mode(struct nilfs_sc_info *sci)
2553 {
2554         if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
2555             time_before(jiffies, sci->sc_lseg_stime + sci->sc_mjcp_freq)) {
2556                 if (!(sci->sc_flush_request & ~FLUSH_FILE_BIT))
2557                         return SC_FLUSH_FILE;
2558                 else if (!(sci->sc_flush_request & ~FLUSH_DAT_BIT))
2559                         return SC_FLUSH_DAT;
2560         }
2561         return SC_LSEG_SR;
2562 }
2563
2564 /**
2565  * nilfs_segctor_thread - main loop of the segment constructor thread.
2566  * @arg: pointer to a struct nilfs_sc_info.
2567  *
2568  * nilfs_segctor_thread() initializes a timer and serves as a daemon
2569  * to execute segment constructions.
2570  */
2571 static int nilfs_segctor_thread(void *arg)
2572 {
2573         struct nilfs_sc_info *sci = (struct nilfs_sc_info *)arg;
2574         struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
2575         int timeout = 0;
2576
2577         sci->sc_timer_task = current;
2578
2579         /* start sync. */
2580         sci->sc_task = current;
2581         wake_up(&sci->sc_wait_task); /* for nilfs_segctor_start_thread() */
2582         nilfs_info(sci->sc_super,
2583                    "segctord starting. Construction interval = %lu seconds, CP frequency < %lu seconds",
2584                    sci->sc_interval / HZ, sci->sc_mjcp_freq / HZ);
2585
2586         spin_lock(&sci->sc_state_lock);
2587  loop:
2588         for (;;) {
2589                 int mode;
2590
2591                 if (sci->sc_state & NILFS_SEGCTOR_QUIT)
2592                         goto end_thread;
2593
2594                 if (timeout || sci->sc_seq_request != sci->sc_seq_done)
2595                         mode = SC_LSEG_SR;
2596                 else if (sci->sc_flush_request)
2597                         mode = nilfs_segctor_flush_mode(sci);
2598                 else
2599                         break;
2600
2601                 spin_unlock(&sci->sc_state_lock);
2602                 nilfs_segctor_thread_construct(sci, mode);
2603                 spin_lock(&sci->sc_state_lock);
2604                 timeout = 0;
2605         }
2606
2607
2608         if (freezing(current)) {
2609                 spin_unlock(&sci->sc_state_lock);
2610                 try_to_freeze();
2611                 spin_lock(&sci->sc_state_lock);
2612         } else {
2613                 DEFINE_WAIT(wait);
2614                 int should_sleep = 1;
2615
2616                 prepare_to_wait(&sci->sc_wait_daemon, &wait,
2617                                 TASK_INTERRUPTIBLE);
2618
2619                 if (sci->sc_seq_request != sci->sc_seq_done)
2620                         should_sleep = 0;
2621                 else if (sci->sc_flush_request)
2622                         should_sleep = 0;
2623                 else if (sci->sc_state & NILFS_SEGCTOR_COMMIT)
2624                         should_sleep = time_before(jiffies,
2625                                         sci->sc_timer.expires);
2626
2627                 if (should_sleep) {
2628                         spin_unlock(&sci->sc_state_lock);
2629                         schedule();
2630                         spin_lock(&sci->sc_state_lock);
2631                 }
2632                 finish_wait(&sci->sc_wait_daemon, &wait);
2633                 timeout = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
2634                            time_after_eq(jiffies, sci->sc_timer.expires));
2635
2636                 if (nilfs_sb_dirty(nilfs) && nilfs_sb_need_update(nilfs))
2637                         set_nilfs_discontinued(nilfs);
2638         }
2639         goto loop;
2640
2641  end_thread:
2642         /* end sync. */
2643         sci->sc_task = NULL;
2644         wake_up(&sci->sc_wait_task); /* for nilfs_segctor_kill_thread() */
2645         spin_unlock(&sci->sc_state_lock);
2646         return 0;
2647 }
2648
2649 static int nilfs_segctor_start_thread(struct nilfs_sc_info *sci)
2650 {
2651         struct task_struct *t;
2652
2653         t = kthread_run(nilfs_segctor_thread, sci, "segctord");
2654         if (IS_ERR(t)) {
2655                 int err = PTR_ERR(t);
2656
2657                 nilfs_err(sci->sc_super, "error %d creating segctord thread",
2658                           err);
2659                 return err;
2660         }
2661         wait_event(sci->sc_wait_task, sci->sc_task != NULL);
2662         return 0;
2663 }
2664
2665 static void nilfs_segctor_kill_thread(struct nilfs_sc_info *sci)
2666         __acquires(&sci->sc_state_lock)
2667         __releases(&sci->sc_state_lock)
2668 {
2669         sci->sc_state |= NILFS_SEGCTOR_QUIT;
2670
2671         while (sci->sc_task) {
2672                 wake_up(&sci->sc_wait_daemon);
2673                 spin_unlock(&sci->sc_state_lock);
2674                 wait_event(sci->sc_wait_task, sci->sc_task == NULL);
2675                 spin_lock(&sci->sc_state_lock);
2676         }
2677 }
2678
2679 /*
2680  * Setup & clean-up functions
2681  */
2682 static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb,
2683                                                struct nilfs_root *root)
2684 {
2685         struct the_nilfs *nilfs = sb->s_fs_info;
2686         struct nilfs_sc_info *sci;
2687
2688         sci = kzalloc(sizeof(*sci), GFP_KERNEL);
2689         if (!sci)
2690                 return NULL;
2691
2692         sci->sc_super = sb;
2693
2694         nilfs_get_root(root);
2695         sci->sc_root = root;
2696
2697         init_waitqueue_head(&sci->sc_wait_request);
2698         init_waitqueue_head(&sci->sc_wait_daemon);
2699         init_waitqueue_head(&sci->sc_wait_task);
2700         spin_lock_init(&sci->sc_state_lock);
2701         INIT_LIST_HEAD(&sci->sc_dirty_files);
2702         INIT_LIST_HEAD(&sci->sc_segbufs);
2703         INIT_LIST_HEAD(&sci->sc_write_logs);
2704         INIT_LIST_HEAD(&sci->sc_gc_inodes);
2705         INIT_LIST_HEAD(&sci->sc_iput_queue);
2706         INIT_WORK(&sci->sc_iput_work, nilfs_iput_work_func);
2707         timer_setup(&sci->sc_timer, nilfs_construction_timeout, 0);
2708
2709         sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
2710         sci->sc_mjcp_freq = HZ * NILFS_SC_DEFAULT_SR_FREQ;
2711         sci->sc_watermark = NILFS_SC_DEFAULT_WATERMARK;
2712
2713         if (nilfs->ns_interval)
2714                 sci->sc_interval = HZ * nilfs->ns_interval;
2715         if (nilfs->ns_watermark)
2716                 sci->sc_watermark = nilfs->ns_watermark;
2717         return sci;
2718 }
2719
2720 static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)
2721 {
2722         int ret, retrycount = NILFS_SC_CLEANUP_RETRY;
2723
2724         /*
2725          * The segctord thread was stopped and its timer was removed.
2726          * But some tasks remain.
2727          */
2728         do {
2729                 struct nilfs_transaction_info ti;
2730
2731                 nilfs_transaction_lock(sci->sc_super, &ti, 0);
2732                 ret = nilfs_segctor_construct(sci, SC_LSEG_SR);
2733                 nilfs_transaction_unlock(sci->sc_super);
2734
2735                 flush_work(&sci->sc_iput_work);
2736
2737         } while (ret && ret != -EROFS && retrycount-- > 0);
2738 }
2739
2740 /**
2741  * nilfs_segctor_destroy - destroy the segment constructor.
2742  * @sci: nilfs_sc_info
2743  *
2744  * nilfs_segctor_destroy() kills the segctord thread and frees
2745  * the nilfs_sc_info struct.
2746  * Caller must hold the segment semaphore.
2747  */
2748 static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
2749 {
2750         struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
2751         int flag;
2752
2753         up_write(&nilfs->ns_segctor_sem);
2754
2755         spin_lock(&sci->sc_state_lock);
2756         nilfs_segctor_kill_thread(sci);
2757         flag = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) || sci->sc_flush_request
2758                 || sci->sc_seq_request != sci->sc_seq_done);
2759         spin_unlock(&sci->sc_state_lock);
2760
2761         if (flush_work(&sci->sc_iput_work))
2762                 flag = true;
2763
2764         if (flag || !nilfs_segctor_confirm(sci))
2765                 nilfs_segctor_write_out(sci);
2766
2767         if (!list_empty(&sci->sc_dirty_files)) {
2768                 nilfs_warn(sci->sc_super,
2769                            "disposed unprocessed dirty file(s) when stopping log writer");
2770                 nilfs_dispose_list(nilfs, &sci->sc_dirty_files, 1);
2771         }
2772
2773         if (!list_empty(&sci->sc_iput_queue)) {
2774                 nilfs_warn(sci->sc_super,
2775                            "disposed unprocessed inode(s) in iput queue when stopping log writer");
2776                 nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 1);
2777         }
2778
2779         WARN_ON(!list_empty(&sci->sc_segbufs));
2780         WARN_ON(!list_empty(&sci->sc_write_logs));
2781
2782         nilfs_put_root(sci->sc_root);
2783
2784         down_write(&nilfs->ns_segctor_sem);
2785
2786         timer_shutdown_sync(&sci->sc_timer);
2787         kfree(sci);
2788 }
2789
2790 /**
2791  * nilfs_attach_log_writer - attach log writer
2792  * @sb: super block instance
2793  * @root: root object of the current filesystem tree
2794  *
2795  * This allocates a log writer object, initializes it, and starts the
2796  * log writer.
2797  *
2798  * Return Value: On success, 0 is returned. On error, one of the following
2799  * negative error codes is returned.
2800  *
2801  * %-ENOMEM - Insufficient memory available.
2802  */
2803 int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root)
2804 {
2805         struct the_nilfs *nilfs = sb->s_fs_info;
2806         int err;
2807
2808         if (nilfs->ns_writer) {
2809                 /*
2810                  * This happens if the filesystem is made read-only by
2811                  * __nilfs_error or nilfs_remount and then remounted
2812                  * read/write.  In these cases, reuse the existing
2813                  * writer.
2814                  */
2815                 return 0;
2816         }
2817
2818         nilfs->ns_writer = nilfs_segctor_new(sb, root);
2819         if (!nilfs->ns_writer)
2820                 return -ENOMEM;
2821
2822         inode_attach_wb(nilfs->ns_bdev->bd_inode, NULL);
2823
2824         err = nilfs_segctor_start_thread(nilfs->ns_writer);
2825         if (unlikely(err))
2826                 nilfs_detach_log_writer(sb);
2827
2828         return err;
2829 }
2830
2831 /**
2832  * nilfs_detach_log_writer - destroy log writer
2833  * @sb: super block instance
2834  *
2835  * This kills the log writer daemon, frees the log writer object, and
2836  * destroys list of dirty files.
2837  */
2838 void nilfs_detach_log_writer(struct super_block *sb)
2839 {
2840         struct the_nilfs *nilfs = sb->s_fs_info;
2841         LIST_HEAD(garbage_list);
2842
2843         down_write(&nilfs->ns_segctor_sem);
2844         if (nilfs->ns_writer) {
2845                 nilfs_segctor_destroy(nilfs->ns_writer);
2846                 nilfs->ns_writer = NULL;
2847         }
2848
2849         /* Forcibly free the list of dirty files */
2850         spin_lock(&nilfs->ns_inode_lock);
2851         if (!list_empty(&nilfs->ns_dirty_files)) {
2852                 list_splice_init(&nilfs->ns_dirty_files, &garbage_list);
2853                 nilfs_warn(sb,
2854                            "disposed unprocessed dirty file(s) when detaching log writer");
2855         }
2856         spin_unlock(&nilfs->ns_inode_lock);
2857         up_write(&nilfs->ns_segctor_sem);
2858
2859         nilfs_dispose_list(nilfs, &garbage_list, 1);
2860 }