// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/list_sort.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "util.h"
#include "dir.h"
#include "trace_gfs2.h"

/**
 * gfs2_struct2blk - compute number of log descriptor blocks
 * @sdp: the filesystem
 * @nstruct: the number of structures
 * @ssize: the size of the structures
 *
 * Compute the number of log descriptor blocks needed to hold a certain number
 * of structures of a certain size.
 *
 * Returns: the number of blocks needed (minimum is always 1)
 */

unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
                             unsigned int ssize)
{
        unsigned int blks;
        unsigned int first, second;

        blks = 1;
        first = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / ssize;

        if (nstruct > first) {
                second = (sdp->sd_sb.sb_bsize -
                          sizeof(struct gfs2_meta_header)) / ssize;
                blks += DIV_ROUND_UP(nstruct - first, second);
        }

        return blks;
}
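
/*
 * Worked example (illustrative figures, assuming a 4096-byte block size,
 * a 72-byte struct gfs2_log_descriptor and a 24-byte struct
 * gfs2_meta_header, with ssize = sizeof(u64) = 8 as used for revokes):
 *
 *   first  = (4096 - 72) / 8 = 503 structures in the descriptor block
 *   second = (4096 - 24) / 8 = 509 structures per continuation block
 *
 * So for nstruct = 1500: blks = 1 + DIV_ROUND_UP(1500 - 503, 509) = 3.
 */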

/**
 * gfs2_remove_from_ail - Remove an entry from the ail lists, updating counters
 * @bd: The gfs2_bufdata to remove
 *
 * The ail lock _must_ be held when calling this function
 *
 */

static void gfs2_remove_from_ail(struct gfs2_bufdata *bd)
{
        bd->bd_tr = NULL;
        list_del_init(&bd->bd_ail_st_list);
        list_del_init(&bd->bd_ail_gl_list);
        atomic_dec(&bd->bd_gl->gl_ail_count);
        brelse(bd->bd_bh);
}

/**
 * gfs2_ail1_start_one - Start I/O on a part of the AIL
 * @sdp: the filesystem
 * @wbc: The writeback control structure
 * @tr: The transaction whose ail1 list is being scanned
 * @withdraw: set to true if an I/O error means the filesystem must withdraw
 *
 * Returns: 1 if the ail lock was dropped to start writeback and the scan
 *          should be restarted, 0 otherwise
 */

static int gfs2_ail1_start_one(struct gfs2_sbd *sdp,
                               struct writeback_control *wbc,
                               struct gfs2_trans *tr,
                               bool *withdraw)
__releases(&sdp->sd_ail_lock)
__acquires(&sdp->sd_ail_lock)
{
        struct gfs2_glock *gl = NULL;
        struct address_space *mapping;
        struct gfs2_bufdata *bd, *s;
        struct buffer_head *bh;

        list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list, bd_ail_st_list) {
                bh = bd->bd_bh;

                gfs2_assert(sdp, bd->bd_tr == tr);

                if (!buffer_busy(bh)) {
                        if (!buffer_uptodate(bh) &&
                            !test_and_set_bit(SDF_AIL1_IO_ERROR,
                                              &sdp->sd_flags)) {
                                gfs2_io_error_bh(sdp, bh);
                                *withdraw = true;
                        }
                        list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list);
                        continue;
                }

                if (!buffer_dirty(bh))
                        continue;
                if (gl == bd->bd_gl)
                        continue;
                gl = bd->bd_gl;
                list_move(&bd->bd_ail_st_list, &tr->tr_ail1_list);
                mapping = bh->b_page->mapping;
                if (!mapping)
                        continue;
                spin_unlock(&sdp->sd_ail_lock);
                generic_writepages(mapping, wbc);
                spin_lock(&sdp->sd_ail_lock);
                if (wbc->nr_to_write <= 0)
                        break;
                return 1;
        }

        return 0;
}

/**
 * gfs2_ail1_flush - start writeback of some ail1 entries
 * @sdp: The super block
 * @wbc: The writeback control structure
 *
 * Writes back some ail1 entries, according to the limits in the
 * writeback control structure
 */

void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc)
{
        struct list_head *head = &sdp->sd_ail1_list;
        struct gfs2_trans *tr;
        struct blk_plug plug;
        bool withdraw = false;

        trace_gfs2_ail_flush(sdp, wbc, 1);
        blk_start_plug(&plug);
        spin_lock(&sdp->sd_ail_lock);
restart:
        list_for_each_entry_reverse(tr, head, tr_list) {
                if (wbc->nr_to_write <= 0)
                        break;
                if (gfs2_ail1_start_one(sdp, wbc, tr, &withdraw))
                        goto restart;
        }
        spin_unlock(&sdp->sd_ail_lock);
        blk_finish_plug(&plug);
        if (withdraw)
                gfs2_lm_withdraw(sdp, NULL);
        trace_gfs2_ail_flush(sdp, wbc, 0);
}

/**
 * gfs2_ail1_start - start writeback of all ail1 entries
 * @sdp: The superblock
 */

static void gfs2_ail1_start(struct gfs2_sbd *sdp)
{
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_NONE,
                .nr_to_write = LONG_MAX,
                .range_start = 0,
                .range_end = LLONG_MAX,
        };

        return gfs2_ail1_flush(sdp, &wbc);
}

/**
 * gfs2_ail1_empty_one - Check whether or not a trans in the AIL has been synced
 * @sdp: the filesystem
 * @tr: the transaction to check
 * @withdraw: set to true if an I/O error means the filesystem must withdraw
 */

static void gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
                                bool *withdraw)
{
        struct gfs2_bufdata *bd, *s;
        struct buffer_head *bh;

        list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list,
                                         bd_ail_st_list) {
                bh = bd->bd_bh;
                gfs2_assert(sdp, bd->bd_tr == tr);
                if (buffer_busy(bh))
                        continue;
                if (!buffer_uptodate(bh) &&
                    !test_and_set_bit(SDF_AIL1_IO_ERROR, &sdp->sd_flags)) {
                        gfs2_io_error_bh(sdp, bh);
                        *withdraw = true;
                }
                list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list);
        }
}

/**
 * gfs2_ail1_empty - Try to empty the ail1 lists
 * @sdp: The superblock
 *
 * Tries to empty the ail1 lists, starting with the oldest first
 *
 * Returns: 1 if the ail1 list is now empty, 0 otherwise
 */

static int gfs2_ail1_empty(struct gfs2_sbd *sdp)
{
        struct gfs2_trans *tr, *s;
        int oldest_tr = 1;
        int ret;
        bool withdraw = false;

        spin_lock(&sdp->sd_ail_lock);
        list_for_each_entry_safe_reverse(tr, s, &sdp->sd_ail1_list, tr_list) {
                gfs2_ail1_empty_one(sdp, tr, &withdraw);
                if (list_empty(&tr->tr_ail1_list) && oldest_tr)
                        list_move(&tr->tr_list, &sdp->sd_ail2_list);
                else
                        oldest_tr = 0;
        }
        ret = list_empty(&sdp->sd_ail1_list);
        spin_unlock(&sdp->sd_ail_lock);

        if (withdraw)
                gfs2_lm_withdraw(sdp, "fatal: I/O error(s)\n");

        return ret;
}

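/**
 * gfs2_ail1_wait - wait for the first locked buffer in the ail1 lists
 * @sdp: The superblock
 *
 * Scans the ail1 transactions, oldest first, and waits for the first
 * locked (in-flight) buffer it finds, dropping sd_ail_lock around the
 * wait; returns immediately if no buffer is locked.
 */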
static void gfs2_ail1_wait(struct gfs2_sbd *sdp)
{
        struct gfs2_trans *tr;
        struct gfs2_bufdata *bd;
        struct buffer_head *bh;

        spin_lock(&sdp->sd_ail_lock);
        list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
                list_for_each_entry(bd, &tr->tr_ail1_list, bd_ail_st_list) {
                        bh = bd->bd_bh;
                        if (!buffer_locked(bh))
                                continue;
                        get_bh(bh);
                        spin_unlock(&sdp->sd_ail_lock);
                        wait_on_buffer(bh);
                        brelse(bh);
                        return;
                }
        }
        spin_unlock(&sdp->sd_ail_lock);
}

/**
 * gfs2_ail2_empty_one - Remove all the buffers from a transaction's ail2 list
 * @sdp: the filesystem
 * @tr: the transaction to empty
 */

static void gfs2_ail2_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
        struct list_head *head = &tr->tr_ail2_list;
        struct gfs2_bufdata *bd;

        while (!list_empty(head)) {
                bd = list_entry(head->prev, struct gfs2_bufdata,
                                bd_ail_st_list);
                gfs2_assert(sdp, bd->bd_tr == tr);
                gfs2_remove_from_ail(bd);
        }
}

static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
{
        struct gfs2_trans *tr, *safe;
        unsigned int old_tail = sdp->sd_log_tail;
        int wrap = (new_tail < old_tail);
        int a, b, rm;

        spin_lock(&sdp->sd_ail_lock);

        list_for_each_entry_safe(tr, safe, &sdp->sd_ail2_list, tr_list) {
                a = (old_tail <= tr->tr_first);
                b = (tr->tr_first < new_tail);
                rm = (wrap) ? (a || b) : (a && b);
                if (!rm)
                        continue;

                gfs2_ail2_empty_one(sdp, tr);
                list_del(&tr->tr_list);
                gfs2_assert_warn(sdp, list_empty(&tr->tr_ail1_list));
                gfs2_assert_warn(sdp, list_empty(&tr->tr_ail2_list));
                kfree(tr);
        }

        spin_unlock(&sdp->sd_ail_lock);
}
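
/*
 * Illustrative example of the wrap logic above: the region being freed
 * runs from old_tail up to (but not including) new_tail, possibly
 * wrapping past the end of the journal.  With old_tail = 7000 and
 * new_tail = 100 (wrap = 1), a transaction with tr_first = 7500
 * satisfies (old_tail <= tr_first) and one with tr_first = 50 satisfies
 * (tr_first < new_tail); both lie in the freed region and are removed.
 * A transaction with tr_first = 200 satisfies neither test and is kept.
 */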

/**
 * gfs2_log_release - Release a given number of log blocks
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks
 *
 */

void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks)
{
        atomic_add(blks, &sdp->sd_log_blks_free);
        trace_gfs2_log_blocks(sdp, blks);
        gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
                                  sdp->sd_jdesc->jd_blocks);
        up_read(&sdp->sd_log_flush_lock);
}

/**
 * gfs2_log_reserve - Make a log reservation
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks to reserve
 *
 * Note that we never give out the last few blocks of the journal. That's
 * because there is a small number of header blocks associated with each
 * log flush. The exact number can't be known until flush time, so we
 * ensure that we have just enough free blocks at all times to avoid
 * running out during a log flush.
 *
 * We no longer flush the log here, instead we wake up logd to do that
 * for us. To avoid the thundering herd and to ensure that we deal fairly
 * with queued waiters, we use an exclusive wait. This means that when we
 * get woken with enough journal space to get our reservation, we need to
 * wake the next waiter on the list.
 *
 * Returns: errno
 */

int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
{
        int ret = 0;
        unsigned reserved_blks = 7 * (4096 / sdp->sd_vfs->s_blocksize);
        unsigned wanted = blks + reserved_blks;
        DEFINE_WAIT(wait);
        int did_wait = 0;
        unsigned int free_blocks;

        if (gfs2_assert_warn(sdp, blks) ||
            gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks))
                return -EINVAL;
        atomic_add(blks, &sdp->sd_log_blks_needed);
retry:
        free_blocks = atomic_read(&sdp->sd_log_blks_free);
        if (unlikely(free_blocks <= wanted)) {
                do {
                        prepare_to_wait_exclusive(&sdp->sd_log_waitq, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        wake_up(&sdp->sd_logd_waitq);
                        did_wait = 1;
                        if (atomic_read(&sdp->sd_log_blks_free) <= wanted)
                                io_schedule();
                        free_blocks = atomic_read(&sdp->sd_log_blks_free);
                } while (free_blocks <= wanted);
                finish_wait(&sdp->sd_log_waitq, &wait);
        }
        atomic_inc(&sdp->sd_reserving_log);
        if (atomic_cmpxchg(&sdp->sd_log_blks_free, free_blocks,
                                free_blocks - blks) != free_blocks) {
                if (atomic_dec_and_test(&sdp->sd_reserving_log))
                        wake_up(&sdp->sd_reserving_log_wait);
                goto retry;
        }
        atomic_sub(blks, &sdp->sd_log_blks_needed);
        trace_gfs2_log_blocks(sdp, -blks);

        /*
         * If we waited, then so might others, wake them up _after_ we get
         * our share of the log.
         */
        if (unlikely(did_wait))
                wake_up(&sdp->sd_log_waitq);

        down_read(&sdp->sd_log_flush_lock);
        if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) {
                gfs2_log_release(sdp, blks);
                ret = -EROFS;
        }
        if (atomic_dec_and_test(&sdp->sd_reserving_log))
                wake_up(&sdp->sd_reserving_log_wait);
        return ret;
}
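
/*
 * A rough worked example of the reservation headroom above: with a
 * 4096-byte filesystem block size, reserved_blks = 7 * (4096 / 4096) = 7,
 * so a caller asking for blks = 10 only proceeds once more than
 * wanted = 10 + 7 = 17 journal blocks are free; on a 1024-byte block
 * size filesystem the headroom grows to 7 * (4096 / 1024) = 28 blocks.
 */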

/**
 * log_distance - Compute distance between two journal blocks
 * @sdp: The GFS2 superblock
 * @newer: The most recent journal block of the pair
 * @older: The older journal block of the pair
 *
 *   Compute the distance (in the journal direction) between two
 *   blocks in the journal
 *
 * Returns: the distance in blocks
 */

static inline unsigned int log_distance(struct gfs2_sbd *sdp, unsigned int newer,
                                        unsigned int older)
{
        int dist;

        dist = newer - older;
        if (dist < 0)
                dist += sdp->sd_jdesc->jd_blocks;

        return dist;
}
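
/*
 * For example, in a journal of jd_blocks = 8192 with newer = 10 and
 * older = 8000, the raw difference is -7990, so the journal length is
 * added back and log_distance() returns 202 blocks.
 */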

/**
 * calc_reserved - Calculate the number of blocks to reserve when
 *                 refunding a transaction's unused buffers.
 * @sdp: The GFS2 superblock
 *
 * This is complex.  We need to reserve room for all our currently used
 * metadata buffers (e.g. normal file I/O rewriting file time stamps) and
 * all our journaled data buffers for journaled files (e.g. files in the
 * meta_fs like rindex, or files for which chattr +j was done.)
 * If we don't reserve enough space, gfs2_log_refund and gfs2_log_flush
 * will count it as free space (sd_log_blks_free) and corruption will follow.
 *
 * We can have metadata bufs and jdata bufs in the same journal.  So each
 * type gets its own log header, for which we need to reserve a block.
 * In fact, each type has the potential for needing more than one header
 * in cases where we have more buffers than will fit on a journal page.
 * Metadata journal entries take up half the space of journaled buffer entries.
 * Thus, metadata entries have buf_limit (502) and journaled buffers have
 * databuf_limit (251) before they cause a wrap around.
 *
 * Also, we need to reserve blocks for revoke journal entries and one for an
 * overall header for the lot.
 *
 * Returns: the number of blocks reserved
 */
static unsigned int calc_reserved(struct gfs2_sbd *sdp)
{
        unsigned int reserved = 0;
        unsigned int mbuf;
        unsigned int dbuf;
        struct gfs2_trans *tr = sdp->sd_log_tr;

        if (tr) {
                mbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm;
                dbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
                reserved = mbuf + dbuf;
                /* Account for header blocks */
                reserved += DIV_ROUND_UP(mbuf, buf_limit(sdp));
                reserved += DIV_ROUND_UP(dbuf, databuf_limit(sdp));
        }

        if (sdp->sd_log_commited_revoke > 0)
                reserved += gfs2_struct2blk(sdp, sdp->sd_log_commited_revoke,
                                          sizeof(u64));
        /* One for the overall header */
        if (reserved)
                reserved++;
        return reserved;
}
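
/*
 * Illustrative example of the arithmetic above, using the 4KiB-block
 * limits quoted in the comment (buf_limit = 502, databuf_limit = 251):
 * a transaction with mbuf = 600 and dbuf = 100 reserves 700 blocks for
 * the buffers themselves, plus DIV_ROUND_UP(600, 502) = 2 metadata log
 * headers and DIV_ROUND_UP(100, 251) = 1 jdata log header.  With, say,
 * 10 committed revokes (one descriptor block) and one overall header,
 * calc_reserved() returns 700 + 3 + 1 + 1 = 705 blocks.
 */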

static unsigned int current_tail(struct gfs2_sbd *sdp)
{
        struct gfs2_trans *tr;
        unsigned int tail;

        spin_lock(&sdp->sd_ail_lock);

        if (list_empty(&sdp->sd_ail1_list)) {
                tail = sdp->sd_log_head;
        } else {
                tr = list_entry(sdp->sd_ail1_list.prev, struct gfs2_trans,
                                tr_list);
                tail = tr->tr_first;
        }

        spin_unlock(&sdp->sd_ail_lock);

        return tail;
}

static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail)
{
        unsigned int dist = log_distance(sdp, new_tail, sdp->sd_log_tail);

        ail2_empty(sdp, new_tail);

        atomic_add(dist, &sdp->sd_log_blks_free);
        trace_gfs2_log_blocks(sdp, dist);
        gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
                             sdp->sd_jdesc->jd_blocks);

        sdp->sd_log_tail = new_tail;
}

static void log_flush_wait(struct gfs2_sbd *sdp)
{
        DEFINE_WAIT(wait);

        if (atomic_read(&sdp->sd_log_in_flight)) {
                do {
                        prepare_to_wait(&sdp->sd_log_flush_wait, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (atomic_read(&sdp->sd_log_in_flight))
                                io_schedule();
                } while (atomic_read(&sdp->sd_log_in_flight));
                finish_wait(&sdp->sd_log_flush_wait, &wait);
        }
}

static int ip_cmp(void *priv, struct list_head *a, struct list_head *b)
{
        struct gfs2_inode *ipa, *ipb;

        ipa = list_entry(a, struct gfs2_inode, i_ordered);
        ipb = list_entry(b, struct gfs2_inode, i_ordered);

        if (ipa->i_no_addr < ipb->i_no_addr)
                return -1;
        if (ipa->i_no_addr > ipb->i_no_addr)
                return 1;
        return 0;
}

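/**
 * gfs2_ordered_write - start writeback of the ordered-write inode list
 * @sdp: the filesystem
 *
 * Sorts sd_log_ordered by inode disk address (via ip_cmp above) so that
 * writeback is issued in on-disk order, then starts writeback on each
 * inode's dirty pages, dropping sd_ordered_lock around the I/O.
 */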
static void gfs2_ordered_write(struct gfs2_sbd *sdp)
{
        struct gfs2_inode *ip;
        LIST_HEAD(written);

        spin_lock(&sdp->sd_ordered_lock);
        list_sort(NULL, &sdp->sd_log_ordered, &ip_cmp);
        while (!list_empty(&sdp->sd_log_ordered)) {
                ip = list_entry(sdp->sd_log_ordered.next, struct gfs2_inode, i_ordered);
                if (ip->i_inode.i_mapping->nrpages == 0) {
                        test_and_clear_bit(GIF_ORDERED, &ip->i_flags);
                        list_del(&ip->i_ordered);
                        continue;
                }
                list_move(&ip->i_ordered, &written);
                spin_unlock(&sdp->sd_ordered_lock);
                filemap_fdatawrite(ip->i_inode.i_mapping);
                spin_lock(&sdp->sd_ordered_lock);
        }
        list_splice(&written, &sdp->sd_log_ordered);
        spin_unlock(&sdp->sd_ordered_lock);
}

static void gfs2_ordered_wait(struct gfs2_sbd *sdp)
{
        struct gfs2_inode *ip;

        spin_lock(&sdp->sd_ordered_lock);
        while (!list_empty(&sdp->sd_log_ordered)) {
                ip = list_entry(sdp->sd_log_ordered.next, struct gfs2_inode, i_ordered);
                list_del(&ip->i_ordered);
                WARN_ON(!test_and_clear_bit(GIF_ORDERED, &ip->i_flags));
                if (ip->i_inode.i_mapping->nrpages == 0)
                        continue;
                spin_unlock(&sdp->sd_ordered_lock);
                filemap_fdatawait(ip->i_inode.i_mapping);
                spin_lock(&sdp->sd_ordered_lock);
        }
        spin_unlock(&sdp->sd_ordered_lock);
}

void gfs2_ordered_del_inode(struct gfs2_inode *ip)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

        spin_lock(&sdp->sd_ordered_lock);
        if (test_and_clear_bit(GIF_ORDERED, &ip->i_flags))
                list_del(&ip->i_ordered);
        spin_unlock(&sdp->sd_ordered_lock);
}

void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
{
        struct buffer_head *bh = bd->bd_bh;
        struct gfs2_glock *gl = bd->bd_gl;

        bh->b_private = NULL;
        bd->bd_blkno = bh->b_blocknr;
        gfs2_remove_from_ail(bd); /* drops ref on bh */
        bd->bd_bh = NULL;
        sdp->sd_log_num_revoke++;
        if (!test_bit(GLF_REVOKES, &gl->gl_flags)) {
                set_bit(GLF_REVOKES, &gl->gl_flags);
                gfs2_glock_hold(gl);
        }
        set_bit(GLF_LFLUSH, &gl->gl_flags);
        list_add(&bd->bd_list, &sdp->sd_log_revokes);
}

void gfs2_write_revokes(struct gfs2_sbd *sdp)
{
        struct gfs2_trans *tr;
        struct gfs2_bufdata *bd, *tmp;
        int have_revokes = 0;
        int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);

        gfs2_ail1_empty(sdp);
        spin_lock(&sdp->sd_ail_lock);
        list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
                list_for_each_entry(bd, &tr->tr_ail2_list, bd_ail_st_list) {
                        if (list_empty(&bd->bd_list)) {
                                have_revokes = 1;
                                goto done;
                        }
                }
        }
done:
        spin_unlock(&sdp->sd_ail_lock);
        if (have_revokes == 0)
                return;
        while (sdp->sd_log_num_revoke > max_revokes)
                max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);
        max_revokes -= sdp->sd_log_num_revoke;
        if (!sdp->sd_log_num_revoke) {
                atomic_dec(&sdp->sd_log_blks_free);
                /* If no blocks have been reserved, we need to also
                 * reserve a block for the header */
                if (!sdp->sd_log_blks_reserved)
                        atomic_dec(&sdp->sd_log_blks_free);
        }
        gfs2_log_lock(sdp);
        spin_lock(&sdp->sd_ail_lock);
        list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
                list_for_each_entry_safe(bd, tmp, &tr->tr_ail2_list, bd_ail_st_list) {
                        if (max_revokes == 0)
                                goto out_of_blocks;
                        if (!list_empty(&bd->bd_list))
                                continue;
                        gfs2_add_revoke(sdp, bd);
                        max_revokes--;
                }
        }
out_of_blocks:
        spin_unlock(&sdp->sd_ail_lock);
        gfs2_log_unlock(sdp);

        if (!sdp->sd_log_num_revoke) {
                atomic_inc(&sdp->sd_log_blks_free);
                if (!sdp->sd_log_blks_reserved)
                        atomic_inc(&sdp->sd_log_blks_free);
        }
}
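
/*
 * For scale (illustrative, assuming 4096-byte blocks, a 72-byte log
 * descriptor and a 24-byte meta header): the initial max_revokes above
 * is (4096 - 72) / 8 = 503 revokes in the descriptor block, and each
 * additional journal block added by the while loop holds another
 * (4096 - 24) / 8 = 509 revokes.
 */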

/**
 * gfs2_write_log_header - Write a journal log header buffer at lblock
 * @sdp: The GFS2 superblock
 * @jd: journal descriptor of the journal to which we are writing
 * @seq: sequence number
 * @tail: tail of the log
 * @lblock: value for lh_blkno (block number relative to start of journal)
 * @flags: log header flags GFS2_LOG_HEAD_*
 * @op_flags: flags to pass to the bio
 */

void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
                           u64 seq, u32 tail, u32 lblock, u32 flags,
                           int op_flags)
{
        struct gfs2_log_header *lh;
        u32 hash, crc;
        struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
        struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
        struct timespec64 tv;
        struct super_block *sb = sdp->sd_vfs;
        u64 dblock;

        lh = page_address(page);
        clear_page(lh);

        lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
        lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
        lh->lh_header.__pad0 = cpu_to_be64(0);
        lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
        lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
        lh->lh_sequence = cpu_to_be64(seq);
        lh->lh_flags = cpu_to_be32(flags);
        lh->lh_tail = cpu_to_be32(tail);
        lh->lh_blkno = cpu_to_be32(lblock);
        hash = ~crc32(~0, lh, LH_V1_SIZE);
        lh->lh_hash = cpu_to_be32(hash);

        ktime_get_coarse_real_ts64(&tv);
        lh->lh_nsec = cpu_to_be32(tv.tv_nsec);
        lh->lh_sec = cpu_to_be64(tv.tv_sec);
        if (!list_empty(&jd->extent_list))
                dblock = gfs2_log_bmap(sdp);
        else {
                int ret = gfs2_lblk_to_dblk(jd->jd_inode, lblock, &dblock);
                if (gfs2_assert_withdraw(sdp, ret == 0))
                        return;
        }
        lh->lh_addr = cpu_to_be64(dblock);
        lh->lh_jinode = cpu_to_be64(GFS2_I(jd->jd_inode)->i_no_addr);

        /* We may only write local statfs, quota, etc., when writing to our
           own journal. The values are left 0 when recovering a journal
           different from our own. */
        if (!(flags & GFS2_LOG_HEAD_RECOVERY)) {
                lh->lh_statfs_addr =
                        cpu_to_be64(GFS2_I(sdp->sd_sc_inode)->i_no_addr);
                lh->lh_quota_addr =
                        cpu_to_be64(GFS2_I(sdp->sd_qc_inode)->i_no_addr);

                spin_lock(&sdp->sd_statfs_spin);
                lh->lh_local_total = cpu_to_be64(l_sc->sc_total);
                lh->lh_local_free = cpu_to_be64(l_sc->sc_free);
                lh->lh_local_dinodes = cpu_to_be64(l_sc->sc_dinodes);
                spin_unlock(&sdp->sd_statfs_spin);
        }

        BUILD_BUG_ON(offsetof(struct gfs2_log_header, lh_crc) != LH_V1_SIZE);

        crc = crc32c(~0, (void *)lh + LH_V1_SIZE + 4,
                     sb->s_blocksize - LH_V1_SIZE - 4);
        lh->lh_crc = cpu_to_be32(crc);

        gfs2_log_write(sdp, page, sb->s_blocksize, 0, dblock);
        gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE | op_flags);
        log_flush_wait(sdp);
}
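
/*
 * A sketch of the resulting on-disk header layout (for a 4096-byte
 * block): lh_hash is a crc32 of bytes [0, LH_V1_SIZE), i.e. the
 * original v1 header fields, while lh_crc is a crc32c of bytes
 * [LH_V1_SIZE + 4, 4096), i.e. everything after the v1 header except
 * the 4-byte lh_crc field itself, which is skipped so the checksum
 * does not cover its own storage.
 */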

/**
 * log_write_header - Get and initialize a journal header buffer
 * @sdp: The GFS2 superblock
 * @flags: The log header flags, including log header origin
 */

static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
{
        unsigned int tail;
        int op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META | REQ_SYNC;
        enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);

        gfs2_assert_withdraw(sdp, (state != SFS_FROZEN));
        tail = current_tail(sdp);

        if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) {
                gfs2_ordered_wait(sdp);
                log_flush_wait(sdp);
                op_flags = REQ_SYNC | REQ_META | REQ_PRIO;
        }
        sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
        gfs2_write_log_header(sdp, sdp->sd_jdesc, sdp->sd_log_sequence++, tail,
                              sdp->sd_log_flush_head, flags, op_flags);

        if (sdp->sd_log_tail != tail)
                log_pull_tail(sdp, tail);
}

/**
 * gfs2_log_flush - flush incore transaction(s)
 * @sdp: the filesystem
 * @gl: The glock structure to flush.  If NULL, flush the whole incore log
 * @flags: The log header flags: GFS2_LOG_HEAD_FLUSH_* and debug flags
 *
 */

void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
{
        struct gfs2_trans *tr;
        enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);

        down_write(&sdp->sd_log_flush_lock);

        /* Log might have been flushed while we waited for the flush lock */
        if (gl && !test_bit(GLF_LFLUSH, &gl->gl_flags)) {
                up_write(&sdp->sd_log_flush_lock);
                return;
        }
        trace_gfs2_log_flush(sdp, 1, flags);

        if (flags & GFS2_LOG_HEAD_FLUSH_SHUTDOWN)
                clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

        sdp->sd_log_flush_head = sdp->sd_log_head;
        tr = sdp->sd_log_tr;
        if (tr) {
                sdp->sd_log_tr = NULL;
                INIT_LIST_HEAD(&tr->tr_ail1_list);
                INIT_LIST_HEAD(&tr->tr_ail2_list);
                tr->tr_first = sdp->sd_log_flush_head;
                if (unlikely(state == SFS_FROZEN))
                        gfs2_assert_withdraw(sdp, !tr->tr_num_buf_new && !tr->tr_num_databuf_new);
        }

        if (unlikely(state == SFS_FROZEN))
                gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
        gfs2_assert_withdraw(sdp,
                        sdp->sd_log_num_revoke == sdp->sd_log_commited_revoke);

        gfs2_ordered_write(sdp);
        lops_before_commit(sdp, tr);
        gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE);

        if (sdp->sd_log_head != sdp->sd_log_flush_head) {
                log_flush_wait(sdp);
                log_write_header(sdp, flags);
        } else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle) {
                atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
                trace_gfs2_log_blocks(sdp, -1);
                log_write_header(sdp, flags);
        }
        lops_after_commit(sdp, tr);

        gfs2_log_lock(sdp);
        sdp->sd_log_head = sdp->sd_log_flush_head;
        sdp->sd_log_blks_reserved = 0;
        sdp->sd_log_commited_revoke = 0;

        spin_lock(&sdp->sd_ail_lock);
        if (tr && !list_empty(&tr->tr_ail1_list)) {
                list_add(&tr->tr_list, &sdp->sd_ail1_list);
                tr = NULL;
        }
        spin_unlock(&sdp->sd_ail_lock);
        gfs2_log_unlock(sdp);

        if (!(flags & GFS2_LOG_HEAD_FLUSH_NORMAL)) {
                if (!sdp->sd_log_idle) {
                        for (;;) {
                                gfs2_ail1_start(sdp);
                                gfs2_ail1_wait(sdp);
                                if (gfs2_ail1_empty(sdp))
                                        break;
                        }
                        atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
                        trace_gfs2_log_blocks(sdp, -1);
                        log_write_header(sdp, flags);
                        sdp->sd_log_head = sdp->sd_log_flush_head;
                }
                if (flags & (GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
                             GFS2_LOG_HEAD_FLUSH_FREEZE))
                        gfs2_log_shutdown(sdp);
                if (flags & GFS2_LOG_HEAD_FLUSH_FREEZE)
                        atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
        }

        trace_gfs2_log_flush(sdp, 0, flags);
        up_write(&sdp->sd_log_flush_lock);

        kfree(tr);
}

/**
 * gfs2_merge_trans - Merge a new transaction into a cached transaction
 * @old: Original transaction to be expanded
 * @new: New transaction to be merged
 */

static void gfs2_merge_trans(struct gfs2_trans *old, struct gfs2_trans *new)
{
        WARN_ON_ONCE(!test_bit(TR_ATTACHED, &old->tr_flags));

        old->tr_num_buf_new     += new->tr_num_buf_new;
        old->tr_num_databuf_new += new->tr_num_databuf_new;
        old->tr_num_buf_rm      += new->tr_num_buf_rm;
        old->tr_num_databuf_rm  += new->tr_num_databuf_rm;
        old->tr_num_revoke      += new->tr_num_revoke;
        old->tr_num_revoke_rm   += new->tr_num_revoke_rm;

        list_splice_tail_init(&new->tr_databuf, &old->tr_databuf);
        list_splice_tail_init(&new->tr_buf, &old->tr_buf);
}

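/**
 * log_refund - Attach a transaction to the incore log and refund blocks
 * @sdp: the filesystem
 * @tr: the transaction being committed
 *
 * Merges @tr into (or attaches it as) the current incore transaction,
 * recalculates how many journal blocks the incore state really needs,
 * and returns any over-reservation to sd_log_blks_free.
 */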
static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
        unsigned int reserved;
        unsigned int unused;
        unsigned int maxres;

        gfs2_log_lock(sdp);

        if (sdp->sd_log_tr) {
                gfs2_merge_trans(sdp->sd_log_tr, tr);
        } else if (tr->tr_num_buf_new || tr->tr_num_databuf_new) {
                gfs2_assert_withdraw(sdp, test_bit(TR_ALLOCED, &tr->tr_flags));
                sdp->sd_log_tr = tr;
                set_bit(TR_ATTACHED, &tr->tr_flags);
        }

        sdp->sd_log_commited_revoke += tr->tr_num_revoke - tr->tr_num_revoke_rm;
        reserved = calc_reserved(sdp);
        maxres = sdp->sd_log_blks_reserved + tr->tr_reserved;
        gfs2_assert_withdraw(sdp, maxres >= reserved);
        unused = maxres - reserved;
        atomic_add(unused, &sdp->sd_log_blks_free);
        trace_gfs2_log_blocks(sdp, unused);
        gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
                             sdp->sd_jdesc->jd_blocks);
        sdp->sd_log_blks_reserved = reserved;

        gfs2_log_unlock(sdp);
}

/**
 * gfs2_log_commit - Commit a transaction to the log
 * @sdp: the filesystem
 * @tr: the transaction
 *
 * We wake up gfs2_logd if the number of pinned blocks exceeds thresh1
 * or the total number of used blocks (pinned blocks plus AIL blocks)
 * is greater than thresh2.
 *
 * At mount time thresh1 is 1/3rd of journal size, thresh2 is 2/3rd of
 * journal size.
 */

void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
        log_refund(sdp, tr);

        if (atomic_read(&sdp->sd_log_pinned) > atomic_read(&sdp->sd_log_thresh1) ||
            ((sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free)) >
            atomic_read(&sdp->sd_log_thresh2)))
                wake_up(&sdp->sd_logd_waitq);
}
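
/*
 * For example (illustrative journal size, assuming the default 1/3 and
 * 2/3 thresholds above): with jd_blocks = 32768, thresh1 is roughly
 * 10922 and thresh2 roughly 21845, so logd is woken once more than
 * about a third of the journal is pinned, or more than about two
 * thirds of it is in use overall.
 */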

/**
 * gfs2_log_shutdown - write a shutdown header into a journal
 * @sdp: the filesystem
 *
 */

void gfs2_log_shutdown(struct gfs2_sbd *sdp)
{
        gfs2_assert_withdraw(sdp, !sdp->sd_log_blks_reserved);
        gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
        gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail1_list));

        sdp->sd_log_flush_head = sdp->sd_log_head;

        log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT | GFS2_LFC_SHUTDOWN);

        gfs2_assert_warn(sdp, sdp->sd_log_head == sdp->sd_log_tail);
        gfs2_assert_warn(sdp, list_empty(&sdp->sd_ail2_list));

        sdp->sd_log_head = sdp->sd_log_flush_head;
        sdp->sd_log_tail = sdp->sd_log_head;
}

static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
{
        return (atomic_read(&sdp->sd_log_pinned) +
                atomic_read(&sdp->sd_log_blks_needed) >=
                atomic_read(&sdp->sd_log_thresh1));
}

static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp)
{
        unsigned int used_blocks = sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free);

        if (test_and_clear_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags))
                return 1;

        return used_blocks + atomic_read(&sdp->sd_log_blks_needed) >=
                atomic_read(&sdp->sd_log_thresh2);
}

/**
 * gfs2_logd - Update log tail as Active Items get flushed to in-place blocks
 * @sdp: Pointer to GFS2 superblock
 *
 * Also, periodically check to make sure that we're using the most recent
 * journal index.
 */

int gfs2_logd(void *data)
{
        struct gfs2_sbd *sdp = data;
        unsigned long t = 1;
        DEFINE_WAIT(wait);
        bool did_flush;

        while (!kthread_should_stop()) {

                /* Check for errors writing to the journal */
                if (sdp->sd_log_error) {
                        gfs2_lm_withdraw(sdp,
                                         "GFS2: fsid=%s: error %d: "
                                         "withdrawing the file system to "
                                         "prevent further damage.\n",
                                         sdp->sd_fsname, sdp->sd_log_error);
                }

                did_flush = false;
                if (gfs2_jrnl_flush_reqd(sdp) || t == 0) {
                        gfs2_ail1_empty(sdp);
                        gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
                                       GFS2_LFC_LOGD_JFLUSH_REQD);
                        did_flush = true;
                }

                if (gfs2_ail_flush_reqd(sdp)) {
                        gfs2_ail1_start(sdp);
                        gfs2_ail1_wait(sdp);
                        gfs2_ail1_empty(sdp);
                        gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
                                       GFS2_LFC_LOGD_AIL_FLUSH_REQD);
                        did_flush = true;
                }

                if (!gfs2_ail_flush_reqd(sdp) || did_flush)
                        wake_up(&sdp->sd_log_waitq);

                t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;

                try_to_freeze();

                do {
                        prepare_to_wait(&sdp->sd_logd_waitq, &wait,
                                        TASK_INTERRUPTIBLE);
                        if (!gfs2_ail_flush_reqd(sdp) &&
                            !gfs2_jrnl_flush_reqd(sdp) &&
                            !kthread_should_stop())
                                t = schedule_timeout(t);
                } while (t && !gfs2_ail_flush_reqd(sdp) &&
                         !gfs2_jrnl_flush_reqd(sdp) &&
                         !kthread_should_stop());
                finish_wait(&sdp->sd_logd_waitq, &wait);
        }

        return 0;
}
1054