/*
 * linux/fs/jbd/revoke.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 2000
 *
 * Copyright 2000 Red Hat corp --- All Rights Reserved
 *
 * This file is part of the Linux kernel and is made available under
 * the terms of the GNU General Public License, version 2, or at your
 * option, any later version, incorporated herein by reference.
 *
 * Journal revoke routines for the generic filesystem journaling code;
 * part of the ext2fs journaling system.
 *
 * Revoke is the mechanism used to prevent old log records for deleted
 * metadata from being replayed on top of newer data using the same
 * blocks.  The revoke mechanism is used in two separate places:
 *
 * + Commit: during commit we write the entire list of the current
 *   transaction's revoked blocks to the journal
 *
 * + Recovery: during recovery we record the transaction ID of all
 *   revoked blocks.  If there are multiple revoke records in the log
 *   for a single block, only the last one counts, and if there is a log
 *   entry for a block beyond the last revoke, then that log entry still
 *   gets replayed.
 *
 * We can get interactions between revokes and new log data within a
 * single transaction:
 *
 * Block is revoked and then journaled:
 *   The desired end result is the journaling of the new block, so we
 *   cancel the revoke before the transaction commits.
 *
 * Block is journaled and then revoked:
 *   The revoke must take precedence over the write of the block, so we
 *   need either to cancel the journal entry or to write the revoke
 *   later in the log than the log block.  In this case, we choose the
 *   latter: journaling a block cancels any revoke record for that block
 *   in the current transaction, so any revoke for that block in the
 *   transaction must have happened after the block was journaled and so
 *   the revoke must take precedence.
 *
 * Block is revoked and then written as data:
 *   The data write is allowed to succeed, but the revoke is _not_
 *   cancelled.  We still need to prevent old log records from
 *   overwriting the new data.  We don't even need to clear the revoke
 *   bit here.
 *
 * Revoke information on buffers is a tri-state value:
 *
 * RevokeValid clear:	no cached revoke status, need to look it up
 * RevokeValid set, Revoked clear:
 *			buffer has not been revoked, and cancel_revoke
 *			need do nothing.
 * RevokeValid set, Revoked set:
 *			buffer has been revoked.
 */
#include <linux/time.h>
#include <linux/jbd.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/log2.h>
/* Slab caches for revoke records and for the two per-journal revoke tables. */
static struct kmem_cache *revoke_record_cache;
static struct kmem_cache *revoke_table_cache;
77 /* Each revoke record represents one single revoked block. During
78 journal replay, this involves recording the transaction ID of the
79 last transaction to revoke this block. */
81 struct jbd_revoke_record_s
83 struct list_head hash;
84 tid_t sequence; /* Used for recovery only */
85 unsigned long blocknr;
/* The revoke table is just a simple hash table of revoke records. */
struct jbd_revoke_table_s
{
	/* It is conceivable that we might want a larger hash table
	 * for recovery.  Must be a power of two. */
	int		  hash_size;	/* number of buckets (power of two) */
	int		  hash_shift;	/* log2(hash_size), used by hash() */
	struct list_head *hash_table;	/* kmalloc'd array of bucket heads */
};
101 static void write_one_revoke_record(journal_t *, transaction_t *,
102 struct journal_head **, int *,
103 struct jbd_revoke_record_s *, int);
104 static void flush_descriptor(journal_t *, struct journal_head *, int, int);
107 /* Utility functions to maintain the revoke table */
109 /* Borrowed from buffer.c: this is a tried and tested block hash function */
110 static inline int hash(journal_t *journal, unsigned long block)
112 struct jbd_revoke_table_s *table = journal->j_revoke;
113 int hash_shift = table->hash_shift;
115 return ((block << (hash_shift - 6)) ^
117 (block << (hash_shift - 12))) & (table->hash_size - 1);
120 static int insert_revoke_hash(journal_t *journal, unsigned long blocknr,
123 struct list_head *hash_list;
124 struct jbd_revoke_record_s *record;
127 record = kmem_cache_alloc(revoke_record_cache, GFP_NOFS);
131 record->sequence = seq;
132 record->blocknr = blocknr;
133 hash_list = &journal->j_revoke->hash_table[hash(journal, blocknr)];
134 spin_lock(&journal->j_revoke_lock);
135 list_add(&record->hash, hash_list);
136 spin_unlock(&journal->j_revoke_lock);
140 if (!journal_oom_retry)
142 jbd_debug(1, "ENOMEM in %s, retrying\n", __func__);
147 /* Find a revoke record in the journal's hash table. */
149 static struct jbd_revoke_record_s *find_revoke_record(journal_t *journal,
150 unsigned long blocknr)
152 struct list_head *hash_list;
153 struct jbd_revoke_record_s *record;
155 hash_list = &journal->j_revoke->hash_table[hash(journal, blocknr)];
157 spin_lock(&journal->j_revoke_lock);
158 record = (struct jbd_revoke_record_s *) hash_list->next;
159 while (&(record->hash) != hash_list) {
160 if (record->blocknr == blocknr) {
161 spin_unlock(&journal->j_revoke_lock);
164 record = (struct jbd_revoke_record_s *) record->hash.next;
166 spin_unlock(&journal->j_revoke_lock);
170 void journal_destroy_revoke_caches(void)
172 if (revoke_record_cache) {
173 kmem_cache_destroy(revoke_record_cache);
174 revoke_record_cache = NULL;
176 if (revoke_table_cache) {
177 kmem_cache_destroy(revoke_table_cache);
178 revoke_table_cache = NULL;
182 int __init journal_init_revoke_caches(void)
184 J_ASSERT(!revoke_record_cache);
185 J_ASSERT(!revoke_table_cache);
187 revoke_record_cache = kmem_cache_create("revoke_record",
188 sizeof(struct jbd_revoke_record_s),
190 SLAB_HWCACHE_ALIGN|SLAB_TEMPORARY,
192 if (!revoke_record_cache)
193 goto record_cache_failure;
195 revoke_table_cache = kmem_cache_create("revoke_table",
196 sizeof(struct jbd_revoke_table_s),
197 0, SLAB_TEMPORARY, NULL);
198 if (!revoke_table_cache)
199 goto table_cache_failure;
204 journal_destroy_revoke_caches();
205 record_cache_failure:
209 static struct jbd_revoke_table_s *journal_init_revoke_table(int hash_size)
213 struct jbd_revoke_table_s *table;
215 table = kmem_cache_alloc(revoke_table_cache, GFP_KERNEL);
219 while((tmp >>= 1UL) != 0UL)
222 table->hash_size = hash_size;
223 table->hash_shift = shift;
225 kmalloc(hash_size * sizeof(struct list_head), GFP_KERNEL);
226 if (!table->hash_table) {
227 kmem_cache_free(revoke_table_cache, table);
232 for (tmp = 0; tmp < hash_size; tmp++)
233 INIT_LIST_HEAD(&table->hash_table[tmp]);
239 static void journal_destroy_revoke_table(struct jbd_revoke_table_s *table)
242 struct list_head *hash_list;
244 for (i = 0; i < table->hash_size; i++) {
245 hash_list = &table->hash_table[i];
246 J_ASSERT(list_empty(hash_list));
249 kfree(table->hash_table);
250 kmem_cache_free(revoke_table_cache, table);
253 /* Initialise the revoke table for a given journal to a given size. */
254 int journal_init_revoke(journal_t *journal, int hash_size)
256 J_ASSERT(journal->j_revoke_table[0] == NULL);
257 J_ASSERT(is_power_of_2(hash_size));
259 journal->j_revoke_table[0] = journal_init_revoke_table(hash_size);
260 if (!journal->j_revoke_table[0])
263 journal->j_revoke_table[1] = journal_init_revoke_table(hash_size);
264 if (!journal->j_revoke_table[1])
267 journal->j_revoke = journal->j_revoke_table[1];
269 spin_lock_init(&journal->j_revoke_lock);
274 journal_destroy_revoke_table(journal->j_revoke_table[0]);
279 /* Destroy a journal's revoke table. The table must already be empty! */
280 void journal_destroy_revoke(journal_t *journal)
282 journal->j_revoke = NULL;
283 if (journal->j_revoke_table[0])
284 journal_destroy_revoke_table(journal->j_revoke_table[0]);
285 if (journal->j_revoke_table[1])
286 journal_destroy_revoke_table(journal->j_revoke_table[1]);
293 * journal_revoke: revoke a given buffer_head from the journal. This
294 * prevents the block from being replayed during recovery if we take a
295 * crash after this current transaction commits. Any subsequent
296 * metadata writes of the buffer in this transaction cancel the
299 * Note that this call may block --- it is up to the caller to make
300 * sure that there are no further calls to journal_write_metadata
301 * before the revoke is complete. In ext3, this implies calling the
302 * revoke before clearing the block bitmap when we are deleting
305 * Revoke performs a journal_forget on any buffer_head passed in as a
306 * parameter, but does _not_ forget the buffer_head if the bh was only
309 * bh_in may not be a journalled buffer - it may have come off
310 * the hash tables without an attached journal_head.
312 * If bh_in is non-zero, journal_revoke() will decrement its b_count
316 int journal_revoke(handle_t *handle, unsigned long blocknr,
317 struct buffer_head *bh_in)
319 struct buffer_head *bh = NULL;
321 struct block_device *bdev;
326 BUFFER_TRACE(bh_in, "enter");
328 journal = handle->h_transaction->t_journal;
329 if (!journal_set_features(journal, 0, 0, JFS_FEATURE_INCOMPAT_REVOKE)){
330 J_ASSERT (!"Cannot set revoke feature!");
334 bdev = journal->j_fs_dev;
338 bh = __find_get_block(bdev, blocknr, journal->j_blocksize);
340 BUFFER_TRACE(bh, "found on hash");
342 #ifdef JBD_EXPENSIVE_CHECKING
344 struct buffer_head *bh2;
346 /* If there is a different buffer_head lying around in
347 * memory anywhere... */
348 bh2 = __find_get_block(bdev, blocknr, journal->j_blocksize);
350 /* ... and it has RevokeValid status... */
351 if (bh2 != bh && buffer_revokevalid(bh2))
352 /* ...then it better be revoked too,
353 * since it's illegal to create a revoke
354 * record against a buffer_head which is
355 * not marked revoked --- that would
356 * risk missing a subsequent revoke
358 J_ASSERT_BH(bh2, buffer_revoked(bh2));
364 /* We really ought not ever to revoke twice in a row without
365 first having the revoke cancelled: it's illegal to free a
366 block twice without allocating it in between! */
368 if (!J_EXPECT_BH(bh, !buffer_revoked(bh),
369 "inconsistent data on disk")) {
374 set_buffer_revoked(bh);
375 set_buffer_revokevalid(bh);
377 BUFFER_TRACE(bh_in, "call journal_forget");
378 journal_forget(handle, bh_in);
380 BUFFER_TRACE(bh, "call brelse");
385 jbd_debug(2, "insert revoke for block %lu, bh_in=%p\n", blocknr, bh_in);
386 err = insert_revoke_hash(journal, blocknr,
387 handle->h_transaction->t_tid);
388 BUFFER_TRACE(bh_in, "exit");
393 * Cancel an outstanding revoke. For use only internally by the
394 * journaling code (called from journal_get_write_access).
396 * We trust buffer_revoked() on the buffer if the buffer is already
397 * being journaled: if there is no revoke pending on the buffer, then we
398 * don't do anything here.
400 * This would break if it were possible for a buffer to be revoked and
401 * discarded, and then reallocated within the same transaction. In such
402 * a case we would have lost the revoked bit, but when we arrived here
403 * the second time we would still have a pending revoke to cancel. So,
404 * do not trust the Revoked bit on buffers unless RevokeValid is also
407 * The caller must have the journal locked.
409 int journal_cancel_revoke(handle_t *handle, struct journal_head *jh)
411 struct jbd_revoke_record_s *record;
412 journal_t *journal = handle->h_transaction->t_journal;
414 int did_revoke = 0; /* akpm: debug */
415 struct buffer_head *bh = jh2bh(jh);
417 jbd_debug(4, "journal_head %p, cancelling revoke\n", jh);
419 /* Is the existing Revoke bit valid? If so, we trust it, and
420 * only perform the full cancel if the revoke bit is set. If
421 * not, we can't trust the revoke bit, and we need to do the
422 * full search for a revoke record. */
423 if (test_set_buffer_revokevalid(bh)) {
424 need_cancel = test_clear_buffer_revoked(bh);
427 clear_buffer_revoked(bh);
431 record = find_revoke_record(journal, bh->b_blocknr);
433 jbd_debug(4, "cancelled existing revoke on "
434 "blocknr %llu\n", (unsigned long long)bh->b_blocknr);
435 spin_lock(&journal->j_revoke_lock);
436 list_del(&record->hash);
437 spin_unlock(&journal->j_revoke_lock);
438 kmem_cache_free(revoke_record_cache, record);
443 #ifdef JBD_EXPENSIVE_CHECKING
444 /* There better not be one left behind by now! */
445 record = find_revoke_record(journal, bh->b_blocknr);
446 J_ASSERT_JH(jh, record == NULL);
449 /* Finally, have we just cleared revoke on an unhashed
450 * buffer_head? If so, we'd better make sure we clear the
451 * revoked status on any hashed alias too, otherwise the revoke
452 * state machine will get very upset later on. */
454 struct buffer_head *bh2;
455 bh2 = __find_get_block(bh->b_bdev, bh->b_blocknr, bh->b_size);
458 clear_buffer_revoked(bh2);
465 /* journal_switch_revoke table select j_revoke for next transaction
466 * we do not want to suspend any processing until all revokes are
469 void journal_switch_revoke_table(journal_t *journal)
473 if (journal->j_revoke == journal->j_revoke_table[0])
474 journal->j_revoke = journal->j_revoke_table[1];
476 journal->j_revoke = journal->j_revoke_table[0];
478 for (i = 0; i < journal->j_revoke->hash_size; i++)
479 INIT_LIST_HEAD(&journal->j_revoke->hash_table[i]);
483 * Write revoke records to the journal for all entries in the current
484 * revoke hash, deleting the entries as we go.
486 * Called with the journal lock held.
489 void journal_write_revoke_records(journal_t *journal,
490 transaction_t *transaction, int write_op)
492 struct journal_head *descriptor;
493 struct jbd_revoke_record_s *record;
494 struct jbd_revoke_table_s *revoke;
495 struct list_head *hash_list;
496 int i, offset, count;
502 /* select revoke table for committing transaction */
503 revoke = journal->j_revoke == journal->j_revoke_table[0] ?
504 journal->j_revoke_table[1] : journal->j_revoke_table[0];
506 for (i = 0; i < revoke->hash_size; i++) {
507 hash_list = &revoke->hash_table[i];
509 while (!list_empty(hash_list)) {
510 record = (struct jbd_revoke_record_s *)
512 write_one_revoke_record(journal, transaction,
513 &descriptor, &offset,
516 list_del(&record->hash);
517 kmem_cache_free(revoke_record_cache, record);
521 flush_descriptor(journal, descriptor, offset, write_op);
522 jbd_debug(1, "Wrote %d revoke records\n", count);
526 * Write out one revoke record. We need to create a new descriptor
527 * block if the old one is full or if we have not already created one.
530 static void write_one_revoke_record(journal_t *journal,
531 transaction_t *transaction,
532 struct journal_head **descriptorp,
534 struct jbd_revoke_record_s *record,
537 struct journal_head *descriptor;
539 journal_header_t *header;
541 /* If we are already aborting, this all becomes a noop. We
542 still need to go round the loop in
543 journal_write_revoke_records in order to free all of the
544 revoke records: only the IO to the journal is omitted. */
545 if (is_journal_aborted(journal))
548 descriptor = *descriptorp;
551 /* Make sure we have a descriptor with space left for the record */
553 if (offset == journal->j_blocksize) {
554 flush_descriptor(journal, descriptor, offset, write_op);
560 descriptor = journal_get_descriptor_buffer(journal);
563 header = (journal_header_t *) &jh2bh(descriptor)->b_data[0];
564 header->h_magic = cpu_to_be32(JFS_MAGIC_NUMBER);
565 header->h_blocktype = cpu_to_be32(JFS_REVOKE_BLOCK);
566 header->h_sequence = cpu_to_be32(transaction->t_tid);
568 /* Record it so that we can wait for IO completion later */
569 JBUFFER_TRACE(descriptor, "file as BJ_LogCtl");
570 journal_file_buffer(descriptor, transaction, BJ_LogCtl);
572 offset = sizeof(journal_revoke_header_t);
573 *descriptorp = descriptor;
576 * ((__be32 *)(&jh2bh(descriptor)->b_data[offset])) =
577 cpu_to_be32(record->blocknr);
583 * Flush a revoke descriptor out to the journal. If we are aborting,
584 * this is a noop; otherwise we are generating a buffer which needs to
585 * be waited for during commit, so it has to go onto the appropriate
586 * journal buffer list.
589 static void flush_descriptor(journal_t *journal,
590 struct journal_head *descriptor,
591 int offset, int write_op)
593 journal_revoke_header_t *header;
594 struct buffer_head *bh = jh2bh(descriptor);
596 if (is_journal_aborted(journal)) {
601 header = (journal_revoke_header_t *) jh2bh(descriptor)->b_data;
602 header->r_count = cpu_to_be32(offset);
603 set_buffer_jwrite(bh);
604 BUFFER_TRACE(bh, "write");
605 set_buffer_dirty(bh);
606 ll_rw_block((write_op == WRITE) ? SWRITE : SWRITE_SYNC_PLUG, 1, &bh);
611 * Revoke support for recovery.
613 * Recovery needs to be able to:
615 * record all revoke records, including the tid of the latest instance
616 * of each revoke in the journal
618 * check whether a given block in a given transaction should be replayed
619 * (ie. has not been revoked by a revoke record in that or a subsequent
622 * empty the revoke table after recovery.
626 * First, setting revoke records. We create a new revoke record for
627 * every block ever revoked in the log as we scan it for recovery, and
628 * we update the existing records if we find multiple revokes for a
632 int journal_set_revoke(journal_t *journal,
633 unsigned long blocknr,
636 struct jbd_revoke_record_s *record;
638 record = find_revoke_record(journal, blocknr);
640 /* If we have multiple occurrences, only record the
641 * latest sequence number in the hashed record */
642 if (tid_gt(sequence, record->sequence))
643 record->sequence = sequence;
646 return insert_revoke_hash(journal, blocknr, sequence);
650 * Test revoke records. For a given block referenced in the log, has
651 * that block been revoked? A revoke record with a given transaction
652 * sequence number revokes all blocks in that transaction and earlier
653 * ones, but later transactions still need replayed.
656 int journal_test_revoke(journal_t *journal,
657 unsigned long blocknr,
660 struct jbd_revoke_record_s *record;
662 record = find_revoke_record(journal, blocknr);
665 if (tid_gt(sequence, record->sequence))
671 * Finally, once recovery is over, we need to clear the revoke table so
672 * that it can be reused by the running filesystem.
675 void journal_clear_revoke(journal_t *journal)
678 struct list_head *hash_list;
679 struct jbd_revoke_record_s *record;
680 struct jbd_revoke_table_s *revoke;
682 revoke = journal->j_revoke;
684 for (i = 0; i < revoke->hash_size; i++) {
685 hash_list = &revoke->hash_table[i];
686 while (!list_empty(hash_list)) {
687 record = (struct jbd_revoke_record_s*) hash_list->next;
688 list_del(&record->hash);
689 kmem_cache_free(revoke_record_cache, record);