// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/list_sort.h>
#include <linux/iversion.h>
#include "misc.h"
#include "ctree.h"
#include "tree-log.h"
#include "disk-io.h"
#include "locking.h"
#include "print-tree.h"
#include "backref.h"
#include "compression.h"
#include "qgroup.h"
#include "inode-map.h"
#include "block-group.h"
#include "space-info.h"

/* magic values for the inode_only field in btrfs_log_inode:
 *
 * LOG_INODE_ALL means to log everything
 * LOG_INODE_EXISTS means to log just enough to recreate the inode
 * during log replay
 */
enum {
	LOG_INODE_ALL,
	LOG_INODE_EXISTS,
	LOG_OTHER_INODE,
	LOG_OTHER_INODE_ALL,
};

/*
 * directory trouble cases
 *
 * 1) on rename or unlink, if the inode being unlinked isn't in the fsync
 * log, we must force a full commit before doing an fsync of the directory
 * where the unlink was done.
 * ---> record transid of last unlink/rename per directory
 *
 * mkdir foo/some_dir
 * normal commit
 * rename foo/some_dir foo2/some_dir
 * mkdir foo/some_dir
 * fsync foo/some_dir/some_file
 *
 * The fsync above will unlink the original some_dir without recording
 * it in its new location (foo2).  After a crash, some_dir will be gone
 * unless the fsync of some_file forces a full commit
 *
 * 2) we must log any new names for any file or dir that is in the fsync
 * log. ---> check inode while renaming/linking.
 *
 * 2a) we must log any new names for any file or dir during rename
 * when the directory they are being removed from was logged.
 * ---> check inode and old parent dir during rename
 *
 * 2a is actually the more important variant.  Without the extra logging
 * a crash might unlink the old name without recreating the new one
 *
 * 3) after a crash, we must go through any directories with a link count
 * of zero and redo the rm -rf
 *
 * mkdir f1/foo
 * normal commit
 * rm -rf f1/foo
 * fsync(f1)
 *
 * The directory f1 was fully removed from the FS, but fsync was never
 * called on f1, only its parent dir.  After a crash the rm -rf must
 * be replayed.  This must be able to recurse down the entire
 * directory tree.  The inode link count fixup code takes care of the
 * ugly details.
 */
/*
 * stages for the tree walking.  The first
 * stage (0) is to only pin down the blocks we find
 * the second stage (1) is to make sure that all the inodes
 * we find in the log are created in the subvolume.
 *
 * The last stage is to deal with directories and links and extents
 * and all the other fun semantics
 */
enum {
	LOG_WALK_PIN_ONLY,
	LOG_WALK_REPLAY_INODES,
	LOG_WALK_REPLAY_DIR_INDEX,
	LOG_WALK_REPLAY_ALL,
};

static int btrfs_log_inode(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, struct btrfs_inode *inode,
			   int inode_only,
			   struct btrfs_log_ctx *ctx);
static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid);
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       u64 dirid, int del_all);

/*
 * tree logging is a special write ahead log used to make sure that
 * fsyncs and O_SYNCs can happen without doing full tree commits.
 *
 * Full tree commits are expensive because they require commonly
 * modified blocks to be recowed, creating many dirty pages in the
 * extent tree and a 4x-6x higher write load than ext3.
 *
 * Instead of doing a tree commit on every fsync, we use the
 * key ranges and transaction ids to find items for a given file or directory
 * that have changed in this transaction.  Those items are copied into
 * a special tree (one per subvolume root), that tree is written to disk
 * and then the fsync is considered complete.
 *
 * After a crash, items are copied out of the log-tree back into the
 * subvolume tree.  Any file data extents found are recorded in the extent
 * allocation tree, and the log-tree is freed.
 *
 * The log tree is read three times: once to pin down all the extents it is
 * using in ram, once to create all the inodes logged in the tree, and once
 * to do all the other items.
 */
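/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * the three reads described above correspond to one walk of the log tree per
 * stage, driven by the walk helper defined later in this file.  A recovery
 * loop that owns a walk_control (see the struct further below) would look
 * roughly like:
 *
 *	wc.stage = LOG_WALK_PIN_ONLY;
 *	while (wc.stage <= LOG_WALK_REPLAY_ALL) {
 *		ret = walk_log_tree(trans, log, &wc);
 *		if (ret)
 *			break;
 *		wc.stage++;
 *	}
 */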
/*
 * start a sub transaction and setup the log tree
 * this increments the log tree writer count to make the people
 * syncing the tree wait for us to finish
 */
static int start_log_trans(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_log_ctx *ctx)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;

	mutex_lock(&root->log_mutex);

	if (root->log_root) {
		if (btrfs_need_log_full_commit(trans)) {
			ret = -EAGAIN;
			goto out;
		}

		if (!root->log_start_pid) {
			clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
			root->log_start_pid = current->pid;
		} else if (root->log_start_pid != current->pid) {
			set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
		}
	} else {
		mutex_lock(&fs_info->tree_log_mutex);
		if (!fs_info->log_root_tree)
			ret = btrfs_init_log_root_tree(trans, fs_info);
		mutex_unlock(&fs_info->tree_log_mutex);
		if (ret)
			goto out;

		ret = btrfs_add_log_tree(trans, root);
		if (ret)
			goto out;

		set_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state);
		clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
		root->log_start_pid = current->pid;
	}

	atomic_inc(&root->log_batch);
	atomic_inc(&root->log_writers);
	if (ctx && !ctx->logging_new_name) {
		int index = root->log_transid % 2;
		list_add_tail(&ctx->list, &root->log_ctxs[index]);
		ctx->log_transid = root->log_transid;
	}

out:
	mutex_unlock(&root->log_mutex);
	return ret;
}

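/*
 * Illustrative sketch (editor's addition): a log writer brackets its work
 * between start_log_trans() and btrfs_end_log_trans(), so that anyone
 * syncing the log waits for it to finish:
 *
 *	ret = start_log_trans(trans, root, ctx);
 *	if (ret)
 *		return ret;
 *	ret = btrfs_log_inode(trans, root, inode, LOG_INODE_ALL, ctx);
 *	btrfs_end_log_trans(root);
 */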
/*
 * returns 0 if there was a log transaction running and we were able
 * to join, or returns -ENOENT if there was no transaction in progress
 */
static int join_running_log_trans(struct btrfs_root *root)
{
	int ret = -ENOENT;

	if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state))
		return ret;

	mutex_lock(&root->log_mutex);
	if (root->log_root) {
		ret = 0;
		atomic_inc(&root->log_writers);
	}
	mutex_unlock(&root->log_mutex);
	return ret;
}

/*
 * This either makes the current running log transaction wait
 * until you call btrfs_end_log_trans() or it makes any future
 * log transactions wait until you call btrfs_end_log_trans()
 */
void btrfs_pin_log_trans(struct btrfs_root *root)
{
	atomic_inc(&root->log_writers);
}

/*
 * indicate we're done making changes to the log tree
 * and wake up anyone waiting to do a sync
 */
void btrfs_end_log_trans(struct btrfs_root *root)
{
	if (atomic_dec_and_test(&root->log_writers)) {
		/* atomic_dec_and_test implies a barrier */
		cond_wake_up_nomb(&root->log_writer_wait);
	}
}

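/*
 * Usage note (editor's addition): callers that must keep a log transaction
 * from syncing underneath them, but are not logging anything themselves
 * (e.g. rename/unlink paths), pair the pin with the same end call:
 *
 *	btrfs_pin_log_trans(root);
 *	... modify directory state ...
 *	btrfs_end_log_trans(root);
 */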
static int btrfs_write_tree_block(struct extent_buffer *buf)
{
	return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
					buf->start + buf->len - 1);
}

static void btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
{
	filemap_fdatawait_range(buf->pages[0]->mapping,
				buf->start, buf->start + buf->len - 1);
}

/*
 * the walk control struct is used to pass state down the chain when
 * processing the log tree.  The stage field tells us which part
 * of the log tree processing we are currently doing.  The others
 * are state fields used for that specific part
 */
struct walk_control {
	/* should we free the extent on disk when done?  This is used
	 * at transaction commit time while freeing a log tree
	 */
	int free;

	/* should we write out the extent buffer?  This is used
	 * while flushing the log tree to disk during a sync
	 */
	int write;

	/* should we wait for the extent buffer io to finish?  Also used
	 * while flushing the log tree to disk for a sync
	 */
	int wait;

	/* pin only walk, we record which extents on disk belong to the
	 * log trees, used only during log replay
	 */
	int pin;

	/* what stage of the replay code we're currently in */
	int stage;

	/*
	 * Ignore any items from the inode currently being processed. Needs
	 * to be set every time we find a BTRFS_INODE_ITEM_KEY and we are in
	 * the LOG_WALK_REPLAY_INODES stage.
	 */
	bool ignore_cur_inode;

	/* the root we are currently replaying */
	struct btrfs_root *replay_dest;

	/* the trans handle for the current replay */
	struct btrfs_trans_handle *trans;

	/* the function that gets used to process blocks we find in the
	 * tree.  Note the extent_buffer might not be up to date when it is
	 * passed in, and it must be checked or read if you need the data
	 * inside it
	 */
	int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
			    struct walk_control *wc, u64 gen, int level);
};

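/*
 * Illustrative sketch (editor's addition): the flag fields select what a
 * walk does.  For example, freeing a log tree at transaction commit time can
 * initialize the control as:
 *
 *	struct walk_control wc = {
 *		.free = 1,
 *		.process_func = process_one_buffer
 *	};
 *
 * while log replay instead sets .pin = 1 for the first pass and then
 * advances wc.stage through the LOG_WALK_* values.
 */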
/*
 * process_func used to pin down extents, write them or wait on them
 */
static int process_one_buffer(struct btrfs_root *log,
			      struct extent_buffer *eb,
			      struct walk_control *wc, u64 gen, int level)
{
	struct btrfs_fs_info *fs_info = log->fs_info;
	int ret = 0;

	/*
	 * If this fs is mixed then we need to be able to process the leaves to
	 * pin down any logged extents, so we have to read the block.
	 */
	if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
		ret = btrfs_read_buffer(eb, gen, level, NULL);
		if (ret)
			return ret;
	}

	if (wc->pin)
		ret = btrfs_pin_extent_for_log_replay(wc->trans, eb->start,
						      eb->len);

	if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) {
		if (wc->pin && btrfs_header_level(eb) == 0)
			ret = btrfs_exclude_logged_extents(eb);
		if (wc->write)
			btrfs_write_tree_block(eb);
		if (wc->wait)
			btrfs_wait_tree_block_writeback(eb);
	}
	return ret;
}

/*
 * Item overwrite used by replay and tree logging.  eb, slot and key all refer
 * to the src data we are copying out.
 *
 * root is the tree we are copying into, and path is a scratch
 * path for use in this function (it should be released on entry and
 * will be released on exit).
 *
 * If the key is already in the destination tree the existing item is
 * overwritten.  If the existing item isn't big enough, it is extended.
 * If it is too large, it is truncated.
 *
 * If the key isn't in the destination yet, a new item is inserted.
 */
static noinline int overwrite_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct extent_buffer *eb, int slot,
				   struct btrfs_key *key)
{
	int ret;
	u32 item_size;
	u64 saved_i_size = 0;
	int save_old_i_size = 0;
	unsigned long src_ptr;
	unsigned long dst_ptr;
	int overwrite_root = 0;
	bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
		overwrite_root = 1;

	item_size = btrfs_item_size_nr(eb, slot);
	src_ptr = btrfs_item_ptr_offset(eb, slot);

	/* look for the key in the destination tree */
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret < 0)
		return ret;

	if (ret == 0) {
		char *src_copy;
		char *dst_copy;
		u32 dst_size = btrfs_item_size_nr(path->nodes[0],
						  path->slots[0]);
		if (dst_size != item_size)
			goto insert;

		if (item_size == 0) {
			btrfs_release_path(path);
			return 0;
		}
		dst_copy = kmalloc(item_size, GFP_NOFS);
		src_copy = kmalloc(item_size, GFP_NOFS);
		if (!dst_copy || !src_copy) {
			btrfs_release_path(path);
			kfree(dst_copy);
			kfree(src_copy);
			return -ENOMEM;
		}

		read_extent_buffer(eb, src_copy, src_ptr, item_size);

		dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,
				   item_size);
		ret = memcmp(dst_copy, src_copy, item_size);

		kfree(dst_copy);
		kfree(src_copy);
		/*
		 * they have the same contents, just return, this saves
		 * us from cowing blocks in the destination tree and doing
		 * extra writes that may not have been done by a previous
		 * sync
		 */
		if (ret == 0) {
			btrfs_release_path(path);
			return 0;
		}

		/*
		 * We need to load the old nbytes into the inode so when we
		 * replay the extents we've logged we get the right nbytes.
		 */
		if (inode_item) {
			struct btrfs_inode_item *item;
			u64 nbytes;
			u32 mode;

			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_inode_item);
			nbytes = btrfs_inode_nbytes(path->nodes[0], item);
			item = btrfs_item_ptr(eb, slot,
					      struct btrfs_inode_item);
			btrfs_set_inode_nbytes(eb, item, nbytes);

			/*
			 * If this is a directory we need to reset the i_size to
			 * 0 so that we can set it up properly when replaying
			 * the rest of the items in this log.
			 */
			mode = btrfs_inode_mode(eb, item);
			if (S_ISDIR(mode))
				btrfs_set_inode_size(eb, item, 0);
		}
	} else if (inode_item) {
		struct btrfs_inode_item *item;
		u32 mode;

		/*
		 * New inode, set nbytes to 0 so that the nbytes comes out
		 * properly when we replay the extents.
		 */
		item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
		btrfs_set_inode_nbytes(eb, item, 0);

		/*
		 * If this is a directory we need to reset the i_size to 0 so
		 * that we can set it up properly when replaying the rest of
		 * the items in this log.
		 */
		mode = btrfs_inode_mode(eb, item);
		if (S_ISDIR(mode))
			btrfs_set_inode_size(eb, item, 0);
	}
insert:
	btrfs_release_path(path);
	/* try to insert the key into the destination tree */
	path->skip_release_on_error = 1;
	ret = btrfs_insert_empty_item(trans, root, path,
				      key, item_size);
	path->skip_release_on_error = 0;

	/* make sure any existing item is the correct size */
	if (ret == -EEXIST || ret == -EOVERFLOW) {
		u32 found_size;
		found_size = btrfs_item_size_nr(path->nodes[0],
						path->slots[0]);
		if (found_size > item_size)
			btrfs_truncate_item(path, item_size, 1);
		else if (found_size < item_size)
			btrfs_extend_item(path, item_size - found_size);
	} else if (ret) {
		return ret;
	}
	dst_ptr = btrfs_item_ptr_offset(path->nodes[0],
					path->slots[0]);

	/* don't overwrite an existing inode if the generation number
	 * was logged as zero.  This is done when the tree logging code
	 * is just logging an inode to make sure it exists after recovery.
	 *
	 * Also, don't overwrite i_size on directories during replay.
	 * log replay inserts and removes directory items based on the
	 * state of the tree found in the subvolume, and i_size is modified
	 * as it goes
	 */
	if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
		struct btrfs_inode_item *src_item;
		struct btrfs_inode_item *dst_item;

		src_item = (struct btrfs_inode_item *)src_ptr;
		dst_item = (struct btrfs_inode_item *)dst_ptr;

		if (btrfs_inode_generation(eb, src_item) == 0) {
			struct extent_buffer *dst_eb = path->nodes[0];
			const u64 ino_size = btrfs_inode_size(eb, src_item);

			/*
			 * For regular files an ino_size == 0 is used only when
			 * logging that an inode exists, as part of a directory
			 * fsync, and the inode wasn't fsynced before. In this
			 * case don't set the size of the inode in the fs/subvol
			 * tree, otherwise we would be throwing valid data away.
			 */
			if (S_ISREG(btrfs_inode_mode(eb, src_item)) &&
			    S_ISREG(btrfs_inode_mode(dst_eb, dst_item)) &&
			    ino_size != 0)
				btrfs_set_inode_size(dst_eb, dst_item, ino_size);
			goto no_copy;
		}

		if (overwrite_root &&
		    S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
		    S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
			save_old_i_size = 1;
			saved_i_size = btrfs_inode_size(path->nodes[0],
							dst_item);
		}
	}

	copy_extent_buffer(path->nodes[0], eb, dst_ptr,
			   src_ptr, item_size);

	if (save_old_i_size) {
		struct btrfs_inode_item *dst_item;
		dst_item = (struct btrfs_inode_item *)dst_ptr;
		btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);
	}

	/* make sure the generation is filled in */
	if (key->type == BTRFS_INODE_ITEM_KEY) {
		struct btrfs_inode_item *dst_item;
		dst_item = (struct btrfs_inode_item *)dst_ptr;
		if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
			btrfs_set_inode_generation(path->nodes[0], dst_item,
						   trans->transid);
		}
	}
no_copy:
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(path);
	return 0;
}

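/*
 * Worked example (editor's addition): when the destination already holds the
 * key but with a different item size, the -EEXIST path above fixes the size
 * before the raw copy.  Replaying a 160 byte inode item over an existing
 * 96 byte copy calls btrfs_extend_item(path, 64); replaying a 96 byte item
 * over a 160 byte copy calls btrfs_truncate_item(path, 96, 1) to shrink it.
 */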
/*
 * simple helper to read an inode off the disk from a given root
 * This can only be called for subvolume roots and not for the log
 */
static noinline struct inode *read_one_inode(struct btrfs_root *root,
					     u64 objectid)
{
	struct inode *inode;

	inode = btrfs_iget(root->fs_info->sb, objectid, root);
	if (IS_ERR(inode))
		inode = NULL;
	return inode;
}

/* replays a single extent in 'eb' at 'slot' with 'key' into the
 * subvolume 'root'.  path is released on entry and should be released
 * on return.
 *
 * extents in the log tree have not been allocated out of the extent
 * tree yet.  So, this completes the allocation, taking a reference
 * as required if the extent already exists or creating a new extent
 * if it isn't in the extent allocation tree yet.
 *
 * The extent is inserted into the file, dropping any existing extents
 * from the file that overlap the new one.
 */
static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct extent_buffer *eb, int slot,
				      struct btrfs_key *key)
{
	struct btrfs_drop_extents_args drop_args = { 0 };
	struct btrfs_fs_info *fs_info = root->fs_info;
	int found_type;
	u64 extent_end;
	u64 start = key->offset;
	u64 nbytes = 0;
	struct btrfs_file_extent_item *item;
	struct inode *inode = NULL;
	unsigned long size;
	int ret = 0;

	item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
	found_type = btrfs_file_extent_type(eb, item);

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		nbytes = btrfs_file_extent_num_bytes(eb, item);
		extent_end = start + nbytes;

		/*
		 * We don't add to the inodes nbytes if we are prealloc or a
		 * hole.
		 */
		if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
			nbytes = 0;
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		size = btrfs_file_extent_ram_bytes(eb, item);
		nbytes = btrfs_file_extent_ram_bytes(eb, item);
		extent_end = ALIGN(start + size,
				   fs_info->sectorsize);
	} else {
		ret = 0;
		goto out;
	}

	inode = read_one_inode(root, key->objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	/*
	 * first check to see if we already have this extent in the
	 * file.  This must be done before the btrfs_drop_extents run
	 * so we don't try to drop this extent.
	 */
	ret = btrfs_lookup_file_extent(trans, root, path,
			btrfs_ino(BTRFS_I(inode)), start, 0);

	if (ret == 0 &&
	    (found_type == BTRFS_FILE_EXTENT_REG ||
	     found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
		struct btrfs_file_extent_item cmp1;
		struct btrfs_file_extent_item cmp2;
		struct btrfs_file_extent_item *existing;
		struct extent_buffer *leaf;

		leaf = path->nodes[0];
		existing = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_file_extent_item);

		read_extent_buffer(eb, &cmp1, (unsigned long)item,
				   sizeof(cmp1));
		read_extent_buffer(leaf, &cmp2, (unsigned long)existing,
				   sizeof(cmp2));

		/*
		 * we already have a pointer to this exact extent,
		 * we don't have to do anything
		 */
		if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
			btrfs_release_path(path);
			goto out;
		}
	}
	btrfs_release_path(path);

	/* drop any overlapping extents */
	drop_args.start = start;
	drop_args.end = extent_end;
	drop_args.drop_cache = true;
	ret = btrfs_drop_extents(trans, root, BTRFS_I(inode), &drop_args);
	if (ret)
		goto out;

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		u64 offset;
		unsigned long dest_offset;
		struct btrfs_key ins;

		if (btrfs_file_extent_disk_bytenr(eb, item) == 0 &&
		    btrfs_fs_incompat(fs_info, NO_HOLES))
			goto update_inode;

		ret = btrfs_insert_empty_item(trans, root, path, key,
					      sizeof(*item));
		if (ret)
			goto out;
		dest_offset = btrfs_item_ptr_offset(path->nodes[0],
						    path->slots[0]);
		copy_extent_buffer(path->nodes[0], eb, dest_offset,
				(unsigned long)item,  sizeof(*item));

		ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
		ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
		ins.type = BTRFS_EXTENT_ITEM_KEY;
		offset = key->offset - btrfs_file_extent_offset(eb, item);

		/*
		 * Manually record dirty extent, as here we did a shallow
		 * file extent item copy and skip normal backref update,
		 * but modifying extent tree all by ourselves.
		 * So need to manually record dirty extent for qgroup,
		 * as the owner of the file extent changed from log tree
		 * (doesn't affect qgroup) to fs/file tree (affects qgroup)
		 */
		ret = btrfs_qgroup_trace_extent(trans,
				btrfs_file_extent_disk_bytenr(eb, item),
				btrfs_file_extent_disk_num_bytes(eb, item),
				GFP_NOFS);
		if (ret < 0)
			goto out;

		if (ins.objectid > 0) {
			struct btrfs_ref ref = { 0 };
			u64 csum_start;
			u64 csum_end;
			LIST_HEAD(ordered_sums);

			/*
			 * is this extent already allocated in the extent
			 * allocation tree?  If so, just add a reference
			 */
			ret = btrfs_lookup_data_extent(fs_info, ins.objectid,
						ins.offset);
			if (ret == 0) {
				btrfs_init_generic_ref(&ref,
						BTRFS_ADD_DELAYED_REF,
						ins.objectid, ins.offset, 0);
				btrfs_init_data_ref(&ref,
						root->root_key.objectid,
						key->objectid, offset);
				ret = btrfs_inc_extent_ref(trans, &ref);
				if (ret)
					goto out;
			} else {
				/*
				 * insert the extent pointer in the extent
				 * allocation tree
				 */
				ret = btrfs_alloc_logged_file_extent(trans,
						root->root_key.objectid,
						key->objectid, offset, &ins);
				if (ret)
					goto out;
			}
			btrfs_release_path(path);

			if (btrfs_file_extent_compression(eb, item)) {
				csum_start = ins.objectid;
				csum_end = csum_start + ins.offset;
			} else {
				csum_start = ins.objectid +
					btrfs_file_extent_offset(eb, item);
				csum_end = csum_start +
					btrfs_file_extent_num_bytes(eb, item);
			}

			ret = btrfs_lookup_csums_range(root->log_root,
						csum_start, csum_end - 1,
						&ordered_sums, 0);
			if (ret)
				goto out;
			/*
			 * Now delete all existing csums in the csum root that
			 * cover our range. We do this because we can have an
			 * extent that is completely referenced by one file
			 * extent item and partially referenced by another
			 * file extent item (like after using the clone or
			 * extent_same ioctls). In this case if we end up doing
			 * the replay of the one that partially references the
			 * extent first, and we do not do the csum deletion
			 * below, we can get 2 csum items in the csum tree that
			 * overlap each other. For example, imagine our log has
			 * the two following file extent items:
			 *
			 * key (257 EXTENT_DATA 409600)
			 *     extent data disk byte 12845056 nr 102400
			 *     extent data offset 20480 nr 20480 ram 102400
			 *
			 * key (257 EXTENT_DATA 819200)
			 *     extent data disk byte 12845056 nr 102400
			 *     extent data offset 0 nr 102400 ram 102400
			 *
			 * Where the second one fully references the 100K extent
			 * that starts at disk byte 12845056, and the log tree
			 * has a single csum item that covers the entire range
			 * of the extent:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
			 *
			 * After the first file extent item is replayed, the
			 * csum tree gets the following csum item:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
			 *
			 * Which covers the 20K sub-range starting at offset 20K
			 * of our extent. Now when we replay the second file
			 * extent item, if we do not delete existing csum items
			 * that cover any of its blocks, we end up getting two
			 * csum items in our csum tree that overlap each other:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
			 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
			 *
			 * Which is a problem, because after this anyone trying
			 * to look up the checksum of any block of our extent
			 * starting at an offset of 40K or higher, will end up
			 * looking at the second csum item only, which does not
			 * contain the checksum for any block starting at
			 * offset 40K or higher of our extent.
			 */
			while (!list_empty(&ordered_sums)) {
				struct btrfs_ordered_sum *sums;
				sums = list_entry(ordered_sums.next,
						struct btrfs_ordered_sum,
						list);
				if (!ret)
					ret = btrfs_del_csums(trans,
							      fs_info->csum_root,
							      sums->bytenr,
							      sums->len);
				if (!ret)
					ret = btrfs_csum_file_blocks(trans,
						fs_info->csum_root, sums);
				list_del(&sums->list);
				kfree(sums);
			}
			if (ret)
				goto out;
		} else {
			btrfs_release_path(path);
		}
		btrfs_release_path(path);
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		/* inline extents are easy, we just overwrite them */
		ret = overwrite_item(trans, root, path, eb, slot, key);
		if (ret)
			goto out;
	}

	ret = btrfs_inode_set_file_extent_range(BTRFS_I(inode), start,
						extent_end - start);
	if (ret)
		goto out;

update_inode:
	btrfs_update_inode_bytes(BTRFS_I(inode), nbytes, drop_args.bytes_found);
	ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
out:
	iput(inode);
	return ret;
}

/*
 * when cleaning up conflicts between the directory names in the
 * subvolume, directory names in the log and directory names in the
 * inode back references, we may have to unlink inodes from directories.
 *
 * This is a helper function to do the unlink of a specific directory
 * item
 */
static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct btrfs_inode *dir,
				      struct btrfs_dir_item *di)
{
	struct inode *inode;
	char *name;
	int name_len;
	struct extent_buffer *leaf;
	struct btrfs_key location;
	int ret;

	leaf = path->nodes[0];

	btrfs_dir_item_key_to_cpu(leaf, di, &location);
	name_len = btrfs_dir_name_len(leaf, di);
	name = kmalloc(name_len, GFP_NOFS);
	if (!name)
		return -ENOMEM;

	read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
	btrfs_release_path(path);

	inode = read_one_inode(root, location.objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	ret = link_to_fixup_dir(trans, root, path, location.objectid);
	if (ret)
		goto out;

	ret = btrfs_unlink_inode(trans, root, dir, BTRFS_I(inode), name,
			name_len);
	if (ret)
		goto out;
	else
		ret = btrfs_run_delayed_items(trans);
out:
	kfree(name);
	iput(inode);
	return ret;
}

/*
 * helper function to see if a given name and sequence number found
 * in an inode back reference are already in a directory and correctly
 * point to this inode
 */
static noinline int inode_in_dir(struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 dirid, u64 objectid, u64 index,
				 const char *name, int name_len)
{
	struct btrfs_dir_item *di;
	struct btrfs_key location;
	int match = 0;

	di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
					 index, name, name_len, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
		if (location.objectid != objectid)
			goto out;
	} else
		goto out;
	btrfs_release_path(path);

	di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
		if (location.objectid != objectid)
			goto out;
	} else
		goto out;
	match = 1;
out:
	btrfs_release_path(path);
	return match;
}

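/*
 * Usage note (editor's addition): add_inode_ref() further below uses this as
 * its fast path, e.g.
 *
 *	if (!inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)),
 *			  btrfs_ino(BTRFS_I(inode)), ref_index, name, namelen))
 *		... resolve conflicts and re-add the link ...
 *
 * so a back reference is only replayed when the directory does not already
 * contain the exact (name, index, objectid) triple.
 */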
/*
 * helper function to check a log tree for a named back reference in
 * an inode.  This is used to decide if a back reference that is
 * found in the subvolume conflicts with what we find in the log.
 *
 * inode backreferences may have multiple refs in a single item,
 * during replay we process one reference at a time, and we don't
 * want to delete valid links to a file from the subvolume if that
 * link is also in the log.
 */
static noinline int backref_in_log(struct btrfs_root *log,
				   struct btrfs_key *key,
				   u64 ref_objectid,
				   const char *name, int namelen)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
	if (ret < 0) {
		goto out;
	} else if (ret == 1) {
		ret = 0;
		goto out;
	}

	if (key->type == BTRFS_INODE_EXTREF_KEY)
		ret = !!btrfs_find_name_in_ext_backref(path->nodes[0],
						       path->slots[0],
						       ref_objectid,
						       name, namelen);
	else
		ret = !!btrfs_find_name_in_backref(path->nodes[0],
						   path->slots[0],
						   name, namelen);
out:
	btrfs_free_path(path);
	return ret;
}

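/*
 * Illustrative sketch (editor's addition): the key passed in selects the ref
 * flavour.  Old style refs are keyed by the parent's objectid, extended refs
 * by a hash of (parent, name), as the callers below construct them:
 *
 *	key.objectid = inode_objectid;
 *	key.type = BTRFS_INODE_REF_KEY;
 *	key.offset = parent_objectid;
 *
 * versus
 *
 *	key.type = BTRFS_INODE_EXTREF_KEY;
 *	key.offset = btrfs_extref_hash(parent_objectid, name, namelen);
 */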
static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  struct btrfs_root *log_root,
				  struct btrfs_inode *dir,
				  struct btrfs_inode *inode,
				  u64 inode_objectid, u64 parent_objectid,
				  u64 ref_index, char *name, int namelen,
				  int *search_done)
{
	int ret;
	char *victim_name;
	int victim_name_len;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key search_key;
	struct btrfs_inode_extref *extref;

again:
	/* Search old style refs */
	search_key.objectid = inode_objectid;
	search_key.type = BTRFS_INODE_REF_KEY;
	search_key.offset = parent_objectid;
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	if (ret == 0) {
		struct btrfs_inode_ref *victim_ref;
		unsigned long ptr;
		unsigned long ptr_end;

		leaf = path->nodes[0];

		/* are we trying to overwrite a back ref for the root directory
		 * if so, just jump out, we're done
		 */
		if (search_key.objectid == search_key.offset)
			return 1;

		/* check all the names in this back reference to see
		 * if they are in the log.  if so, we allow them to stay
		 * otherwise they must be unlinked as a conflict
		 */
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]);
		while (ptr < ptr_end) {
			victim_ref = (struct btrfs_inode_ref *)ptr;
			victim_name_len = btrfs_inode_ref_name_len(leaf,
								   victim_ref);
			victim_name = kmalloc(victim_name_len, GFP_NOFS);
			if (!victim_name)
				return -ENOMEM;

			read_extent_buffer(leaf, victim_name,
					   (unsigned long)(victim_ref + 1),
					   victim_name_len);

			ret = backref_in_log(log_root, &search_key,
					     parent_objectid, victim_name,
					     victim_name_len);
			if (ret < 0) {
				kfree(victim_name);
				return ret;
			} else if (!ret) {
				inc_nlink(&inode->vfs_inode);
				btrfs_release_path(path);

				ret = btrfs_unlink_inode(trans, root, dir, inode,
						victim_name, victim_name_len);
				kfree(victim_name);
				if (ret)
					return ret;
				ret = btrfs_run_delayed_items(trans);
				if (ret)
					return ret;
				*search_done = 1;
				goto again;
			}
			kfree(victim_name);

			ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
		}

		/*
		 * NOTE: we have searched root tree and checked the
		 * corresponding ref, it does not need to check again.
		 */
		*search_done = 1;
	}
	btrfs_release_path(path);

	/* Same search but for extended refs */
	extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen,
					   inode_objectid, parent_objectid, 0,
					   0);
	if (!IS_ERR_OR_NULL(extref)) {
		u32 item_size;
		u32 cur_offset = 0;
		unsigned long base;
		struct inode *victim_parent;

		leaf = path->nodes[0];

		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		base = btrfs_item_ptr_offset(leaf, path->slots[0]);

		while (cur_offset < item_size) {
			extref = (struct btrfs_inode_extref *)(base + cur_offset);

			victim_name_len = btrfs_inode_extref_name_len(leaf, extref);

			if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid)
				goto next;

			victim_name = kmalloc(victim_name_len, GFP_NOFS);
			if (!victim_name)
				return -ENOMEM;
			read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name,
					   victim_name_len);

			search_key.objectid = inode_objectid;
			search_key.type = BTRFS_INODE_EXTREF_KEY;
			search_key.offset = btrfs_extref_hash(parent_objectid,
							      victim_name,
							      victim_name_len);
			ret = backref_in_log(log_root, &search_key,
					     parent_objectid, victim_name,
					     victim_name_len);
			if (ret < 0) {
				kfree(victim_name);
				return ret;
			} else if (!ret) {
				ret = -ENOENT;
				victim_parent = read_one_inode(root,
						parent_objectid);
				if (victim_parent) {
					inc_nlink(&inode->vfs_inode);
					btrfs_release_path(path);

					ret = btrfs_unlink_inode(trans, root,
							BTRFS_I(victim_parent),
							inode,
							victim_name,
							victim_name_len);
					if (!ret)
						ret = btrfs_run_delayed_items(
								  trans);
				}
				iput(victim_parent);
				kfree(victim_name);
				if (ret)
					return ret;
				*search_done = 1;
				goto again;
			}
			kfree(victim_name);
next:
			cur_offset += victim_name_len + sizeof(*extref);
		}
		*search_done = 1;
	}
	btrfs_release_path(path);

	/* look for a conflicting sequence number */
	di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
					 ref_index, name, namelen, 0);
	if (di && !IS_ERR(di)) {
		ret = drop_one_dir_item(trans, root, path, dir, di);
		if (ret)
			return ret;
	}
	btrfs_release_path(path);

	/* look for a conflicting name */
	di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
				   name, namelen, 0);
	if (di && !IS_ERR(di)) {
		ret = drop_one_dir_item(trans, root, path, dir, di);
		if (ret)
			return ret;
	}
	btrfs_release_path(path);

	return 0;
}

static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
			     u32 *namelen, char **name, u64 *index,
			     u64 *parent_objectid)
{
	struct btrfs_inode_extref *extref;

	extref = (struct btrfs_inode_extref *)ref_ptr;

	*namelen = btrfs_inode_extref_name_len(eb, extref);
	*name = kmalloc(*namelen, GFP_NOFS);
	if (*name == NULL)
		return -ENOMEM;

	read_extent_buffer(eb, *name, (unsigned long)&extref->name,
			   *namelen);

	if (index)
		*index = btrfs_inode_extref_index(eb, extref);
	if (parent_objectid)
		*parent_objectid = btrfs_inode_extref_parent(eb, extref);

	return 0;
}

static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
			  u32 *namelen, char **name, u64 *index)
{
	struct btrfs_inode_ref *ref;

	ref = (struct btrfs_inode_ref *)ref_ptr;

	*namelen = btrfs_inode_ref_name_len(eb, ref);
	*name = kmalloc(*namelen, GFP_NOFS);
	if (*name == NULL)
		return -ENOMEM;

	read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen);

	if (index)
		*index = btrfs_inode_ref_index(eb, ref);

	return 0;
}

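/*
 * Layout note (editor's addition): both helpers parse the variable sized ref
 * structures, whose name bytes immediately follow the fixed header inside
 * the item:
 *
 *	struct btrfs_inode_ref    { index, name_len }            name bytes...
 *	struct btrfs_inode_extref { parent_objectid, index,
 *				    name_len }                   name bytes...
 *
 * which is why the name is read from (ref + 1) and &extref->name above.
 */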
/*
 * Take an inode reference item from the log tree and iterate all names from
 * the inode reference item in the subvolume tree with the same key (if it
 * exists).  For any name that is not in the inode reference item from the
 * log tree, do a proper unlink of that name (that is, remove its entry from
 * the inode reference item and both dir index keys).
 */
static int unlink_old_inode_refs(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_inode *inode,
				 struct extent_buffer *log_eb,
				 int log_slot,
				 struct btrfs_key *key)
{
	int ret;
	unsigned long ref_ptr;
	unsigned long ref_end;
	struct extent_buffer *eb;

again:
	btrfs_release_path(path);
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret > 0) {
		ret = 0;
		goto out;
	}
	if (ret < 0)
		goto out;

	eb = path->nodes[0];
	ref_ptr = btrfs_item_ptr_offset(eb, path->slots[0]);
	ref_end = ref_ptr + btrfs_item_size_nr(eb, path->slots[0]);
	while (ref_ptr < ref_end) {
		char *name = NULL;
		int namelen;
		u64 parent_id;

		if (key->type == BTRFS_INODE_EXTREF_KEY) {
			ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
						NULL, &parent_id);
		} else {
			parent_id = key->offset;
			ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
					     NULL);
		}
		if (ret)
			goto out;

		if (key->type == BTRFS_INODE_EXTREF_KEY)
			ret = !!btrfs_find_name_in_ext_backref(log_eb, log_slot,
							       parent_id, name,
							       namelen);
		else
			ret = !!btrfs_find_name_in_backref(log_eb, log_slot,
							   name, namelen);

		if (!ret) {
			struct inode *dir;

			btrfs_release_path(path);
			dir = read_one_inode(root, parent_id);
			if (!dir) {
				ret = -ENOENT;
				kfree(name);
				goto out;
			}
			ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
						 inode, name, namelen);
			kfree(name);
			iput(dir);
			if (ret)
				goto out;
			goto again;
		}

		kfree(name);
		ref_ptr += namelen;
		if (key->type == BTRFS_INODE_EXTREF_KEY)
			ref_ptr += sizeof(struct btrfs_inode_extref);
		else
			ref_ptr += sizeof(struct btrfs_inode_ref);
	}
	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}

static int btrfs_inode_ref_exists(struct inode *inode, struct inode *dir,
				  const u8 ref_type, const char *name,
				  const int namelen)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	const u64 parent_id = btrfs_ino(BTRFS_I(dir));
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = btrfs_ino(BTRFS_I(inode));
	key.type = ref_type;
	if (key.type == BTRFS_INODE_REF_KEY)
		key.offset = parent_id;
	else
		key.offset = btrfs_extref_hash(parent_id, name, namelen);

	ret = btrfs_search_slot(NULL, BTRFS_I(inode)->root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = 0;
		goto out;
	}
	if (key.type == BTRFS_INODE_EXTREF_KEY)
		ret = !!btrfs_find_name_in_ext_backref(path->nodes[0],
				path->slots[0], parent_id, name, namelen);
	else
		ret = !!btrfs_find_name_in_backref(path->nodes[0], path->slots[0],
						   name, namelen);
out:
	btrfs_free_path(path);
	return ret;
}

static int add_link(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct inode *dir, struct inode *inode, const char *name,
		    int namelen, u64 ref_index)
{
	struct btrfs_dir_item *dir_item;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct inode *other_inode = NULL;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	dir_item = btrfs_lookup_dir_item(NULL, root, path,
					 btrfs_ino(BTRFS_I(dir)),
					 name, namelen, 0);
	if (!dir_item) {
		btrfs_release_path(path);
		goto add_link;
	} else if (IS_ERR(dir_item)) {
		ret = PTR_ERR(dir_item);
		goto out;
	}

	/*
	 * Our inode's dentry collides with the dentry of another inode which is
	 * in the log but not yet processed since it has a higher inode number.
	 * So delete that other dentry.
	 */
	btrfs_dir_item_key_to_cpu(path->nodes[0], dir_item, &key);
	btrfs_release_path(path);
	other_inode = read_one_inode(root, key.objectid);
	if (!other_inode) {
		ret = -ENOENT;
		goto out;
	}
	ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir), BTRFS_I(other_inode),
				 name, namelen);
	if (ret)
		goto out;
	/*
	 * If we dropped the link count to 0, bump it so that later the iput()
	 * on the inode will not free it. We will fixup the link count later.
	 */
	if (other_inode->i_nlink == 0)
		inc_nlink(other_inode);

	ret = btrfs_run_delayed_items(trans);
	if (ret)
		goto out;
add_link:
	ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
			     name, namelen, 0, ref_index);
out:
	iput(other_inode);
	btrfs_free_path(path);

	return ret;
}

/*
 * replay one inode back reference item found in the log tree.
 * eb, slot and key refer to the buffer and key found in the log tree.
 * root is the destination we are replaying into, and path is for temp
 * use by this function.  (it should be released on return).
 */
static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_root *log,
				  struct btrfs_path *path,
				  struct extent_buffer *eb, int slot,
				  struct btrfs_key *key)
{
	struct inode *dir = NULL;
	struct inode *inode = NULL;
	unsigned long ref_ptr;
	unsigned long ref_end;
	char *name = NULL;
	int namelen;
	int ret;
	int search_done = 0;
	int log_ref_ver = 0;
	u64 parent_objectid;
	u64 inode_objectid;
	u64 ref_index = 0;
	int ref_struct_size;

	ref_ptr = btrfs_item_ptr_offset(eb, slot);
	ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);

	if (key->type == BTRFS_INODE_EXTREF_KEY) {
		struct btrfs_inode_extref *r;

		ref_struct_size = sizeof(struct btrfs_inode_extref);
		log_ref_ver = 1;
		r = (struct btrfs_inode_extref *)ref_ptr;
		parent_objectid = btrfs_inode_extref_parent(eb, r);
	} else {
		ref_struct_size = sizeof(struct btrfs_inode_ref);
		parent_objectid = key->offset;
	}
	inode_objectid = key->objectid;

	/*
	 * it is possible that we didn't log all the parent directories
	 * for a given inode.  If we don't find the dir, just don't
	 * copy the back ref in.  The link count fixup code will take
	 * care of the rest
	 */
	dir = read_one_inode(root, parent_objectid);
	if (!dir) {
		ret = -ENOENT;
		goto out;
	}

	inode = read_one_inode(root, inode_objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	while (ref_ptr < ref_end) {
		if (log_ref_ver) {
			ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
						&ref_index, &parent_objectid);
			/*
			 * parent object can change from one array
			 * item to another.
			 */
			if (!dir)
				dir = read_one_inode(root, parent_objectid);
			if (!dir) {
				ret = -ENOENT;
				goto out;
			}
		} else {
			ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
					     &ref_index);
		}
		if (ret)
			goto out;

		/* if we already have a perfect match, we're done */
		if (!inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)),
					btrfs_ino(BTRFS_I(inode)), ref_index,
					name, namelen)) {
			/*
			 * look for a conflicting back reference in the
			 * metadata. if we find one we have to unlink that name
			 * of the file before we add our new link.  Later on, we
			 * overwrite any existing back reference, and we don't
			 * want to create dangling pointers in the directory.
			 */
			if (!search_done) {
				ret = __add_inode_ref(trans, root, path, log,
						      BTRFS_I(dir),
						      BTRFS_I(inode),
						      inode_objectid,
						      parent_objectid,
						      ref_index, name, namelen,
						      &search_done);
				if (ret) {
					if (ret == 1)
						ret = 0;
					goto out;
				}
			}

			/*
			 * If a reference item already exists for this inode
			 * with the same parent and name, but different index,
			 * drop it and the corresponding directory index entries
			 * from the parent before adding the new reference item
			 * and dir index entries, otherwise we would fail with
			 * -EEXIST returned from btrfs_add_link() below.
			 */
			ret = btrfs_inode_ref_exists(inode, dir, key->type,
						     name, namelen);
			if (ret > 0) {
				ret = btrfs_unlink_inode(trans, root,
							 BTRFS_I(dir),
							 BTRFS_I(inode),
							 name, namelen);
				/*
				 * If we dropped the link count to 0, bump it so
				 * that later the iput() on the inode will not
				 * free it. We will fixup the link count later.
				 */
				if (!ret && inode->i_nlink == 0)
					inc_nlink(inode);
			}
			if (ret < 0)
				goto out;

			/* insert our name */
			ret = add_link(trans, root, dir, inode, name, namelen,
				       ref_index);
			if (ret)
				goto out;

			btrfs_update_inode(trans, root, BTRFS_I(inode));
		}

		ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
		kfree(name);
		name = NULL;
		if (log_ref_ver) {
			iput(dir);
			dir = NULL;
		}
	}

	/*
	 * Before we overwrite the inode reference item in the subvolume tree
	 * with the item from the log tree, we must unlink all names from the
	 * parent directory that are in the subvolume's tree inode reference
	 * item, otherwise we end up with an inconsistent subvolume tree where
	 * dir index entries exist for a name but there is no inode reference
	 * item with the same name.
	 */
	ret = unlink_old_inode_refs(trans, root, path, BTRFS_I(inode), eb, slot,
				    key);
	if (ret)
		goto out;

	/* finally write the back reference in the inode */
	ret = overwrite_item(trans, root, path, eb, slot, key);
out:
	btrfs_release_path(path);
	kfree(name);
	iput(dir);
	iput(inode);
	return ret;
}

static int count_inode_extrefs(struct btrfs_root *root,
		struct btrfs_inode *inode, struct btrfs_path *path)
{
	int ret = 0;
	int name_len;
	unsigned int nlink = 0;
	u32 item_size;
	u32 cur_offset = 0;
	u64 inode_objectid = btrfs_ino(inode);
	u64 offset = 0;
	unsigned long ptr;
	struct btrfs_inode_extref *extref;
	struct extent_buffer *leaf;

	while (1) {
		ret = btrfs_find_one_extref(root, inode_objectid, offset, path,
					    &extref, &offset);
		if (ret)
			break;

		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		cur_offset = 0;

		while (cur_offset < item_size) {
			extref = (struct btrfs_inode_extref *) (ptr + cur_offset);
			name_len = btrfs_inode_extref_name_len(leaf, extref);

			nlink++;

			cur_offset += name_len + sizeof(*extref);
		}

		offset++;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);

	if (ret < 0 && ret != -ENOENT)
		return ret;
	return nlink;
}

static int count_inode_refs(struct btrfs_root *root,
			struct btrfs_inode *inode, struct btrfs_path *path)
{
	int ret;
	struct btrfs_key key;
	unsigned int nlink = 0;
	unsigned long ptr;
	unsigned long ptr_end;
	int name_len;
	u64 ino = btrfs_ino(inode);

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
process_slot:
		btrfs_item_key_to_cpu(path->nodes[0], &key,
				      path->slots[0]);
		if (key.objectid != ino ||
		    key.type != BTRFS_INODE_REF_KEY)
			break;
		ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		ptr_end = ptr + btrfs_item_size_nr(path->nodes[0],
						   path->slots[0]);
		while (ptr < ptr_end) {
			struct btrfs_inode_ref *ref;

			ref = (struct btrfs_inode_ref *)ptr;
			name_len = btrfs_inode_ref_name_len(path->nodes[0],
							    ref);
			ptr = (unsigned long)(ref + 1) + name_len;
			nlink++;
		}

		if (key.offset == 0)
			break;
		if (path->slots[0] > 0) {
			path->slots[0]--;
			goto process_slot;
		}
		key.offset--;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);

	return nlink;
}

/*
 * There are a few corners where the link count of the file can't
 * be properly maintained during replay.  So, instead of adding
 * lots of complexity to the log code, we just scan the backrefs
 * for any file that has been through replay.
 *
 * The scan will update the link count on the inode to reflect the
 * number of back refs found.  If it goes down to zero, the iput
 * will free the inode.
 */
static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct inode *inode)
{
	struct btrfs_path *path;
	int ret;
	u64 nlink = 0;
	u64 ino = btrfs_ino(BTRFS_I(inode));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = count_inode_refs(root, BTRFS_I(inode), path);
	if (ret < 0)
		goto out;

	nlink = ret;

	ret = count_inode_extrefs(root, BTRFS_I(inode), path);
	if (ret < 0)
		goto out;

	nlink += ret;

	ret = 0;

	if (nlink != inode->i_nlink) {
		set_nlink(inode, nlink);
		btrfs_update_inode(trans, root, BTRFS_I(inode));
	}
	BTRFS_I(inode)->index_cnt = (u64)-1;

	if (inode->i_nlink == 0) {
		if (S_ISDIR(inode->i_mode)) {
			ret = replay_dir_deletes(trans, root, NULL, path,
						 ino, 1);
			if (ret)
				goto out;
		}
		ret = btrfs_insert_orphan_item(trans, root, ino);
		if (ret == -EEXIST)
			ret = 0;
	}

out:
	btrfs_free_path(path);
	return ret;
}

static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    struct btrfs_path *path)
{
	int ret;
	struct btrfs_key key;
	struct inode *inode;

	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = (u64)-1;
	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			break;

		if (ret == 1) {
			ret = 0;
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}

		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID ||
		    key.type != BTRFS_ORPHAN_ITEM_KEY)
			break;

		ret = btrfs_del_item(trans, root, path);
		if (ret)
			break;

		btrfs_release_path(path);
		inode = read_one_inode(root, key.offset);
		if (!inode) {
			ret = -EIO;
			break;
		}

		ret = fixup_inode_link_count(trans, root, inode);
		iput(inode);
		if (ret)
			break;

		/*
		 * fixup on a directory may create new entries,
		 * make sure we always look for the highest possible
		 * offset
		 */
		key.offset = (u64)-1;
	}
	btrfs_release_path(path);
	return ret;
}

/*
 * record a given inode in the fixup dir so we can check its link
 * count when replay is done.  The link count is incremented here
 * so the inode won't go away until we check it
 */
static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      u64 objectid)
{
	struct btrfs_key key;
	int ret = 0;
	struct inode *inode;

	inode = read_one_inode(root, objectid);
	if (!inode)
		return -EIO;

	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = objectid;

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);

	btrfs_release_path(path);
	if (ret == 0) {
		if (!inode->i_nlink)
			set_nlink(inode, 1);
		else
			inc_nlink(inode);
		ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
	} else if (ret == -EEXIST) {
		ret = 0;
	} else {
		BUG(); /* Logic Error */
	}
	iput(inode);

	return ret;
}

/*
 * when replaying the log for a directory, we only insert names
 * for inodes that actually exist.  This means an fsync on a directory
 * does not implicitly fsync all the new files in it
 */
static noinline int insert_one_name(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    u64 dirid, u64 index,
				    char *name, int name_len,
				    struct btrfs_key *location)
{
	struct inode *inode;
	struct inode *dir;
	int ret;

	inode = read_one_inode(root, location->objectid);
	if (!inode)
		return -ENOENT;

	dir = read_one_inode(root, dirid);
	if (!dir) {
		iput(inode);
		return -EIO;
	}

	ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name,
			name_len, 1, index);

	/* FIXME, put inode into FIXUP list */

	iput(inode);
	iput(dir);
	return ret;
}

/*
 * take a single entry in a log directory item and replay it into
 * the subvolume.
 *
 * if a conflicting item exists in the subdirectory already,
 * the inode it points to is unlinked and put into the link count
 * fix up tree.
 *
 * If a name from the log points to a file or directory that does
 * not exist in the FS, it is skipped.  fsyncs on directories
 * do not force down inodes inside that directory, just changes to the
 * names or unlinks in a directory.
 *
 * Returns < 0 on error, 0 if the name wasn't replayed (dentry points to a
 * non-existing inode) and 1 if the name was replayed.
 */
static noinline int replay_one_name(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct extent_buffer *eb,
				    struct btrfs_dir_item *di,
				    struct btrfs_key *key)
{
	char *name;
	int name_len;
	struct btrfs_dir_item *dst_di;
	struct btrfs_key found_key;
	struct btrfs_key log_key;
	struct inode *dir;
	u8 log_type;
	int exists;
	int ret = 0;
	bool update_size = (key->type == BTRFS_DIR_INDEX_KEY);
	bool name_added = false;

	dir = read_one_inode(root, key->objectid);
	if (!dir)
		return -EIO;

	name_len = btrfs_dir_name_len(eb, di);
	name = kmalloc(name_len, GFP_NOFS);
	if (!name) {
		ret = -ENOMEM;
		goto out;
	}

	log_type = btrfs_dir_type(eb, di);
	read_extent_buffer(eb, name, (unsigned long)(di + 1),
		   name_len);

	btrfs_dir_item_key_to_cpu(eb, di, &log_key);
	exists = btrfs_lookup_inode(trans, root, path, &log_key, 0);
	if (exists == 0)
		exists = 1;
	else
		exists = 0;
	btrfs_release_path(path);

	if (key->type == BTRFS_DIR_ITEM_KEY) {
		dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
				       name, name_len, 1);
	} else if (key->type == BTRFS_DIR_INDEX_KEY) {
		dst_di = btrfs_lookup_dir_index_item(trans, root, path,
						     key->objectid,
						     key->offset, name,
						     name_len, 1);
	} else {
		/* Corruption */
		ret = -EINVAL;
		goto out;
	}
	if (IS_ERR_OR_NULL(dst_di)) {
		/* we need a sequence number to insert, so we only
		 * do inserts for the BTRFS_DIR_INDEX_KEY types
		 */
		if (key->type != BTRFS_DIR_INDEX_KEY)
			goto out;
		goto insert;
	}

	btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key);
	/* the existing item matches the logged item */
	if (found_key.objectid == log_key.objectid &&
	    found_key.type == log_key.type &&
	    found_key.offset == log_key.offset &&
	    btrfs_dir_type(path->nodes[0], dst_di) == log_type) {
		update_size = false;
		goto out;
	}

	/*
	 * don't drop the conflicting directory entry if the inode
	 * for the new entry doesn't exist
	 */
	if (!exists)
		goto out;

	ret = drop_one_dir_item(trans, root, path, BTRFS_I(dir), dst_di);
	if (ret)
		goto out;

	if (key->type == BTRFS_DIR_INDEX_KEY)
		goto insert;
out:
	btrfs_release_path(path);
	if (!ret && update_size) {
		btrfs_i_size_write(BTRFS_I(dir), dir->i_size + name_len * 2);
		ret = btrfs_update_inode(trans, root, BTRFS_I(dir));
	}
	kfree(name);
	iput(dir);
	if (!ret && name_added)
		ret = 1;
	return ret;

insert:
	/*
	 * Check if the inode reference exists in the log for the given name,
	 * inode and parent inode
	 */
	found_key.objectid = log_key.objectid;
	found_key.type = BTRFS_INODE_REF_KEY;
	found_key.offset = key->objectid;
	ret = backref_in_log(root->log_root, &found_key, 0, name, name_len);
	if (ret < 0) {
		goto out;
	} else if (ret) {
		/* The dentry will be added later. */
		ret = 0;
		update_size = false;
		goto out;
	}

	found_key.objectid = log_key.objectid;
	found_key.type = BTRFS_INODE_EXTREF_KEY;
	found_key.offset = key->objectid;
	ret = backref_in_log(root->log_root, &found_key, key->objectid, name,
			     name_len);
	if (ret < 0) {
		goto out;
	} else if (ret) {
		/* The dentry will be added later. */
		ret = 0;
		update_size = false;
		goto out;
	}
	btrfs_release_path(path);
	ret = insert_one_name(trans, root, key->objectid, key->offset,
			      name, name_len, &log_key);
	if (ret && ret != -ENOENT && ret != -EEXIST)
		goto out;
	if (!ret)
		name_added = true;
	update_size = false;
	ret = 0;
	goto out;
}

/*
 * find all the names in a directory item and reconcile them into
 * the subvolume.  Only BTRFS_DIR_ITEM_KEY types will have more than
 * one name in a directory item, but the same code gets used for
 * both directory index types
 */
static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct extent_buffer *eb, int slot,
					struct btrfs_key *key)
{
	int ret = 0;
	u32 item_size = btrfs_item_size_nr(eb, slot);
	struct btrfs_dir_item *di;
	int name_len;
	unsigned long ptr;
	unsigned long ptr_end;
	struct btrfs_path *fixup_path = NULL;

	ptr = btrfs_item_ptr_offset(eb, slot);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		di = (struct btrfs_dir_item *)ptr;
		name_len = btrfs_dir_name_len(eb, di);
		ret = replay_one_name(trans, root, path, eb, di, key);
		if (ret < 0)
			break;
		ptr = (unsigned long)(di + 1);
		ptr += name_len;

		/*
		 * If this entry refers to a non-directory (directories can not
		 * have a link count > 1) and it was added in the transaction
		 * that was not committed, make sure we fixup the link count of
		 * the inode the entry points to.  Otherwise something like
		 * the following would result in a directory pointing to an
		 * inode with a wrong link count that does not account for
		 * this directory entry:
		 *
		 * mkdir testdir
		 * touch testdir/foo
		 * touch testdir/bar
		 * sync
		 *
		 * ln testdir/bar testdir/bar_link
		 * ln testdir/foo testdir/foo_link
		 * xfs_io -c "fsync" testdir/bar
		 *
		 * <power failure>
		 *
		 * mount fs, log replay happens
		 *
		 * File foo would remain with a link count of 1 when it has two
		 * entries pointing to it in the directory testdir. This would
		 * make it impossible to ever delete the parent directory, as
		 * it would result in stale dentries that can never be deleted.
		 */
		if (ret == 1 && btrfs_dir_type(eb, di) != BTRFS_FT_DIR) {
			struct btrfs_key di_key;

			if (!fixup_path) {
				fixup_path = btrfs_alloc_path();
				if (!fixup_path) {
					ret = -ENOMEM;
					break;
				}
			}

			btrfs_dir_item_key_to_cpu(eb, di, &di_key);
			ret = link_to_fixup_dir(trans, root, fixup_path,
						di_key.objectid);
			if (ret)
				break;
			ret = 0;
		}
	}
	btrfs_free_path(fixup_path);
	return ret;
}

/*
 * directory replay has two parts.  There are the standard directory
 * items in the log copied from the subvolume, and range items
 * created in the log while the subvolume was logged.
 *
 * The range items tell us which parts of the key space the log
 * is authoritative for.  During replay, if a key in the subvolume
 * directory is in a logged range item, but not actually in the log
 * that means it was deleted from the directory before the fsync
 * and should be removed.
 */
static noinline int find_dir_range(struct btrfs_root *root,
				   struct btrfs_path *path,
				   u64 dirid, int key_type,
				   u64 *start_ret, u64 *end_ret)
{
	struct btrfs_key key;
	u64 found_end;
	struct btrfs_dir_log_item *item;
	int ret;
	int nritems;

	if (*start_ret == (u64)-1)
		return 1;

	key.objectid = dirid;
	key.type = key_type;
	key.offset = *start_ret;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		if (path->slots[0] == 0)
			goto out;
		path->slots[0]--;
	}

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != key_type || key.objectid != dirid) {
		ret = 1;
		goto next;
	}
	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	found_end = btrfs_dir_log_end(path->nodes[0], item);

	if (*start_ret >= key.offset && *start_ret <= found_end) {
		ret = 0;
		*start_ret = key.offset;
		*end_ret = found_end;
		goto out;
	}
	ret = 1;
next:
	/* check the next slot in the tree to see if it is a valid item */
	nritems = btrfs_header_nritems(path->nodes[0]);
	path->slots[0]++;
	if (path->slots[0] >= nritems) {
		ret = btrfs_next_leaf(root, path);
		if (ret)
			goto out;
	}

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != key_type || key.objectid != dirid) {
		ret = 1;
		goto out;
	}
	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	found_end = btrfs_dir_log_end(path->nodes[0], item);
	*start_ret = key.offset;
	*end_ret = found_end;
	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}

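/*
 * Illustrative sketch (editor's addition): replay_dir_deletes() below
 * consumes the authoritative ranges roughly like:
 *
 *	range_start = 0;
 *	range_end = 0;
 *	while (find_dir_range(log, path, dirid, key_type,
 *			      &range_start, &range_end) == 0) {
 *		... check every subvolume dir key in [range_start, range_end]
 *		    against the log and unlink what is missing ...
 *		range_start = range_end + 1;
 *	}
 */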
/*
 * this looks for a given directory item in the log.  If the directory
 * item is not in the log, the item is removed and the inode it points
 * to is unlinked
 */
static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_root *log,
				      struct btrfs_path *path,
				      struct btrfs_path *log_path,
				      struct inode *dir,
				      struct btrfs_key *dir_key)
{
	int ret;
	struct extent_buffer *eb;
	int slot;
	u32 item_size;
	struct btrfs_dir_item *di;
	struct btrfs_dir_item *log_di;
	int name_len;
	unsigned long ptr;
	unsigned long ptr_end;
	char *name;
	struct inode *inode;
	struct btrfs_key location;

again:
	eb = path->nodes[0];
	slot = path->slots[0];
	item_size = btrfs_item_size_nr(eb, slot);
	ptr = btrfs_item_ptr_offset(eb, slot);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		di = (struct btrfs_dir_item *)ptr;
		name_len = btrfs_dir_name_len(eb, di);
		name = kmalloc(name_len, GFP_NOFS);
		if (!name) {
			ret = -ENOMEM;
			goto out;
		}
		read_extent_buffer(eb, name, (unsigned long)(di + 1),
				   name_len);

		if (log && dir_key->type == BTRFS_DIR_ITEM_KEY) {
			log_di = btrfs_lookup_dir_item(trans, log, log_path,
						       dir_key->objectid,
						       name, name_len, 0);
		} else if (log && dir_key->type == BTRFS_DIR_INDEX_KEY) {
			log_di = btrfs_lookup_dir_index_item(trans, log,
						     log_path,
						     dir_key->objectid,
						     dir_key->offset,
						     name, name_len, 0);
		} else {
			log_di = NULL;
		}
		if (!log_di || log_di == ERR_PTR(-ENOENT)) {
			btrfs_dir_item_key_to_cpu(eb, di, &location);
			btrfs_release_path(path);
			btrfs_release_path(log_path);
			inode = read_one_inode(root, location.objectid);
			if (!inode) {
				kfree(name);
				return -EIO;
			}

			ret = link_to_fixup_dir(trans, root,
						path, location.objectid);
			if (ret) {
				kfree(name);
				iput(inode);
				goto out;
			}

			inc_nlink(inode);
			ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
					BTRFS_I(inode), name, name_len);
			if (!ret)
				ret = btrfs_run_delayed_items(trans);
			kfree(name);
			iput(inode);
			if (ret)
				goto out;

			/* there might still be more names under this key
			 * check and repeat if required
			 */
			ret = btrfs_search_slot(NULL, root, dir_key, path,
						0, 0);
			if (ret == 0)
				goto again;
			ret = 0;
			goto out;
		} else if (IS_ERR(log_di)) {
			kfree(name);
			return PTR_ERR(log_di);
		}
		btrfs_release_path(log_path);
		kfree(name);

		ptr = (unsigned long)(di + 1);
		ptr += name_len;
	}
	ret = 0;
out:
	btrfs_release_path(path);
	btrfs_release_path(log_path);
	return ret;
}

static int replay_xattr_deletes(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct btrfs_root *log,
			      struct btrfs_path *path,
			      const u64 ino)
{
	struct btrfs_key search_key;
	struct btrfs_path *log_path;
	int i;
	int nritems;
	int ret;

	log_path = btrfs_alloc_path();
	if (!log_path)
		return -ENOMEM;

	search_key.objectid = ino;
	search_key.type = BTRFS_XATTR_ITEM_KEY;
	search_key.offset = 0;
again:
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	if (ret < 0)
		goto out;
process_leaf:
	nritems = btrfs_header_nritems(path->nodes[0]);
	for (i = path->slots[0]; i < nritems; i++) {
		struct btrfs_key key;
		struct btrfs_dir_item *di;
		struct btrfs_dir_item *log_di;
		u32 total_size;
		u32 cur;

		btrfs_item_key_to_cpu(path->nodes[0], &key, i);
		if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY) {
			ret = 0;
			goto out;
		}

		di = btrfs_item_ptr(path->nodes[0], i, struct btrfs_dir_item);
		total_size = btrfs_item_size_nr(path->nodes[0], i);
		cur = 0;
		while (cur < total_size) {
			u16 name_len = btrfs_dir_name_len(path->nodes[0], di);
			u16 data_len = btrfs_dir_data_len(path->nodes[0], di);
			u32 this_len = sizeof(*di) + name_len + data_len;
			char *name;

			name = kmalloc(name_len, GFP_NOFS);
			if (!name) {
				ret = -ENOMEM;
				goto out;
			}
			read_extent_buffer(path->nodes[0], name,
					   (unsigned long)(di + 1), name_len);

			log_di = btrfs_lookup_xattr(NULL, log, log_path, ino,
						    name, name_len, 0);
			btrfs_release_path(log_path);
			if (!log_di) {
				/* Doesn't exist in log tree, so delete it. */
				btrfs_release_path(path);
				di = btrfs_lookup_xattr(trans, root, path, ino,
							name, name_len, -1);
				kfree(name);
				if (IS_ERR(di)) {
					ret = PTR_ERR(di);
					goto out;
				}
				ASSERT(di);
				ret = btrfs_delete_one_dir_name(trans, root,
								path, di);
				if (ret)
					goto out;
				btrfs_release_path(path);
				search_key = key;
				goto again;
			}
			kfree(name);
			if (IS_ERR(log_di)) {
				ret = PTR_ERR(log_di);
				goto out;
			}
			cur += this_len;
			di = (struct btrfs_dir_item *)((char *)di + this_len);
		}
	}
	ret = btrfs_next_leaf(root, path);
	if (ret > 0)
		ret = 0;
	else if (ret == 0)
		goto process_leaf;
out:
	btrfs_free_path(log_path);
	btrfs_release_path(path);
	return ret;
}

/*
 * deletion replay happens before we copy any new directory items
 * out of the log or out of backreferences from inodes.  It
 * scans the log to find ranges of keys that log is authoritative for,
 * and then scans the directory to find items in those ranges that are
 * not present in the log.
 *
 * Anything we don't find in the log is unlinked and removed from the
 * directory.
 */
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       u64 dirid, int del_all)
{
	u64 range_start;
	u64 range_end;
	int key_type = BTRFS_DIR_LOG_ITEM_KEY;
	int ret = 0;
	struct btrfs_key dir_key;
	struct btrfs_key found_key;
	struct btrfs_path *log_path;
	struct inode *dir;

	dir_key.objectid = dirid;
	dir_key.type = BTRFS_DIR_ITEM_KEY;
	log_path = btrfs_alloc_path();
	if (!log_path)
		return -ENOMEM;

	dir = read_one_inode(root, dirid);
	/* it isn't an error if the inode isn't there, that can happen
	 * because we replay the deletes before we copy in the inode item
	 * from the log
	 */
	if (!dir) {
		btrfs_free_path(log_path);
		return 0;
	}
again:
	range_start = 0;
	range_end = 0;
	while (1) {
		if (del_all)
			range_end = (u64)-1;
		else {
			ret = find_dir_range(log, path, dirid, key_type,
					     &range_start, &range_end);
			if (ret != 0)
				break;
		}

		dir_key.offset = range_start;
		while (1) {
			int nritems;
			ret = btrfs_search_slot(NULL, root, &dir_key, path,
						0, 0);
			if (ret < 0)
				goto out;

			nritems = btrfs_header_nritems(path->nodes[0]);
			if (path->slots[0] >= nritems) {
				ret = btrfs_next_leaf(root, path);
				if (ret == 1)
					break;
				else if (ret < 0)
					goto out;
			}
			btrfs_item_key_to_cpu(path->nodes[0], &found_key,
					      path->slots[0]);
			if (found_key.objectid != dirid ||
			    found_key.type != dir_key.type)
				goto next_type;

			if (found_key.offset > range_end)
				break;

			ret = check_item_in_log(trans, root, log, path,
						log_path, dir,
						&found_key);
			if (ret)
				goto out;
			if (found_key.offset == (u64)-1)
				break;
			dir_key.offset = found_key.offset + 1;
		}
		btrfs_release_path(path);
		if (range_end == (u64)-1)
			break;
		range_start = range_end + 1;
	}

next_type:
	ret = 0;
	if (key_type == BTRFS_DIR_LOG_ITEM_KEY) {
		key_type = BTRFS_DIR_LOG_INDEX_KEY;
		dir_key.type = BTRFS_DIR_INDEX_KEY;
		btrfs_release_path(path);
		goto again;
	}
out:
	btrfs_release_path(path);
	btrfs_free_path(log_path);
	iput(dir);
	return ret;
}

/*
 * the process_func used to replay items from the log tree.  This
 * gets called in two different stages.  The first stage just looks
 * for inodes and makes sure they are all copied into the subvolume.
 *
 * The second stage copies all the other item types from the log into
 * the subvolume.  The two stage approach is slower, but gets rid of
 * lots of complexity around inodes referencing other inodes that exist
 * only in the log (references come from either directory items or inode
 * back refs).
 */
static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
			     struct walk_control *wc, u64 gen, int level)
{
	int nritems;
	struct btrfs_path *path;
	struct btrfs_root *root = wc->replay_dest;
	struct btrfs_key key;
	int i;
	int ret;

	ret = btrfs_read_buffer(eb, gen, level, NULL);
	if (ret)
		return ret;

	level = btrfs_header_level(eb);

	if (level != 0)
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	nritems = btrfs_header_nritems(eb);
	for (i = 0; i < nritems; i++) {
		btrfs_item_key_to_cpu(eb, &key, i);

		/* inode keys are done during the first stage */
		if (key.type == BTRFS_INODE_ITEM_KEY &&
		    wc->stage == LOG_WALK_REPLAY_INODES) {
			struct btrfs_inode_item *inode_item;
			u32 mode;

			inode_item = btrfs_item_ptr(eb, i,
					    struct btrfs_inode_item);
			/*
			 * If we have a tmpfile (O_TMPFILE) that got fsync'ed
			 * and never got linked before the fsync, skip it, as
			 * replaying it is pointless since it would be deleted
			 * later. We skip logging tmpfiles, but it's always
			 * possible we are replaying a log created with a kernel
			 * that used to log tmpfiles.
			 */
			if (btrfs_inode_nlink(eb, inode_item) == 0) {
				wc->ignore_cur_inode = true;
				continue;
			} else {
				wc->ignore_cur_inode = false;
			}
			ret = replay_xattr_deletes(wc->trans, root, log,
						   path, key.objectid);
			if (ret)
				break;
			mode = btrfs_inode_mode(eb, inode_item);
			if (S_ISDIR(mode)) {
				ret = replay_dir_deletes(wc->trans,
					 root, log, path, key.objectid, 0);
				if (ret)
					break;
			}
			ret = overwrite_item(wc->trans, root, path,
					     eb, i, &key);
			if (ret)
				break;

			/*
			 * Before replaying extents, truncate the inode to its
			 * size. We need to do it now and not after log replay
			 * because before an fsync we can have prealloc extents
			 * added beyond the inode's i_size. If we did it after,
			 * through orphan cleanup for example, we would drop
			 * those prealloc extents just after replaying them.
			 */
			if (S_ISREG(mode)) {
				struct btrfs_drop_extents_args drop_args = { 0 };
				struct inode *inode;
				u64 from;

				inode = read_one_inode(root, key.objectid);
				if (!inode) {
					ret = -EIO;
					break;
				}
				from = ALIGN(i_size_read(inode),
					     root->fs_info->sectorsize);
				drop_args.start = from;
				drop_args.end = (u64)-1;
				drop_args.drop_cache = true;
				ret = btrfs_drop_extents(wc->trans, root,
							 BTRFS_I(inode),
							 &drop_args);
				if (!ret) {
					inode_sub_bytes(inode,
							drop_args.bytes_found);
					/* Update the inode's nbytes. */
					ret = btrfs_update_inode(wc->trans,
							root, BTRFS_I(inode));
				}
				iput(inode);
				if (ret)
					break;
			}

			ret = link_to_fixup_dir(wc->trans, root,
						path, key.objectid);
			if (ret)
				break;
		}

		if (wc->ignore_cur_inode)
			continue;

		if (key.type == BTRFS_DIR_INDEX_KEY &&
		    wc->stage == LOG_WALK_REPLAY_DIR_INDEX) {
			ret = replay_one_dir_item(wc->trans, root, path,
						  eb, i, &key);
			if (ret)
				break;
		}

		if (wc->stage < LOG_WALK_REPLAY_ALL)
			continue;

		/* these keys are simply copied */
		if (key.type == BTRFS_XATTR_ITEM_KEY) {
			ret = overwrite_item(wc->trans, root, path,
					     eb, i, &key);
			if (ret)
				break;
		} else if (key.type == BTRFS_INODE_REF_KEY ||
			   key.type == BTRFS_INODE_EXTREF_KEY) {
			ret = add_inode_ref(wc->trans, root, log, path,
					    eb, i, &key);
			if (ret && ret != -ENOENT)
				break;
			ret = 0;
		} else if (key.type == BTRFS_EXTENT_DATA_KEY) {
			ret = replay_one_extent(wc->trans, root, path,
						eb, i, &key);
			if (ret)
				break;
		} else if (key.type == BTRFS_DIR_ITEM_KEY) {
			ret = replay_one_dir_item(wc->trans, root, path,
						  eb, i, &key);
			if (ret)
				break;
		}
	}
	btrfs_free_path(path);
	return ret;
}

2662 * Correctly adjust the reserved bytes occupied by a log tree extent buffer
2664 static void unaccount_log_buffer(struct btrfs_fs_info *fs_info, u64 start)
2666 struct btrfs_block_group *cache;
2668 cache = btrfs_lookup_block_group(fs_info, start);
2670 btrfs_err(fs_info, "unable to find block group for %llu", start);
2674 spin_lock(&cache->space_info->lock);
2675 spin_lock(&cache->lock);
2676 cache->reserved -= fs_info->nodesize;
2677 cache->space_info->bytes_reserved -= fs_info->nodesize;
2678 spin_unlock(&cache->lock);
2679 spin_unlock(&cache->space_info->lock);
2681 btrfs_put_block_group(cache);
2684 static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
2685 struct btrfs_root *root,
2686 struct btrfs_path *path, int *level,
2687 struct walk_control *wc)
2689 struct btrfs_fs_info *fs_info = root->fs_info;
2692 struct extent_buffer *next;
2693 struct extent_buffer *cur;
2697 while (*level > 0) {
2698 struct btrfs_key first_key;
2700 cur = path->nodes[*level];
2702 WARN_ON(btrfs_header_level(cur) != *level);
2704 if (path->slots[*level] >=
2705 btrfs_header_nritems(cur))
2708 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
2709 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
2710 btrfs_node_key_to_cpu(cur, &first_key, path->slots[*level]);
2711 blocksize = fs_info->nodesize;
2713 next = btrfs_find_create_tree_block(fs_info, bytenr,
2714 btrfs_header_owner(cur),
2717 return PTR_ERR(next);
2720 ret = wc->process_func(root, next, wc, ptr_gen,
2723 free_extent_buffer(next);
2727 path->slots[*level]++;
2729 ret = btrfs_read_buffer(next, ptr_gen,
2730 *level - 1, &first_key);
2732 free_extent_buffer(next);
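/* This log block is being freed: under the tree lock, clear its dirty
* state and wait for any writeback in flight, then pin its extent so
* the space is not reused before the transaction commits.
*/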
2737 btrfs_tree_lock(next);
2738 btrfs_clean_tree_block(next);
2739 btrfs_wait_tree_block_writeback(next);
2740 btrfs_tree_unlock(next);
2741 ret = btrfs_pin_reserved_extent(trans,
2744 free_extent_buffer(next);
2748 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
2749 clear_extent_buffer_dirty(next);
2750 unaccount_log_buffer(fs_info, bytenr);
2753 free_extent_buffer(next);
2756 ret = btrfs_read_buffer(next, ptr_gen, *level - 1, &first_key);
2758 free_extent_buffer(next);
2762 if (path->nodes[*level-1])
2763 free_extent_buffer(path->nodes[*level-1]);
2764 path->nodes[*level-1] = next;
2765 *level = btrfs_header_level(next);
2766 path->slots[*level] = 0;
2769 path->slots[*level] = btrfs_header_nritems(path->nodes[*level]);
2775 static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
2776 struct btrfs_root *root,
2777 struct btrfs_path *path, int *level,
2778 struct walk_control *wc)
2780 struct btrfs_fs_info *fs_info = root->fs_info;
2785 for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
2786 slot = path->slots[i];
2787 if (slot + 1 < btrfs_header_nritems(path->nodes[i])) {
2790 WARN_ON(*level == 0);
2793 ret = wc->process_func(root, path->nodes[*level], wc,
2794 btrfs_header_generation(path->nodes[*level]),
2800 struct extent_buffer *next;
2802 next = path->nodes[*level];
2805 btrfs_tree_lock(next);
2806 btrfs_clean_tree_block(next);
2807 btrfs_wait_tree_block_writeback(next);
2808 btrfs_tree_unlock(next);
2809 ret = btrfs_pin_reserved_extent(trans,
2810 path->nodes[*level]->start,
2811 path->nodes[*level]->len);
2815 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
2816 clear_extent_buffer_dirty(next);
2818 unaccount_log_buffer(fs_info,
2819 path->nodes[*level]->start);
2822 free_extent_buffer(path->nodes[*level]);
2823 path->nodes[*level] = NULL;
2831 * drop the reference count on the tree rooted at 'log'. This traverses
2832 * the tree freeing any blocks that have a ref count of zero after being decremented.
2835 static int walk_log_tree(struct btrfs_trans_handle *trans,
2836 struct btrfs_root *log, struct walk_control *wc)
2838 struct btrfs_fs_info *fs_info = log->fs_info;
2842 struct btrfs_path *path;
2845 path = btrfs_alloc_path();
2849 level = btrfs_header_level(log->node);
2851 path->nodes[level] = log->node;
2852 atomic_inc(&log->node->refs);
2853 path->slots[level] = 0;
2856 wret = walk_down_log_tree(trans, log, path, &level, wc);
2864 wret = walk_up_log_tree(trans, log, path, &level, wc);
2873 /* was the root node processed? if not, catch it here */
2874 if (path->nodes[orig_level]) {
2875 ret = wc->process_func(log, path->nodes[orig_level], wc,
2876 btrfs_header_generation(path->nodes[orig_level]),
2881 struct extent_buffer *next;
2883 next = path->nodes[orig_level];
2886 btrfs_tree_lock(next);
2887 btrfs_clean_tree_block(next);
2888 btrfs_wait_tree_block_writeback(next);
2889 btrfs_tree_unlock(next);
2890 ret = btrfs_pin_reserved_extent(trans,
2891 next->start, next->len);
2895 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
2896 clear_extent_buffer_dirty(next);
2897 unaccount_log_buffer(fs_info, next->start);
2903 btrfs_free_path(path);
2908 * helper function to update the item for a given subvolume's log root
2909 * in the tree of log roots
2911 static int update_log_root(struct btrfs_trans_handle *trans,
2912 struct btrfs_root *log,
2913 struct btrfs_root_item *root_item)
2915 struct btrfs_fs_info *fs_info = log->fs_info;
2918 if (log->log_transid == 1) {
2919 /* insert root item on the first sync */
2920 ret = btrfs_insert_root(trans, fs_info->log_root_tree,
2921 &log->root_key, root_item);
2923 ret = btrfs_update_root(trans, fs_info->log_root_tree,
2924 &log->root_key, root_item);
2929 static void wait_log_commit(struct btrfs_root *root, int transid)
2932 int index = transid % 2;
2935 * we only allow two pending log transactions at a time,
2936 * so we know that if ours is more than 2 older than the
2937 * current transaction, we're done
2940 prepare_to_wait(&root->log_commit_wait[index],
2941 &wait, TASK_UNINTERRUPTIBLE);
2943 if (!(root->log_transid_committed < transid &&
2944 atomic_read(&root->log_commit[index])))
2947 mutex_unlock(&root->log_mutex);
2949 mutex_lock(&root->log_mutex);
2951 finish_wait(&root->log_commit_wait[index], &wait);
2954 static void wait_for_writer(struct btrfs_root *root)
2959 prepare_to_wait(&root->log_writer_wait, &wait,
2960 TASK_UNINTERRUPTIBLE);
2961 if (!atomic_read(&root->log_writers))
2964 mutex_unlock(&root->log_mutex);
2966 mutex_lock(&root->log_mutex);
2968 finish_wait(&root->log_writer_wait, &wait);
2971 static inline void btrfs_remove_log_ctx(struct btrfs_root *root,
2972 struct btrfs_log_ctx *ctx)
2977 mutex_lock(&root->log_mutex);
2978 list_del_init(&ctx->list);
2979 mutex_unlock(&root->log_mutex);
2983 * Invoked in log mutex context, or from a context where it is certain
2984 * that no other task can access the list.
2986 static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root,
2987 int index, int error)
2989 struct btrfs_log_ctx *ctx;
2990 struct btrfs_log_ctx *safe;
2992 list_for_each_entry_safe(ctx, safe, &root->log_ctxs[index], list) {
2993 list_del_init(&ctx->list);
2994 ctx->log_ret = error;
2997 INIT_LIST_HEAD(&root->log_ctxs[index]);
3001 * btrfs_sync_log sends a given tree log down to the disk and
3002 * updates the super blocks to record it. When this call is done,
3003 * you know that any inodes previously logged are safely on disk only if it returns 0.
3006 * Any other return value means you need to call btrfs_commit_transaction.
3007 * Some of the edge cases for fsyncing directories that have had unlinks
3008 * or renames done in the past mean that sometimes the only safe
3009 * fsync is to commit the whole FS. When btrfs_sync_log returns -EAGAIN,
3010 * that has happened.
3012 int btrfs_sync_log(struct btrfs_trans_handle *trans,
3013 struct btrfs_root *root, struct btrfs_log_ctx *ctx)
3019 struct btrfs_fs_info *fs_info = root->fs_info;
3020 struct btrfs_root *log = root->log_root;
3021 struct btrfs_root *log_root_tree = fs_info->log_root_tree;
3022 struct btrfs_root_item new_root_item;
3023 int log_transid = 0;
3024 struct btrfs_log_ctx root_log_ctx;
3025 struct blk_plug plug;
3027 mutex_lock(&root->log_mutex);
3028 log_transid = ctx->log_transid;
3029 if (root->log_transid_committed >= log_transid) {
3030 mutex_unlock(&root->log_mutex);
3031 return ctx->log_ret;
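/* At most two log transactions can be in flight at a time; they
* alternate between two commit slots indexed by transid % 2.
*/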
3034 index1 = log_transid % 2;
3035 if (atomic_read(&root->log_commit[index1])) {
3036 wait_log_commit(root, log_transid);
3037 mutex_unlock(&root->log_mutex);
3038 return ctx->log_ret;
3040 ASSERT(log_transid == root->log_transid);
3041 atomic_set(&root->log_commit[index1], 1);
3043 /* wait for previous tree log sync to complete */
3044 if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
3045 wait_log_commit(root, log_transid - 1);
3048 int batch = atomic_read(&root->log_batch);
3049 /* when we're on an ssd, just kick the log commit out */
3050 if (!btrfs_test_opt(fs_info, SSD) &&
3051 test_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state)) {
3052 mutex_unlock(&root->log_mutex);
3053 schedule_timeout_uninterruptible(1);
3054 mutex_lock(&root->log_mutex);
3056 wait_for_writer(root);
3057 if (batch == atomic_read(&root->log_batch))
3061 /* bail out if we need to do a full commit */
3062 if (btrfs_need_log_full_commit(trans)) {
3064 mutex_unlock(&root->log_mutex);
3068 if (log_transid % 2 == 0)
3069 mark = EXTENT_DIRTY;
else
mark = EXTENT_NEW;
3073 /* we start IO on all the marked extents here, but we don't actually
3074 * wait for them until later.
3076 blk_start_plug(&plug);
3077 ret = btrfs_write_marked_extents(fs_info, &log->dirty_log_pages, mark);
3079 blk_finish_plug(&plug);
3080 btrfs_abort_transaction(trans, ret);
3081 btrfs_set_log_full_commit(trans);
3082 mutex_unlock(&root->log_mutex);
3087 * We _must_ update under the root->log_mutex in order to make sure we
3088 * have a consistent view of the log root we are trying to commit at the moment.
3091 * We _must_ copy this into a local copy, because we are not holding the
3092 * log_root_tree->log_mutex yet. This is important because when we
3093 * commit the log_root_tree we must have a consistent view of the
3094 * log_root_tree when we update the super block to point at the
3095 * log_root_tree bytenr. If we update the log_root_tree here we'll race
3096 * with the commit and possibly point at the new block which we may not have written out.
3099 btrfs_set_root_node(&log->root_item, log->node);
3100 memcpy(&new_root_item, &log->root_item, sizeof(new_root_item));
3102 root->log_transid++;
3103 log->log_transid = root->log_transid;
3104 root->log_start_pid = 0;
3106 * IO has been started, blocks of the log tree have WRITTEN flag set
3107 * in their headers. New modifications of the log will be written to
3108 * new positions, so it's safe to allow log writers to go in.
3110 mutex_unlock(&root->log_mutex);
3112 btrfs_init_log_ctx(&root_log_ctx, NULL);
3114 mutex_lock(&log_root_tree->log_mutex);
3116 index2 = log_root_tree->log_transid % 2;
3117 list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]);
3118 root_log_ctx.log_transid = log_root_tree->log_transid;
3121 * Now we are safe to update the log_root_tree because we're under the
3122 * log_mutex, and we're a current writer so we're holding the commit
3123 * open until we drop the log_mutex.
3125 ret = update_log_root(trans, log, &new_root_item);
3127 if (!list_empty(&root_log_ctx.list))
3128 list_del_init(&root_log_ctx.list);
3130 blk_finish_plug(&plug);
3131 btrfs_set_log_full_commit(trans);
3133 if (ret != -ENOSPC) {
3134 btrfs_abort_transaction(trans, ret);
3135 mutex_unlock(&log_root_tree->log_mutex);
3138 btrfs_wait_tree_log_extents(log, mark);
3139 mutex_unlock(&log_root_tree->log_mutex);
3144 if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) {
3145 blk_finish_plug(&plug);
3146 list_del_init(&root_log_ctx.list);
3147 mutex_unlock(&log_root_tree->log_mutex);
3148 ret = root_log_ctx.log_ret;
3152 index2 = root_log_ctx.log_transid % 2;
3153 if (atomic_read(&log_root_tree->log_commit[index2])) {
3154 blk_finish_plug(&plug);
3155 ret = btrfs_wait_tree_log_extents(log, mark);
3156 wait_log_commit(log_root_tree,
3157 root_log_ctx.log_transid);
3158 mutex_unlock(&log_root_tree->log_mutex);
3160 ret = root_log_ctx.log_ret;
3163 ASSERT(root_log_ctx.log_transid == log_root_tree->log_transid);
3164 atomic_set(&log_root_tree->log_commit[index2], 1);
3166 if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) {
3167 wait_log_commit(log_root_tree,
3168 root_log_ctx.log_transid - 1);
3172 * now that we've moved on to the tree of log tree roots,
3173 * check the full commit flag again
3175 if (btrfs_need_log_full_commit(trans)) {
3176 blk_finish_plug(&plug);
3177 btrfs_wait_tree_log_extents(log, mark);
3178 mutex_unlock(&log_root_tree->log_mutex);
3180 goto out_wake_log_root;
3183 ret = btrfs_write_marked_extents(fs_info,
3184 &log_root_tree->dirty_log_pages,
3185 EXTENT_DIRTY | EXTENT_NEW);
3186 blk_finish_plug(&plug);
3188 btrfs_set_log_full_commit(trans);
3189 btrfs_abort_transaction(trans, ret);
3190 mutex_unlock(&log_root_tree->log_mutex);
3191 goto out_wake_log_root;
3193 ret = btrfs_wait_tree_log_extents(log, mark);
3195 ret = btrfs_wait_tree_log_extents(log_root_tree,
3196 EXTENT_NEW | EXTENT_DIRTY);
3198 btrfs_set_log_full_commit(trans);
3199 mutex_unlock(&log_root_tree->log_mutex);
3200 goto out_wake_log_root;
3203 btrfs_set_super_log_root(fs_info->super_for_commit,
3204 log_root_tree->node->start);
3205 btrfs_set_super_log_root_level(fs_info->super_for_commit,
3206 btrfs_header_level(log_root_tree->node));
3208 log_root_tree->log_transid++;
3209 mutex_unlock(&log_root_tree->log_mutex);
3212 * Nobody else is going to jump in and write the ctree
3213 * super here because the log_commit atomic below is protecting
3214 * us. We must be called with a transaction handle pinning
3215 * the running transaction open, so a full commit can't hop
3216 * in and cause problems either.
3218 ret = write_all_supers(fs_info, 1);
3220 btrfs_set_log_full_commit(trans);
3221 btrfs_abort_transaction(trans, ret);
3222 goto out_wake_log_root;
3225 mutex_lock(&root->log_mutex);
3226 if (root->last_log_commit < log_transid)
3227 root->last_log_commit = log_transid;
3228 mutex_unlock(&root->log_mutex);
3231 mutex_lock(&log_root_tree->log_mutex);
3232 btrfs_remove_all_log_ctxs(log_root_tree, index2, ret);
3234 log_root_tree->log_transid_committed++;
3235 atomic_set(&log_root_tree->log_commit[index2], 0);
3236 mutex_unlock(&log_root_tree->log_mutex);
3239 * The barrier before waitqueue_active (in cond_wake_up) is needed so
3240 * all the updates above are seen by the woken threads. It might not be
3241 * necessary, but proving that seems to be hard.
3243 cond_wake_up(&log_root_tree->log_commit_wait[index2]);
3245 mutex_lock(&root->log_mutex);
3246 btrfs_remove_all_log_ctxs(root, index1, ret);
3247 root->log_transid_committed++;
3248 atomic_set(&root->log_commit[index1], 0);
3249 mutex_unlock(&root->log_mutex);
3252 * The barrier before waitqueue_active (in cond_wake_up) is needed so
3253 * all the updates above are seen by the woken threads. It might not be
3254 * necessary, but proving that seems to be hard.
3256 cond_wake_up(&root->log_commit_wait[index1]);
3260 static void free_log_tree(struct btrfs_trans_handle *trans,
3261 struct btrfs_root *log)
3264 struct walk_control wc = {
3266 .process_func = process_one_buffer
3269 ret = walk_log_tree(trans, log, &wc);
3272 btrfs_abort_transaction(trans, ret);
3274 btrfs_handle_fs_error(log->fs_info, ret, NULL);
3277 clear_extent_bits(&log->dirty_log_pages, 0, (u64)-1,
3278 EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT);
3279 extent_io_tree_release(&log->log_csum_range);
3280 btrfs_put_root(log);
3284 * free all the extents used by the tree log. This should be called
3285 * at commit time of the full transaction
3287 int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
3289 if (root->log_root) {
3290 free_log_tree(trans, root->log_root);
3291 root->log_root = NULL;
3292 clear_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state);
3297 int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
3298 struct btrfs_fs_info *fs_info)
3300 if (fs_info->log_root_tree) {
3301 free_log_tree(trans, fs_info->log_root_tree);
3302 fs_info->log_root_tree = NULL;
3308 * Check if an inode was logged in the current transaction. We can't always rely
3309 * on an inode's logged_trans value, because it's an in-memory only field and
3310 * therefore not persisted. This means that its value is lost if the inode gets
3311 * evicted and loaded again from disk (in which case it has a value of 0, and
3312 * certainly it is smaller than any possible transaction ID). When that happens,
3313 * the full_sync flag is set in the inode's runtime flags, so in that case we
3314 * assume eviction happened and ignore the logged_trans value, assuming the
3315 * worst case, that the inode was logged before in the current transaction.
3317 static bool inode_logged(struct btrfs_trans_handle *trans,
3318 struct btrfs_inode *inode)
3320 if (inode->logged_trans == trans->transid)
3323 if (inode->last_trans == trans->transid &&
3324 test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags) &&
3325 !test_bit(BTRFS_FS_LOG_RECOVERING, &trans->fs_info->flags))
3332 * If both a file and directory are logged, and unlinks or renames are
3333 * mixed in, we have a few interesting corners:
3335 * create file X in dir Y
3336 * link file X to X.link in dir Y
* fsync dir Y
3338 * unlink file X but leave X.link
* fsync dir Y
3341 * After a crash we would expect only X.link to exist. But file X
3342 * didn't get fsync'd again so the log has back refs for X and X.link.
3344 * We solve this by removing directory entries and inode backrefs from the
3345 * log when a file that was logged in the current transaction is
3346 * unlinked. Any later fsync will include the updated log entries, and
3347 * we'll be able to reconstruct the proper directory items from backrefs.
3349 * This optimization allows us to avoid relogging the entire inode
3350 * or the entire directory.
3352 int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
3353 struct btrfs_root *root,
3354 const char *name, int name_len,
3355 struct btrfs_inode *dir, u64 index)
3357 struct btrfs_root *log;
3358 struct btrfs_dir_item *di;
3359 struct btrfs_path *path;
3363 u64 dir_ino = btrfs_ino(dir);
3365 if (!inode_logged(trans, dir))
3368 ret = join_running_log_trans(root);
3372 mutex_lock(&dir->log_mutex);
3374 log = root->log_root;
3375 path = btrfs_alloc_path();
3381 di = btrfs_lookup_dir_item(trans, log, path, dir_ino,
3382 name, name_len, -1);
3388 ret = btrfs_delete_one_dir_name(trans, log, path, di);
3389 bytes_del += name_len;
3395 btrfs_release_path(path);
3396 di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
3397 index, name, name_len, -1);
3403 ret = btrfs_delete_one_dir_name(trans, log, path, di);
3404 bytes_del += name_len;
3411 /* update the directory size in the log to reflect the names
* we have removed
*/
if (bytes_del) {
3415 struct btrfs_key key;
3417 key.objectid = dir_ino;
3419 key.type = BTRFS_INODE_ITEM_KEY;
3420 btrfs_release_path(path);
3422 ret = btrfs_search_slot(trans, log, &key, path, 0, 1);
3428 struct btrfs_inode_item *item;
3431 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3432 struct btrfs_inode_item);
3433 i_size = btrfs_inode_size(path->nodes[0], item);
3434 if (i_size > bytes_del)
3435 i_size -= bytes_del;
3438 btrfs_set_inode_size(path->nodes[0], item, i_size);
3439 btrfs_mark_buffer_dirty(path->nodes[0]);
3442 btrfs_release_path(path);
3445 btrfs_free_path(path);
3447 mutex_unlock(&dir->log_mutex);
3448 if (err == -ENOSPC) {
3449 btrfs_set_log_full_commit(trans);
3451 } else if (err < 0 && err != -ENOENT) {
3452 /* ENOENT can be returned if the entry hasn't been fsynced yet */
3453 btrfs_abort_transaction(trans, err);
3456 btrfs_end_log_trans(root);
3461 /* see comments for btrfs_del_dir_entries_in_log */
3462 int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
3463 struct btrfs_root *root,
3464 const char *name, int name_len,
3465 struct btrfs_inode *inode, u64 dirid)
3467 struct btrfs_root *log;
3471 if (!inode_logged(trans, inode))
3474 ret = join_running_log_trans(root);
3477 log = root->log_root;
3478 mutex_lock(&inode->log_mutex);
3480 ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode),
3482 mutex_unlock(&inode->log_mutex);
3483 if (ret == -ENOSPC) {
3484 btrfs_set_log_full_commit(trans);
3486 } else if (ret < 0 && ret != -ENOENT)
3487 btrfs_abort_transaction(trans, ret);
3488 btrfs_end_log_trans(root);
3494 * creates a range item in the log for 'dirid'. first_offset and
3495 * last_offset tell us which parts of the key space the log should
3496 * be considered authoritative for.
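*
* For example, a BTRFS_DIR_LOG_INDEX_KEY item whose key offset is 2 and
* whose dir_log_end is 10 states that the log contains every index key
* between 2 and 10 for this directory.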
3498 static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
3499 struct btrfs_root *log,
3500 struct btrfs_path *path,
3501 int key_type, u64 dirid,
3502 u64 first_offset, u64 last_offset)
3505 struct btrfs_key key;
3506 struct btrfs_dir_log_item *item;
3508 key.objectid = dirid;
3509 key.offset = first_offset;
3510 if (key_type == BTRFS_DIR_ITEM_KEY)
3511 key.type = BTRFS_DIR_LOG_ITEM_KEY;
3513 key.type = BTRFS_DIR_LOG_INDEX_KEY;
3514 ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item));
3518 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3519 struct btrfs_dir_log_item);
3520 btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
3521 btrfs_mark_buffer_dirty(path->nodes[0]);
3522 btrfs_release_path(path);
3527 * log all the items included in the current transaction for a given
3528 * directory. This also creates the range items in the log tree required
3529 * to replay anything deleted before the fsync
3531 static noinline int log_dir_items(struct btrfs_trans_handle *trans,
3532 struct btrfs_root *root, struct btrfs_inode *inode,
3533 struct btrfs_path *path,
3534 struct btrfs_path *dst_path, int key_type,
3535 struct btrfs_log_ctx *ctx,
3536 u64 min_offset, u64 *last_offset_ret)
3538 struct btrfs_key min_key;
3539 struct btrfs_root *log = root->log_root;
3540 struct extent_buffer *src;
3545 u64 first_offset = min_offset;
3546 u64 last_offset = (u64)-1;
3547 u64 ino = btrfs_ino(inode);
3549 log = root->log_root;
3551 min_key.objectid = ino;
3552 min_key.type = key_type;
3553 min_key.offset = min_offset;
3555 ret = btrfs_search_forward(root, &min_key, path, trans->transid);
3558 * we didn't find anything from this transaction, see if there
3559 * is anything at all
3561 if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) {
3562 min_key.objectid = ino;
3563 min_key.type = key_type;
3564 min_key.offset = (u64)-1;
3565 btrfs_release_path(path);
3566 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3568 btrfs_release_path(path);
3571 ret = btrfs_previous_item(root, path, ino, key_type);
3573 /* if ret == 0 there are items for this type,
3574 * create a range to tell us the last key of this type.
3575 * otherwise, there are no items in this directory after
3576 * *min_offset, and we create a range to indicate that.
3579 struct btrfs_key tmp;
3580 btrfs_item_key_to_cpu(path->nodes[0], &tmp,
3582 if (key_type == tmp.type)
3583 first_offset = max(min_offset, tmp.offset) + 1;
3588 /* go backward to find any previous key */
3589 ret = btrfs_previous_item(root, path, ino, key_type);
3591 struct btrfs_key tmp;
3592 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
3593 if (key_type == tmp.type) {
3594 first_offset = tmp.offset;
3595 ret = overwrite_item(trans, log, dst_path,
3596 path->nodes[0], path->slots[0],
3604 btrfs_release_path(path);
3607 * Find the first key from this transaction again. See the note for
3608 * log_new_dir_dentries, if we're logging a directory recursively we
3609 * won't be holding its i_mutex, which means we can modify the directory
3610 * while we're logging it. If we remove an entry between our first
3611 * search and this search we'll not find the key again and can just bail out.
3615 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3620 * we have a block from this transaction, log every item in it
3621 * from our directory
3624 struct btrfs_key tmp;
3625 src = path->nodes[0];
3626 nritems = btrfs_header_nritems(src);
3627 for (i = path->slots[0]; i < nritems; i++) {
3628 struct btrfs_dir_item *di;
3630 btrfs_item_key_to_cpu(src, &min_key, i);
3632 if (min_key.objectid != ino || min_key.type != key_type)
3635 if (need_resched()) {
3636 btrfs_release_path(path);
3641 ret = overwrite_item(trans, log, dst_path, src, i,
3649 * We must make sure that when we log a directory entry,
3650 * the corresponding inode, after log replay, has a
3651 * matching link count. For example:
*
* touch foo
* mkdir mydir
* sync
* ln foo mydir/bar
3657 * xfs_io -c "fsync" mydir
* <crash>
3659 * <mount fs and log replay>
3661 * Would result in a fsync log that when replayed, our
3662 * file inode would have a link count of 1, but we get
3663 * two directory entries pointing to the same inode.
3664 * After removing one of the names, it would not be
3665 * possible to remove the other name, which resulted
3666 * always in stale file handle errors, and would not
3667 * be possible to rmdir the parent directory, since
3668 * its i_size could never decrement to the value
3669 * BTRFS_EMPTY_DIR_SIZE, resulting in -ENOTEMPTY errors.
3671 di = btrfs_item_ptr(src, i, struct btrfs_dir_item);
3672 btrfs_dir_item_key_to_cpu(src, di, &tmp);
3674 (btrfs_dir_transid(src, di) == trans->transid ||
3675 btrfs_dir_type(src, di) == BTRFS_FT_DIR) &&
3676 tmp.type != BTRFS_ROOT_ITEM_KEY)
3677 ctx->log_new_dentries = true;
3679 path->slots[0] = nritems;
3682 * look ahead to the next item and see if it is also
3683 * from this directory and from this transaction
3685 ret = btrfs_next_leaf(root, path);
3688 last_offset = (u64)-1;
3693 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
3694 if (tmp.objectid != ino || tmp.type != key_type) {
3695 last_offset = (u64)-1;
3698 if (btrfs_header_generation(path->nodes[0]) != trans->transid) {
3699 ret = overwrite_item(trans, log, dst_path,
3700 path->nodes[0], path->slots[0],
3705 last_offset = tmp.offset;
3710 btrfs_release_path(path);
3711 btrfs_release_path(dst_path);
3714 *last_offset_ret = last_offset;
3716 * insert the log range keys to indicate where the log is valid
3719 ret = insert_dir_log_key(trans, log, path, key_type,
3720 ino, first_offset, last_offset);
3728 * logging directories is very similar to logging inodes. We find all the items
3729 * from the current transaction and write them to the log.
3731 * The recovery code scans the directory in the subvolume, and if it finds a
3732 * key in the range logged that is not present in the log tree, then it means
3733 * that dir entry was unlinked during the transaction.
3735 * In order for that scan to work, we must include one key smaller than
3736 * the smallest logged by this transaction and one key larger than the largest
3737 * key logged by this transaction.
3739 static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
3740 struct btrfs_root *root, struct btrfs_inode *inode,
3741 struct btrfs_path *path,
3742 struct btrfs_path *dst_path,
3743 struct btrfs_log_ctx *ctx)
3748 int key_type = BTRFS_DIR_ITEM_KEY;
3754 ret = log_dir_items(trans, root, inode, path, dst_path, key_type,
3755 ctx, min_key, &max_key);
3758 if (max_key == (u64)-1)
3760 min_key = max_key + 1;
3763 if (key_type == BTRFS_DIR_ITEM_KEY) {
3764 key_type = BTRFS_DIR_INDEX_KEY;
3771 * a helper function to drop items from the log before we relog an
3772 * inode. max_key_type indicates the highest item type to remove.
3773 * This cannot be run for file data extents because it does not
3774 * free the extents they point to.
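*
* For example, relogging an inode with a max_key_type of
* BTRFS_XATTR_ITEM_KEY drops the previously logged inode item,
* references and xattrs, while logged extent items, whose key type is
* higher, are left in place.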
3776 static int drop_objectid_items(struct btrfs_trans_handle *trans,
3777 struct btrfs_root *log,
3778 struct btrfs_path *path,
3779 u64 objectid, int max_key_type)
3782 struct btrfs_key key;
3783 struct btrfs_key found_key;
3786 key.objectid = objectid;
3787 key.type = max_key_type;
3788 key.offset = (u64)-1;
3791 ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
3792 BUG_ON(ret == 0); /* Logic error */
3796 if (path->slots[0] == 0)
3800 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
3803 if (found_key.objectid != objectid)
3806 found_key.offset = 0;
3808 ret = btrfs_bin_search(path->nodes[0], &found_key, &start_slot);
3812 ret = btrfs_del_items(trans, log, path, start_slot,
3813 path->slots[0] - start_slot + 1);
3815 * If start slot isn't 0 then we don't need to re-search, we've
3816 * found the last guy with the objectid in this tree.
3818 if (ret || start_slot != 0)
3820 btrfs_release_path(path);
3822 btrfs_release_path(path);
3828 static void fill_inode_item(struct btrfs_trans_handle *trans,
3829 struct extent_buffer *leaf,
3830 struct btrfs_inode_item *item,
3831 struct inode *inode, int log_inode_only,
u64 logged_isize)
3834 struct btrfs_map_token token;
3836 btrfs_init_map_token(&token, leaf);
3838 if (log_inode_only) {
3839 /* set the generation to zero so the recovery code
3840 * can tell the difference between logging
3841 * just to say 'this inode exists' and logging
3842 * to say 'update this inode with these values'
3844 btrfs_set_token_inode_generation(&token, item, 0);
3845 btrfs_set_token_inode_size(&token, item, logged_isize);
3847 btrfs_set_token_inode_generation(&token, item,
3848 BTRFS_I(inode)->generation);
3849 btrfs_set_token_inode_size(&token, item, inode->i_size);
3852 btrfs_set_token_inode_uid(&token, item, i_uid_read(inode));
3853 btrfs_set_token_inode_gid(&token, item, i_gid_read(inode));
3854 btrfs_set_token_inode_mode(&token, item, inode->i_mode);
3855 btrfs_set_token_inode_nlink(&token, item, inode->i_nlink);
3857 btrfs_set_token_timespec_sec(&token, &item->atime,
3858 inode->i_atime.tv_sec);
3859 btrfs_set_token_timespec_nsec(&token, &item->atime,
3860 inode->i_atime.tv_nsec);
3862 btrfs_set_token_timespec_sec(&token, &item->mtime,
3863 inode->i_mtime.tv_sec);
3864 btrfs_set_token_timespec_nsec(&token, &item->mtime,
3865 inode->i_mtime.tv_nsec);
3867 btrfs_set_token_timespec_sec(&token, &item->ctime,
3868 inode->i_ctime.tv_sec);
3869 btrfs_set_token_timespec_nsec(&token, &item->ctime,
3870 inode->i_ctime.tv_nsec);
3872 btrfs_set_token_inode_nbytes(&token, item, inode_get_bytes(inode));
3874 btrfs_set_token_inode_sequence(&token, item, inode_peek_iversion(inode));
3875 btrfs_set_token_inode_transid(&token, item, trans->transid);
3876 btrfs_set_token_inode_rdev(&token, item, inode->i_rdev);
3877 btrfs_set_token_inode_flags(&token, item, BTRFS_I(inode)->flags);
3878 btrfs_set_token_inode_block_group(&token, item, 0);
3881 static int log_inode_item(struct btrfs_trans_handle *trans,
3882 struct btrfs_root *log, struct btrfs_path *path,
3883 struct btrfs_inode *inode)
3885 struct btrfs_inode_item *inode_item;
3888 ret = btrfs_insert_empty_item(trans, log, path,
3889 &inode->location, sizeof(*inode_item));
3890 if (ret && ret != -EEXIST)
3892 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3893 struct btrfs_inode_item);
3894 fill_inode_item(trans, path->nodes[0], inode_item, &inode->vfs_inode,
3896 btrfs_release_path(path);
3900 static int log_csums(struct btrfs_trans_handle *trans,
3901 struct btrfs_inode *inode,
3902 struct btrfs_root *log_root,
3903 struct btrfs_ordered_sum *sums)
3905 const u64 lock_end = sums->bytenr + sums->len - 1;
3906 struct extent_state *cached_state = NULL;
3910 * If this inode was not used for reflink operations in the current
3911 * transaction with new extents, then take the fast path; there is no
3912 * need to worry about logging checksum items with overlapping ranges.
3914 if (inode->last_reflink_trans < trans->transid)
3915 return btrfs_csum_file_blocks(trans, log_root, sums);
3918 * Serialize logging for checksums. This is to avoid racing with the
3919 * same checksum being logged by another task that is logging another
3920 * file which happens to refer to the same extent as well. Such races
3921 * can leave checksum items in the log with overlapping ranges.
3923 ret = lock_extent_bits(&log_root->log_csum_range, sums->bytenr,
3924 lock_end, &cached_state);
3928 * Due to extent cloning, we might have logged a csum item that covers a
3929 * subrange of a cloned extent, and later we can end up logging a csum
3930 * item for a larger subrange of the same extent or the entire range.
3931 * This would leave csum items in the log tree that cover the same range
3932 * and break the searches for checksums in the log tree, resulting in
3933 * some checksums missing in the fs/subvolume tree. So just delete (or
3934 * trim and adjust) any existing csum items in the log for this range.
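*
* For example:
*
* |--------- cloned extent on disk ---------|
* |-- csum item from earlier --|
* |-------- csum item being logged ---------|
*
* Without the delete, both items would remain in the log with
* overlapping ranges.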
3936 ret = btrfs_del_csums(trans, log_root, sums->bytenr, sums->len);
3938 ret = btrfs_csum_file_blocks(trans, log_root, sums);
3940 unlock_extent_cached(&log_root->log_csum_range, sums->bytenr, lock_end,
3946 static noinline int copy_items(struct btrfs_trans_handle *trans,
3947 struct btrfs_inode *inode,
3948 struct btrfs_path *dst_path,
3949 struct btrfs_path *src_path,
3950 int start_slot, int nr, int inode_only,
3953 struct btrfs_fs_info *fs_info = trans->fs_info;
3954 unsigned long src_offset;
3955 unsigned long dst_offset;
3956 struct btrfs_root *log = inode->root->log_root;
3957 struct btrfs_file_extent_item *extent;
3958 struct btrfs_inode_item *inode_item;
3959 struct extent_buffer *src = src_path->nodes[0];
3961 struct btrfs_key *ins_keys;
3965 struct list_head ordered_sums;
3966 int skip_csum = inode->flags & BTRFS_INODE_NODATASUM;
3968 INIT_LIST_HEAD(&ordered_sums);
3970 ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
3971 nr * sizeof(u32), GFP_NOFS);
3975 ins_sizes = (u32 *)ins_data;
3976 ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));
3978 for (i = 0; i < nr; i++) {
3979 ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot);
3980 btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot);
3982 ret = btrfs_insert_empty_items(trans, log, dst_path,
3983 ins_keys, ins_sizes, nr);
3989 for (i = 0; i < nr; i++, dst_path->slots[0]++) {
3990 dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0],
3991 dst_path->slots[0]);
3993 src_offset = btrfs_item_ptr_offset(src, start_slot + i);
3995 if (ins_keys[i].type == BTRFS_INODE_ITEM_KEY) {
3996 inode_item = btrfs_item_ptr(dst_path->nodes[0],
3998 struct btrfs_inode_item);
3999 fill_inode_item(trans, dst_path->nodes[0], inode_item,
4001 inode_only == LOG_INODE_EXISTS,
4004 copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
4005 src_offset, ins_sizes[i]);
4008 /* take a reference on file data extents so that truncates
4009 * or deletes of this inode don't have to relog the inode again
4012 if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY &&
4015 extent = btrfs_item_ptr(src, start_slot + i,
4016 struct btrfs_file_extent_item);
4018 if (btrfs_file_extent_generation(src, extent) < trans->transid)
4021 found_type = btrfs_file_extent_type(src, extent);
4022 if (found_type == BTRFS_FILE_EXTENT_REG) {
4024 ds = btrfs_file_extent_disk_bytenr(src,
4026 /* ds == 0 is a hole */
4030 dl = btrfs_file_extent_disk_num_bytes(src,
4032 cs = btrfs_file_extent_offset(src, extent);
4033 cl = btrfs_file_extent_num_bytes(src,
4035 if (btrfs_file_extent_compression(src,
extent)) {
cs = 0;
cl = dl;
}
4041 ret = btrfs_lookup_csums_range(
4043 ds + cs, ds + cs + cl - 1,
4051 btrfs_mark_buffer_dirty(dst_path->nodes[0]);
4052 btrfs_release_path(dst_path);
4056 * we have to do this after the loop above to avoid changing the
4057 * log tree while trying to change the log tree.
4059 while (!list_empty(&ordered_sums)) {
4060 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
4061 struct btrfs_ordered_sum,
4064 ret = log_csums(trans, inode, log, sums);
4065 list_del(&sums->list);
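/* Comparator for list_sort(): orders the modified extent maps by file
* offset so that extents are logged in ascending order.
*/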
4072 static int extent_cmp(void *priv, struct list_head *a, struct list_head *b)
4074 struct extent_map *em1, *em2;
4076 em1 = list_entry(a, struct extent_map, list);
4077 em2 = list_entry(b, struct extent_map, list);
4079 if (em1->start < em2->start)
return -1;
4081 else if (em1->start > em2->start)
return 1;
return 0;
4086 static int log_extent_csums(struct btrfs_trans_handle *trans,
4087 struct btrfs_inode *inode,
4088 struct btrfs_root *log_root,
4089 const struct extent_map *em,
4090 struct btrfs_log_ctx *ctx)
4092 struct btrfs_ordered_extent *ordered;
4095 u64 mod_start = em->mod_start;
4096 u64 mod_len = em->mod_len;
4097 LIST_HEAD(ordered_sums);
4100 if (inode->flags & BTRFS_INODE_NODATASUM ||
4101 test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
4102 em->block_start == EXTENT_MAP_HOLE)
4105 list_for_each_entry(ordered, &ctx->ordered_extents, log_list) {
4106 const u64 ordered_end = ordered->file_offset + ordered->num_bytes;
4107 const u64 mod_end = mod_start + mod_len;
4108 struct btrfs_ordered_sum *sums;
4113 if (ordered_end <= mod_start)
4115 if (mod_end <= ordered->file_offset)
4119 * We are going to copy all the csums on this ordered extent, so
4120 * go ahead and adjust mod_start and mod_len in case this ordered
4121 * extent has already been logged.
4123 if (ordered->file_offset > mod_start) {
4124 if (ordered_end >= mod_end)
4125 mod_len = ordered->file_offset - mod_start;
4127 * If we have this case
4129 * |--------- logged extent ---------|
4130 * |----- ordered extent ----|
4132 * Just don't mess with mod_start and mod_len, we'll
4133 * just end up logging more csums than we need and it will be ok.
4137 if (ordered_end < mod_end) {
4138 mod_len = mod_end - ordered_end;
4139 mod_start = ordered_end;
4146 * To keep us from looping for the above case of an ordered
4147 * extent that falls inside of the logged extent.
4149 if (test_and_set_bit(BTRFS_ORDERED_LOGGED_CSUM, &ordered->flags))
4152 list_for_each_entry(sums, &ordered->list, list) {
4153 ret = log_csums(trans, inode, log_root, sums);
4159 /* We're done, found all csums in the ordered extents. */
4163 /* If we're compressed we have to save the entire range of csums. */
4164 if (em->compress_type) {
csum_offset = 0;
4166 csum_len = max(em->block_len, em->orig_block_len);
} else {
4168 csum_offset = mod_start - em->start;
csum_len = mod_len;
}
4172 /* block start is already adjusted for the file extent offset. */
4173 ret = btrfs_lookup_csums_range(trans->fs_info->csum_root,
4174 em->block_start + csum_offset,
4175 em->block_start + csum_offset +
4176 csum_len - 1, &ordered_sums, 0);
4180 while (!list_empty(&ordered_sums)) {
4181 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
4182 struct btrfs_ordered_sum,
4185 ret = log_csums(trans, inode, log_root, sums);
4186 list_del(&sums->list);
4193 static int log_one_extent(struct btrfs_trans_handle *trans,
4194 struct btrfs_inode *inode, struct btrfs_root *root,
4195 const struct extent_map *em,
4196 struct btrfs_path *path,
4197 struct btrfs_log_ctx *ctx)
4199 struct btrfs_drop_extents_args drop_args = { 0 };
4200 struct btrfs_root *log = root->log_root;
4201 struct btrfs_file_extent_item *fi;
4202 struct extent_buffer *leaf;
4203 struct btrfs_map_token token;
4204 struct btrfs_key key;
4205 u64 extent_offset = em->start - em->orig_start;
4209 ret = log_extent_csums(trans, inode, log, em, ctx);
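/* Drop any extent items previously logged for this file range. With
* replace_extent set, btrfs_drop_extents may leave behind an empty item
* slot of the right size, which is reused below instead of inserting a
* new item.
*/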
4213 drop_args.path = path;
4214 drop_args.start = em->start;
4215 drop_args.end = em->start + em->len;
4216 drop_args.replace_extent = true;
4217 drop_args.extent_item_size = sizeof(*fi);
4218 ret = btrfs_drop_extents(trans, log, inode, &drop_args);
4222 if (!drop_args.extent_inserted) {
4223 key.objectid = btrfs_ino(inode);
4224 key.type = BTRFS_EXTENT_DATA_KEY;
4225 key.offset = em->start;
4227 ret = btrfs_insert_empty_item(trans, log, path, &key,
4232 leaf = path->nodes[0];
4233 btrfs_init_map_token(&token, leaf);
4234 fi = btrfs_item_ptr(leaf, path->slots[0],
4235 struct btrfs_file_extent_item);
4237 btrfs_set_token_file_extent_generation(&token, fi, trans->transid);
4238 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
4239 btrfs_set_token_file_extent_type(&token, fi,
4240 BTRFS_FILE_EXTENT_PREALLOC);
4242 btrfs_set_token_file_extent_type(&token, fi,
4243 BTRFS_FILE_EXTENT_REG);
4245 block_len = max(em->block_len, em->orig_block_len);
4246 if (em->compress_type != BTRFS_COMPRESS_NONE) {
4247 btrfs_set_token_file_extent_disk_bytenr(&token, fi,
4249 btrfs_set_token_file_extent_disk_num_bytes(&token, fi, block_len);
4250 } else if (em->block_start < EXTENT_MAP_LAST_BYTE) {
4251 btrfs_set_token_file_extent_disk_bytenr(&token, fi,
4254 btrfs_set_token_file_extent_disk_num_bytes(&token, fi, block_len);
4256 btrfs_set_token_file_extent_disk_bytenr(&token, fi, 0);
4257 btrfs_set_token_file_extent_disk_num_bytes(&token, fi, 0);
4260 btrfs_set_token_file_extent_offset(&token, fi, extent_offset);
4261 btrfs_set_token_file_extent_num_bytes(&token, fi, em->len);
4262 btrfs_set_token_file_extent_ram_bytes(&token, fi, em->ram_bytes);
4263 btrfs_set_token_file_extent_compression(&token, fi, em->compress_type);
4264 btrfs_set_token_file_extent_encryption(&token, fi, 0);
4265 btrfs_set_token_file_extent_other_encoding(&token, fi, 0);
4266 btrfs_mark_buffer_dirty(leaf);
4268 btrfs_release_path(path);
4274 * Log all prealloc extents beyond the inode's i_size to make sure we do not
4275 * lose them after doing a fast fsync and replaying the log. We scan the
4276 * subvolume's root instead of iterating the inode's extent map tree because
4277 * otherwise we can log incorrect extent items based on extent map conversion.
4278 * That can happen due to the fact that extent maps are merged when they
4279 * are not in the extent map tree's list of modified extents.
4281 static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
4282 struct btrfs_inode *inode,
4283 struct btrfs_path *path)
4285 struct btrfs_root *root = inode->root;
4286 struct btrfs_key key;
4287 const u64 i_size = i_size_read(&inode->vfs_inode);
4288 const u64 ino = btrfs_ino(inode);
4289 struct btrfs_path *dst_path = NULL;
4290 bool dropped_extents = false;
4291 u64 truncate_offset = i_size;
4292 struct extent_buffer *leaf;
4298 if (!(inode->flags & BTRFS_INODE_PREALLOC))
4302 key.type = BTRFS_EXTENT_DATA_KEY;
4303 key.offset = i_size;
4304 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4309 * We must check if there is a prealloc extent that starts before the
4310 * i_size and crosses the i_size boundary. This is to ensure later we
4311 * truncate down to the end of that extent and not to the i_size, as
4312 * otherwise we end up losing part of the prealloc extent after a log
4313 * replay and with an implicit hole if there is another prealloc extent
4314 * that starts at an offset beyond i_size.
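*
* For example:
*
*                           i_size
*                             v
* |---- prealloc extent A ----|    |---- prealloc extent B ----|
*
* Truncating log items at i_size would cut off the tail of extent A, so
* we truncate at the end of A instead.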
4316 ret = btrfs_previous_item(root, path, ino, BTRFS_EXTENT_DATA_KEY);
4321 struct btrfs_file_extent_item *ei;
4323 leaf = path->nodes[0];
4324 slot = path->slots[0];
4325 ei = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
4327 if (btrfs_file_extent_type(leaf, ei) ==
4328 BTRFS_FILE_EXTENT_PREALLOC) {
4331 btrfs_item_key_to_cpu(leaf, &key, slot);
4332 extent_end = key.offset +
4333 btrfs_file_extent_num_bytes(leaf, ei);
4335 if (extent_end > i_size)
4336 truncate_offset = extent_end;
4343 leaf = path->nodes[0];
4344 slot = path->slots[0];
4346 if (slot >= btrfs_header_nritems(leaf)) {
4348 ret = copy_items(trans, inode, dst_path, path,
4349 start_slot, ins_nr, 1, 0);
4354 ret = btrfs_next_leaf(root, path);
4364 btrfs_item_key_to_cpu(leaf, &key, slot);
4365 if (key.objectid > ino)
4367 if (WARN_ON_ONCE(key.objectid < ino) ||
4368 key.type < BTRFS_EXTENT_DATA_KEY ||
4369 key.offset < i_size) {
4373 if (!dropped_extents) {
4375 * Avoid logging extent items logged in past fsync calls
4376 * and leading to duplicate keys in the log tree.
4379 ret = btrfs_truncate_inode_items(trans,
4381 inode, truncate_offset,
4382 BTRFS_EXTENT_DATA_KEY);
4383 } while (ret == -EAGAIN);
4386 dropped_extents = true;
4393 dst_path = btrfs_alloc_path();
4401 ret = copy_items(trans, inode, dst_path, path,
4402 start_slot, ins_nr, 1, 0);
4404 btrfs_release_path(path);
4405 btrfs_free_path(dst_path);
4409 static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
4410 struct btrfs_root *root,
4411 struct btrfs_inode *inode,
4412 struct btrfs_path *path,
4413 struct btrfs_log_ctx *ctx)
4415 struct btrfs_ordered_extent *ordered;
4416 struct btrfs_ordered_extent *tmp;
4417 struct extent_map *em, *n;
4418 struct list_head extents;
4419 struct extent_map_tree *tree = &inode->extent_tree;
4424 INIT_LIST_HEAD(&extents);
4426 write_lock(&tree->lock);
4427 test_gen = root->fs_info->last_trans_committed;
4429 list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
4430 list_del_init(&em->list);
4432 * Just an arbitrary number, this can be really CPU intensive
4433 * once we start getting a lot of extents, and really once we
4434 * have a bunch of extents we just want to commit since it will be faster.
4437 if (++num > 32768) {
4438 list_del_init(&tree->modified_extents);
4443 if (em->generation <= test_gen)
4446 /* We log prealloc extents beyond eof later. */
4447 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) &&
4448 em->start >= i_size_read(&inode->vfs_inode))
4451 /* Need a ref to keep it from getting evicted from cache */
4452 refcount_inc(&em->refs);
4453 set_bit(EXTENT_FLAG_LOGGING, &em->flags);
4454 list_add_tail(&em->list, &extents);
4458 list_sort(NULL, &extents, extent_cmp);
4460 while (!list_empty(&extents)) {
4461 em = list_entry(extents.next, struct extent_map, list);
4463 list_del_init(&em->list);
4466 * If we had an error we just need to delete everybody from our
4470 clear_em_logging(tree, em);
4471 free_extent_map(em);
4475 write_unlock(&tree->lock);
4477 ret = log_one_extent(trans, inode, root, em, path, ctx);
4478 write_lock(&tree->lock);
4479 clear_em_logging(tree, em);
4480 free_extent_map(em);
4482 WARN_ON(!list_empty(&extents));
4483 write_unlock(&tree->lock);
4485 btrfs_release_path(path);
4487 ret = btrfs_log_prealloc_extents(trans, inode, path);
4492 * We have logged all extents successfully, now make sure the commit of
4493 * the current transaction waits for the ordered extents to complete
4494 * before it commits and wipes out the log trees, otherwise we would
4495 * lose data if an ordered extents completes after the transaction
4496 * commits and a power failure happens after the transaction commit.
4498 list_for_each_entry_safe(ordered, tmp, &ctx->ordered_extents, log_list) {
4499 list_del_init(&ordered->log_list);
4500 set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags);
4502 if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
4503 spin_lock_irq(&inode->ordered_tree.lock);
4504 if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
4505 set_bit(BTRFS_ORDERED_PENDING, &ordered->flags);
4506 atomic_inc(&trans->transaction->pending_ordered);
4508 spin_unlock_irq(&inode->ordered_tree.lock);
4510 btrfs_put_ordered_extent(ordered);
4516 static int logged_inode_size(struct btrfs_root *log, struct btrfs_inode *inode,
4517 struct btrfs_path *path, u64 *size_ret)
4519 struct btrfs_key key;
4522 key.objectid = btrfs_ino(inode);
4523 key.type = BTRFS_INODE_ITEM_KEY;
4526 ret = btrfs_search_slot(NULL, log, &key, path, 0, 0);
4529 } else if (ret > 0) {
4532 struct btrfs_inode_item *item;
4534 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
4535 struct btrfs_inode_item);
4536 *size_ret = btrfs_inode_size(path->nodes[0], item);
4538 * If the in-memory inode's i_size is smaller than the inode
4539 * size stored in the btree, return the inode's i_size, so
4540 * that we get a correct inode size after replaying the log
4541 * when before a power failure we had a shrinking truncate
4542 * followed by addition of a new name (rename / new hard link).
4543 * Otherwise return the inode size from the btree, to avoid
4544 * data loss when replaying a log due to previously doing a
4545 * write that expands the inode's size and logging a new name
4546 * immediately after.
4548 if (*size_ret > inode->vfs_inode.i_size)
4549 *size_ret = inode->vfs_inode.i_size;
4552 btrfs_release_path(path);
4557 * At the moment we always log all xattrs. This is to figure out at log replay
4558 * time which xattrs must have their deletion replayed. If an xattr is missing
4559 * in the log tree and exists in the fs/subvol tree, we delete it. This is
4560 * because if an xattr is deleted, then the inode is fsynced and a power failure
4561 * happens, the log is replayed the next time the fs is mounted and we want the
4562 * xattr to not exist anymore (same behaviour as other filesystems with a
4563 * journal: ext3/4, xfs, f2fs, etc).
4565 static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans,
4566 struct btrfs_root *root,
4567 struct btrfs_inode *inode,
4568 struct btrfs_path *path,
4569 struct btrfs_path *dst_path)
4572 struct btrfs_key key;
4573 const u64 ino = btrfs_ino(inode);
4576 bool found_xattrs = false;
4578 if (test_bit(BTRFS_INODE_NO_XATTRS, &inode->runtime_flags))
4582 key.type = BTRFS_XATTR_ITEM_KEY;
4585 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4590 int slot = path->slots[0];
4591 struct extent_buffer *leaf = path->nodes[0];
4592 int nritems = btrfs_header_nritems(leaf);
4594 if (slot >= nritems) {
4596 ret = copy_items(trans, inode, dst_path, path,
4597 start_slot, ins_nr, 1, 0);
4602 ret = btrfs_next_leaf(root, path);
4610 btrfs_item_key_to_cpu(leaf, &key, slot);
4611 if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY)
4618 found_xattrs = true;
4622 ret = copy_items(trans, inode, dst_path, path,
4623 start_slot, ins_nr, 1, 0);
4629 set_bit(BTRFS_INODE_NO_XATTRS, &inode->runtime_flags);
4635 * When using the NO_HOLES feature if we punched a hole that causes the
4636 * deletion of entire leafs or all the extent items of the first leaf (the one
4637 * that contains the inode item and references) we may end up not processing
4638 * any extents, because there are no leafs with a generation matching the
4639 * current transaction that have extent items for our inode. So we need to find
4640 * if any holes exist and then log them. We also need to log holes after any
4641 * truncate operation that changes the inode's size.
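*
* For example, with extent items at [0, 4K) and [64K, 68K), the range
* [4K, 64K) is an implicit hole under NO_HOLES and must be logged as a
* file extent item with a zero disk bytenr.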
4643 static int btrfs_log_holes(struct btrfs_trans_handle *trans,
4644 struct btrfs_root *root,
4645 struct btrfs_inode *inode,
4646 struct btrfs_path *path)
4648 struct btrfs_fs_info *fs_info = root->fs_info;
4649 struct btrfs_key key;
4650 const u64 ino = btrfs_ino(inode);
4651 const u64 i_size = i_size_read(&inode->vfs_inode);
4652 u64 prev_extent_end = 0;
4655 if (!btrfs_fs_incompat(fs_info, NO_HOLES) || i_size == 0)
4659 key.type = BTRFS_EXTENT_DATA_KEY;
4662 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4667 struct extent_buffer *leaf = path->nodes[0];
4669 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
4670 ret = btrfs_next_leaf(root, path);
4677 leaf = path->nodes[0];
4680 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4681 if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)
4684 /* We have a hole, log it. */
4685 if (prev_extent_end < key.offset) {
4686 const u64 hole_len = key.offset - prev_extent_end;
4689 * Release the path to avoid deadlocks with other code
4690 * paths that search the root while holding locks on
4691 * leafs from the log root.
4693 btrfs_release_path(path);
4694 ret = btrfs_insert_file_extent(trans, root->log_root,
4695 ino, prev_extent_end, 0,
4696 0, hole_len, 0, hole_len,
4702 * Search for the same key again in the root. Since it's
4703 * an extent item and we are holding the inode lock, the
4704 * key must still exist. If it doesn't, just emit a warning
4705 * and return an error to fall back to a transaction commit.
4708 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4711 if (WARN_ON(ret > 0))
4713 leaf = path->nodes[0];
4716 prev_extent_end = btrfs_file_extent_end(path);
4721 if (prev_extent_end < i_size) {
4724 btrfs_release_path(path);
4725 hole_len = ALIGN(i_size - prev_extent_end, fs_info->sectorsize);
4726 ret = btrfs_insert_file_extent(trans, root->log_root,
4727 ino, prev_extent_end, 0, 0,
4728 hole_len, 0, hole_len,
4738 * When we are logging a new inode X, check if it doesn't have a reference that
4739 * matches the reference from some other inode Y created in a past transaction
4740 * and that was renamed in the current transaction. If we don't do this, then at
4741 * log replay time we can lose inode Y (and all its files if it's a directory):
* mkdir /mnt/x
4744 * echo "hello world" > /mnt/x/foobar
* sync
* mv /mnt/x /mnt/y
4747 * mkdir /mnt/x # or touch /mnt/x
4748 * xfs_io -c fsync /mnt/x
* <power fail>
4750 * mount fs, trigger log replay
4752 * After the log replay procedure, we would lose the first directory and all its
4753 * files (file foobar).
4754 * For the case where inode Y is not a directory we simply end up losing it:
4756 * echo "123" > /mnt/foo
* sync
4758 * mv /mnt/foo /mnt/bar
4759 * echo "abc" > /mnt/foo
4760 * xfs_io -c fsync /mnt/foo
* <power fail>
4763 * We also need this for cases where a snapshot entry is replaced by some other
4764 * entry (file or directory) otherwise we end up with an unreplayable log due to
4765 * attempts to delete the snapshot entry (entry of type BTRFS_ROOT_ITEM_KEY) as
4766 * if it were a regular entry:
* mkdir /mnt/x
4769 * btrfs subvolume snapshot /mnt /mnt/x/snap
4770 * btrfs subvolume delete /mnt/x/snap
* rmdir /mnt/x
* mkdir /mnt/x
4773 * fsync /mnt/x or fsync some new file inside it
* <power fail>
4776 * The snapshot delete, rmdir of x, mkdir of a new x and the fsync all happen in
4777 * the same transaction.
4779 static int btrfs_check_ref_name_override(struct extent_buffer *eb,
4781 const struct btrfs_key *key,
4782 struct btrfs_inode *inode,
4783 u64 *other_ino, u64 *other_parent)
4786 struct btrfs_path *search_path;
4789 u32 item_size = btrfs_item_size_nr(eb, slot);
4791 unsigned long ptr = btrfs_item_ptr_offset(eb, slot);
4793 search_path = btrfs_alloc_path();
4796 search_path->search_commit_root = 1;
4797 search_path->skip_locking = 1;
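/* INODE_REF and INODE_EXTREF items pack variable sized records (a fixed
* header followed by an inline name) back to back, so walk them by
* advancing cur_offset by each record's total length.
*/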
4799 while (cur_offset < item_size) {
4803 unsigned long name_ptr;
4804 struct btrfs_dir_item *di;
4806 if (key->type == BTRFS_INODE_REF_KEY) {
4807 struct btrfs_inode_ref *iref;
4809 iref = (struct btrfs_inode_ref *)(ptr + cur_offset);
4810 parent = key->offset;
4811 this_name_len = btrfs_inode_ref_name_len(eb, iref);
4812 name_ptr = (unsigned long)(iref + 1);
4813 this_len = sizeof(*iref) + this_name_len;
4815 struct btrfs_inode_extref *extref;
4817 extref = (struct btrfs_inode_extref *)(ptr +
4819 parent = btrfs_inode_extref_parent(eb, extref);
4820 this_name_len = btrfs_inode_extref_name_len(eb, extref);
4821 name_ptr = (unsigned long)&extref->name;
4822 this_len = sizeof(*extref) + this_name_len;
4825 if (this_name_len > name_len) {
4828 new_name = krealloc(name, this_name_len, GFP_NOFS);
4833 name_len = this_name_len;
4837 read_extent_buffer(eb, name, name_ptr, this_name_len);
4838 di = btrfs_lookup_dir_item(NULL, inode->root, search_path,
4839 parent, name, this_name_len, 0);
4840 if (di && !IS_ERR(di)) {
4841 struct btrfs_key di_key;
4843 btrfs_dir_item_key_to_cpu(search_path->nodes[0],
4845 if (di_key.type == BTRFS_INODE_ITEM_KEY) {
4846 if (di_key.objectid != key->objectid) {
4848 *other_ino = di_key.objectid;
4849 *other_parent = parent;
4857 } else if (IS_ERR(di)) {
4861 btrfs_release_path(search_path);
4863 cur_offset += this_len;
4867 btrfs_free_path(search_path);
4872 struct btrfs_ino_list {
4875 struct list_head list;
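/*
 * Log all inodes that have a dir entry conflicting with one of the names of
 * the inode being logged. This is iterative rather than recursive: the list
 * below acts as a FIFO work queue, since logging one conflicting inode may
 * reveal further conflicting inodes.
 */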
4878 static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
4879 struct btrfs_root *root,
4880 struct btrfs_path *path,
4881 struct btrfs_log_ctx *ctx,
4882 u64 ino, u64 parent)
4884 struct btrfs_ino_list *ino_elem;
4885 LIST_HEAD(inode_list);
4888 ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS);
4891 ino_elem->ino = ino;
4892 ino_elem->parent = parent;
4893 list_add_tail(&ino_elem->list, &inode_list);
4895 while (!list_empty(&inode_list)) {
4896 struct btrfs_fs_info *fs_info = root->fs_info;
4897 struct btrfs_key key;
4898 struct inode *inode;
4900 ino_elem = list_first_entry(&inode_list, struct btrfs_ino_list,
4902 ino = ino_elem->ino;
4903 parent = ino_elem->parent;
4904 list_del(&ino_elem->list);
4909 btrfs_release_path(path);
4911 inode = btrfs_iget(fs_info->sb, ino, root);
4913 * If the other inode that had a conflicting dir entry was
4914 * deleted in the current transaction, we need to log its parent directory.
4917 if (IS_ERR(inode)) {
4918 ret = PTR_ERR(inode);
4919 if (ret == -ENOENT) {
4920 inode = btrfs_iget(fs_info->sb, parent, root);
4921 if (IS_ERR(inode)) {
4922 ret = PTR_ERR(inode);
4924 ret = btrfs_log_inode(trans, root,
4926 LOG_OTHER_INODE_ALL,
4928 btrfs_add_delayed_iput(inode);
4934 * If the inode was already logged, skip it - otherwise we can
4935 * hit an infinite loop. Example:
4937 * From the commit root (previous transaction) we have the
4940 * inode 257 a directory
4941 * inode 258 with references "zz" and "zz_link" on inode 257
4942 * inode 259 with reference "a" on inode 257
4944 * And in the current (uncommitted) transaction we have:
4946 * inode 257 a directory, unchanged
4947 * inode 258 with references "a" and "a2" on inode 257
4948 * inode 259 with reference "zz_link" on inode 257
4949 * inode 261 with reference "zz" on inode 257
4951 * When logging inode 261 the following infinite loop could
4952 * happen if we don't skip already logged inodes:
4954 * - we detect inode 258 as a conflicting inode, with inode 261
4955 * on reference "zz", and log it;
4957 * - we detect inode 259 as a conflicting inode, with inode 258
4958 * on reference "a", and log it;
4960 * - we detect inode 258 as a conflicting inode, with inode 259
4961 * on reference "zz_link", and log it - again! After this we
4962 * repeat the above steps forever.
4964 spin_lock(&BTRFS_I(inode)->lock);
4966 * Check the inode's logged_trans only instead of
4967 * btrfs_inode_in_log(). This is because the last_log_commit of
4968 * the inode is not updated when we only log that it exists and
4969 * it has the full sync bit set (see btrfs_log_inode()).
4971 if (BTRFS_I(inode)->logged_trans == trans->transid) {
4972 spin_unlock(&BTRFS_I(inode)->lock);
4973 btrfs_add_delayed_iput(inode);
4976 spin_unlock(&BTRFS_I(inode)->lock);
4978 * We are safe logging the other inode without acquiring its
4979 * lock as long as we log with the LOG_INODE_EXISTS mode. We
4980 * are safe against concurrent renames of the other inode as
4981 * well because during a rename we pin the log and update the
4982 * log with the new name before we unpin it.
4984 ret = btrfs_log_inode(trans, root, BTRFS_I(inode),
4985 LOG_OTHER_INODE, ctx);
4987 btrfs_add_delayed_iput(inode);
4992 key.type = BTRFS_INODE_REF_KEY;
4994 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4996 btrfs_add_delayed_iput(inode);
5001 struct extent_buffer *leaf = path->nodes[0];
5002 int slot = path->slots[0];
5004 u64 other_parent = 0;
5006 if (slot >= btrfs_header_nritems(leaf)) {
5007 ret = btrfs_next_leaf(root, path);
5010 } else if (ret > 0) {
5017 btrfs_item_key_to_cpu(leaf, &key, slot);
5018 if (key.objectid != ino ||
5019 (key.type != BTRFS_INODE_REF_KEY &&
5020 key.type != BTRFS_INODE_EXTREF_KEY)) {
5025 ret = btrfs_check_ref_name_override(leaf, slot, &key,
5026 BTRFS_I(inode), &other_ino,
5031 ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS);
5036 ino_elem->ino = other_ino;
5037 ino_elem->parent = other_parent;
5038 list_add_tail(&ino_elem->list, &inode_list);
5043 btrfs_add_delayed_iput(inode);
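/*
 * Copy the inode's items in the range [min_key, max_key] from the fs/subvolume
 * tree to the log tree, batching runs of contiguous leaf slots into as few
 * copy_items() calls as possible. Sets *need_log_inode_item to false if the
 * inode item itself is found and copied along the way.
 */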
5049 static int copy_inode_items_to_log(struct btrfs_trans_handle *trans,
5050 struct btrfs_inode *inode,
5051 struct btrfs_key *min_key,
5052 const struct btrfs_key *max_key,
5053 struct btrfs_path *path,
5054 struct btrfs_path *dst_path,
5055 const u64 logged_isize,
5056 const bool recursive_logging,
5057 const int inode_only,
5058 struct btrfs_log_ctx *ctx,
5059 bool *need_log_inode_item)
5061 struct btrfs_root *root = inode->root;
5062 int ins_start_slot = 0;
5067 ret = btrfs_search_forward(root, min_key, path, trans->transid);
5075 /* Note, ins_nr might be > 0 here, cleanup outside the loop */
5076 if (min_key->objectid != max_key->objectid)
5078 if (min_key->type > max_key->type)
5081 if (min_key->type == BTRFS_INODE_ITEM_KEY)
5082 *need_log_inode_item = false;
5084 if ((min_key->type == BTRFS_INODE_REF_KEY ||
5085 min_key->type == BTRFS_INODE_EXTREF_KEY) &&
5086 inode->generation == trans->transid &&
5087 !recursive_logging) {
5089 u64 other_parent = 0;
5091 ret = btrfs_check_ref_name_override(path->nodes[0],
5092 path->slots[0], min_key, inode,
5093 &other_ino, &other_parent);
5096 } else if (ret > 0 && ctx &&
5097 other_ino != btrfs_ino(BTRFS_I(ctx->inode))) {
5102 ins_start_slot = path->slots[0];
5104 ret = copy_items(trans, inode, dst_path, path,
5105 ins_start_slot, ins_nr,
5106 inode_only, logged_isize);
5111 ret = log_conflicting_inodes(trans, root, path,
5112 ctx, other_ino, other_parent);
5115 btrfs_release_path(path);
5120 /* Skip xattrs, we log them later with btrfs_log_all_xattrs() */
5121 if (min_key->type == BTRFS_XATTR_ITEM_KEY) {
5124 ret = copy_items(trans, inode, dst_path, path,
5126 ins_nr, inode_only, logged_isize);
5133 if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
5136 } else if (!ins_nr) {
5137 ins_start_slot = path->slots[0];
5142 ret = copy_items(trans, inode, dst_path, path, ins_start_slot,
5143 ins_nr, inode_only, logged_isize);
5147 ins_start_slot = path->slots[0];
5150 if (path->slots[0] < btrfs_header_nritems(path->nodes[0])) {
5151 btrfs_item_key_to_cpu(path->nodes[0], min_key,
5156 ret = copy_items(trans, inode, dst_path, path,
5157 ins_start_slot, ins_nr, inode_only,
5163 btrfs_release_path(path);
5165 if (min_key->offset < (u64)-1) {
5167 } else if (min_key->type < max_key->type) {
5169 min_key->offset = 0;
5175 ret = copy_items(trans, inode, dst_path, path, ins_start_slot,
5176 ins_nr, inode_only, logged_isize);
5181 /* log a single inode in the tree log.
5182 * At least one parent directory for this inode must exist in the tree
5183 * or be logged already.
5185 * Any items from this inode changed by the current transaction are copied
5186 * to the log tree. An extra reference is taken on any extents in this
5187 * file, allowing us to avoid a whole pile of corner cases around logging
5188 * blocks that have been removed from the tree.
5190 * See LOG_INODE_ALL and related defines for a description of what inode_only does.
5193 * This handles both files and directories.
5195 static int btrfs_log_inode(struct btrfs_trans_handle *trans,
5196 struct btrfs_root *root, struct btrfs_inode *inode,
5198 struct btrfs_log_ctx *ctx)
5200 struct btrfs_path *path;
5201 struct btrfs_path *dst_path;
5202 struct btrfs_key min_key;
5203 struct btrfs_key max_key;
5204 struct btrfs_root *log = root->log_root;
5207 bool fast_search = false;
5208 u64 ino = btrfs_ino(inode);
5209 struct extent_map_tree *em_tree = &inode->extent_tree;
5210 u64 logged_isize = 0;
5211 bool need_log_inode_item = true;
5212 bool xattrs_logged = false;
5213 bool recursive_logging = false;
5215 path = btrfs_alloc_path();
5218 dst_path = btrfs_alloc_path();
5220 btrfs_free_path(path);
5224 min_key.objectid = ino;
5225 min_key.type = BTRFS_INODE_ITEM_KEY;
5228 max_key.objectid = ino;
5231 /* today the code can only do partial logging of directories */
5232 if (S_ISDIR(inode->vfs_inode.i_mode) ||
5233 (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
5234 &inode->runtime_flags) &&
5235 inode_only >= LOG_INODE_EXISTS))
5236 max_key.type = BTRFS_XATTR_ITEM_KEY;
5238 max_key.type = (u8)-1;
5239 max_key.offset = (u64)-1;
5242 * Only run delayed items if we are a directory. We want to make sure
5243 * all directory indexes hit the fs/subvolume tree so we can find them
5244 * and figure out which index ranges have to be logged.
5246 * Otherwise commit the delayed inode only if the full sync flag is set,
5247 * as we want to make sure an up to date version is in the subvolume
5248 * tree so copy_inode_items_to_log() / copy_items() can find it and copy
5249 * it to the log tree. For a non full sync, we always log the inode item
5250 * based on the in-memory struct btrfs_inode which is always up to date.
5252 if (S_ISDIR(inode->vfs_inode.i_mode))
5253 ret = btrfs_commit_inode_delayed_items(trans, inode);
5254 else if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags))
5255 ret = btrfs_commit_inode_delayed_inode(inode);
5258 btrfs_free_path(path);
5259 btrfs_free_path(dst_path);
5263 if (inode_only == LOG_OTHER_INODE || inode_only == LOG_OTHER_INODE_ALL) {
5264 recursive_logging = true;
5265 if (inode_only == LOG_OTHER_INODE)
5266 inode_only = LOG_INODE_EXISTS;
5268 inode_only = LOG_INODE_ALL;
5269 mutex_lock_nested(&inode->log_mutex, SINGLE_DEPTH_NESTING);
5271 mutex_lock(&inode->log_mutex);
5275 * a brute force approach to making sure we get the most up-to-date
5276 * copies of everything.
5278 if (S_ISDIR(inode->vfs_inode.i_mode)) {
5279 int max_key_type = BTRFS_DIR_LOG_INDEX_KEY;
5281 if (inode_only == LOG_INODE_EXISTS)
5282 max_key_type = BTRFS_XATTR_ITEM_KEY;
5283 ret = drop_objectid_items(trans, log, path, ino, max_key_type);
5285 if (inode_only == LOG_INODE_EXISTS) {
5287 * Make sure the new inode item we write to the log has
5288 * the same isize as the current one (if it exists).
5289 * This is necessary to prevent data loss after log
5290 * replay, and also to prevent doing a wrong expanding
5291 * truncate - e.g.: create file, write 4K into offset
5292 * 0, fsync, write 4K into offset 4096, add hard link,
5293 * fsync some other file (to sync log), power fail - if
5294 * we use the inode's current i_size, after log replay
5295 * we get an 8K file, with the last 4K extent as a hole
5296 * (zeroes), as if an expanding truncate happened,
5297 * instead of getting a file of 4K only.
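 *
 * An illustrative reproducer for that scenario (a sketch, the exact
 * commands are an assumption):
 *
 * xfs_io -f -c "pwrite 0 4K" /mnt/foo
 * xfs_io -c fsync /mnt/foo
 * xfs_io -c "pwrite 4K 4K" /mnt/foo
 * ln /mnt/foo /mnt/foo_link
 * xfs_io -c fsync /mnt/bar
 * <power fail>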
5299 err = logged_inode_size(log, inode, path, &logged_isize);
5303 if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
5304 &inode->runtime_flags)) {
5305 if (inode_only == LOG_INODE_EXISTS) {
5306 max_key.type = BTRFS_XATTR_ITEM_KEY;
5307 ret = drop_objectid_items(trans, log, path, ino,
5310 clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
5311 &inode->runtime_flags);
5312 clear_bit(BTRFS_INODE_COPY_EVERYTHING,
5313 &inode->runtime_flags);
5315 ret = btrfs_truncate_inode_items(trans,
5321 } else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING,
5322 &inode->runtime_flags) ||
5323 inode_only == LOG_INODE_EXISTS) {
5324 if (inode_only == LOG_INODE_ALL)
5326 max_key.type = BTRFS_XATTR_ITEM_KEY;
5327 ret = drop_objectid_items(trans, log, path, ino,
5330 if (inode_only == LOG_INODE_ALL)
5341 err = copy_inode_items_to_log(trans, inode, &min_key, &max_key,
5342 path, dst_path, logged_isize,
5343 recursive_logging, inode_only, ctx,
5344 &need_log_inode_item);
5348 btrfs_release_path(path);
5349 btrfs_release_path(dst_path);
5350 err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path);
5353 xattrs_logged = true;
5354 if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) {
5355 btrfs_release_path(path);
5356 btrfs_release_path(dst_path);
5357 err = btrfs_log_holes(trans, root, inode, path);
5362 btrfs_release_path(path);
5363 btrfs_release_path(dst_path);
5364 if (need_log_inode_item) {
5365 err = log_inode_item(trans, log, dst_path, inode);
5366 if (!err && !xattrs_logged) {
5367 err = btrfs_log_all_xattrs(trans, root, inode, path,
5369 btrfs_release_path(path);
5375 ret = btrfs_log_changed_extents(trans, root, inode, dst_path,
5381 } else if (inode_only == LOG_INODE_ALL) {
5382 struct extent_map *em, *n;
5384 write_lock(&em_tree->lock);
5385 list_for_each_entry_safe(em, n, &em_tree->modified_extents, list)
5386 list_del_init(&em->list);
5387 write_unlock(&em_tree->lock);
5390 if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->vfs_inode.i_mode)) {
5391 ret = log_directory_changes(trans, root, inode, path, dst_path,
5400 * If we are logging that an ancestor inode exists as part of logging a
5401 * new name from a link or rename operation, don't mark the inode as
5402 * logged - otherwise if an explicit fsync is made against an ancestor,
5403 * the fsync considers the inode in the log and doesn't sync the log,
5404 * resulting in the ancestor missing after a power failure unless the
5405 * log was synced as part of an fsync against any other unrelated inode.
5406 * So keep it simple for this case and just don't flag the ancestors as logged.
5410 !(S_ISDIR(inode->vfs_inode.i_mode) && ctx->logging_new_name &&
5411 &inode->vfs_inode != ctx->inode)) {
5412 spin_lock(&inode->lock);
5413 inode->logged_trans = trans->transid;
5415 * Don't update last_log_commit if we logged that an inode exists
5416 * after it was loaded to memory (full_sync bit set).
5417 * This is to prevent data loss when we do a write to the inode,
5418 * then the inode gets evicted after all delalloc was flushed,
5419 * then we log it exists (due to a rename for example) and then
5420 * fsync it. This last fsync would do nothing (not logging the
5421 * extents previously written).
5423 if (inode_only != LOG_INODE_EXISTS ||
5424 !test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags))
5425 inode->last_log_commit = inode->last_sub_trans;
5426 spin_unlock(&inode->lock);
5429 mutex_unlock(&inode->log_mutex);
5431 btrfs_free_path(path);
5432 btrfs_free_path(dst_path);
5437 * Check if we must fall back to a transaction commit when logging an inode.
5438 * This must be called after logging the inode and is used only in the context
5439 * where fsyncing an inode requires logging some other inode - in which case we
5440 * can't lock the i_mutex of each other inode we need to log, as that can lead
5441 * to deadlocks with concurrent fsyncs against other inodes (as we can log
5442 * inodes up or down in the hierarchy) or with rename operations, for example.
5443 * So we take the log_mutex of the inode after we have logged it and then check
5444 * its last_unlink_trans value - this is safe because any task setting
5445 * last_unlink_trans must take the log_mutex and do so before the actual unlink
5446 * operation. Therefore, if we do this check before a concurrent task sets
5447 * last_unlink_trans, it means we've logged a consistent version/state of all
5448 * the inode items; otherwise we are not sure and must do a transaction commit
5449 * (the concurrent task might have only updated last_unlink_trans before we
5450 * logged the inode, or it might have also done the unlink).
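 *
 * An illustrative safe ordering (a sketch):
 *
 * 1) task B logs the inode's items;
 * 2) task B takes the inode's log_mutex and reads last_unlink_trans,
 *    which still has its old value, so no transaction commit is needed;
 * 3) task A takes the inode's log_mutex, sets last_unlink_trans and
 *    only then does the actual unlink.
 *
 * Since at step 2) the unlink had not happened yet, the state task B
 * logged is consistent.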
5452 static bool btrfs_must_commit_transaction(struct btrfs_trans_handle *trans,
5453 struct btrfs_inode *inode)
5455 struct btrfs_fs_info *fs_info = inode->root->fs_info;
5458 mutex_lock(&inode->log_mutex);
5459 if (inode->last_unlink_trans > fs_info->last_trans_committed) {
5461 * Make sure any commits to the log are forced to be full
5464 btrfs_set_log_full_commit(trans);
5467 mutex_unlock(&inode->log_mutex);
5473 * follow the dentry parent pointers up the chain and see if any
5474 * of the directories in it require a full commit before they can
5475 * be logged. Returns zero if nothing special needs to be done or 1 if
5476 * a full commit is required.
5478 static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
5479 struct btrfs_inode *inode,
5480 struct dentry *parent,
5481 struct super_block *sb,
5485 struct dentry *old_parent = NULL;
5488 * for a regular file, if its inode is already on disk, we don't
5489 * have to worry about the parents at all. This is because
5490 * we can use the last_unlink_trans field to record renames
5491 * and other fun in this file.
5493 if (S_ISREG(inode->vfs_inode.i_mode) &&
5494 inode->generation <= last_committed &&
5495 inode->last_unlink_trans <= last_committed)
5498 if (!S_ISDIR(inode->vfs_inode.i_mode)) {
5499 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
5501 inode = BTRFS_I(d_inode(parent));
5505 if (btrfs_must_commit_transaction(trans, inode)) {
5510 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
5513 if (IS_ROOT(parent)) {
5514 inode = BTRFS_I(d_inode(parent));
5515 if (btrfs_must_commit_transaction(trans, inode))
5520 parent = dget_parent(parent);
5522 old_parent = parent;
5523 inode = BTRFS_I(d_inode(parent));
5531 struct btrfs_dir_list {
5533 struct list_head list;
5537 * Log the inodes of the new dentries of a directory. See log_dir_items() for
5538 * details about why it is needed.
5539 * This is a recursive operation - if an existing dentry corresponds to a
5540 * directory, that directory's new entries are logged too (same behaviour as
5541 * ext3/4, xfs, f2fs, reiserfs, nilfs2). Note that when logging the inodes
5542 * the dentries point to we do not lock their i_mutex, otherwise lockdep
5543 * complains about the following circular lock dependency / possible deadlock:
5547 * lock(&type->i_mutex_dir_key#3/2);
5548 * lock(sb_internal#2);
5549 * lock(&type->i_mutex_dir_key#3/2);
5550 * lock(&sb->s_type->i_mutex_key#14);
5552 * Where sb_internal is the lock (a counter that works as a lock) acquired by
5553 * sb_start_intwrite() in btrfs_start_transaction().
5554 * Not locking i_mutex of the inodes is still safe because:
5556 * 1) For regular files we log with a mode of LOG_INODE_EXISTS. It's possible
5557 * that while logging the inode new references (names) are added or removed
5558 * from the inode, leaving the logged inode item with a link count that does
5559 * not match the number of logged inode reference items. This is fine because
5560 * at log replay time we compute the real number of links and correct the
5561 * link count in the inode item (see replay_one_buffer() and
5562 * link_to_fixup_dir());
5564 * 2) For directories we log with a mode of LOG_INODE_ALL. It's possible that
5565 * while logging the inode's items new items with keys BTRFS_DIR_ITEM_KEY and
5566 * BTRFS_DIR_INDEX_KEY are added to fs/subvol tree and the logged inode item
5567 * has a size that doesn't match the sum of the lengths of all the logged
5568 * names. This does not result in a problem because if a dir_item key is
5569 * logged but its matching dir_index key is not logged, at log replay time we
5570 * don't use it to replay the respective name (see replay_one_name()). On the
5571 * other hand if only the dir_index key ends up being logged, the respective
5572 * name is added to the fs/subvol tree with both the dir_item and dir_index
5573 * keys created (see replay_one_name()).
5574 * The directory's inode item with a wrong i_size is not a problem either,
5575 * since we don't use it at log replay time to set the i_size in the inode
5576 * item of the fs/subvol tree (see overwrite_item()).
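 *
 * The walk below is breadth-first: dir_list acts as a FIFO of directory
 * inode numbers still to be processed, seeded with the inode of the
 * directory we start from.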
5578 static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
5579 struct btrfs_root *root,
5580 struct btrfs_inode *start_inode,
5581 struct btrfs_log_ctx *ctx)
5583 struct btrfs_fs_info *fs_info = root->fs_info;
5584 struct btrfs_root *log = root->log_root;
5585 struct btrfs_path *path;
5586 LIST_HEAD(dir_list);
5587 struct btrfs_dir_list *dir_elem;
5590 path = btrfs_alloc_path();
5594 dir_elem = kmalloc(sizeof(*dir_elem), GFP_NOFS);
5596 btrfs_free_path(path);
5599 dir_elem->ino = btrfs_ino(start_inode);
5600 list_add_tail(&dir_elem->list, &dir_list);
5602 while (!list_empty(&dir_list)) {
5603 struct extent_buffer *leaf;
5604 struct btrfs_key min_key;
5608 dir_elem = list_first_entry(&dir_list, struct btrfs_dir_list,
5611 goto next_dir_inode;
5613 min_key.objectid = dir_elem->ino;
5614 min_key.type = BTRFS_DIR_ITEM_KEY;
5617 btrfs_release_path(path);
5618 ret = btrfs_search_forward(log, &min_key, path, trans->transid);
5620 goto next_dir_inode;
5621 } else if (ret > 0) {
5623 goto next_dir_inode;
5627 leaf = path->nodes[0];
5628 nritems = btrfs_header_nritems(leaf);
5629 for (i = path->slots[0]; i < nritems; i++) {
5630 struct btrfs_dir_item *di;
5631 struct btrfs_key di_key;
5632 struct inode *di_inode;
5633 struct btrfs_dir_list *new_dir_elem;
5634 int log_mode = LOG_INODE_EXISTS;
5637 btrfs_item_key_to_cpu(leaf, &min_key, i);
5638 if (min_key.objectid != dir_elem->ino ||
5639 min_key.type != BTRFS_DIR_ITEM_KEY)
5640 goto next_dir_inode;
5642 di = btrfs_item_ptr(leaf, i, struct btrfs_dir_item);
5643 type = btrfs_dir_type(leaf, di);
5644 if (btrfs_dir_transid(leaf, di) < trans->transid &&
5645 type != BTRFS_FT_DIR)
5647 btrfs_dir_item_key_to_cpu(leaf, di, &di_key);
5648 if (di_key.type == BTRFS_ROOT_ITEM_KEY)
5651 btrfs_release_path(path);
5652 di_inode = btrfs_iget(fs_info->sb, di_key.objectid, root);
5653 if (IS_ERR(di_inode)) {
5654 ret = PTR_ERR(di_inode);
5655 goto next_dir_inode;
5658 if (btrfs_inode_in_log(BTRFS_I(di_inode), trans->transid)) {
5659 btrfs_add_delayed_iput(di_inode);
5663 ctx->log_new_dentries = false;
5664 if (type == BTRFS_FT_DIR || type == BTRFS_FT_SYMLINK)
5665 log_mode = LOG_INODE_ALL;
5666 ret = btrfs_log_inode(trans, root, BTRFS_I(di_inode),
5669 btrfs_must_commit_transaction(trans, BTRFS_I(di_inode)))
5671 btrfs_add_delayed_iput(di_inode);
5673 goto next_dir_inode;
5674 if (ctx->log_new_dentries) {
5675 new_dir_elem = kmalloc(sizeof(*new_dir_elem),
5677 if (!new_dir_elem) {
5679 goto next_dir_inode;
5681 new_dir_elem->ino = di_key.objectid;
5682 list_add_tail(&new_dir_elem->list, &dir_list);
5687 ret = btrfs_next_leaf(log, path);
5689 goto next_dir_inode;
5690 } else if (ret > 0) {
5692 goto next_dir_inode;
5696 if (min_key.offset < (u64)-1) {
5701 list_del(&dir_elem->list);
5705 btrfs_free_path(path);
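/*
 * Log, with LOG_INODE_ALL, every directory that has an entry pointing to the
 * inode, found through its INODE_REF/INODE_EXTREF items. Used when the inode
 * was unlinked in the current transaction, so that stale dir index entries in
 * former parent directories can be dropped at log replay time (see the
 * comments in btrfs_log_inode_parent()).
 */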
5709 static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
5710 struct btrfs_inode *inode,
5711 struct btrfs_log_ctx *ctx)
5713 struct btrfs_fs_info *fs_info = trans->fs_info;
5715 struct btrfs_path *path;
5716 struct btrfs_key key;
5717 struct btrfs_root *root = inode->root;
5718 const u64 ino = btrfs_ino(inode);
5720 path = btrfs_alloc_path();
5723 path->skip_locking = 1;
5724 path->search_commit_root = 1;
5727 key.type = BTRFS_INODE_REF_KEY;
5729 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5734 struct extent_buffer *leaf = path->nodes[0];
5735 int slot = path->slots[0];
5740 if (slot >= btrfs_header_nritems(leaf)) {
5741 ret = btrfs_next_leaf(root, path);
5749 btrfs_item_key_to_cpu(leaf, &key, slot);
5750 /* BTRFS_INODE_EXTREF_KEY is BTRFS_INODE_REF_KEY + 1 */
5751 if (key.objectid != ino || key.type > BTRFS_INODE_EXTREF_KEY)
5754 item_size = btrfs_item_size_nr(leaf, slot);
5755 ptr = btrfs_item_ptr_offset(leaf, slot);
5756 while (cur_offset < item_size) {
5757 struct btrfs_key inode_key;
5758 struct inode *dir_inode;
5760 inode_key.type = BTRFS_INODE_ITEM_KEY;
5761 inode_key.offset = 0;
5763 if (key.type == BTRFS_INODE_EXTREF_KEY) {
5764 struct btrfs_inode_extref *extref;
5766 extref = (struct btrfs_inode_extref *)
5768 inode_key.objectid = btrfs_inode_extref_parent(
5770 cur_offset += sizeof(*extref);
5771 cur_offset += btrfs_inode_extref_name_len(leaf,
5774 inode_key.objectid = key.offset;
5775 cur_offset = item_size;
5778 dir_inode = btrfs_iget(fs_info->sb, inode_key.objectid,
5781 * If the parent inode was deleted, return an error to
5782 * fall back to a transaction commit. This is to prevent
5783 * an inode that was moved from one parent B to a parent
5784 * A, had its former parent B deleted, and was then
5785 * fsync'ed, from existing at both parents after a log
5786 * replay (with the old parent B still existing).
5793 * mv /mnt/B/bar /mnt/A/bar
5794 * mv -T /mnt/A /mnt/B
5798 * If we ignore the old parent B which got deleted,
5799 * after a log replay we would have file bar linked
5800 * at both parents and the old parent B would still exist.
5803 if (IS_ERR(dir_inode)) {
5804 ret = PTR_ERR(dir_inode);
5809 ctx->log_new_dentries = false;
5810 ret = btrfs_log_inode(trans, root, BTRFS_I(dir_inode),
5811 LOG_INODE_ALL, ctx);
5813 btrfs_must_commit_transaction(trans, BTRFS_I(dir_inode)))
5815 if (!ret && ctx && ctx->log_new_dentries)
5816 ret = log_new_dir_dentries(trans, root,
5817 BTRFS_I(dir_inode), ctx);
5818 btrfs_add_delayed_iput(dir_inode);
5826 btrfs_free_path(path);
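/*
 * Starting from the ref key found at @path, walk up the directory hierarchy
 * and log, with LOG_INODE_EXISTS, every ancestor inode whose generation is
 * newer than the last committed transaction, until the subvolume root is
 * reached.
 */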
5830 static int log_new_ancestors(struct btrfs_trans_handle *trans,
5831 struct btrfs_root *root,
5832 struct btrfs_path *path,
5833 struct btrfs_log_ctx *ctx)
5835 struct btrfs_key found_key;
5837 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
5840 struct btrfs_fs_info *fs_info = root->fs_info;
5841 const u64 last_committed = fs_info->last_trans_committed;
5842 struct extent_buffer *leaf = path->nodes[0];
5843 int slot = path->slots[0];
5844 struct btrfs_key search_key;
5845 struct inode *inode;
5849 btrfs_release_path(path);
5851 ino = found_key.offset;
5853 search_key.objectid = found_key.offset;
5854 search_key.type = BTRFS_INODE_ITEM_KEY;
5855 search_key.offset = 0;
5856 inode = btrfs_iget(fs_info->sb, ino, root);
5858 return PTR_ERR(inode);
5860 if (BTRFS_I(inode)->generation > last_committed)
5861 ret = btrfs_log_inode(trans, root, BTRFS_I(inode),
5862 LOG_INODE_EXISTS, ctx);
5863 btrfs_add_delayed_iput(inode);
5867 if (search_key.objectid == BTRFS_FIRST_FREE_OBJECTID)
5870 search_key.type = BTRFS_INODE_REF_KEY;
5871 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
5875 leaf = path->nodes[0];
5876 slot = path->slots[0];
5877 if (slot >= btrfs_header_nritems(leaf)) {
5878 ret = btrfs_next_leaf(root, path);
5883 leaf = path->nodes[0];
5884 slot = path->slots[0];
5887 btrfs_item_key_to_cpu(leaf, &found_key, slot);
5888 if (found_key.objectid != search_key.objectid ||
5889 found_key.type != BTRFS_INODE_REF_KEY)
5895 static int log_new_ancestors_fast(struct btrfs_trans_handle *trans,
5896 struct btrfs_inode *inode,
5897 struct dentry *parent,
5898 struct btrfs_log_ctx *ctx)
5900 struct btrfs_root *root = inode->root;
5901 struct btrfs_fs_info *fs_info = root->fs_info;
5902 struct dentry *old_parent = NULL;
5903 struct super_block *sb = inode->vfs_inode.i_sb;
5907 if (!parent || d_really_is_negative(parent) ||
5911 inode = BTRFS_I(d_inode(parent));
5912 if (root != inode->root)
5915 if (inode->generation > fs_info->last_trans_committed) {
5916 ret = btrfs_log_inode(trans, root, inode,
5917 LOG_INODE_EXISTS, ctx);
5921 if (IS_ROOT(parent))
5924 parent = dget_parent(parent);
5926 old_parent = parent;
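/*
 * Log, with LOG_INODE_EXISTS, all the new ancestor directories of the inode,
 * considering every hard link (every INODE_REF item) the inode has.
 */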
5933 static int log_all_new_ancestors(struct btrfs_trans_handle *trans,
5934 struct btrfs_inode *inode,
5935 struct dentry *parent,
5936 struct btrfs_log_ctx *ctx)
5938 struct btrfs_root *root = inode->root;
5939 const u64 ino = btrfs_ino(inode);
5940 struct btrfs_path *path;
5941 struct btrfs_key search_key;
5945 * For a single hard link case, go through a fast path that does not
5946 * need to iterate the fs/subvolume tree.
5948 if (inode->vfs_inode.i_nlink < 2)
5949 return log_new_ancestors_fast(trans, inode, parent, ctx);
5951 path = btrfs_alloc_path();
5955 search_key.objectid = ino;
5956 search_key.type = BTRFS_INODE_REF_KEY;
5957 search_key.offset = 0;
5959 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
5966 struct extent_buffer *leaf = path->nodes[0];
5967 int slot = path->slots[0];
5968 struct btrfs_key found_key;
5970 if (slot >= btrfs_header_nritems(leaf)) {
5971 ret = btrfs_next_leaf(root, path);
5979 btrfs_item_key_to_cpu(leaf, &found_key, slot);
5980 if (found_key.objectid != ino ||
5981 found_key.type > BTRFS_INODE_EXTREF_KEY)
5985 * Don't deal with extended references because they are rare
5986 * cases and too complex to deal with (we would need to keep
5987 * track of which subitem we are processing for each item in
5988 * this loop, etc). So just return some error to fall back to
5989 * a transaction commit.
5991 if (found_key.type == BTRFS_INODE_EXTREF_KEY) {
5997 * Logging ancestors needs to do more searches on the fs/subvol
5998 * tree, so it releases the path as needed to avoid deadlocks.
5999 * Keep track of the last inode ref key and resume from that key
6000 * after logging all new ancestors for the current hard link.
6002 memcpy(&search_key, &found_key, sizeof(search_key));
6004 ret = log_new_ancestors(trans, root, path, ctx);
6007 btrfs_release_path(path);
6012 btrfs_free_path(path);
6017 * helper function around btrfs_log_inode to make sure newly created
6018 * parent directories also end up in the log. A minimal inode and backref
6019 * only logging is done for any parent directories that are newer than
6020 * the last committed transaction.
6022 static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
6023 struct btrfs_inode *inode,
6024 struct dentry *parent,
6026 struct btrfs_log_ctx *ctx)
6028 struct btrfs_root *root = inode->root;
6029 struct btrfs_fs_info *fs_info = root->fs_info;
6030 struct super_block *sb;
6032 u64 last_committed = fs_info->last_trans_committed;
6033 bool log_dentries = false;
6035 sb = inode->vfs_inode.i_sb;
6037 if (btrfs_test_opt(fs_info, NOTREELOG)) {
6043 * If the previous transaction commit didn't complete, we have to do a
6044 * full commit ourselves.
6046 if (fs_info->last_trans_log_full_commit >
6047 fs_info->last_trans_committed) {
6052 if (btrfs_root_refs(&root->root_item) == 0) {
6057 ret = check_parent_dirs_for_sync(trans, inode, parent, sb,
6063 * Skip already logged inodes or inodes corresponding to tmpfiles
6064 * (since logging them is pointless, a link count of 0 means they
6065 * will never be accessible).
6067 if (btrfs_inode_in_log(inode, trans->transid) ||
6068 inode->vfs_inode.i_nlink == 0) {
6069 ret = BTRFS_NO_LOG_SYNC;
6073 ret = start_log_trans(trans, root, ctx);
6077 ret = btrfs_log_inode(trans, root, inode, inode_only, ctx);
6082 * for a regular file, if its inode is already on disk, we don't
6083 * have to worry about the parents at all. This is because
6084 * we can use the last_unlink_trans field to record renames
6085 * and other fun in this file.
6087 if (S_ISREG(inode->vfs_inode.i_mode) &&
6088 inode->generation <= last_committed &&
6089 inode->last_unlink_trans <= last_committed) {
6094 if (S_ISDIR(inode->vfs_inode.i_mode) && ctx && ctx->log_new_dentries)
6095 log_dentries = true;
6098 * On unlink we must make sure all our current and old parent directory
6099 * inodes are fully logged. This is to prevent leaving dangling
6100 * directory index entries in directories that were our parents but are
6101 * not anymore. Not doing this results in the old parent directory being
6102 * impossible to delete after log replay (rmdir will always fail with
6103 * error -ENOTEMPTY).
6109 * ln testdir/foo testdir/bar
6111 * unlink testdir/bar
6112 * xfs_io -c fsync testdir/foo
6114 * mount fs, triggers log replay
6116 * If we don't log the parent directory (testdir), after log replay the
6117 * directory still has an entry pointing to the file inode using the bar
6118 * name, but a matching BTRFS_INODE_[REF|EXTREF]_KEY does not exist and
6119 * the file inode has a link count of 1.
6125 * ln foo testdir/foo2
6126 * ln foo testdir/foo3
6128 * unlink testdir/foo3
6129 * xfs_io -c fsync foo
6131 * mount fs, triggers log replay
6133 * Similar to the first example, after log replay the parent directory
6134 * testdir still has an entry pointing to the inode file with name foo3
6135 * but the file inode does not have a matching BTRFS_INODE_REF_KEY item
6136 * and has a link count of 2.
6138 if (inode->last_unlink_trans > last_committed) {
6139 ret = btrfs_log_all_parents(trans, inode, ctx);
6144 ret = log_all_new_ancestors(trans, inode, parent, ctx);
6149 ret = log_new_dir_dentries(trans, root, inode, ctx);
6154 btrfs_set_log_full_commit(trans);
6159 btrfs_remove_log_ctx(root, ctx);
6160 btrfs_end_log_trans(root);
6166 * it is not safe to log a dentry if the chunk root has added new
6167 * chunks. This returns 0 if the dentry was logged, and 1 otherwise. If this
6168 * returns 1, you must commit the transaction to safely get your data on disk.
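 *
 * A simplified sketch of typical usage from the fsync path (modeled on
 * btrfs_sync_file(), with error handling and transaction setup omitted):
 *
 *	ret = btrfs_log_dentry_safe(trans, dentry, &ctx);
 *	if (ret == 0)
 *		ret = btrfs_sync_log(trans, root, &ctx);
 *	else if (ret == BTRFS_NO_LOG_SYNC)
 *		ret = btrfs_end_transaction(trans);
 *	else
 *		ret = btrfs_commit_transaction(trans);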
6171 int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
6172 struct dentry *dentry,
6173 struct btrfs_log_ctx *ctx)
6175 struct dentry *parent = dget_parent(dentry);
6178 ret = btrfs_log_inode_parent(trans, BTRFS_I(d_inode(dentry)), parent,
6179 LOG_INODE_ALL, ctx);
6186 * should be called during mount to recover and replay any log trees
6189 int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
6192 struct btrfs_path *path;
6193 struct btrfs_trans_handle *trans;
6194 struct btrfs_key key;
6195 struct btrfs_key found_key;
6196 struct btrfs_root *log;
6197 struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
6198 struct walk_control wc = {
6199 .process_func = process_one_buffer,
6200 .stage = LOG_WALK_PIN_ONLY,
6203 path = btrfs_alloc_path();
6207 set_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
6209 trans = btrfs_start_transaction(fs_info->tree_root, 0);
6210 if (IS_ERR(trans)) {
6211 ret = PTR_ERR(trans);
6218 ret = walk_log_tree(trans, log_root_tree, &wc);
6220 btrfs_handle_fs_error(fs_info, ret,
6221 "Failed to pin buffers while recovering log root tree.");
6226 key.objectid = BTRFS_TREE_LOG_OBJECTID;
6227 key.offset = (u64)-1;
6228 key.type = BTRFS_ROOT_ITEM_KEY;
6231 ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);
6234 btrfs_handle_fs_error(fs_info, ret,
6235 "Couldn't find tree log root.");
6239 if (path->slots[0] == 0)
6243 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
6245 btrfs_release_path(path);
6246 if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
6249 log = btrfs_read_tree_root(log_root_tree, &found_key);
6252 btrfs_handle_fs_error(fs_info, ret,
6253 "Couldn't read tree log root.");
6257 wc.replay_dest = btrfs_get_fs_root(fs_info, found_key.offset,
6259 if (IS_ERR(wc.replay_dest)) {
6260 ret = PTR_ERR(wc.replay_dest);
6263 * We didn't find the subvol, likely because it was
6264 * deleted. This is ok, simply skip this log and go to the next one.
6267 * We need to exclude the root because we can't have
6268 * other log replays overwriting this log as we'll read
6269 * it back in a few more times. This will keep our
6270 * block from being modified, and we'll just bail for
6271 * each subsequent pass.
6274 ret = btrfs_pin_extent_for_log_replay(trans,
6277 btrfs_put_root(log);
6281 btrfs_handle_fs_error(fs_info, ret,
6282 "Couldn't read target root for tree log recovery.");
6286 wc.replay_dest->log_root = log;
6287 btrfs_record_root_in_trans(trans, wc.replay_dest);
6288 ret = walk_log_tree(trans, log, &wc);
6290 if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
6291 ret = fixup_inode_link_counts(trans, wc.replay_dest,
6295 if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
6296 struct btrfs_root *root = wc.replay_dest;
6298 btrfs_release_path(path);
6301 * We have just replayed everything, and the highest
6302 * objectid of fs roots probably has changed in case
6303 * some inode_items got replayed.
6305 * root->objectid_mutex is not acquired as log replay
6306 * could only happen during mount.
6308 ret = btrfs_find_highest_objectid(root,
6309 &root->highest_objectid);
6312 wc.replay_dest->log_root = NULL;
6313 btrfs_put_root(wc.replay_dest);
6314 btrfs_put_root(log);
6319 if (found_key.offset == 0)
6321 key.offset = found_key.offset - 1;
6323 btrfs_release_path(path);
6325 /* step one is to pin it all, step two is to replay just inodes */
6328 wc.process_func = replay_one_buffer;
6329 wc.stage = LOG_WALK_REPLAY_INODES;
6332 /* step three is to replay everything */
6333 if (wc.stage < LOG_WALK_REPLAY_ALL) {
6338 btrfs_free_path(path);
6340 /* step four is to commit the transaction, which also unpins the blocks */
6341 ret = btrfs_commit_transaction(trans);
6345 log_root_tree->log_root = NULL;
6346 clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
6347 btrfs_put_root(log_root_tree);
6352 btrfs_end_transaction(wc.trans);
6353 btrfs_free_path(path);
6358 * there are some corner cases where we want to force a full
6359 * commit instead of allowing a directory to be logged.
6361 * They revolve around files that were unlinked from the directory, and
6362 * this function updates the parent directory so that a full commit is
6363 * properly done if it is fsync'd later after the unlinks are done.
6365 * Must be called before the unlink operations (updates to the subvolume tree,
6366 * inodes, etc) are done.
6368 void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
6369 struct btrfs_inode *dir, struct btrfs_inode *inode,
6373 * when we're logging a file, if it hasn't been renamed
6374 * or unlinked, and its inode is fully committed on disk,
6375 * we don't have to worry about walking up the directory chain
6376 * to log its parents.
6378 * So, we use the last_unlink_trans field to put this transid
6379 * into the file. When the file is logged we check it and
6380 * don't log the parents if the file is fully on disk.
6382 mutex_lock(&inode->log_mutex);
6383 inode->last_unlink_trans = trans->transid;
6384 mutex_unlock(&inode->log_mutex);
6387 * if this directory was already logged, any new
6388 * names for this file/dir will get recorded
6390 if (dir->logged_trans == trans->transid)
6394 * if the inode we're about to unlink was logged,
6395 * the log will be properly updated for any new names
6397 if (inode->logged_trans == trans->transid)
6401 * when renaming files across directories, if the directory
6402 * we're unlinking from gets fsync'd later on, there's
6403 * no way to find the destination directory later and fsync it
6404 * properly. So, we have to be conservative and force commits
6405 * so the new name gets discovered.
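 *
 * An illustrative sequence (a sketch):
 *
 * mv /mnt/dir_a/foo /mnt/dir_b/foo
 * xfs_io -c fsync /mnt/dir_a
 * <power fail>
 *
 * Without forcing a commit, replaying the log of dir_a could leave the
 * new name in dir_b unrecoverable.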
6410 /* we can safely do the unlink without any special recording */
6414 mutex_lock(&dir->log_mutex);
6415 dir->last_unlink_trans = trans->transid;
6416 mutex_unlock(&dir->log_mutex);
6420 * Make sure that if someone attempts to fsync the parent directory of a deleted
6421 * snapshot, it ends up triggering a transaction commit. This is to guarantee
6422 * that after replaying the log tree of the parent directory's root we will not
6423 * see the snapshot anymore and at log replay time we will not see any log tree
6424 * corresponding to the deleted snapshot's root, which could lead to replaying
6425 * it after replaying the log tree of the parent directory (which would replay
6426 * the snapshot delete operation).
6428 * Must be called before the actual snapshot destroy operation (updates to the
6429 * parent root and the tree of tree roots, etc) are done.
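 *
 * An illustrative sequence (a sketch):
 *
 * btrfs subvolume snapshot /mnt /mnt/snap
 * btrfs subvolume delete /mnt/snap
 * fsync /mnt
 * <power fail>
 * mount fs, trigger log replay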
6431 void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
6432 struct btrfs_inode *dir)
6434 mutex_lock(&dir->log_mutex);
6435 dir->last_unlink_trans = trans->transid;
6436 mutex_unlock(&dir->log_mutex);
6440 * Call this after adding a new name for a file and it will properly
6441 * update the log to reflect the new name.
6443 void btrfs_log_new_name(struct btrfs_trans_handle *trans,
6444 struct btrfs_inode *inode, struct btrfs_inode *old_dir,
6445 struct dentry *parent)
6447 struct btrfs_fs_info *fs_info = trans->fs_info;
6448 struct btrfs_log_ctx ctx;
6451 * this will force the logging code to walk the dentry chain
6454 if (!S_ISDIR(inode->vfs_inode.i_mode))
6455 inode->last_unlink_trans = trans->transid;
6458 * if this inode hasn't been logged and the directory we're renaming it
6459 * from hasn't been logged, we don't need to log it
6461 if (inode->logged_trans <= fs_info->last_trans_committed &&
6462 (!old_dir || old_dir->logged_trans <= fs_info->last_trans_committed))
6465 btrfs_init_log_ctx(&ctx, &inode->vfs_inode);
6466 ctx.logging_new_name = true;
6468 * We don't care about the return value. If we fail to log the new name
6469 * then we know the next attempt to sync the log will fall back to a full
6470 * transaction commit (due to a call to btrfs_set_log_full_commit()), so
6471 * we don't need to worry about getting a log committed that has an
6472 * inconsistent state after a rename operation.
6474 btrfs_log_inode_parent(trans, inode, parent, LOG_INODE_EXISTS, &ctx);