fs/btrfs/backref.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 */

#include <linux/mm.h>
#include <linux/rbtree.h>
#include <trace/events/btrfs.h>
#include "ctree.h"
#include "disk-io.h"
#include "backref.h"
#include "ulist.h"
#include "transaction.h"
#include "delayed-ref.h"
#include "locking.h"
#include "misc.h"

/* Just an arbitrary number so we can be sure this happened */
#define BACKREF_FOUND_SHARED 6

struct extent_inode_elem {
        u64 inum;
        u64 offset;
        struct extent_inode_elem *next;
};

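/*
 * Look up the inode/offset pair a file extent item refers to and prepend it
 * to the @eie list.  Returns 0 when an entry was added, 1 when
 * @extent_item_pos falls outside the range covered by @fi (nothing to add),
 * and -ENOMEM on allocation failure.
 */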
static int check_extent_in_eb(const struct btrfs_key *key,
                              const struct extent_buffer *eb,
                              const struct btrfs_file_extent_item *fi,
                              u64 extent_item_pos,
                              struct extent_inode_elem **eie,
                              bool ignore_offset)
{
        u64 offset = 0;
        struct extent_inode_elem *e;

        if (!ignore_offset &&
            !btrfs_file_extent_compression(eb, fi) &&
            !btrfs_file_extent_encryption(eb, fi) &&
            !btrfs_file_extent_other_encoding(eb, fi)) {
                u64 data_offset;
                u64 data_len;

                data_offset = btrfs_file_extent_offset(eb, fi);
                data_len = btrfs_file_extent_num_bytes(eb, fi);

                if (extent_item_pos < data_offset ||
                    extent_item_pos >= data_offset + data_len)
                        return 1;
                offset = extent_item_pos - data_offset;
        }

        e = kmalloc(sizeof(*e), GFP_NOFS);
        if (!e)
                return -ENOMEM;

        e->next = *eie;
        e->inum = key->objectid;
        e->offset = key->offset + offset;
        *eie = e;

        return 0;
}

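/* Free a whole chain of extent_inode_elem entries. */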
static void free_inode_elem_list(struct extent_inode_elem *eie)
{
        struct extent_inode_elem *eie_next;

        for (; eie; eie = eie_next) {
                eie_next = eie->next;
                kfree(eie);
        }
}

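/*
 * Walk every item in @eb and collect the inode/offset pairs of all file
 * extent items that reference @wanted_disk_byte, adding them to @eie.
 */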
static int find_extent_in_eb(const struct extent_buffer *eb,
                             u64 wanted_disk_byte, u64 extent_item_pos,
                             struct extent_inode_elem **eie,
                             bool ignore_offset)
{
        u64 disk_byte;
        struct btrfs_key key;
        struct btrfs_file_extent_item *fi;
        int slot;
        int nritems;
        int extent_type;
        int ret;

        /*
         * From the shared data ref, we only have the leaf but we need
         * the key. Thus, we must look into all items and see whether we
         * find one (some) with a reference to our extent item.
         */
        nritems = btrfs_header_nritems(eb);
        for (slot = 0; slot < nritems; ++slot) {
                btrfs_item_key_to_cpu(eb, &key, slot);
                if (key.type != BTRFS_EXTENT_DATA_KEY)
                        continue;
                fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
                extent_type = btrfs_file_extent_type(eb, fi);
                if (extent_type == BTRFS_FILE_EXTENT_INLINE)
                        continue;
                /* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
                disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
                if (disk_byte != wanted_disk_byte)
                        continue;

                ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie, ignore_offset);
                if (ret < 0)
                        return ret;
        }

        return 0;
}

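/* An rbtree of prelim_refs, merged on insert, plus a count of its entries. */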
struct preftree {
        struct rb_root_cached root;
        unsigned int count;
};

#define PREFTREE_INIT   { .root = RB_ROOT_CACHED, .count = 0 }

struct preftrees {
        struct preftree direct;    /* BTRFS_SHARED_[DATA|BLOCK]_REF_KEY */
        struct preftree indirect;  /* BTRFS_[TREE_BLOCK|EXTENT_DATA]_REF_KEY */
        struct preftree indirect_missing_keys;
};

/*
 * Checks for a shared extent during backref search.
 *
 * The share_count tracks prelim_refs (direct and indirect) having a
 * ref->count >0:
 *  - incremented when a ref->count transitions to >0
 *  - decremented when a ref->count transitions to <1
 */
struct share_check {
        u64 root_objectid;
        u64 inum;
        int share_count;
};

static inline int extent_is_shared(struct share_check *sc)
{
        return (sc && sc->share_count > 1) ? BACKREF_FOUND_SHARED : 0;
}

static struct kmem_cache *btrfs_prelim_ref_cache;

int __init btrfs_prelim_ref_init(void)
{
        btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
                                        sizeof(struct prelim_ref),
                                        0,
                                        SLAB_MEM_SPREAD,
                                        NULL);
        if (!btrfs_prelim_ref_cache)
                return -ENOMEM;
        return 0;
}

void __cold btrfs_prelim_ref_exit(void)
{
        kmem_cache_destroy(btrfs_prelim_ref_cache);
}

static void free_pref(struct prelim_ref *ref)
{
        kmem_cache_free(btrfs_prelim_ref_cache, ref);
}

/*
 * Return 0 when both refs are for the same block (and can be merged).
 * A -1 return indicates ref1 is a 'lower' block than ref2, while 1
 * indicates a 'higher' block.
 */
static int prelim_ref_compare(struct prelim_ref *ref1,
                              struct prelim_ref *ref2)
{
        if (ref1->level < ref2->level)
                return -1;
        if (ref1->level > ref2->level)
                return 1;
        if (ref1->root_id < ref2->root_id)
                return -1;
        if (ref1->root_id > ref2->root_id)
                return 1;
        if (ref1->key_for_search.type < ref2->key_for_search.type)
                return -1;
        if (ref1->key_for_search.type > ref2->key_for_search.type)
                return 1;
        if (ref1->key_for_search.objectid < ref2->key_for_search.objectid)
                return -1;
        if (ref1->key_for_search.objectid > ref2->key_for_search.objectid)
                return 1;
        if (ref1->key_for_search.offset < ref2->key_for_search.offset)
                return -1;
        if (ref1->key_for_search.offset > ref2->key_for_search.offset)
                return 1;
        if (ref1->parent < ref2->parent)
                return -1;
        if (ref1->parent > ref2->parent)
                return 1;

        return 0;
}

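/*
 * Keep sc->share_count in sync with a ref's count change from @oldcount to
 * @newcount: bump it when the count becomes positive and drop it when the
 * count falls below one.  A NULL @sc is a no-op.
 */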
static void update_share_count(struct share_check *sc, int oldcount,
                               int newcount)
{
        if ((!sc) || (oldcount == 0 && newcount < 1))
                return;

        if (oldcount > 0 && newcount < 1)
                sc->share_count--;
        else if (oldcount < 1 && newcount > 0)
                sc->share_count++;
}

/*
 * Add @newref to the @root rbtree, merging identical refs.
 *
 * Callers should assume that newref has been freed after calling.
 */
static void prelim_ref_insert(const struct btrfs_fs_info *fs_info,
                              struct preftree *preftree,
                              struct prelim_ref *newref,
                              struct share_check *sc)
{
        struct rb_root_cached *root;
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct prelim_ref *ref;
        int result;
        bool leftmost = true;

        root = &preftree->root;
        p = &root->rb_root.rb_node;

        while (*p) {
                parent = *p;
                ref = rb_entry(parent, struct prelim_ref, rbnode);
                result = prelim_ref_compare(ref, newref);
                if (result < 0) {
                        p = &(*p)->rb_left;
                } else if (result > 0) {
                        p = &(*p)->rb_right;
                        leftmost = false;
                } else {
                        /* Identical refs, merge them and free @newref */
                        struct extent_inode_elem *eie = ref->inode_list;

                        while (eie && eie->next)
                                eie = eie->next;

                        if (!eie)
                                ref->inode_list = newref->inode_list;
                        else
                                eie->next = newref->inode_list;
                        trace_btrfs_prelim_ref_merge(fs_info, ref, newref,
                                                     preftree->count);
                        /*
                         * A delayed ref can have newref->count < 0.
                         * The ref->count is updated to follow any
                         * BTRFS_[ADD|DROP]_DELAYED_REF actions.
                         */
                        update_share_count(sc, ref->count,
                                           ref->count + newref->count);
                        ref->count += newref->count;
                        free_pref(newref);
                        return;
                }
        }

        update_share_count(sc, 0, newref->count);
        preftree->count++;
        trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count);
        rb_link_node(&newref->rbnode, parent, p);
        rb_insert_color_cached(&newref->rbnode, root, leftmost);
}

/*
 * Release the entire tree.  We don't care about internal consistency so
 * just free everything and then reset the tree root.
 */
static void prelim_release(struct preftree *preftree)
{
        struct prelim_ref *ref, *next_ref;

        rbtree_postorder_for_each_entry_safe(ref, next_ref,
                                             &preftree->root.rb_root, rbnode)
                free_pref(ref);

        preftree->root = RB_ROOT_CACHED;
        preftree->count = 0;
}

/*
 * The rules for all callers of this function are:
 * - obtaining the parent is the goal
 * - if you add a key, you must know that it is a correct key
 * - if you cannot add the parent or a correct key, then we will look into the
 *   block later to set a correct key
 *
 * delayed refs
 * ============
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    -   |     -
 *      key to resolve |    -   |     y    |    y   |     y
 *  tree block logical |    -   |     -    |    -   |     -
 *  root for resolving |    y   |     y    |    y   |     y
 *
 * - column 1:       we have the parent -> done
 * - column 2, 3, 4: we use the key to find the parent
 *
 * on disk refs (inline or keyed)
 * ==============================
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    y   |     -
 *      key to resolve |    -   |     -    |    -   |     y
 *  tree block logical |    y   |     y    |    y   |     y
 *  root for resolving |    -   |     y    |    y   |     y
 *
 * - column 1, 3: we have the parent -> done
 * - column 2:    we take the first key from the block to find the parent
 *                (see add_missing_keys)
 * - column 4:    we use the key to find the parent
 *
 * Additional information that's available but not required to find the parent
 * block might help in merging entries to gain some speed.
 */
static int add_prelim_ref(const struct btrfs_fs_info *fs_info,
                          struct preftree *preftree, u64 root_id,
                          const struct btrfs_key *key, int level, u64 parent,
                          u64 wanted_disk_byte, int count,
                          struct share_check *sc, gfp_t gfp_mask)
{
        struct prelim_ref *ref;

        if (root_id == BTRFS_DATA_RELOC_TREE_OBJECTID)
                return 0;

        ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
        if (!ref)
                return -ENOMEM;

        ref->root_id = root_id;
        if (key)
                ref->key_for_search = *key;
        else
                memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));

        ref->inode_list = NULL;
        ref->level = level;
        ref->count = count;
        ref->parent = parent;
        ref->wanted_disk_byte = wanted_disk_byte;
        prelim_ref_insert(fs_info, preftree, ref, sc);
        return extent_is_shared(sc);
}

/* direct refs use root == 0, key == NULL */
static int add_direct_ref(const struct btrfs_fs_info *fs_info,
                          struct preftrees *preftrees, int level, u64 parent,
                          u64 wanted_disk_byte, int count,
                          struct share_check *sc, gfp_t gfp_mask)
{
        return add_prelim_ref(fs_info, &preftrees->direct, 0, NULL, level,
                              parent, wanted_disk_byte, count, sc, gfp_mask);
}

/* indirect refs use parent == 0 */
static int add_indirect_ref(const struct btrfs_fs_info *fs_info,
                            struct preftrees *preftrees, u64 root_id,
                            const struct btrfs_key *key, int level,
                            u64 wanted_disk_byte, int count,
                            struct share_check *sc, gfp_t gfp_mask)
{
        struct preftree *tree = &preftrees->indirect;

        if (!key)
                tree = &preftrees->indirect_missing_keys;
        return add_prelim_ref(fs_info, tree, root_id, key, level, 0,
                              wanted_disk_byte, count, sc, gfp_mask);
}

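/*
 * Return 1 if the direct preftree contains a shared data backref with
 * @bytenr as its parent, 0 otherwise.  Direct data refs are stored with
 * root_id == 0, a zeroed key and level 0, so comparing against a target
 * that only has ->parent set reduces to a lookup by parent.
 */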
static int is_shared_data_backref(struct preftrees *preftrees, u64 bytenr)
{
        struct rb_node **p = &preftrees->direct.root.rb_root.rb_node;
        struct rb_node *parent = NULL;
        struct prelim_ref *ref = NULL;
        struct prelim_ref target = {};
        int result;

        target.parent = bytenr;

        while (*p) {
                parent = *p;
                ref = rb_entry(parent, struct prelim_ref, rbnode);
                result = prelim_ref_compare(ref, &target);

                if (result < 0)
                        p = &(*p)->rb_left;
                else if (result > 0)
                        p = &(*p)->rb_right;
                else
                        return 1;
        }
        return 0;
}

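/*
 * Walk the file extent items that @path points at (and any following ones
 * that still match ref->key_for_search) and add the leaves referencing
 * ref->wanted_disk_byte to @parents, until ref->count matching items have
 * been found.  For an interior node (level != 0) the block at @path is the
 * single parent.
 */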
static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
                           struct ulist *parents,
                           struct preftrees *preftrees, struct prelim_ref *ref,
                           int level, u64 time_seq, const u64 *extent_item_pos,
                           bool ignore_offset)
{
        int ret = 0;
        int slot;
        struct extent_buffer *eb;
        struct btrfs_key key;
        struct btrfs_key *key_for_search = &ref->key_for_search;
        struct btrfs_file_extent_item *fi;
        struct extent_inode_elem *eie = NULL, *old = NULL;
        u64 disk_byte;
        u64 wanted_disk_byte = ref->wanted_disk_byte;
        u64 count = 0;
        u64 data_offset;

        if (level != 0) {
                eb = path->nodes[level];
                ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
                if (ret < 0)
                        return ret;
                return 0;
        }

        /*
         * 1. We normally enter this function with the path already pointing
         *    to the first item to check. But sometimes, we may enter it with
         *    slot == nritems.
         * 2. We are searching for a normal backref but the bytenr of this
         *    leaf matches a shared data backref.
         * 3. The leaf owner is not equal to the root we are searching for.
         *
         * For these cases, go to the next leaf before we continue.
         */
        eb = path->nodes[0];
        if (path->slots[0] >= btrfs_header_nritems(eb) ||
            is_shared_data_backref(preftrees, eb->start) ||
            ref->root_id != btrfs_header_owner(eb)) {
                if (time_seq == SEQ_LAST)
                        ret = btrfs_next_leaf(root, path);
                else
                        ret = btrfs_next_old_leaf(root, path, time_seq);
        }

        while (!ret && count < ref->count) {
                eb = path->nodes[0];
                slot = path->slots[0];

                btrfs_item_key_to_cpu(eb, &key, slot);

                if (key.objectid != key_for_search->objectid ||
                    key.type != BTRFS_EXTENT_DATA_KEY)
                        break;

                /*
                 * We are searching for a normal backref but the bytenr of
                 * this leaf matches a shared data backref, OR the leaf owner
                 * is not equal to the root we are searching for.
                 */
                if (slot == 0 &&
                    (is_shared_data_backref(preftrees, eb->start) ||
                     ref->root_id != btrfs_header_owner(eb))) {
                        if (time_seq == SEQ_LAST)
                                ret = btrfs_next_leaf(root, path);
                        else
                                ret = btrfs_next_old_leaf(root, path, time_seq);
                        continue;
                }
                fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
                disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
                data_offset = btrfs_file_extent_offset(eb, fi);

                if (disk_byte == wanted_disk_byte) {
                        eie = NULL;
                        old = NULL;
                        if (ref->key_for_search.offset == key.offset - data_offset)
                                count++;
                        else
                                goto next;
                        if (extent_item_pos) {
                                ret = check_extent_in_eb(&key, eb, fi,
                                                *extent_item_pos,
                                                &eie, ignore_offset);
                                if (ret < 0)
                                        break;
                        }
                        if (ret > 0)
                                goto next;
                        ret = ulist_add_merge_ptr(parents, eb->start,
                                                  eie, (void **)&old, GFP_NOFS);
                        if (ret < 0)
                                break;
                        if (!ret && extent_item_pos) {
                                while (old->next)
                                        old = old->next;
                                old->next = eie;
                        }
                        eie = NULL;
                }
next:
                if (time_seq == SEQ_LAST)
                        ret = btrfs_next_item(root, path);
                else
                        ret = btrfs_next_old_item(root, path, time_seq);
        }

        if (ret > 0)
                ret = 0;
        else if (ret < 0)
                free_inode_elem_list(eie);
        return ret;
}

/*
 * resolve an indirect backref in the form (root_id, key, level)
 * to a logical address
 */
static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
                                struct btrfs_path *path, u64 time_seq,
                                struct preftrees *preftrees,
                                struct prelim_ref *ref, struct ulist *parents,
                                const u64 *extent_item_pos, bool ignore_offset)
{
        struct btrfs_root *root;
        struct extent_buffer *eb;
        int ret = 0;
        int root_level;
        int level = ref->level;
        struct btrfs_key search_key = ref->key_for_search;

        /*
         * If we're search_commit_root we could possibly be holding locks on
         * other tree nodes.  This happens when qgroups do backref walks when
         * adding new delayed refs.  To deal with this we need to look in cache
         * for the root, and if we don't find it then we need to search the
         * tree_root's commit root, thus the btrfs_get_fs_root_commit_root usage
         * here.
         */
        if (path->search_commit_root)
                root = btrfs_get_fs_root_commit_root(fs_info, path, ref->root_id);
        else
                root = btrfs_get_fs_root(fs_info, ref->root_id, false);
        if (IS_ERR(root)) {
                ret = PTR_ERR(root);
                goto out_free;
        }

        if (!path->search_commit_root &&
            test_bit(BTRFS_ROOT_DELETING, &root->state)) {
                ret = -ENOENT;
                goto out;
        }

        if (btrfs_is_testing(fs_info)) {
                ret = -ENOENT;
                goto out;
        }

        if (path->search_commit_root)
                root_level = btrfs_header_level(root->commit_root);
        else if (time_seq == SEQ_LAST)
                root_level = btrfs_header_level(root->node);
        else
                root_level = btrfs_old_root_level(root, time_seq);

        if (root_level + 1 == level)
                goto out;

        /*
         * We can often find data backrefs with an offset that is too large
         * (>= LLONG_MAX, maximum allowed file offset) due to underflows when
         * subtracting a file's offset with the data offset of its
         * corresponding extent data item. This can happen for example in the
         * clone ioctl.
         *
         * So if we detect such a case we set the search key's offset to zero
         * to make sure we will find the matching file extent item at
         * add_all_parents(), otherwise we will miss it because the offset
         * taken from the backref is much larger than the offset of the file
         * extent item. This can make us scan a very large number of file
         * extent items, but at least it will not make us miss any.
         *
         * This is an ugly workaround for a behaviour that should have never
         * existed, but it does and a fix for the clone ioctl would touch a lot
         * of places, cause backwards incompatibility and would not fix the
         * problem for extents cloned with older kernels.
         */
        if (search_key.type == BTRFS_EXTENT_DATA_KEY &&
            search_key.offset >= LLONG_MAX)
                search_key.offset = 0;
        path->lowest_level = level;
        if (time_seq == SEQ_LAST)
                ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
        else
                ret = btrfs_search_old_slot(root, &search_key, path, time_seq);

        btrfs_debug(fs_info,
                "search slot in root %llu (level %d, ref count %d) returned %d for key (%llu %u %llu)",
                 ref->root_id, level, ref->count, ret,
                 ref->key_for_search.objectid, ref->key_for_search.type,
                 ref->key_for_search.offset);
        if (ret < 0)
                goto out;

        eb = path->nodes[level];
        while (!eb) {
                if (WARN_ON(!level)) {
                        ret = 1;
                        goto out;
                }
                level--;
                eb = path->nodes[level];
        }

        ret = add_all_parents(root, path, parents, preftrees, ref, level,
                              time_seq, extent_item_pos, ignore_offset);
out:
        btrfs_put_root(root);
out_free:
        path->lowest_level = 0;
        btrfs_release_path(path);
        return ret;
}

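/* Recover the inode list stashed in a ulist node's aux field, if any. */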
static struct extent_inode_elem *
unode_aux_to_inode_list(struct ulist_node *node)
{
        if (!node)
                return NULL;
        return (struct extent_inode_elem *)(uintptr_t)node->aux;
}

/*
 * We maintain three separate rbtrees: one for direct refs, one for
 * indirect refs which have a key, and one for indirect refs which do not
 * have a key. Each tree does merge on insertion.
 *
 * Once all of the references are located, we iterate over the tree of
 * indirect refs with missing keys. An appropriate key is located and
 * the ref is moved onto the tree for indirect refs. After all missing
 * keys are thus located, we iterate over the indirect ref tree, resolve
 * each reference, and then insert the resolved reference onto the
 * direct tree (merging there too).
 *
 * New backrefs (i.e., for parent nodes) are added to the appropriate
 * rbtree as they are encountered. The new backrefs are subsequently
 * resolved as above.
 */
static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
                                 struct btrfs_path *path, u64 time_seq,
                                 struct preftrees *preftrees,
                                 const u64 *extent_item_pos,
                                 struct share_check *sc, bool ignore_offset)
{
        int err;
        int ret = 0;
        struct ulist *parents;
        struct ulist_node *node;
        struct ulist_iterator uiter;
        struct rb_node *rnode;

        parents = ulist_alloc(GFP_NOFS);
        if (!parents)
                return -ENOMEM;

        /*
         * We could trade memory usage for performance here by iterating
         * the tree, allocating new refs for each insertion, and then
         * freeing the entire indirect tree when we're done.  In some test
         * cases, the tree can grow quite large (~200k objects).
         */
        while ((rnode = rb_first_cached(&preftrees->indirect.root))) {
                struct prelim_ref *ref;

                ref = rb_entry(rnode, struct prelim_ref, rbnode);
                if (WARN(ref->parent,
                         "BUG: direct ref found in indirect tree")) {
                        ret = -EINVAL;
                        goto out;
                }

                rb_erase_cached(&ref->rbnode, &preftrees->indirect.root);
                preftrees->indirect.count--;

                if (ref->count == 0) {
                        free_pref(ref);
                        continue;
                }

                if (sc && sc->root_objectid &&
                    ref->root_id != sc->root_objectid) {
                        free_pref(ref);
                        ret = BACKREF_FOUND_SHARED;
                        goto out;
                }
                err = resolve_indirect_ref(fs_info, path, time_seq, preftrees,
                                           ref, parents, extent_item_pos,
                                           ignore_offset);
                /*
                 * We can only tolerate -ENOENT; otherwise we should catch
                 * the error and return directly.
                 */
                if (err == -ENOENT) {
                        prelim_ref_insert(fs_info, &preftrees->direct, ref,
                                          NULL);
                        continue;
                } else if (err) {
                        free_pref(ref);
                        ret = err;
                        goto out;
                }

                /* we put the first parent into the ref at hand */
                ULIST_ITER_INIT(&uiter);
                node = ulist_next(parents, &uiter);
                ref->parent = node ? node->val : 0;
                ref->inode_list = unode_aux_to_inode_list(node);

                /* Add a prelim_ref(s) for any other parent(s). */
                while ((node = ulist_next(parents, &uiter))) {
                        struct prelim_ref *new_ref;

                        new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache,
                                                   GFP_NOFS);
                        if (!new_ref) {
                                free_pref(ref);
                                ret = -ENOMEM;
                                goto out;
                        }
                        memcpy(new_ref, ref, sizeof(*ref));
                        new_ref->parent = node->val;
                        new_ref->inode_list = unode_aux_to_inode_list(node);
                        prelim_ref_insert(fs_info, &preftrees->direct,
                                          new_ref, NULL);
                }

                /*
                 * Now it's a direct ref, put it in the direct tree. We must
                 * do this last because the ref could be merged/freed here.
                 */
                prelim_ref_insert(fs_info, &preftrees->direct, ref, NULL);

                ulist_reinit(parents);
                cond_resched();
        }
out:
        ulist_free(parents);
        return ret;
}

/*
 * read tree blocks and add keys where required.
 */
static int add_missing_keys(struct btrfs_fs_info *fs_info,
                            struct preftrees *preftrees, bool lock)
{
        struct prelim_ref *ref;
        struct extent_buffer *eb;
        struct preftree *tree = &preftrees->indirect_missing_keys;
        struct rb_node *node;

        while ((node = rb_first_cached(&tree->root))) {
                ref = rb_entry(node, struct prelim_ref, rbnode);
                rb_erase_cached(node, &tree->root);

                BUG_ON(ref->parent);    /* should not be a direct ref */
                BUG_ON(ref->key_for_search.type);
                BUG_ON(!ref->wanted_disk_byte);

                eb = read_tree_block(fs_info, ref->wanted_disk_byte,
                                     ref->root_id, 0, ref->level - 1, NULL);
                if (IS_ERR(eb)) {
                        free_pref(ref);
                        return PTR_ERR(eb);
                } else if (!extent_buffer_uptodate(eb)) {
                        free_pref(ref);
                        free_extent_buffer(eb);
                        return -EIO;
                }
                if (lock)
                        btrfs_tree_read_lock(eb);
                if (btrfs_header_level(eb) == 0)
                        btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
                else
                        btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
                if (lock)
                        btrfs_tree_read_unlock(eb);
                free_extent_buffer(eb);
                prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL);
                cond_resched();
        }
        return 0;
}

/*
 * add all currently queued delayed refs from this head whose seq nr is
 * smaller than or equal to @seq to the preftrees
 */
static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
                            struct btrfs_delayed_ref_head *head, u64 seq,
                            struct preftrees *preftrees, struct share_check *sc)
{
        struct btrfs_delayed_ref_node *node;
        struct btrfs_delayed_extent_op *extent_op = head->extent_op;
        struct btrfs_key key;
        struct btrfs_key tmp_op_key;
        struct rb_node *n;
        int count;
        int ret = 0;

        if (extent_op && extent_op->update_key)
                btrfs_disk_key_to_cpu(&tmp_op_key, &extent_op->key);

        spin_lock(&head->lock);
        for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) {
                node = rb_entry(n, struct btrfs_delayed_ref_node,
                                ref_node);
                if (node->seq > seq)
                        continue;

                switch (node->action) {
                case BTRFS_ADD_DELAYED_EXTENT:
                case BTRFS_UPDATE_DELAYED_HEAD:
                        WARN_ON(1);
                        continue;
                case BTRFS_ADD_DELAYED_REF:
                        count = node->ref_mod;
                        break;
                case BTRFS_DROP_DELAYED_REF:
                        count = node->ref_mod * -1;
                        break;
                default:
                        BUG();
                }
                switch (node->type) {
                case BTRFS_TREE_BLOCK_REF_KEY: {
                        /* NORMAL INDIRECT METADATA backref */
                        struct btrfs_delayed_tree_ref *ref;

                        ref = btrfs_delayed_node_to_tree_ref(node);
                        ret = add_indirect_ref(fs_info, preftrees, ref->root,
                                               &tmp_op_key, ref->level + 1,
                                               node->bytenr, count, sc,
                                               GFP_ATOMIC);
                        break;
                }
                case BTRFS_SHARED_BLOCK_REF_KEY: {
                        /* SHARED DIRECT METADATA backref */
                        struct btrfs_delayed_tree_ref *ref;

                        ref = btrfs_delayed_node_to_tree_ref(node);

                        ret = add_direct_ref(fs_info, preftrees, ref->level + 1,
                                             ref->parent, node->bytenr, count,
                                             sc, GFP_ATOMIC);
                        break;
                }
                case BTRFS_EXTENT_DATA_REF_KEY: {
                        /* NORMAL INDIRECT DATA backref */
                        struct btrfs_delayed_data_ref *ref;
                        ref = btrfs_delayed_node_to_data_ref(node);

                        key.objectid = ref->objectid;
                        key.type = BTRFS_EXTENT_DATA_KEY;
                        key.offset = ref->offset;

                        /*
                         * Found an inum that doesn't match our known inum, we
                         * know it's shared.
                         */
                        if (sc && sc->inum && ref->objectid != sc->inum) {
                                ret = BACKREF_FOUND_SHARED;
                                goto out;
                        }

                        ret = add_indirect_ref(fs_info, preftrees, ref->root,
                                               &key, 0, node->bytenr, count, sc,
                                               GFP_ATOMIC);
                        break;
                }
                case BTRFS_SHARED_DATA_REF_KEY: {
                        /* SHARED DIRECT FULL backref */
                        struct btrfs_delayed_data_ref *ref;

                        ref = btrfs_delayed_node_to_data_ref(node);

                        ret = add_direct_ref(fs_info, preftrees, 0, ref->parent,
                                             node->bytenr, count, sc,
                                             GFP_ATOMIC);
                        break;
                }
                default:
                        WARN_ON(1);
                }
                /*
                 * We must ignore BACKREF_FOUND_SHARED until all delayed
                 * refs have been checked.
                 */
                if (ret && (ret != BACKREF_FOUND_SHARED))
                        break;
        }
        if (!ret)
                ret = extent_is_shared(sc);
out:
        spin_unlock(&head->lock);
        return ret;
}

/*
 * add all inline backrefs for bytenr to the list
 *
 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
 */
static int add_inline_refs(const struct btrfs_fs_info *fs_info,
                           struct btrfs_path *path, u64 bytenr,
                           int *info_level, struct preftrees *preftrees,
                           struct share_check *sc)
{
        int ret = 0;
        int slot;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_key found_key;
        unsigned long ptr;
        unsigned long end;
        struct btrfs_extent_item *ei;
        u64 flags;
        u64 item_size;

        /*
         * enumerate all inline refs
         */
        leaf = path->nodes[0];
        slot = path->slots[0];

        item_size = btrfs_item_size_nr(leaf, slot);
        BUG_ON(item_size < sizeof(*ei));

        ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
        flags = btrfs_extent_flags(leaf, ei);
        btrfs_item_key_to_cpu(leaf, &found_key, slot);

        ptr = (unsigned long)(ei + 1);
        end = (unsigned long)ei + item_size;

        if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
            flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
                struct btrfs_tree_block_info *info;

                info = (struct btrfs_tree_block_info *)ptr;
                *info_level = btrfs_tree_block_level(leaf, info);
                ptr += sizeof(struct btrfs_tree_block_info);
                BUG_ON(ptr > end);
        } else if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
                *info_level = found_key.offset;
        } else {
                BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
        }

        while (ptr < end) {
                struct btrfs_extent_inline_ref *iref;
                u64 offset;
                int type;

                iref = (struct btrfs_extent_inline_ref *)ptr;
                type = btrfs_get_extent_inline_ref_type(leaf, iref,
                                                        BTRFS_REF_TYPE_ANY);
                if (type == BTRFS_REF_TYPE_INVALID)
                        return -EUCLEAN;

                offset = btrfs_extent_inline_ref_offset(leaf, iref);

                switch (type) {
                case BTRFS_SHARED_BLOCK_REF_KEY:
                        ret = add_direct_ref(fs_info, preftrees,
                                             *info_level + 1, offset,
                                             bytenr, 1, NULL, GFP_NOFS);
                        break;
                case BTRFS_SHARED_DATA_REF_KEY: {
                        struct btrfs_shared_data_ref *sdref;
                        int count;

                        sdref = (struct btrfs_shared_data_ref *)(iref + 1);
                        count = btrfs_shared_data_ref_count(leaf, sdref);

                        ret = add_direct_ref(fs_info, preftrees, 0, offset,
                                             bytenr, count, sc, GFP_NOFS);
                        break;
                }
                case BTRFS_TREE_BLOCK_REF_KEY:
                        ret = add_indirect_ref(fs_info, preftrees, offset,
                                               NULL, *info_level + 1,
                                               bytenr, 1, NULL, GFP_NOFS);
                        break;
                case BTRFS_EXTENT_DATA_REF_KEY: {
                        struct btrfs_extent_data_ref *dref;
                        int count;
                        u64 root;

                        dref = (struct btrfs_extent_data_ref *)(&iref->offset);
                        count = btrfs_extent_data_ref_count(leaf, dref);
                        key.objectid = btrfs_extent_data_ref_objectid(leaf,
                                                                      dref);
                        key.type = BTRFS_EXTENT_DATA_KEY;
                        key.offset = btrfs_extent_data_ref_offset(leaf, dref);

                        if (sc && sc->inum && key.objectid != sc->inum) {
                                ret = BACKREF_FOUND_SHARED;
                                break;
                        }

                        root = btrfs_extent_data_ref_root(leaf, dref);

                        ret = add_indirect_ref(fs_info, preftrees, root,
                                               &key, 0, bytenr, count,
                                               sc, GFP_NOFS);
                        break;
                }
                default:
                        WARN_ON(1);
                }
                if (ret)
                        return ret;
                ptr += btrfs_extent_inline_ref_size(type);
        }

        return 0;
}

/*
 * add all non-inline backrefs for bytenr to the list
 *
 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
 */
static int add_keyed_refs(struct btrfs_fs_info *fs_info,
                          struct btrfs_path *path, u64 bytenr,
                          int info_level, struct preftrees *preftrees,
                          struct share_check *sc)
{
        struct btrfs_root *extent_root = fs_info->extent_root;
        int ret;
        int slot;
        struct extent_buffer *leaf;
        struct btrfs_key key;

        while (1) {
                ret = btrfs_next_item(extent_root, path);
                if (ret < 0)
                        break;
                if (ret) {
                        ret = 0;
                        break;
                }

                slot = path->slots[0];
                leaf = path->nodes[0];
                btrfs_item_key_to_cpu(leaf, &key, slot);

                if (key.objectid != bytenr)
                        break;
                if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
                        continue;
                if (key.type > BTRFS_SHARED_DATA_REF_KEY)
                        break;

                switch (key.type) {
                case BTRFS_SHARED_BLOCK_REF_KEY:
                        /* SHARED DIRECT METADATA backref */
                        ret = add_direct_ref(fs_info, preftrees,
                                             info_level + 1, key.offset,
                                             bytenr, 1, NULL, GFP_NOFS);
                        break;
                case BTRFS_SHARED_DATA_REF_KEY: {
                        /* SHARED DIRECT FULL backref */
                        struct btrfs_shared_data_ref *sdref;
                        int count;

                        sdref = btrfs_item_ptr(leaf, slot,
                                              struct btrfs_shared_data_ref);
                        count = btrfs_shared_data_ref_count(leaf, sdref);
                        ret = add_direct_ref(fs_info, preftrees, 0,
                                             key.offset, bytenr, count,
                                             sc, GFP_NOFS);
                        break;
                }
                case BTRFS_TREE_BLOCK_REF_KEY:
                        /* NORMAL INDIRECT METADATA backref */
                        ret = add_indirect_ref(fs_info, preftrees, key.offset,
                                               NULL, info_level + 1, bytenr,
                                               1, NULL, GFP_NOFS);
                        break;
                case BTRFS_EXTENT_DATA_REF_KEY: {
                        /* NORMAL INDIRECT DATA backref */
                        struct btrfs_extent_data_ref *dref;
                        int count;
                        u64 root;

                        dref = btrfs_item_ptr(leaf, slot,
                                              struct btrfs_extent_data_ref);
                        count = btrfs_extent_data_ref_count(leaf, dref);
                        key.objectid = btrfs_extent_data_ref_objectid(leaf,
                                                                      dref);
                        key.type = BTRFS_EXTENT_DATA_KEY;
                        key.offset = btrfs_extent_data_ref_offset(leaf, dref);

                        if (sc && sc->inum && key.objectid != sc->inum) {
                                ret = BACKREF_FOUND_SHARED;
                                break;
                        }

                        root = btrfs_extent_data_ref_root(leaf, dref);
                        ret = add_indirect_ref(fs_info, preftrees, root,
                                               &key, 0, bytenr, count,
                                               sc, GFP_NOFS);
                        break;
                }
                default:
                        WARN_ON(1);
                }
                if (ret)
                        return ret;

        }

        return ret;
}

/*
 * This adds all existing backrefs (inline backrefs, backrefs and delayed
 * refs) for the given bytenr to the refs list, merges duplicates and resolves
 * indirect refs to their parent bytenr.
 * When roots are found, they're added to the roots list.
 *
 * If time_seq is set to SEQ_LAST, it will not search delayed_refs, and
 * behaves much like the trans == NULL case; the only difference is that it
 * will not use the commit root.
 * This special case exists for qgroup to search roots in
 * commit_transaction().
 *
 * @sc - if !NULL, then immediately return BACKREF_FOUND_SHARED when a
 * shared extent is detected.
 *
 * Otherwise this returns 0 for success and <0 for an error.
 *
 * If ignore_offset is set to false, only extent refs whose offsets match
 * extent_item_pos are returned.  If true, every extent ref is returned
 * and extent_item_pos is ignored.
 *
 * FIXME some caching might speed things up
 */
static int find_parent_nodes(struct btrfs_trans_handle *trans,
                             struct btrfs_fs_info *fs_info, u64 bytenr,
                             u64 time_seq, struct ulist *refs,
                             struct ulist *roots, const u64 *extent_item_pos,
                             struct share_check *sc, bool ignore_offset)
{
        struct btrfs_key key;
        struct btrfs_path *path;
        struct btrfs_delayed_ref_root *delayed_refs = NULL;
        struct btrfs_delayed_ref_head *head;
        int info_level = 0;
        int ret;
        struct prelim_ref *ref;
        struct rb_node *node;
        struct extent_inode_elem *eie = NULL;
        struct preftrees preftrees = {
                .direct = PREFTREE_INIT,
                .indirect = PREFTREE_INIT,
                .indirect_missing_keys = PREFTREE_INIT
        };

        key.objectid = bytenr;
        key.offset = (u64)-1;
        if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
                key.type = BTRFS_METADATA_ITEM_KEY;
        else
                key.type = BTRFS_EXTENT_ITEM_KEY;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
        if (!trans) {
                path->search_commit_root = 1;
                path->skip_locking = 1;
        }

        if (time_seq == SEQ_LAST)
                path->skip_locking = 1;

        /*
         * grab both a lock on the path and a lock on the delayed ref head.
         * We need both to get a consistent picture of how the refs look
         * at a specified point in time
         */
again:
        head = NULL;

        ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
        if (ret < 0)
                goto out;
        BUG_ON(ret == 0);

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
        if (trans && likely(trans->type != __TRANS_DUMMY) &&
            time_seq != SEQ_LAST) {
#else
        if (trans && time_seq != SEQ_LAST) {
#endif
                /*
                 * look if there are updates for this ref queued and lock the
                 * head
                 */
                delayed_refs = &trans->transaction->delayed_refs;
                spin_lock(&delayed_refs->lock);
                head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
                if (head) {
                        if (!mutex_trylock(&head->mutex)) {
                                refcount_inc(&head->refs);
                                spin_unlock(&delayed_refs->lock);

                                btrfs_release_path(path);

                                /*
                                 * Mutex was contended, block until it's
                                 * released and try again
                                 */
                                mutex_lock(&head->mutex);
                                mutex_unlock(&head->mutex);
                                btrfs_put_delayed_ref_head(head);
                                goto again;
                        }
                        spin_unlock(&delayed_refs->lock);
                        ret = add_delayed_refs(fs_info, head, time_seq,
                                               &preftrees, sc);
                        mutex_unlock(&head->mutex);
                        if (ret)
                                goto out;
                } else {
                        spin_unlock(&delayed_refs->lock);
                }
        }

        if (path->slots[0]) {
                struct extent_buffer *leaf;
                int slot;

                path->slots[0]--;
                leaf = path->nodes[0];
                slot = path->slots[0];
                btrfs_item_key_to_cpu(leaf, &key, slot);
                if (key.objectid == bytenr &&
                    (key.type == BTRFS_EXTENT_ITEM_KEY ||
                     key.type == BTRFS_METADATA_ITEM_KEY)) {
                        ret = add_inline_refs(fs_info, path, bytenr,
                                              &info_level, &preftrees, sc);
                        if (ret)
                                goto out;
                        ret = add_keyed_refs(fs_info, path, bytenr, info_level,
                                             &preftrees, sc);
                        if (ret)
                                goto out;
                }
        }

        btrfs_release_path(path);

        ret = add_missing_keys(fs_info, &preftrees, path->skip_locking == 0);
        if (ret)
                goto out;

        WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root.rb_root));

        ret = resolve_indirect_refs(fs_info, path, time_seq, &preftrees,
                                    extent_item_pos, sc, ignore_offset);
        if (ret)
                goto out;

        WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect.root.rb_root));

        /*
         * This walks the tree of merged and resolved refs. Tree blocks are
         * read in as needed. Unique entries are added to the ulist, and
         * the list of found roots is updated.
         *
         * We release the entire tree in one go before returning.
         */
        node = rb_first_cached(&preftrees.direct.root);
        while (node) {
                ref = rb_entry(node, struct prelim_ref, rbnode);
                node = rb_next(&ref->rbnode);
                /*
                 * ref->count < 0 can happen here if there are delayed
                 * refs with a node->action of BTRFS_DROP_DELAYED_REF.
                 * prelim_ref_insert() relies on this when merging
                 * identical refs to keep the overall count correct.
                 * prelim_ref_insert() will merge only those refs
                 * which compare identically.  Any refs having
                 * e.g. different offsets would not be merged,
                 * and would retain their original ref->count < 0.
                 */
                if (roots && ref->count && ref->root_id && ref->parent == 0) {
                        if (sc && sc->root_objectid &&
                            ref->root_id != sc->root_objectid) {
                                ret = BACKREF_FOUND_SHARED;
                                goto out;
                        }

                        /* no parent == root of tree */
                        ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
                        if (ret < 0)
                                goto out;
                }
                if (ref->count && ref->parent) {
                        if (extent_item_pos && !ref->inode_list &&
                            ref->level == 0) {
                                struct extent_buffer *eb;

                                eb = read_tree_block(fs_info, ref->parent, 0,
                                                     0, ref->level, NULL);
                                if (IS_ERR(eb)) {
                                        ret = PTR_ERR(eb);
                                        goto out;
                                } else if (!extent_buffer_uptodate(eb)) {
                                        free_extent_buffer(eb);
                                        ret = -EIO;
                                        goto out;
                                }

                                if (!path->skip_locking)
                                        btrfs_tree_read_lock(eb);
                                ret = find_extent_in_eb(eb, bytenr,
                                                        *extent_item_pos, &eie, ignore_offset);
                                if (!path->skip_locking)
                                        btrfs_tree_read_unlock(eb);
                                free_extent_buffer(eb);
                                if (ret < 0)
                                        goto out;
                                ref->inode_list = eie;
                        }
                        ret = ulist_add_merge_ptr(refs, ref->parent,
                                                  ref->inode_list,
                                                  (void **)&eie, GFP_NOFS);
                        if (ret < 0)
                                goto out;
                        if (!ret && extent_item_pos) {
                                /*
                                 * we've recorded that parent, so we must extend
                                 * its inode list here
                                 */
                                BUG_ON(!eie);
                                while (eie->next)
                                        eie = eie->next;
                                eie->next = ref->inode_list;
                        }
                        eie = NULL;
                }
                cond_resched();
        }

out:
        btrfs_free_path(path);

        prelim_release(&preftrees.direct);
        prelim_release(&preftrees.indirect);
        prelim_release(&preftrees.indirect_missing_keys);

        if (ret < 0)
                free_inode_elem_list(eie);
        return ret;
}

1387 static void free_leaf_list(struct ulist *blocks)
1388 {
1389         struct ulist_node *node = NULL;
1390         struct extent_inode_elem *eie;
1391         struct ulist_iterator uiter;
1392
1393         ULIST_ITER_INIT(&uiter);
1394         while ((node = ulist_next(blocks, &uiter))) {
1395                 if (!node->aux)
1396                         continue;
1397                 eie = unode_aux_to_inode_list(node);
1398                 free_inode_elem_list(eie);
1399                 node->aux = 0;
1400         }
1401
1402         ulist_free(blocks);
1403 }
1404
1405 /*
1406  * Finds all leaves with a reference to the specified combination of bytenr and
1407  * offset. The leaves are stored in the leafs ulist; when extent_item_pos is
1408  * given, each ulist node's aux carries the list of matching extent_inode_elems,
1409  * so the ulist must be freed with free_leaf_list() to release those lists too.
1410  *
1411  * returns 0 on success, <0 on error
1412  */
1413 int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
1414                          struct btrfs_fs_info *fs_info, u64 bytenr,
1415                          u64 time_seq, struct ulist **leafs,
1416                          const u64 *extent_item_pos, bool ignore_offset)
1417 {
1418         int ret;
1419
1420         *leafs = ulist_alloc(GFP_NOFS);
1421         if (!*leafs)
1422                 return -ENOMEM;
1423
1424         ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
1425                                 *leafs, NULL, extent_item_pos, NULL, ignore_offset);
1426         if (ret < 0 && ret != -ENOENT) {
1427                 free_leaf_list(*leafs);
1428                 return ret;
1429         }
1430
1431         return 0;
1432 }
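
/*
 * A minimal usage sketch (illustrative only, not compiled in): collect the
 * leaves referencing an extent and print their bytenrs. With a NULL
 * transaction the caller is assumed to hold fs_info->commit_root_sem, as
 * iterate_extent_inodes() below does, and with a NULL extent_item_pos no
 * inode lists get attached, so plain ulist_free() is enough:
 *
 *    struct ulist *leafs;
 *    struct ulist_node *node;
 *    struct ulist_iterator uiter;
 *    int ret;
 *
 *    ret = btrfs_find_all_leafs(NULL, fs_info, bytenr, 0, &leafs,
 *                               NULL, false);
 *    if (ret < 0)
 *            return ret;
 *    ULIST_ITER_INIT(&uiter);
 *    while ((node = ulist_next(leafs, &uiter)))
 *            pr_info("extent %llu referenced from leaf %llu\n",
 *                    bytenr, node->val);
 *    ulist_free(leafs);
 */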
1433
1434 /*
1435  * Walk all backrefs for a given extent to find all roots that reference this
1436  * extent. Walking a backref means finding all extents that reference this
1437  * extent and in turn walking the backrefs of those, too. Naturally this is a
1438  * recursive process, but here it is implemented in an iterative fashion: We
1439  * find all referencing extents for the extent in question and put them on a
1440  * list. In turn, we find all referencing extents for those, further appending
1441  * to the list. The way we iterate the list allows adding more elements after
1442  * the current while iterating. The process stops when we reach the end of the
1443  * list. Found roots are added to the roots list.
1444  *
1445  * returns 0 on success, < 0 on error.
1446  */
1447 static int btrfs_find_all_roots_safe(struct btrfs_trans_handle *trans,
1448                                      struct btrfs_fs_info *fs_info, u64 bytenr,
1449                                      u64 time_seq, struct ulist **roots,
1450                                      bool ignore_offset)
1451 {
1452         struct ulist *tmp;
1453         struct ulist_node *node = NULL;
1454         struct ulist_iterator uiter;
1455         int ret;
1456
1457         tmp = ulist_alloc(GFP_NOFS);
1458         if (!tmp)
1459                 return -ENOMEM;
1460         *roots = ulist_alloc(GFP_NOFS);
1461         if (!*roots) {
1462                 ulist_free(tmp);
1463                 return -ENOMEM;
1464         }
1465
1466         ULIST_ITER_INIT(&uiter);
1467         while (1) {
1468                 ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
1469                                         tmp, *roots, NULL, NULL, ignore_offset);
1470                 if (ret < 0 && ret != -ENOENT) {
1471                         ulist_free(tmp);
1472                         ulist_free(*roots);
1473                         *roots = NULL;
1474                         return ret;
1475                 }
1476                 node = ulist_next(tmp, &uiter);
1477                 if (!node)
1478                         break;
1479                 bytenr = node->val;
1480                 cond_resched();
1481         }
1482
1483         ulist_free(tmp);
1484         return 0;
1485 }
1486
1487 int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
1488                          struct btrfs_fs_info *fs_info, u64 bytenr,
1489                          u64 time_seq, struct ulist **roots,
1490                          bool ignore_offset)
1491 {
1492         int ret;
1493
1494         if (!trans)
1495                 down_read(&fs_info->commit_root_sem);
1496         ret = btrfs_find_all_roots_safe(trans, fs_info, bytenr,
1497                                         time_seq, roots, ignore_offset);
1498         if (!trans)
1499                 up_read(&fs_info->commit_root_sem);
1500         return ret;
1501 }
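
/*
 * A minimal usage sketch (illustrative only, not compiled in): find every
 * root referencing the extent at "bytenr" (assumed to come from the caller);
 * with a NULL transaction the helper takes commit_root_sem itself:
 *
 *    struct ulist *roots = NULL;
 *    struct ulist_node *node;
 *    struct ulist_iterator uiter;
 *    int ret;
 *
 *    ret = btrfs_find_all_roots(NULL, fs_info, bytenr, 0, &roots, false);
 *    if (ret < 0)
 *            return ret;
 *    ULIST_ITER_INIT(&uiter);
 *    while ((node = ulist_next(roots, &uiter)))
 *            pr_info("extent %llu found in root %llu\n", bytenr, node->val);
 *    ulist_free(roots);
 */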
1502
1503 /**
1504  * btrfs_check_shared - tell us whether an extent is shared
1505  *
1506  * btrfs_check_shared uses the backref walking code but will short
1507  * circuit as soon as it finds a root or inode that doesn't match the
1508  * one passed in. This provides a significant performance benefit for
1509  * callers (such as fiemap) which want to know whether the extent is
1510  * shared but do not need a ref count.
1511  *
1512  * This attempts to attach to the running transaction in order to account for
1513  * delayed refs, but continues on even when no running transaction exists.
1514  *
1515  * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error.
1516  */
1517 int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr,
1518                 struct ulist *roots, struct ulist *tmp)
1519 {
1520         struct btrfs_fs_info *fs_info = root->fs_info;
1521         struct btrfs_trans_handle *trans;
1522         struct ulist_iterator uiter;
1523         struct ulist_node *node;
1524         struct seq_list elem = SEQ_LIST_INIT(elem);
1525         int ret = 0;
1526         struct share_check shared = {
1527                 .root_objectid = root->root_key.objectid,
1528                 .inum = inum,
1529                 .share_count = 0,
1530         };
1531
1532         ulist_init(roots);
1533         ulist_init(tmp);
1534
1535         trans = btrfs_join_transaction_nostart(root);
1536         if (IS_ERR(trans)) {
1537                 if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
1538                         ret = PTR_ERR(trans);
1539                         goto out;
1540                 }
1541                 trans = NULL;
1542                 down_read(&fs_info->commit_root_sem);
1543         } else {
1544                 btrfs_get_tree_mod_seq(fs_info, &elem);
1545         }
1546
1547         ULIST_ITER_INIT(&uiter);
1548         while (1) {
1549                 ret = find_parent_nodes(trans, fs_info, bytenr, elem.seq, tmp,
1550                                         roots, NULL, &shared, false);
1551                 if (ret == BACKREF_FOUND_SHARED) {
1552                         /* this is the only condition under which we return 1 */
1553                         ret = 1;
1554                         break;
1555                 }
1556                 if (ret < 0 && ret != -ENOENT)
1557                         break;
1558                 ret = 0;
1559                 node = ulist_next(tmp, &uiter);
1560                 if (!node)
1561                         break;
1562                 bytenr = node->val;
1563                 shared.share_count = 0;
1564                 cond_resched();
1565         }
1566
1567         if (trans) {
1568                 btrfs_put_tree_mod_seq(fs_info, &elem);
1569                 btrfs_end_transaction(trans);
1570         } else {
1571                 up_read(&fs_info->commit_root_sem);
1572         }
1573 out:
1574         ulist_release(roots);
1575         ulist_release(tmp);
1576         return ret;
1577 }
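
/*
 * A minimal usage sketch (illustrative only, not compiled in), modeled on
 * the fiemap caller: the two scratch ulists are allocated by the caller so
 * they can be reused across many extents. "binode" (a struct btrfs_inode *)
 * and "disk_bytenr" are assumed to come from the surrounding code:
 *
 *    struct ulist *roots = ulist_alloc(GFP_KERNEL);
 *    struct ulist *tmp = ulist_alloc(GFP_KERNEL);
 *    int shared;
 *
 *    if (!roots || !tmp)
 *            goto out_enomem;
 *    shared = btrfs_check_shared(binode->root, btrfs_ino(binode),
 *                                disk_bytenr, roots, tmp);
 *    if (shared > 0)
 *            pr_info("extent %llu is shared\n", disk_bytenr);
 *    ulist_free(roots);
 *    ulist_free(tmp);
 */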
1578
1579 int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
1580                           u64 start_off, struct btrfs_path *path,
1581                           struct btrfs_inode_extref **ret_extref,
1582                           u64 *found_off)
1583 {
1584         int ret, slot;
1585         struct btrfs_key key;
1586         struct btrfs_key found_key;
1587         struct btrfs_inode_extref *extref;
1588         const struct extent_buffer *leaf;
1589         unsigned long ptr;
1590
1591         key.objectid = inode_objectid;
1592         key.type = BTRFS_INODE_EXTREF_KEY;
1593         key.offset = start_off;
1594
1595         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1596         if (ret < 0)
1597                 return ret;
1598
1599         while (1) {
1600                 leaf = path->nodes[0];
1601                 slot = path->slots[0];
1602                 if (slot >= btrfs_header_nritems(leaf)) {
1603                         /*
1604                          * If the item at offset is not found,
1605                          * btrfs_search_slot will point us to the slot
1606                          * where it should be inserted. In our case
1607                          * that will be the slot directly before the
1608                          * next BTRFS_INODE_EXTREF_KEY item. In the case
1609                          * that we're pointing to the last slot in a
1610                          * leaf, we must move one leaf over.
1611                          */
1612                         ret = btrfs_next_leaf(root, path);
1613                         if (ret) {
1614                                 if (ret >= 1)
1615                                         ret = -ENOENT;
1616                                 break;
1617                         }
1618                         continue;
1619                 }
1620
1621                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
1622
1623                 /*
1624                  * Check that we're still looking at an extended ref key for
1625                  * this particular objectid. If we have a different
1626                  * objectid or type then there are no more to be found
1627                  * in the tree and we can exit.
1628                  */
1629                 ret = -ENOENT;
1630                 if (found_key.objectid != inode_objectid)
1631                         break;
1632                 if (found_key.type != BTRFS_INODE_EXTREF_KEY)
1633                         break;
1634
1635                 ret = 0;
1636                 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
1637                 extref = (struct btrfs_inode_extref *)ptr;
1638                 *ret_extref = extref;
1639                 if (found_off)
1640                         *found_off = found_key.offset;
1641                 break;
1642         }
1643
1644         return ret;
1645 }
1646
1647 /*
1648  * This iterates to turn a name (from iref/extref) into a full filesystem path.
1649  * Elements of the path are separated by '/' and the path is guaranteed to be
1650  * 0-terminated. The path is only given within the current file system and
1651  * therefore never starts with a '/'. The caller is responsible for providing
1652  * "size" bytes in "dest". The dest buffer is filled backwards and, finally,
1653  * the start point of the resulting string is returned; this pointer normally
1654  * lies within dest.
1655  * In case the path buffer would overflow, the pointer is decremented further
1656  * as if output was written to the buffer, though no more output is actually
1657  * generated. That way, the caller can determine how much space would be
1658  * required for the path to fit into the buffer. In that case, the returned
1659  * value will be smaller than dest. Callers must check this!
1660  */
1661 char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
1662                         u32 name_len, unsigned long name_off,
1663                         struct extent_buffer *eb_in, u64 parent,
1664                         char *dest, u32 size)
1665 {
1666         int slot;
1667         u64 next_inum;
1668         int ret;
1669         s64 bytes_left = ((s64)size) - 1;
1670         struct extent_buffer *eb = eb_in;
1671         struct btrfs_key found_key;
1672         struct btrfs_inode_ref *iref;
1673
1674         if (bytes_left >= 0)
1675                 dest[bytes_left] = '\0';
1676
1677         while (1) {
1678                 bytes_left -= name_len;
1679                 if (bytes_left >= 0)
1680                         read_extent_buffer(eb, dest + bytes_left,
1681                                            name_off, name_len);
1682                 if (eb != eb_in) {
1683                         if (!path->skip_locking)
1684                                 btrfs_tree_read_unlock(eb);
1685                         free_extent_buffer(eb);
1686                 }
1687                 ret = btrfs_find_item(fs_root, path, parent, 0,
1688                                 BTRFS_INODE_REF_KEY, &found_key);
1689                 if (ret > 0)
1690                         ret = -ENOENT;
1691                 if (ret)
1692                         break;
1693
1694                 next_inum = found_key.offset;
1695
1696                 /* regular exit ahead */
1697                 if (parent == next_inum)
1698                         break;
1699
1700                 slot = path->slots[0];
1701                 eb = path->nodes[0];
1702                 /* make sure we can use eb after releasing the path */
1703                 if (eb != eb_in) {
1704                         path->nodes[0] = NULL;
1705                         path->locks[0] = 0;
1706                 }
1707                 btrfs_release_path(path);
1708                 iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
1709
1710                 name_len = btrfs_inode_ref_name_len(eb, iref);
1711                 name_off = (unsigned long)(iref + 1);
1712
1713                 parent = next_inum;
1714                 --bytes_left;
1715                 if (bytes_left >= 0)
1716                         dest[bytes_left] = '/';
1717         }
1718
1719         btrfs_release_path(path);
1720
1721         if (ret)
1722                 return ERR_PTR(ret);
1723
1724         return dest + bytes_left;
1725 }
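
/*
 * A worked example of the contract above (illustrative): resolving the path
 * "a/bb/ccc" needs 9 bytes (8 characters plus the terminating 0). With
 * size == 16 the call returns dest + 7 and the string occupies the last 9
 * bytes of the buffer. With size == 4 the same call returns dest - 5:
 * nothing usable was written, but "dest - ret" tells the caller that 5 more
 * bytes (9 in total) would be required, which is exactly what
 * inode_to_path() below accounts for in bytes_missing.
 */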
1726
1727 /*
1728  * This makes the path point to (logical EXTENT_ITEM *). It stores
1729  * BTRFS_EXTENT_FLAG_DATA for data or BTRFS_EXTENT_FLAG_TREE_BLOCK for
1730  * tree blocks in *flags_ret and returns 0 on success, <0 on error.
1731  */
1732 int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
1733                         struct btrfs_path *path, struct btrfs_key *found_key,
1734                         u64 *flags_ret)
1735 {
1736         int ret;
1737         u64 flags;
1738         u64 size = 0;
1739         u32 item_size;
1740         const struct extent_buffer *eb;
1741         struct btrfs_extent_item *ei;
1742         struct btrfs_key key;
1743
1744         if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
1745                 key.type = BTRFS_METADATA_ITEM_KEY;
1746         else
1747                 key.type = BTRFS_EXTENT_ITEM_KEY;
1748         key.objectid = logical;
1749         key.offset = (u64)-1;
1750
1751         ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
1752         if (ret < 0)
1753                 return ret;
1754
1755         ret = btrfs_previous_extent_item(fs_info->extent_root, path, 0);
1756         if (ret) {
1757                 if (ret > 0)
1758                         ret = -ENOENT;
1759                 return ret;
1760         }
1761         btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
1762         if (found_key->type == BTRFS_METADATA_ITEM_KEY)
1763                 size = fs_info->nodesize;
1764         else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
1765                 size = found_key->offset;
1766
1767         if (found_key->objectid > logical ||
1768             found_key->objectid + size <= logical) {
1769                 btrfs_debug(fs_info,
1770                         "logical %llu is not within any extent", logical);
1771                 return -ENOENT;
1772         }
1773
1774         eb = path->nodes[0];
1775         item_size = btrfs_item_size_nr(eb, path->slots[0]);
1776         BUG_ON(item_size < sizeof(*ei));
1777
1778         ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
1779         flags = btrfs_extent_flags(eb, ei);
1780
1781         btrfs_debug(fs_info,
1782                 "logical %llu is at position %llu within the extent (%llu EXTENT_ITEM %llu) flags %#llx size %u",
1783                  logical, logical - found_key->objectid, found_key->objectid,
1784                  found_key->offset, flags, item_size);
1785
1786         WARN_ON(!flags_ret);
1787         if (flags_ret) {
1788                 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1789                         *flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK;
1790                 else if (flags & BTRFS_EXTENT_FLAG_DATA)
1791                         *flags_ret = BTRFS_EXTENT_FLAG_DATA;
1792                 else
1793                         BUG();
1794                 return 0;
1795         }
1796
1797         return -EIO;
1798 }
1799
1800 /*
1801  * Helper function to iterate extent inline refs. ptr must point to a 0 value
1802  * for the first call and may be modified; it is used to track state.
1803  * If more refs exist, 0 is returned and the next call to
1804  * get_extent_inline_ref must pass the modified ptr parameter to get the
1805  * next ref. After the last ref was processed, 1 is returned.
1806  * Returns <0 on error.
1807  */
1808 static int get_extent_inline_ref(unsigned long *ptr,
1809                                  const struct extent_buffer *eb,
1810                                  const struct btrfs_key *key,
1811                                  const struct btrfs_extent_item *ei,
1812                                  u32 item_size,
1813                                  struct btrfs_extent_inline_ref **out_eiref,
1814                                  int *out_type)
1815 {
1816         unsigned long end;
1817         u64 flags;
1818         struct btrfs_tree_block_info *info;
1819
1820         if (!*ptr) {
1821                 /* first call */
1822                 flags = btrfs_extent_flags(eb, ei);
1823                 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1824                         if (key->type == BTRFS_METADATA_ITEM_KEY) {
1825                                 /* a skinny metadata extent */
1826                                 *out_eiref =
1827                                      (struct btrfs_extent_inline_ref *)(ei + 1);
1828                         } else {
1829                                 WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
1830                                 info = (struct btrfs_tree_block_info *)(ei + 1);
1831                                 *out_eiref =
1832                                    (struct btrfs_extent_inline_ref *)(info + 1);
1833                         }
1834                 } else {
1835                         *out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
1836                 }
1837                 *ptr = (unsigned long)*out_eiref;
1838                 if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size)
1839                         return -ENOENT;
1840         }
1841
1842         end = (unsigned long)ei + item_size;
1843         *out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
1844         *out_type = btrfs_get_extent_inline_ref_type(eb, *out_eiref,
1845                                                      BTRFS_REF_TYPE_ANY);
1846         if (*out_type == BTRFS_REF_TYPE_INVALID)
1847                 return -EUCLEAN;
1848
1849         *ptr += btrfs_extent_inline_ref_size(*out_type);
1850         WARN_ON(*ptr > end);
1851         if (*ptr == end)
1852                 return 1; /* last */
1853
1854         return 0;
1855 }
1856
1857 /*
1858  * Reads the tree block backref for an extent. Tree level and root are returned
1859  * through out_level and out_root. ptr must point to a 0 value for the first
1860  * call and may be modified (see the get_extent_inline_ref comment).
1861  * Returns 0 if data was provided, 1 if there was no more data to provide or
1862  * <0 on error.
1863  */
1864 int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
1865                             struct btrfs_key *key, struct btrfs_extent_item *ei,
1866                             u32 item_size, u64 *out_root, u8 *out_level)
1867 {
1868         int ret;
1869         int type;
1870         struct btrfs_extent_inline_ref *eiref;
1871
1872         if (*ptr == (unsigned long)-1)
1873                 return 1;
1874
1875         while (1) {
1876                 ret = get_extent_inline_ref(ptr, eb, key, ei, item_size,
1877                                               &eiref, &type);
1878                 if (ret < 0)
1879                         return ret;
1880
1881                 if (type == BTRFS_TREE_BLOCK_REF_KEY ||
1882                     type == BTRFS_SHARED_BLOCK_REF_KEY)
1883                         break;
1884
1885                 if (ret == 1)
1886                         return 1;
1887         }
1888
1889         /* we can treat both ref types equally here */
1890         *out_root = btrfs_extent_inline_ref_offset(eb, eiref);
1891
1892         if (key->type == BTRFS_EXTENT_ITEM_KEY) {
1893                 struct btrfs_tree_block_info *info;
1894
1895                 info = (struct btrfs_tree_block_info *)(ei + 1);
1896                 *out_level = btrfs_tree_block_level(eb, info);
1897         } else {
1898                 ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
1899                 *out_level = (u8)key->offset;
1900         }
1901
1902         if (ret == 1)
1903                 *ptr = (unsigned long)-1;
1904
1905         return 0;
1906 }
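
/*
 * A minimal usage sketch (illustrative only, not compiled in), similar to
 * the way scrub reports the owners of a tree block: "eb", "key", "ei" and
 * "item_size" are assumed to describe an extent item located via
 * extent_from_logical() above:
 *
 *    unsigned long ptr = 0;
 *    u64 root;
 *    u8 level;
 *    int ret;
 *
 *    do {
 *            ret = tree_backref_for_extent(&ptr, eb, &key, ei, item_size,
 *                                          &root, &level);
 *            if (ret < 0)
 *                    return ret;
 *            if (ret == 0)
 *                    pr_info("tree block level %u, root %llu\n",
 *                            level, root);
 *    } while (ret != 1);
 */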
1907
1908 static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
1909                              struct extent_inode_elem *inode_list,
1910                              u64 root, u64 extent_item_objectid,
1911                              iterate_extent_inodes_t *iterate, void *ctx)
1912 {
1913         struct extent_inode_elem *eie;
1914         int ret = 0;
1915
1916         for (eie = inode_list; eie; eie = eie->next) {
1917                 btrfs_debug(fs_info,
1918                             "ref for %llu resolved, key (%llu EXTENT_DATA %llu), root %llu",
1919                             extent_item_objectid, eie->inum,
1920                             eie->offset, root);
1921                 ret = iterate(eie->inum, eie->offset, root, ctx);
1922                 if (ret) {
1923                         btrfs_debug(fs_info,
1924                                     "stopping iteration for %llu due to ret=%d",
1925                                     extent_item_objectid, ret);
1926                         break;
1927                 }
1928         }
1929
1930         return ret;
1931 }
1932
1933 /*
1934  * Calls iterate() for every inode that references the extent identified by
1935  * the given parameters.
1936  * When the iterator function returns a non-zero value, iteration stops.
1937  */
1938 int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
1939                                 u64 extent_item_objectid, u64 extent_item_pos,
1940                                 int search_commit_root,
1941                                 iterate_extent_inodes_t *iterate, void *ctx,
1942                                 bool ignore_offset)
1943 {
1944         int ret;
1945         struct btrfs_trans_handle *trans = NULL;
1946         struct ulist *refs = NULL;
1947         struct ulist *roots = NULL;
1948         struct ulist_node *ref_node = NULL;
1949         struct ulist_node *root_node = NULL;
1950         struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
1951         struct ulist_iterator ref_uiter;
1952         struct ulist_iterator root_uiter;
1953
1954         btrfs_debug(fs_info, "resolving all inodes for extent %llu",
1955                         extent_item_objectid);
1956
1957         if (!search_commit_root) {
1958                 trans = btrfs_attach_transaction(fs_info->extent_root);
1959                 if (IS_ERR(trans)) {
1960                         if (PTR_ERR(trans) != -ENOENT &&
1961                             PTR_ERR(trans) != -EROFS)
1962                                 return PTR_ERR(trans);
1963                         trans = NULL;
1964                 }
1965         }
1966
1967         if (trans)
1968                 btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
1969         else
1970                 down_read(&fs_info->commit_root_sem);
1971
1972         ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
1973                                    tree_mod_seq_elem.seq, &refs,
1974                                    &extent_item_pos, ignore_offset);
1975         if (ret)
1976                 goto out;
1977
1978         ULIST_ITER_INIT(&ref_uiter);
1979         while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
1980                 ret = btrfs_find_all_roots_safe(trans, fs_info, ref_node->val,
1981                                                 tree_mod_seq_elem.seq, &roots,
1982                                                 ignore_offset);
1983                 if (ret)
1984                         break;
1985                 ULIST_ITER_INIT(&root_uiter);
1986                 while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
1987                         btrfs_debug(fs_info,
1988                                     "root %llu references leaf %llu, data list %#llx",
1989                                     root_node->val, ref_node->val,
1990                                     ref_node->aux);
1991                         ret = iterate_leaf_refs(fs_info,
1992                                                 (struct extent_inode_elem *)
1993                                                 (uintptr_t)ref_node->aux,
1994                                                 root_node->val,
1995                                                 extent_item_objectid,
1996                                                 iterate, ctx);
1997                 }
1998                 ulist_free(roots);
1999         }
2000
2001         free_leaf_list(refs);
2002 out:
2003         if (trans) {
2004                 btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
2005                 btrfs_end_transaction(trans);
2006         } else {
2007                 up_read(&fs_info->commit_root_sem);
2008         }
2009
2010         return ret;
2011 }
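
/*
 * A minimal usage sketch (illustrative only, not compiled in): a callback
 * matching the iterate_extent_inodes_t signature plus the call driving it.
 * The names "count_one" and "nr_refs" are made up for the example; a
 * non-zero return from the callback would stop the iteration early:
 *
 *    static int count_one(u64 inum, u64 offset, u64 root, void *ctx)
 *    {
 *            (*(u64 *)ctx)++;
 *            return 0;
 *    }
 *
 *    u64 nr_refs = 0;
 *    int ret = iterate_extent_inodes(fs_info, extent_item_objectid,
 *                                    extent_item_pos, 0, count_one,
 *                                    &nr_refs, false);
 */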
2012
2013 int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
2014                                 struct btrfs_path *path,
2015                                 iterate_extent_inodes_t *iterate, void *ctx,
2016                                 bool ignore_offset)
2017 {
2018         int ret;
2019         u64 extent_item_pos;
2020         u64 flags = 0;
2021         struct btrfs_key found_key;
2022         int search_commit_root = path->search_commit_root;
2023
2024         ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
2025         btrfs_release_path(path);
2026         if (ret < 0)
2027                 return ret;
2028         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
2029                 return -EINVAL;
2030
2031         extent_item_pos = logical - found_key.objectid;
2032         ret = iterate_extent_inodes(fs_info, found_key.objectid,
2033                                         extent_item_pos, search_commit_root,
2034                                         iterate, ctx, ignore_offset);
2035
2036         return ret;
2037 }
2038
2039 typedef int (iterate_irefs_t)(u64 parent, u32 name_len, unsigned long name_off,
2040                               struct extent_buffer *eb, void *ctx);
2041
2042 static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
2043                               struct btrfs_path *path,
2044                               iterate_irefs_t *iterate, void *ctx)
2045 {
2046         int ret = 0;
2047         int slot;
2048         u32 cur;
2049         u32 len;
2050         u32 name_len;
2051         u64 parent = 0;
2052         int found = 0;
2053         struct extent_buffer *eb;
2054         struct btrfs_item *item;
2055         struct btrfs_inode_ref *iref;
2056         struct btrfs_key found_key;
2057
2058         while (!ret) {
2059                 ret = btrfs_find_item(fs_root, path, inum,
2060                                 parent ? parent + 1 : 0, BTRFS_INODE_REF_KEY,
2061                                 &found_key);
2062
2063                 if (ret < 0)
2064                         break;
2065                 if (ret) {
2066                         ret = found ? 0 : -ENOENT;
2067                         break;
2068                 }
2069                 ++found;
2070
2071                 parent = found_key.offset;
2072                 slot = path->slots[0];
2073                 eb = btrfs_clone_extent_buffer(path->nodes[0]);
2074                 if (!eb) {
2075                         ret = -ENOMEM;
2076                         break;
2077                 }
2078                 btrfs_release_path(path);
2079
2080                 item = btrfs_item_nr(slot);
2081                 iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
2082
2083                 for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
2084                         name_len = btrfs_inode_ref_name_len(eb, iref);
2085                         /* path must be released before calling iterate()! */
2086                         btrfs_debug(fs_root->fs_info,
2087                                 "following ref at offset %u for inode %llu in tree %llu",
2088                                 cur, found_key.objectid,
2089                                 fs_root->root_key.objectid);
2090                         ret = iterate(parent, name_len,
2091                                       (unsigned long)(iref + 1), eb, ctx);
2092                         if (ret)
2093                                 break;
2094                         len = sizeof(*iref) + name_len;
2095                         iref = (struct btrfs_inode_ref *)((char *)iref + len);
2096                 }
2097                 free_extent_buffer(eb);
2098         }
2099
2100         btrfs_release_path(path);
2101
2102         return ret;
2103 }
2104
2105 static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
2106                                  struct btrfs_path *path,
2107                                  iterate_irefs_t *iterate, void *ctx)
2108 {
2109         int ret;
2110         int slot;
2111         u64 offset = 0;
2112         u64 parent;
2113         int found = 0;
2114         struct extent_buffer *eb;
2115         struct btrfs_inode_extref *extref;
2116         u32 item_size;
2117         u32 cur_offset;
2118         unsigned long ptr;
2119
2120         while (1) {
2121                 ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref,
2122                                             &offset);
2123                 if (ret < 0)
2124                         break;
2125                 if (ret) {
2126                         ret = found ? 0 : -ENOENT;
2127                         break;
2128                 }
2129                 ++found;
2130
2131                 slot = path->slots[0];
2132                 eb = btrfs_clone_extent_buffer(path->nodes[0]);
2133                 if (!eb) {
2134                         ret = -ENOMEM;
2135                         break;
2136                 }
2137                 btrfs_release_path(path);
2138
2139                 item_size = btrfs_item_size_nr(eb, slot);
2140                 ptr = btrfs_item_ptr_offset(eb, slot);
2141                 cur_offset = 0;
2142
2143                 while (cur_offset < item_size) {
2144                         u32 name_len;
2145
2146                         extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
2147                         parent = btrfs_inode_extref_parent(eb, extref);
2148                         name_len = btrfs_inode_extref_name_len(eb, extref);
2149                         ret = iterate(parent, name_len,
2150                                       (unsigned long)&extref->name, eb, ctx);
2151                         if (ret)
2152                                 break;
2153
2154                         cur_offset += btrfs_inode_extref_name_len(eb, extref);
2155                         cur_offset += sizeof(*extref);
2156                 }
2157                 free_extent_buffer(eb);
2158
2159                 offset++;
2160         }
2161
2162         btrfs_release_path(path);
2163
2164         return ret;
2165 }
2166
2167 static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
2168                          struct btrfs_path *path, iterate_irefs_t *iterate,
2169                          void *ctx)
2170 {
2171         int ret;
2172         int found_refs = 0;
2173
2174         ret = iterate_inode_refs(inum, fs_root, path, iterate, ctx);
2175         if (!ret)
2176                 ++found_refs;
2177         else if (ret != -ENOENT)
2178                 return ret;
2179
2180         ret = iterate_inode_extrefs(inum, fs_root, path, iterate, ctx);
2181         if (ret == -ENOENT && found_refs)
2182                 return 0;
2183
2184         return ret;
2185 }
2186
2187 /*
2188  * returns 0 if the path could be dumped (possibly truncated)
2189  * returns <0 in case of an error
2190  */
2191 static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
2192                          struct extent_buffer *eb, void *ctx)
2193 {
2194         struct inode_fs_paths *ipath = ctx;
2195         char *fspath;
2196         char *fspath_min;
2197         int i = ipath->fspath->elem_cnt;
2198         const int s_ptr = sizeof(char *);
2199         u32 bytes_left;
2200
2201         bytes_left = ipath->fspath->bytes_left > s_ptr ?
2202                                         ipath->fspath->bytes_left - s_ptr : 0;
2203
2204         fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
2205         fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
2206                                    name_off, eb, inum, fspath_min, bytes_left);
2207         if (IS_ERR(fspath))
2208                 return PTR_ERR(fspath);
2209
2210         if (fspath > fspath_min) {
2211                 ipath->fspath->val[i] = (u64)(unsigned long)fspath;
2212                 ++ipath->fspath->elem_cnt;
2213                 ipath->fspath->bytes_left = fspath - fspath_min;
2214         } else {
2215                 ++ipath->fspath->elem_missed;
2216                 ipath->fspath->bytes_missing += fspath_min - fspath;
2217                 ipath->fspath->bytes_left = 0;
2218         }
2219
2220         return 0;
2221 }
2222
2223 /*
2224  * This dumps all file system paths to the inode into the ipath struct,
2225  * provided it has been created large enough. Each path is zero-terminated
2226  * and accessed from ipath->fspath->val[i].
2227  * When it returns, ipath->fspath->elem_cnt paths are available in
2228  * ipath->fspath->val[]. When the allocated space wasn't sufficient, the
2229  * number of missed paths is recorded in ipath->fspath->elem_missed;
2230  * otherwise it's zero. ipath->fspath->bytes_missing holds the number of
2231  * bytes that would have been needed to return all paths.
2232  */
2233 int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
2234 {
2235         return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
2236                              inode_to_path, ipath);
2237 }
2238
2239 struct btrfs_data_container *init_data_container(u32 total_bytes)
2240 {
2241         struct btrfs_data_container *data;
2242         size_t alloc_bytes;
2243
2244         alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
2245         data = kvmalloc(alloc_bytes, GFP_KERNEL);
2246         if (!data)
2247                 return ERR_PTR(-ENOMEM);
2248
2249         if (total_bytes >= sizeof(*data)) {
2250                 data->bytes_left = total_bytes - sizeof(*data);
2251                 data->bytes_missing = 0;
2252         } else {
2253                 data->bytes_missing = sizeof(*data) - total_bytes;
2254                 data->bytes_left = 0;
2255         }
2256
2257         data->elem_cnt = 0;
2258         data->elem_missed = 0;
2259
2260         return data;
2261 }
2262
2263 /*
2264  * Allocates space to return multiple file system paths for an inode.
2265  * total_bytes to allocate are passed; note that the space usable for actual
2266  * path information will be total_bytes - sizeof(struct btrfs_data_container).
2267  * The returned pointer must be freed with free_ipath() in the end.
2268  */
2269 struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
2270                                         struct btrfs_path *path)
2271 {
2272         struct inode_fs_paths *ifp;
2273         struct btrfs_data_container *fspath;
2274
2275         fspath = init_data_container(total_bytes);
2276         if (IS_ERR(fspath))
2277                 return ERR_CAST(fspath);
2278
2279         ifp = kmalloc(sizeof(*ifp), GFP_KERNEL);
2280         if (!ifp) {
2281                 kvfree(fspath);
2282                 return ERR_PTR(-ENOMEM);
2283         }
2284
2285         ifp->btrfs_path = path;
2286         ifp->fspath = fspath;
2287         ifp->fs_root = fs_root;
2288
2289         return ifp;
2290 }
2291
2292 void free_ipath(struct inode_fs_paths *ipath)
2293 {
2294         if (!ipath)
2295                 return;
2296         kvfree(ipath->fspath);
2297         kfree(ipath);
2298 }
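
/*
 * A minimal usage sketch of the ipath API above (illustrative only, not
 * compiled in), close to what the INO_PATHS ioctl does; 4096 is an
 * arbitrary container size and "inum"/"fs_root" come from the caller:
 *
 *    struct btrfs_path *path = btrfs_alloc_path();
 *    struct inode_fs_paths *ipath;
 *    int i, ret;
 *
 *    if (!path)
 *            return -ENOMEM;
 *    ipath = init_ipath(4096, fs_root, path);
 *    if (IS_ERR(ipath)) {
 *            btrfs_free_path(path);
 *            return PTR_ERR(ipath);
 *    }
 *    ret = paths_from_inode(inum, ipath);
 *    for (i = 0; i < ipath->fspath->elem_cnt; i++)
 *            pr_info("path %d: %s\n", i,
 *                    (char *)(unsigned long)ipath->fspath->val[i]);
 *    free_ipath(ipath);
 *    btrfs_free_path(path);
 */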
2299
2300 struct btrfs_backref_iter *btrfs_backref_iter_alloc(
2301                 struct btrfs_fs_info *fs_info, gfp_t gfp_flag)
2302 {
2303         struct btrfs_backref_iter *ret;
2304
2305         ret = kzalloc(sizeof(*ret), gfp_flag);
2306         if (!ret)
2307                 return NULL;
2308
2309         ret->path = btrfs_alloc_path();
2310         if (!ret->path) {
2311                 kfree(ret);
2312                 return NULL;
2313         }
2314
2315         /* Current backref iterator only supports iteration in commit root */
2316         ret->path->search_commit_root = 1;
2317         ret->path->skip_locking = 1;
2318         ret->fs_info = fs_info;
2319
2320         return ret;
2321 }
2322
2323 int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr)
2324 {
2325         struct btrfs_fs_info *fs_info = iter->fs_info;
2326         struct btrfs_path *path = iter->path;
2327         struct btrfs_extent_item *ei;
2328         struct btrfs_key key;
2329         int ret;
2330
2331         key.objectid = bytenr;
2332         key.type = BTRFS_METADATA_ITEM_KEY;
2333         key.offset = (u64)-1;
2334         iter->bytenr = bytenr;
2335
2336         ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
2337         if (ret < 0)
2338                 return ret;
2339         if (ret == 0) {
2340                 ret = -EUCLEAN;
2341                 goto release;
2342         }
2343         if (path->slots[0] == 0) {
2344                 WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
2345                 ret = -EUCLEAN;
2346                 goto release;
2347         }
2348         path->slots[0]--;
2349
2350         btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2351         if ((key.type != BTRFS_EXTENT_ITEM_KEY &&
2352              key.type != BTRFS_METADATA_ITEM_KEY) || key.objectid != bytenr) {
2353                 ret = -ENOENT;
2354                 goto release;
2355         }
2356         memcpy(&iter->cur_key, &key, sizeof(key));
2357         iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2358                                                     path->slots[0]);
2359         iter->end_ptr = (u32)(iter->item_ptr +
2360                         btrfs_item_size_nr(path->nodes[0], path->slots[0]));
2361         ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
2362                             struct btrfs_extent_item);
2363
2364         /*
2365          * Only iteration on tree backrefs is supported yet.
2366          *
2367          * This is an extra precaution for non skinny-metadata, where
2368          * EXTENT_ITEM is also used for tree blocks and we can only use
2369          * the extent flags to determine if it's a tree block.
2370          */
2371         if (btrfs_extent_flags(path->nodes[0], ei) & BTRFS_EXTENT_FLAG_DATA) {
2372                 ret = -ENOTSUPP;
2373                 goto release;
2374         }
2375         iter->cur_ptr = (u32)(iter->item_ptr + sizeof(*ei));
2376
2377         /* If there is no inline backref, go search for keyed backref */
2378         if (iter->cur_ptr >= iter->end_ptr) {
2379                 ret = btrfs_next_item(fs_info->extent_root, path);
2380
2381                 /* Neither inline nor keyed ref */
2382                 if (ret > 0) {
2383                         ret = -ENOENT;
2384                         goto release;
2385                 }
2386                 if (ret < 0)
2387                         goto release;
2388
2389                 btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key,
2390                                 path->slots[0]);
2391                 if (iter->cur_key.objectid != bytenr ||
2392                     (iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY &&
2393                      iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY)) {
2394                         ret = -ENOENT;
2395                         goto release;
2396                 }
2397                 iter->cur_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2398                                                            path->slots[0]);
2399                 iter->item_ptr = iter->cur_ptr;
2400                 iter->end_ptr = (u32)(iter->item_ptr + btrfs_item_size_nr(
2401                                       path->nodes[0], path->slots[0]));
2402         }
2403
2404         return 0;
2405 release:
2406         btrfs_backref_iter_release(iter);
2407         return ret;
2408 }
2409
2410 /*
2411  * Go to the next backref item of the current bytenr; it can be either
2412  * inlined or keyed.
2413  *
2414  * Callers need to check whether it's an inline ref via iter->cur_key.
2415  *
2416  * Return 0 if we got the next backref without problem.
2417  * Return >0 if there is no extra backref for this bytenr.
2418  * Return <0 if something went wrong.
2419  */
2420 int btrfs_backref_iter_next(struct btrfs_backref_iter *iter)
2421 {
2422         struct extent_buffer *eb = btrfs_backref_get_eb(iter);
2423         struct btrfs_path *path = iter->path;
2424         struct btrfs_extent_inline_ref *iref;
2425         int ret;
2426         u32 size;
2427
2428         if (btrfs_backref_iter_is_inline_ref(iter)) {
2429                 /* We're still inside the inline refs */
2430                 ASSERT(iter->cur_ptr < iter->end_ptr);
2431
2432                 if (btrfs_backref_has_tree_block_info(iter)) {
2433                         /* First tree block info */
2434                         size = sizeof(struct btrfs_tree_block_info);
2435                 } else {
2436                         /* Use inline ref type to determine the size */
2437                         int type;
2438
2439                         iref = (struct btrfs_extent_inline_ref *)
2440                                 ((unsigned long)iter->cur_ptr);
2441                         type = btrfs_extent_inline_ref_type(eb, iref);
2442
2443                         size = btrfs_extent_inline_ref_size(type);
2444                 }
2445                 iter->cur_ptr += size;
2446                 if (iter->cur_ptr < iter->end_ptr)
2447                         return 0;
2448
2449                 /* All inline items iterated, fall through */
2450         }
2451
2452         /* We're at keyed items, there is no inline item, go to the next one */
2453         ret = btrfs_next_item(iter->fs_info->extent_root, iter->path);
2454         if (ret)
2455                 return ret;
2456
2457         btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, path->slots[0]);
2458         if (iter->cur_key.objectid != iter->bytenr ||
2459             (iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY &&
2460              iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY))
2461                 return 1;
2462         iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2463                                         path->slots[0]);
2464         iter->cur_ptr = iter->item_ptr;
2465         iter->end_ptr = iter->item_ptr + (u32)btrfs_item_size_nr(path->nodes[0],
2466                                                 path->slots[0]);
2467         return 0;
2468 }
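
/*
 * A minimal usage sketch of the iterator (illustrative only, not compiled
 * in): visit every backref of the tree block at "bytenr". As noted above,
 * iter->cur_key tells an inline ref from a keyed one:
 *
 *    struct btrfs_backref_iter *iter;
 *    int ret;
 *
 *    iter = btrfs_backref_iter_alloc(fs_info, GFP_NOFS);
 *    if (!iter)
 *            return -ENOMEM;
 *    for (ret = btrfs_backref_iter_start(iter, bytenr); ret == 0;
 *         ret = btrfs_backref_iter_next(iter)) {
 *            if (btrfs_backref_iter_is_inline_ref(iter))
 *                    pr_info("inline ref, type %u\n", iter->cur_key.type);
 *            else
 *                    pr_info("keyed ref, type %u\n", iter->cur_key.type);
 *    }
 *    btrfs_backref_iter_release(iter);
 *    btrfs_backref_iter_free(iter);
 *    if (ret < 0)
 *            return ret;
 */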
2469
2470 void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info,
2471                               struct btrfs_backref_cache *cache, int is_reloc)
2472 {
2473         int i;
2474
2475         cache->rb_root = RB_ROOT;
2476         for (i = 0; i < BTRFS_MAX_LEVEL; i++)
2477                 INIT_LIST_HEAD(&cache->pending[i]);
2478         INIT_LIST_HEAD(&cache->changed);
2479         INIT_LIST_HEAD(&cache->detached);
2480         INIT_LIST_HEAD(&cache->leaves);
2481         INIT_LIST_HEAD(&cache->pending_edge);
2482         INIT_LIST_HEAD(&cache->useless_node);
2483         cache->fs_info = fs_info;
2484         cache->is_reloc = is_reloc;
2485 }
2486
2487 struct btrfs_backref_node *btrfs_backref_alloc_node(
2488                 struct btrfs_backref_cache *cache, u64 bytenr, int level)
2489 {
2490         struct btrfs_backref_node *node;
2491
2492         ASSERT(level >= 0 && level < BTRFS_MAX_LEVEL);
2493         node = kzalloc(sizeof(*node), GFP_NOFS);
2494         if (!node)
2495                 return node;
2496
2497         INIT_LIST_HEAD(&node->list);
2498         INIT_LIST_HEAD(&node->upper);
2499         INIT_LIST_HEAD(&node->lower);
2500         RB_CLEAR_NODE(&node->rb_node);
2501         cache->nr_nodes++;
2502         node->level = level;
2503         node->bytenr = bytenr;
2504
2505         return node;
2506 }
2507
2508 struct btrfs_backref_edge *btrfs_backref_alloc_edge(
2509                 struct btrfs_backref_cache *cache)
2510 {
2511         struct btrfs_backref_edge *edge;
2512
2513         edge = kzalloc(sizeof(*edge), GFP_NOFS);
2514         if (edge)
2515                 cache->nr_edges++;
2516         return edge;
2517 }
2518
2519 /*
2520  * Drop the backref node from cache, also cleaning up all its
2521  * upper edges and any uncached nodes in the path.
2522  *
2523  * This cleanup happens bottom up, thus the node should either
2524  * be the lowest node in the cache or a detached node.
2525  */
2526 void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache,
2527                                 struct btrfs_backref_node *node)
2528 {
2529         struct btrfs_backref_node *upper;
2530         struct btrfs_backref_edge *edge;
2531
2532         if (!node)
2533                 return;
2534
2535         BUG_ON(!node->lowest && !node->detached);
2536         while (!list_empty(&node->upper)) {
2537                 edge = list_entry(node->upper.next, struct btrfs_backref_edge,
2538                                   list[LOWER]);
2539                 upper = edge->node[UPPER];
2540                 list_del(&edge->list[LOWER]);
2541                 list_del(&edge->list[UPPER]);
2542                 btrfs_backref_free_edge(cache, edge);
2543
2544                 if (RB_EMPTY_NODE(&upper->rb_node)) {
2545                         BUG_ON(!list_empty(&node->upper));
2546                         btrfs_backref_drop_node(cache, node);
2547                         node = upper;
2548                         node->lowest = 1;
2549                         continue;
2550                 }
2551                 /*
2552                  * Add the node to the leaf node list if no other child block
2553                  * is cached.
2554                  */
2555                 if (list_empty(&upper->lower)) {
2556                         list_add_tail(&upper->lower, &cache->leaves);
2557                         upper->lowest = 1;
2558                 }
2559         }
2560
2561         btrfs_backref_drop_node(cache, node);
2562 }
2563
2564 /*
2565  * Release all nodes/edges from current cache
2566  */
2567 void btrfs_backref_release_cache(struct btrfs_backref_cache *cache)
2568 {
2569         struct btrfs_backref_node *node;
2570         int i;
2571
2572         while (!list_empty(&cache->detached)) {
2573                 node = list_entry(cache->detached.next,
2574                                   struct btrfs_backref_node, list);
2575                 btrfs_backref_cleanup_node(cache, node);
2576         }
2577
2578         while (!list_empty(&cache->leaves)) {
2579                 node = list_entry(cache->leaves.next,
2580                                   struct btrfs_backref_node, lower);
2581                 btrfs_backref_cleanup_node(cache, node);
2582         }
2583
2584         cache->last_trans = 0;
2585
2586         for (i = 0; i < BTRFS_MAX_LEVEL; i++)
2587                 ASSERT(list_empty(&cache->pending[i]));
2588         ASSERT(list_empty(&cache->pending_edge));
2589         ASSERT(list_empty(&cache->useless_node));
2590         ASSERT(list_empty(&cache->changed));
2591         ASSERT(list_empty(&cache->detached));
2592         ASSERT(RB_EMPTY_ROOT(&cache->rb_root));
2593         ASSERT(!cache->nr_nodes);
2594         ASSERT(!cache->nr_edges);
2595 }
2596
2597 /*
2598  * Handle direct tree backref
2599  *
2600  * Direct tree backref means that the backref item shows its parent bytenr
2601  * directly. This is for SHARED_BLOCK_REF backref (keyed or inlined).
2602  *
2603  * @ref_key:    The converted backref key.
2604  *              For keyed backref, it's the item key.
2605  *              For inlined backref, objectid is the bytenr,
2606  *              type is btrfs_inline_ref_type, offset is
2607  *              btrfs_inline_ref_offset.
2608  */
2609 static int handle_direct_tree_backref(struct btrfs_backref_cache *cache,
2610                                       struct btrfs_key *ref_key,
2611                                       struct btrfs_backref_node *cur)
2612 {
2613         struct btrfs_backref_edge *edge;
2614         struct btrfs_backref_node *upper;
2615         struct rb_node *rb_node;
2616
2617         ASSERT(ref_key->type == BTRFS_SHARED_BLOCK_REF_KEY);
2618
2619         /* Only reloc root uses backref pointing to itself */
2620         if (ref_key->objectid == ref_key->offset) {
2621                 struct btrfs_root *root;
2622
2623                 cur->is_reloc_root = 1;
2624                 /* Only reloc backref cache cares about a specific root */
2625                 if (cache->is_reloc) {
2626                         root = find_reloc_root(cache->fs_info, cur->bytenr);
2627                         if (WARN_ON(!root))
2628                                 return -ENOENT;
2629                         cur->root = root;
2630                 } else {
2631                         /*
2632                          * For generic purpose backref cache, reloc root node
2633                          * is useless.
2634                          */
2635                         list_add(&cur->list, &cache->useless_node);
2636                 }
2637                 return 0;
2638         }
2639
2640         edge = btrfs_backref_alloc_edge(cache);
2641         if (!edge)
2642                 return -ENOMEM;
2643
2644         rb_node = rb_simple_search(&cache->rb_root, ref_key->offset);
2645         if (!rb_node) {
2646                 /* Parent node not yet cached */
2647                 upper = btrfs_backref_alloc_node(cache, ref_key->offset,
2648                                            cur->level + 1);
2649                 if (!upper) {
2650                         btrfs_backref_free_edge(cache, edge);
2651                         return -ENOMEM;
2652                 }
2653
2654                 /*
2655                  * Backrefs for the upper level block aren't cached, add the
2656                  * block to the pending list
2657                  */
2658                 list_add_tail(&edge->list[UPPER], &cache->pending_edge);
2659         } else {
2660                 /* Parent node already cached */
2661                 upper = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
2662                 ASSERT(upper->checked);
2663                 INIT_LIST_HEAD(&edge->list[UPPER]);
2664         }
2665         btrfs_backref_link_edge(edge, cur, upper, LINK_LOWER);
2666         return 0;
2667 }
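
/*
 * To illustrate the two shapes this helper consumes (assuming a tree block
 * at bytenr B whose parent node sits at bytenr P):
 *
 *    keyed:   key = (B, BTRFS_SHARED_BLOCK_REF_KEY, P)
 *    inlined: objectid B, type BTRFS_SHARED_BLOCK_REF_KEY, offset P
 *
 * A reloc root's node is the one case where the backref points to itself,
 * i.e. P == B, which handle_direct_tree_backref() checks first.
 */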
2668
2669 /*
2670  * Handle indirect tree backref
2671  *
2672  * Indirect tree backref means that we only know which tree the node belongs
2673  * to. We still need to do a tree search to find out the parents. This is for
2674  * TREE_BLOCK_REF backref (keyed or inlined).
2675  *
2676  * @ref_key:    The same as @ref_key in handle_direct_tree_backref()
2677  * @tree_key:   The first key of this tree block.
2678  * @path:       A clean (released) path, to avoid allocating a path every time
2679  *              the function gets called.
2680  */
2681 static int handle_indirect_tree_backref(struct btrfs_backref_cache *cache,
2682                                         struct btrfs_path *path,
2683                                         struct btrfs_key *ref_key,
2684                                         struct btrfs_key *tree_key,
2685                                         struct btrfs_backref_node *cur)
2686 {
2687         struct btrfs_fs_info *fs_info = cache->fs_info;
2688         struct btrfs_backref_node *upper;
2689         struct btrfs_backref_node *lower;
2690         struct btrfs_backref_edge *edge;
2691         struct extent_buffer *eb;
2692         struct btrfs_root *root;
2693         struct rb_node *rb_node;
2694         int level;
2695         bool need_check = true;
2696         int ret;
2697
2698         root = btrfs_get_fs_root(fs_info, ref_key->offset, false);
2699         if (IS_ERR(root))
2700                 return PTR_ERR(root);
2701         if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
2702                 cur->cowonly = 1;
2703
2704         if (btrfs_root_level(&root->root_item) == cur->level) {
2705                 /* Tree root */
2706                 ASSERT(btrfs_root_bytenr(&root->root_item) == cur->bytenr);
2707                 /*
2708                  * For reloc backref cache, we may ignore reloc root.  But for
2709                  * general purpose backref cache, we can't rely on
2710                  * btrfs_should_ignore_reloc_root() as it may conflict with
2711                  * current running relocation and lead to missing root.
2712                  *
2713                  * For general purpose backref cache, reloc root detection is
2714                  * completely relying on direct backref (key->offset is parent
2715                  * bytenr), thus only do such check for reloc cache.
2716                  */
2717                 if (btrfs_should_ignore_reloc_root(root) && cache->is_reloc) {
2718                         btrfs_put_root(root);
2719                         list_add(&cur->list, &cache->useless_node);
2720                 } else {
2721                         cur->root = root;
2722                 }
2723                 return 0;
2724         }
2725
2726         level = cur->level + 1;
2727
2728         /* Search the tree to find parent blocks referring to the block */
2729         path->search_commit_root = 1;
2730         path->skip_locking = 1;
2731         path->lowest_level = level;
2732         ret = btrfs_search_slot(NULL, root, tree_key, path, 0, 0);
2733         path->lowest_level = 0;
2734         if (ret < 0) {
2735                 btrfs_put_root(root);
2736                 return ret;
2737         }
2738         if (ret > 0 && path->slots[level] > 0)
2739                 path->slots[level]--;
2740
2741         eb = path->nodes[level];
2742         if (btrfs_node_blockptr(eb, path->slots[level]) != cur->bytenr) {
2743                 btrfs_err(fs_info,
2744 "couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)",
2745                           cur->bytenr, level - 1, root->root_key.objectid,
2746                           tree_key->objectid, tree_key->type, tree_key->offset);
2747                 btrfs_put_root(root);
2748                 ret = -ENOENT;
2749                 goto out;
2750         }
2751         lower = cur;
2752
2753         /* Add all nodes and edges in the path */
2754         for (; level < BTRFS_MAX_LEVEL; level++) {
2755                 if (!path->nodes[level]) {
2756                         ASSERT(btrfs_root_bytenr(&root->root_item) ==
2757                                lower->bytenr);
2758                         /* Same as the earlier btrfs_should_ignore_reloc_root() call */
2759                         if (btrfs_should_ignore_reloc_root(root) &&
2760                             cache->is_reloc) {
2761                                 btrfs_put_root(root);
2762                                 list_add(&lower->list, &cache->useless_node);
2763                         } else {
2764                                 lower->root = root;
2765                         }
2766                         break;
2767                 }
2768
2769                 edge = btrfs_backref_alloc_edge(cache);
2770                 if (!edge) {
2771                         btrfs_put_root(root);
2772                         ret = -ENOMEM;
2773                         goto out;
2774                 }
2775
2776                 eb = path->nodes[level];
2777                 rb_node = rb_simple_search(&cache->rb_root, eb->start);
2778                 if (!rb_node) {
2779                         upper = btrfs_backref_alloc_node(cache, eb->start,
2780                                                          lower->level + 1);
2781                         if (!upper) {
2782                                 btrfs_put_root(root);
2783                                 btrfs_backref_free_edge(cache, edge);
2784                                 ret = -ENOMEM;
2785                                 goto out;
2786                         }
2787                         upper->owner = btrfs_header_owner(eb);
2788                         if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
2789                                 upper->cowonly = 1;
2790
2791                         /*
2792                          * If we know the block isn't shared we can avoid
2793                          * checking its backrefs.
2794                          */
2795                         if (btrfs_block_can_be_shared(root, eb))
2796                                 upper->checked = 0;
2797                         else
2798                                 upper->checked = 1;
2799
2800                         /*
2801                          * Add the block to pending list if we need to check its
2802                          * backrefs, we only do this once while walking up a
2803                          * tree as we will catch anything else later on.
2804                          */
2805                         if (!upper->checked && need_check) {
2806                                 need_check = false;
2807                                 list_add_tail(&edge->list[UPPER],
2808                                               &cache->pending_edge);
2809                         } else {
2810                                 if (upper->checked)
2811                                         need_check = true;
2812                                 INIT_LIST_HEAD(&edge->list[UPPER]);
2813                         }
2814                 } else {
2815                         upper = rb_entry(rb_node, struct btrfs_backref_node,
2816                                          rb_node);
2817                         ASSERT(upper->checked);
2818                         INIT_LIST_HEAD(&edge->list[UPPER]);
2819                         if (!upper->owner)
2820                                 upper->owner = btrfs_header_owner(eb);
2821                 }
2822                 btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER);
2823
2824                 if (rb_node) {
2825                         btrfs_put_root(root);
2826                         break;
2827                 }
2828                 lower = upper;
2829                 upper = NULL;
2830         }
2831 out:
2832         btrfs_release_path(path);
2833         return ret;
2834 }
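
/*
 * Illustration only (not from the original source): resolving one indirect
 * backref via the tree search above looks roughly like
 *
 *	path->nodes[root_level]  ->  topmost upper node
 *	        ...                  upper nodes + edges, one per level
 *	path->nodes[level]       ->  first parent of cur
 *	        cur                  the block being resolved
 *
 * The loop stops early on a cache hit (rb_node found), because an
 * already-cached upper node has all of its own parents linked too.
 */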
2835
2836 /*
2837  * Add backref node @cur into @cache.
2838  *
2839  * NOTE: Even if the function returned 0, @cur is not yet cached, as its
2840  *       upper links aren't yet bi-directional.  The caller must finish
2841  *       such linkage with btrfs_backref_finish_upper_links().
2842  *
2843  * @path:       Released path for indirect tree backref lookup
2844  * @iter:       Released backref iter for extent tree search
2845  * @node_key:   The first key of the tree block
2846  */
2847 int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
2848                                 struct btrfs_path *path,
2849                                 struct btrfs_backref_iter *iter,
2850                                 struct btrfs_key *node_key,
2851                                 struct btrfs_backref_node *cur)
2852 {
2853         struct btrfs_fs_info *fs_info = cache->fs_info;
2854         struct btrfs_backref_edge *edge;
2855         struct btrfs_backref_node *exist;
2856         int ret;
2857
2858         ret = btrfs_backref_iter_start(iter, cur->bytenr);
2859         if (ret < 0)
2860                 return ret;
2861         /*
2862          * We skip the first btrfs_tree_block_info, as we don't use the key
2863          * stored in it but fetch the key from the tree block instead
2864          */
2865         if (btrfs_backref_has_tree_block_info(iter)) {
2866                 ret = btrfs_backref_iter_next(iter);
2867                 if (ret < 0)
2868                         goto out;
2869                 /* No extra backref? This means the tree block is corrupted */
2870                 if (ret > 0) {
2871                         ret = -EUCLEAN;
2872                         goto out;
2873                 }
2874         }
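
	/*
	 * Layout sketch, added for illustration: a non-skinny metadata
	 * extent item is roughly
	 *
	 *	btrfs_extent_item | btrfs_tree_block_info | inline refs...
	 *
	 * while skinny metadata (BTRFS_METADATA_ITEM_KEY) omits the
	 * btrfs_tree_block_info, hence the conditional skip above.
	 */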
2875         WARN_ON(cur->checked);
2876         if (!list_empty(&cur->upper)) {
2877                 /*
2878                  * The backref was added previously when processing a backref
2879                  * of type BTRFS_TREE_BLOCK_REF_KEY
2880                  */
2881                 ASSERT(list_is_singular(&cur->upper));
2882                 edge = list_entry(cur->upper.next, struct btrfs_backref_edge,
2883                                   list[LOWER]);
2884                 ASSERT(list_empty(&edge->list[UPPER]));
2885                 exist = edge->node[UPPER];
2886                 /*
2887                  * Add the upper level block to the pending list if we need
2888                  * to check its backrefs
2889                  */
2890                 if (!exist->checked)
2891                         list_add_tail(&edge->list[UPPER], &cache->pending_edge);
2892         } else {
2893                 exist = NULL;
2894         }
2895
2896         for (; ret == 0; ret = btrfs_backref_iter_next(iter)) {
2897                 struct extent_buffer *eb;
2898                 struct btrfs_key key;
2899                 int type;
2900
2901                 cond_resched();
2902                 eb = btrfs_backref_get_eb(iter);
2903
2904                 key.objectid = iter->bytenr;
2905                 if (btrfs_backref_iter_is_inline_ref(iter)) {
2906                         struct btrfs_extent_inline_ref *iref;
2907
2908                         /* Update key for inline backref */
2909                         iref = (struct btrfs_extent_inline_ref *)
2910                                 ((unsigned long)iter->cur_ptr);
2911                         type = btrfs_get_extent_inline_ref_type(eb, iref,
2912                                                         BTRFS_REF_TYPE_BLOCK);
2913                         if (type == BTRFS_REF_TYPE_INVALID) {
2914                                 ret = -EUCLEAN;
2915                                 goto out;
2916                         }
2917                         key.type = type;
2918                         key.offset = btrfs_extent_inline_ref_offset(eb, iref);
2919                 } else {
2920                         key.type = iter->cur_key.type;
2921                         key.offset = iter->cur_key.offset;
2922                 }
2923
2924                 /*
2925                  * The parent node was found and matches the current inline
2926                  * ref, no need to rebuild this node for it
2927                  */
2928                 if (exist &&
2929                     ((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
2930                       exist->owner == key.offset) ||
2931                      (key.type == BTRFS_SHARED_BLOCK_REF_KEY &&
2932                       exist->bytenr == key.offset))) {
2933                         exist = NULL;
2934                         continue;
2935                 }
2936
2937                 /* SHARED_BLOCK_REF means key.offset is the parent bytenr */
2938                 if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
2939                         ret = handle_direct_tree_backref(cache, &key, cur);
2940                         if (ret < 0)
2941                                 goto out;
2942                         continue;
2943                 } else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
2944                         ret = -EINVAL;
2945                         btrfs_print_v0_err(fs_info);
2946                         btrfs_handle_fs_error(fs_info, ret, NULL);
2947                         goto out;
2948                 } else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) {
2949                         continue;
2950                 }
2951
2952                 /*
2953                  * key.type == BTRFS_TREE_BLOCK_REF_KEY, the ref offset is
2954                  * the root objectid. We need to search the tree to get its
2955                  * parent bytenr.
2956                  */
2957                 ret = handle_indirect_tree_backref(cache, path, &key, node_key,
2958                                                    cur);
2959                 if (ret < 0)
2960                         goto out;
2961         }
2962         ret = 0;
2963         cur->checked = 1;
2964         WARN_ON(exist);
2965 out:
2966         btrfs_backref_iter_release(iter);
2967         return ret;
2968 }
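
/*
 * Minimal caller sketch, for illustration only (modeled on
 * build_backref_tree() in relocation.c; the local names here are made up):
 *
 *	iter = btrfs_backref_iter_alloc(cache->fs_info, GFP_NOFS);
 *	path = btrfs_alloc_path();
 *	node = btrfs_backref_alloc_node(cache, bytenr, level);
 *	if (!iter || !path || !node)
 *		goto err;	// free whatever did get allocated
 *
 *	ret = btrfs_backref_add_tree_node(cache, path, iter, &node_key, node);
 *	if (ret < 0)
 *		goto err;
 *	// The upper links only become usable after this second step:
 *	ret = btrfs_backref_finish_upper_links(cache, node);
 *	if (ret < 0)
 *		btrfs_backref_error_cleanup(cache, node);
 */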
2969
2970 /*
2971  * Finish the upwards linkage created by btrfs_backref_add_tree_node()
2972  */
2973 int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache,
2974                                      struct btrfs_backref_node *start)
2975 {
2976         struct list_head *useless_node = &cache->useless_node;
2977         struct btrfs_backref_edge *edge;
2978         struct rb_node *rb_node;
2979         LIST_HEAD(pending_edge);
2980
2981         ASSERT(start->checked);
2982
2983         /* Insert this node into the cache if it's not COW-only */
2984         if (!start->cowonly) {
2985                 rb_node = rb_simple_insert(&cache->rb_root, start->bytenr,
2986                                            &start->rb_node);
2987                 if (rb_node)
2988                         btrfs_backref_panic(cache->fs_info, start->bytenr,
2989                                             -EEXIST);
2990                 list_add_tail(&start->lower, &cache->leaves);
2991         }
2992
2993         /*
2994          * Use breadth-first search to iterate all related edges.
2995          *
2996          * The starting points are all the edges of this node
2997          */
2998         list_for_each_entry(edge, &start->upper, list[LOWER])
2999                 list_add_tail(&edge->list[UPPER], &pending_edge);
3000
3001         while (!list_empty(&pending_edge)) {
3002                 struct btrfs_backref_node *upper;
3003                 struct btrfs_backref_node *lower;
3004
3005                 edge = list_first_entry(&pending_edge,
3006                                 struct btrfs_backref_edge, list[UPPER]);
3007                 list_del_init(&edge->list[UPPER]);
3008                 upper = edge->node[UPPER];
3009                 lower = edge->node[LOWER];
3010
3011                 /* Parent is detached, no need to keep any edges */
3012                 if (upper->detached) {
3013                         list_del(&edge->list[LOWER]);
3014                         btrfs_backref_free_edge(cache, edge);
3015
3016                         /* Lower node is orphan, queue for cleanup */
3017                         if (list_empty(&lower->upper))
3018                                 list_add(&lower->list, useless_node);
3019                         continue;
3020                 }
3021
3022                 /*
3023          * All new nodes added by the current build_backref_tree() call
3024          * haven't been linked to the cache rb tree yet.
3025          * So if upper->rb_node is populated, this means a cache hit.
3026          * We only need to link the edge, as @upper and all its parents
3027          * have already been linked.
3028                  */
3029                 if (!RB_EMPTY_NODE(&upper->rb_node)) {
3030                         if (upper->lowest) {
3031                                 list_del_init(&upper->lower);
3032                                 upper->lowest = 0;
3033                         }
3034
3035                         list_add_tail(&edge->list[UPPER], &upper->lower);
3036                         continue;
3037                 }
3038
3039                 /* Sanity check, we shouldn't have any unchecked nodes */
3040                 if (!upper->checked) {
3041                         ASSERT(0);
3042                         return -EUCLEAN;
3043                 }
3044
3045                 /* Sanity check, the start node and its parents must share COW-only status */
3046                 if (start->cowonly != upper->cowonly) {
3047                         ASSERT(0);
3048                         return -EUCLEAN;
3049                 }
3050
3051                 /* Only cache non-COW-only tree blocks (subvolume trees) */
3052                 if (!upper->cowonly) {
3053                         rb_node = rb_simple_insert(&cache->rb_root, upper->bytenr,
3054                                                    &upper->rb_node);
3055                         if (rb_node) {
3056                                 btrfs_backref_panic(cache->fs_info,
3057                                                 upper->bytenr, -EEXIST);
3058                                 return -EUCLEAN;
3059                         }
3060                 }
3061
3062                 list_add_tail(&edge->list[UPPER], &upper->lower);
3063
3064                 /*
3065                  * Also queue all the parent edges of this uncached node
3066                  * to finish the upper linkage
3067                  */
3068                 list_for_each_entry(edge, &upper->upper, list[LOWER])
3069                         list_add_tail(&edge->list[UPPER], &pending_edge);
3070         }
3071         return 0;
3072 }
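
/*
 * Illustration only: once the BFS above finishes, every edge is linked on
 * both ends, e.g. for a node with two parents:
 *
 *	upper1          upper2
 *	     \          /        edge->list[UPPER] lives on upper->lower
 *	    edge1    edge2
 *	       \      /          edge->list[LOWER] lives on node->upper
 *	        node
 *
 * which is what makes the cached tree walkable in both directions.
 */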
3073
3074 void btrfs_backref_error_cleanup(struct btrfs_backref_cache *cache,
3075                                  struct btrfs_backref_node *node)
3076 {
3077         struct btrfs_backref_node *lower;
3078         struct btrfs_backref_node *upper;
3079         struct btrfs_backref_edge *edge;
3080
3081         while (!list_empty(&cache->useless_node)) {
3082                 lower = list_first_entry(&cache->useless_node,
3083                                    struct btrfs_backref_node, list);
3084                 list_del_init(&lower->list);
3085         }
3086         while (!list_empty(&cache->pending_edge)) {
3087                 edge = list_first_entry(&cache->pending_edge,
3088                                 struct btrfs_backref_edge, list[UPPER]);
3089                 list_del(&edge->list[UPPER]);
3090                 list_del(&edge->list[LOWER]);
3091                 lower = edge->node[LOWER];
3092                 upper = edge->node[UPPER];
3093                 btrfs_backref_free_edge(cache, edge);
3094
3095                 /*
3096                  * Lower is no longer linked to any upper backref nodes and
3097                  * isn't in the cache, queue it so it gets freed below.
3098                  */
3099                 if (list_empty(&lower->upper) &&
3100                     RB_EMPTY_NODE(&lower->rb_node))
3101                         list_add(&lower->list, &cache->useless_node);
3102
3103                 if (!RB_EMPTY_NODE(&upper->rb_node))
3104                         continue;
3105
3106                 /* Add this node's upper edges to the list to process */
3107                 list_for_each_entry(edge, &upper->upper, list[LOWER])
3108                         list_add_tail(&edge->list[UPPER],
3109                                       &cache->pending_edge);
3110                 if (list_empty(&upper->upper))
3111                         list_add(&upper->list, &cache->useless_node);
3112         }
3113
3114         while (!list_empty(&cache->useless_node)) {
3115                 lower = list_first_entry(&cache->useless_node,
3116                                    struct btrfs_backref_node, list);
3117                 list_del_init(&lower->list);
3118                 if (lower == node)
3119                         node = NULL;
3120                 btrfs_backref_free_node(cache, lower);
3121         }
3122
3123         btrfs_backref_cleanup_node(cache, node);
3124         ASSERT(list_empty(&cache->useless_node) &&
3125                list_empty(&cache->pending_edge));
3126 }
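
/*
 * Usage note, for illustration: btrfs_backref_error_cleanup() expects @node
 * to be the starting node that was passed to btrfs_backref_add_tree_node().
 * It drains cache->pending_edge and cache->useless_node so that an aborted
 * build leaves the cache in a consistent state.
 */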