fs/btrfs/ref-verify.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2014 Facebook.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/stacktrace.h>
#include "ctree.h"
#include "disk-io.h"
#include "locking.h"
#include "delayed-ref.h"
#include "ref-verify.h"

/*
 * Used to keep track of the roots and the number of refs each root has for a
 * given bytenr.  This just tracks the number of direct references, not shared
 * references.
 */
struct root_entry {
        u64 root_objectid;
        u64 num_refs;
        struct rb_node node;
};
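
/*
 * Illustrative example (added commentary, not from the original source): if
 * root 5 holds two direct refs on a block, its node in block_entry->roots
 * would be { .root_objectid = 5, .num_refs = 2 }.
 */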

/*
 * These are meant to represent what should exist in the extent tree.  They
 * can be used to verify that the extent tree is consistent, as they should
 * all match what the extent tree says.
 */
struct ref_entry {
        u64 root_objectid;
        u64 parent;
        u64 owner;
        u64 offset;
        u64 num_refs;
        struct rb_node node;
};
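
/*
 * Illustrative example (added commentary, values are made up): a keyed data
 * ref owned by inode 257 in root 5 at file offset 0 would be recorded as
 * { .root_objectid = 5, .parent = 0, .owner = 257, .offset = 0 }, while a
 * shared ref fills in only .parent and leaves the rest zero.
 */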

#define MAX_TRACE       16

/*
 * Whenever we add/remove a reference we record the action.  The action maps
 * back to the delayed ref action.  We hold the ref we are changing in the
 * action so we can account for the history properly, and we record the root we
 * were called with since it could be different from ref_root.  We also store
 * stack traces because that's how I roll.
 */
struct ref_action {
        int action;
        u64 root;
        struct ref_entry ref;
        struct list_head list;
        unsigned long trace[MAX_TRACE];
        unsigned int trace_len;
};

/*
 * One of these for every block we reference; it holds the roots and references
 * to it as well as all of the ref actions that have occurred to it.  We never
 * free it until we unmount the file system, in order to make sure
 * re-allocations are happening properly.
 */
struct block_entry {
        u64 bytenr;
        u64 len;
        u64 num_refs;
        int metadata;
        int from_disk;
        struct rb_root roots;
        struct rb_root refs;
        struct rb_node node;
        struct list_head actions;
};
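
/*
 * Added sketch of how these pieces fit together:
 *
 *   fs_info->block_tree (rb tree keyed by bytenr)
 *       block_entry
 *           ->roots:   rb tree of root_entry, keyed by root_objectid
 *           ->refs:    rb tree of ref_entry, ordered by comp_refs()
 *           ->actions: list of every ref_action applied to this block
 */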

static struct block_entry *insert_block_entry(struct rb_root *root,
                                              struct block_entry *be)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent_node = NULL;
        struct block_entry *entry;

        while (*p) {
                parent_node = *p;
                entry = rb_entry(parent_node, struct block_entry, node);
                if (entry->bytenr > be->bytenr)
                        p = &(*p)->rb_left;
                else if (entry->bytenr < be->bytenr)
                        p = &(*p)->rb_right;
                else
                        return entry;
        }

        rb_link_node(&be->node, parent_node, p);
        rb_insert_color(&be->node, root);
        return NULL;
}
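
/*
 * Added note: all of the insert_* helpers in this file share a convention:
 * they return the already-present entry on a key collision and NULL after a
 * successful insertion, so a non-NULL return means "use the existing entry
 * and free the one you allocated".
 */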

static struct block_entry *lookup_block_entry(struct rb_root *root, u64 bytenr)
{
        struct rb_node *n;
        struct block_entry *entry = NULL;

        n = root->rb_node;
        while (n) {
                entry = rb_entry(n, struct block_entry, node);
                if (entry->bytenr < bytenr)
                        n = n->rb_right;
                else if (entry->bytenr > bytenr)
                        n = n->rb_left;
                else
                        return entry;
        }
        return NULL;
}

static struct root_entry *insert_root_entry(struct rb_root *root,
                                            struct root_entry *re)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent_node = NULL;
        struct root_entry *entry;

        while (*p) {
                parent_node = *p;
                entry = rb_entry(parent_node, struct root_entry, node);
                if (entry->root_objectid > re->root_objectid)
                        p = &(*p)->rb_left;
                else if (entry->root_objectid < re->root_objectid)
                        p = &(*p)->rb_right;
                else
                        return entry;
        }

        rb_link_node(&re->node, parent_node, p);
        rb_insert_color(&re->node, root);
        return NULL;
}

static int comp_refs(struct ref_entry *ref1, struct ref_entry *ref2)
{
        if (ref1->root_objectid < ref2->root_objectid)
                return -1;
        if (ref1->root_objectid > ref2->root_objectid)
                return 1;
        if (ref1->parent < ref2->parent)
                return -1;
        if (ref1->parent > ref2->parent)
                return 1;
        if (ref1->owner < ref2->owner)
                return -1;
        if (ref1->owner > ref2->owner)
                return 1;
        if (ref1->offset < ref2->offset)
                return -1;
        if (ref1->offset > ref2->offset)
                return 1;
        return 0;
}
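
/*
 * Added note: comp_refs() orders ref_entry keys lexicographically by
 * (root_objectid, parent, owner, offset), so for example {5, 0, 257, 0}
 * sorts before {5, 0, 257, 4096}.
 */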

static struct ref_entry *insert_ref_entry(struct rb_root *root,
                                          struct ref_entry *ref)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent_node = NULL;
        struct ref_entry *entry;
        int cmp;

        while (*p) {
                parent_node = *p;
                entry = rb_entry(parent_node, struct ref_entry, node);
                cmp = comp_refs(entry, ref);
                if (cmp > 0)
                        p = &(*p)->rb_left;
                else if (cmp < 0)
                        p = &(*p)->rb_right;
                else
                        return entry;
        }

        rb_link_node(&ref->node, parent_node, p);
        rb_insert_color(&ref->node, root);
        return NULL;
}

static struct root_entry *lookup_root_entry(struct rb_root *root, u64 objectid)
{
        struct rb_node *n;
        struct root_entry *entry = NULL;

        n = root->rb_node;
        while (n) {
                entry = rb_entry(n, struct root_entry, node);
                if (entry->root_objectid < objectid)
                        n = n->rb_right;
                else if (entry->root_objectid > objectid)
                        n = n->rb_left;
                else
                        return entry;
        }
        return NULL;
}

#ifdef CONFIG_STACKTRACE
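/*
 * Added note: the skipnr argument of 2 passed to stack_trace_save() below
 * trims the innermost tracing wrappers from the saved entries, so the trace
 * starts closer to the code that actually modified the ref.
 */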
static void __save_stack_trace(struct ref_action *ra)
{
        ra->trace_len = stack_trace_save(ra->trace, MAX_TRACE, 2);
}

static void __print_stack_trace(struct btrfs_fs_info *fs_info,
                                struct ref_action *ra)
{
        if (ra->trace_len == 0) {
                btrfs_err(fs_info, "  ref-verify: no stacktrace");
                return;
        }
        stack_trace_print(ra->trace, ra->trace_len, 2);
}
#else
static inline void __save_stack_trace(struct ref_action *ra)
{
}

static inline void __print_stack_trace(struct btrfs_fs_info *fs_info,
                                       struct ref_action *ra)
{
        btrfs_err(fs_info, "  ref-verify: no stacktrace support");
}
#endif

static void free_block_entry(struct block_entry *be)
{
        struct root_entry *re;
        struct ref_entry *ref;
        struct ref_action *ra;
        struct rb_node *n;

        while ((n = rb_first(&be->roots))) {
                re = rb_entry(n, struct root_entry, node);
                rb_erase(&re->node, &be->roots);
                kfree(re);
        }

        while ((n = rb_first(&be->refs))) {
                ref = rb_entry(n, struct ref_entry, node);
                rb_erase(&ref->node, &be->refs);
                kfree(ref);
        }

        while (!list_empty(&be->actions)) {
                ra = list_first_entry(&be->actions, struct ref_action,
                                      list);
                list_del(&ra->list);
                kfree(ra);
        }
        kfree(be);
}

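/*
 * Added note: on success add_block_entry() returns with
 * fs_info->ref_verify_lock held, whether it found an existing entry or
 * inserted a new one.  Every caller below is responsible for dropping the
 * lock once it is done updating the entry.
 */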
static struct block_entry *add_block_entry(struct btrfs_fs_info *fs_info,
                                           u64 bytenr, u64 len,
                                           u64 root_objectid)
{
        struct block_entry *be = NULL, *exist;
        struct root_entry *re = NULL;

        re = kzalloc(sizeof(struct root_entry), GFP_KERNEL);
        be = kzalloc(sizeof(struct block_entry), GFP_KERNEL);
        if (!be || !re) {
                kfree(re);
                kfree(be);
                return ERR_PTR(-ENOMEM);
        }
        be->bytenr = bytenr;
        be->len = len;

        re->root_objectid = root_objectid;
        re->num_refs = 0;

        spin_lock(&fs_info->ref_verify_lock);
        exist = insert_block_entry(&fs_info->block_tree, be);
        if (exist) {
                if (root_objectid) {
                        struct root_entry *exist_re;

                        exist_re = insert_root_entry(&exist->roots, re);
                        if (exist_re)
                                kfree(re);
                } else {
                        kfree(re);
                }
                kfree(be);
                return exist;
        }

        be->num_refs = 0;
        be->metadata = 0;
        be->from_disk = 0;
        be->roots = RB_ROOT;
        be->refs = RB_ROOT;
        INIT_LIST_HEAD(&be->actions);
        if (root_objectid)
                insert_root_entry(&be->roots, re);
        else
                kfree(re);
        return be;
}

static int add_tree_block(struct btrfs_fs_info *fs_info, u64 ref_root,
                          u64 parent, u64 bytenr, int level)
{
        struct block_entry *be;
        struct root_entry *re;
        struct ref_entry *ref = NULL, *exist;

        ref = kmalloc(sizeof(struct ref_entry), GFP_KERNEL);
        if (!ref)
                return -ENOMEM;

        if (parent)
                ref->root_objectid = 0;
        else
                ref->root_objectid = ref_root;
        ref->parent = parent;
        ref->owner = level;
        ref->offset = 0;
        ref->num_refs = 1;

        be = add_block_entry(fs_info, bytenr, fs_info->nodesize, ref_root);
        if (IS_ERR(be)) {
                kfree(ref);
                return PTR_ERR(be);
        }
        be->num_refs++;
        be->from_disk = 1;
        be->metadata = 1;

        if (!parent) {
                ASSERT(ref_root);
                re = lookup_root_entry(&be->roots, ref_root);
                ASSERT(re);
                re->num_refs++;
        }
        exist = insert_ref_entry(&be->refs, ref);
        if (exist) {
                exist->num_refs++;
                kfree(ref);
        }
        spin_unlock(&fs_info->ref_verify_lock);

        return 0;
}

static int add_shared_data_ref(struct btrfs_fs_info *fs_info,
                               u64 parent, u32 num_refs, u64 bytenr,
                               u64 num_bytes)
{
        struct block_entry *be;
        struct ref_entry *ref;

        ref = kzalloc(sizeof(struct ref_entry), GFP_KERNEL);
        if (!ref)
                return -ENOMEM;
        be = add_block_entry(fs_info, bytenr, num_bytes, 0);
        if (IS_ERR(be)) {
                kfree(ref);
                return PTR_ERR(be);
        }
        be->num_refs += num_refs;

        ref->parent = parent;
        ref->num_refs = num_refs;
        if (insert_ref_entry(&be->refs, ref)) {
                spin_unlock(&fs_info->ref_verify_lock);
                btrfs_err(fs_info, "existing shared ref when reading from disk?");
                kfree(ref);
                return -EINVAL;
        }
        spin_unlock(&fs_info->ref_verify_lock);
        return 0;
}

static int add_extent_data_ref(struct btrfs_fs_info *fs_info,
                               struct extent_buffer *leaf,
                               struct btrfs_extent_data_ref *dref,
                               u64 bytenr, u64 num_bytes)
{
        struct block_entry *be;
        struct ref_entry *ref;
        struct root_entry *re;
        u64 ref_root = btrfs_extent_data_ref_root(leaf, dref);
        u64 owner = btrfs_extent_data_ref_objectid(leaf, dref);
        u64 offset = btrfs_extent_data_ref_offset(leaf, dref);
        u32 num_refs = btrfs_extent_data_ref_count(leaf, dref);

        ref = kzalloc(sizeof(struct ref_entry), GFP_KERNEL);
        if (!ref)
                return -ENOMEM;
        be = add_block_entry(fs_info, bytenr, num_bytes, ref_root);
        if (IS_ERR(be)) {
                kfree(ref);
                return PTR_ERR(be);
        }
        be->num_refs += num_refs;

        ref->parent = 0;
        ref->owner = owner;
        ref->root_objectid = ref_root;
        ref->offset = offset;
        ref->num_refs = num_refs;
        if (insert_ref_entry(&be->refs, ref)) {
                spin_unlock(&fs_info->ref_verify_lock);
                btrfs_err(fs_info, "existing ref when reading from disk?");
                kfree(ref);
                return -EINVAL;
        }

        re = lookup_root_entry(&be->roots, ref_root);
        if (!re) {
                spin_unlock(&fs_info->ref_verify_lock);
                btrfs_err(fs_info, "missing root in new block entry?");
                return -EINVAL;
        }
        re->num_refs += num_refs;
        spin_unlock(&fs_info->ref_verify_lock);
        return 0;
}

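/*
 * Added sketch of the on-disk layout process_extent_item() walks (see
 * ctree.h for the authoritative definitions):
 *
 *   btrfs_extent_item
 *   [btrfs_tree_block_info    -- only for EXTENT_ITEM_KEY tree blocks]
 *   btrfs_extent_inline_ref   -- repeated until the end of the item, each
 *                                sized by btrfs_extent_inline_ref_size()
 */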
static int process_extent_item(struct btrfs_fs_info *fs_info,
                               struct btrfs_path *path, struct btrfs_key *key,
                               int slot, int *tree_block_level)
{
        struct btrfs_extent_item *ei;
        struct btrfs_extent_inline_ref *iref;
        struct btrfs_extent_data_ref *dref;
        struct btrfs_shared_data_ref *sref;
        struct extent_buffer *leaf = path->nodes[0];
        u32 item_size = btrfs_item_size_nr(leaf, slot);
        unsigned long end, ptr;
        u64 offset, flags, count;
        int type;
        int ret = 0;    /* in case the item carries no inline refs */

        ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
        flags = btrfs_extent_flags(leaf, ei);

        if ((key->type == BTRFS_EXTENT_ITEM_KEY) &&
            flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
                struct btrfs_tree_block_info *info;

                info = (struct btrfs_tree_block_info *)(ei + 1);
                *tree_block_level = btrfs_tree_block_level(leaf, info);
                iref = (struct btrfs_extent_inline_ref *)(info + 1);
        } else {
                if (key->type == BTRFS_METADATA_ITEM_KEY)
                        *tree_block_level = key->offset;
                iref = (struct btrfs_extent_inline_ref *)(ei + 1);
        }

        ptr = (unsigned long)iref;
        end = (unsigned long)ei + item_size;
        while (ptr < end) {
                iref = (struct btrfs_extent_inline_ref *)ptr;
                type = btrfs_extent_inline_ref_type(leaf, iref);
                offset = btrfs_extent_inline_ref_offset(leaf, iref);
                switch (type) {
                case BTRFS_TREE_BLOCK_REF_KEY:
                        ret = add_tree_block(fs_info, offset, 0, key->objectid,
                                             *tree_block_level);
                        break;
                case BTRFS_SHARED_BLOCK_REF_KEY:
                        ret = add_tree_block(fs_info, 0, offset, key->objectid,
                                             *tree_block_level);
                        break;
                case BTRFS_EXTENT_DATA_REF_KEY:
                        dref = (struct btrfs_extent_data_ref *)(&iref->offset);
                        ret = add_extent_data_ref(fs_info, leaf, dref,
                                                  key->objectid, key->offset);
                        break;
                case BTRFS_SHARED_DATA_REF_KEY:
                        sref = (struct btrfs_shared_data_ref *)(iref + 1);
                        count = btrfs_shared_data_ref_count(leaf, sref);
                        ret = add_shared_data_ref(fs_info, offset, count,
                                                  key->objectid, key->offset);
                        break;
                default:
                        btrfs_err(fs_info, "invalid key type in iref");
                        ret = -EINVAL;
                        break;
                }
                if (ret)
                        break;
                ptr += btrfs_extent_inline_ref_size(type);
        }
        return ret;
}

static int process_leaf(struct btrfs_root *root,
                        struct btrfs_path *path, u64 *bytenr, u64 *num_bytes)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct extent_buffer *leaf = path->nodes[0];
        struct btrfs_extent_data_ref *dref;
        struct btrfs_shared_data_ref *sref;
        u32 count;
        int i = 0, tree_block_level = 0, ret = 0;
        struct btrfs_key key;
        int nritems = btrfs_header_nritems(leaf);

        for (i = 0; i < nritems; i++) {
                btrfs_item_key_to_cpu(leaf, &key, i);
                switch (key.type) {
                case BTRFS_EXTENT_ITEM_KEY:
                        *num_bytes = key.offset;
                        fallthrough;
                case BTRFS_METADATA_ITEM_KEY:
                        *bytenr = key.objectid;
                        ret = process_extent_item(fs_info, path, &key, i,
                                                  &tree_block_level);
                        break;
                case BTRFS_TREE_BLOCK_REF_KEY:
                        ret = add_tree_block(fs_info, key.offset, 0,
                                             key.objectid, tree_block_level);
                        break;
                case BTRFS_SHARED_BLOCK_REF_KEY:
                        ret = add_tree_block(fs_info, 0, key.offset,
                                             key.objectid, tree_block_level);
                        break;
                case BTRFS_EXTENT_DATA_REF_KEY:
                        dref = btrfs_item_ptr(leaf, i,
                                              struct btrfs_extent_data_ref);
                        ret = add_extent_data_ref(fs_info, leaf, dref, *bytenr,
                                                  *num_bytes);
                        break;
                case BTRFS_SHARED_DATA_REF_KEY:
                        sref = btrfs_item_ptr(leaf, i,
                                              struct btrfs_shared_data_ref);
                        count = btrfs_shared_data_ref_count(leaf, sref);
                        ret = add_shared_data_ref(fs_info, key.offset, count,
                                                  *bytenr, *num_bytes);
                        break;
                default:
                        break;
                }
                if (ret)
                        break;
        }
        return ret;
}

/* Walk down to the leaf from the given level */
static int walk_down_tree(struct btrfs_root *root, struct btrfs_path *path,
                          int level, u64 *bytenr, u64 *num_bytes)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct extent_buffer *eb;
        u64 block_bytenr, gen;
        int ret = 0;

        while (level >= 0) {
                if (level) {
                        struct btrfs_key first_key;

                        block_bytenr = btrfs_node_blockptr(path->nodes[level],
                                                           path->slots[level]);
                        gen = btrfs_node_ptr_generation(path->nodes[level],
                                                        path->slots[level]);
                        btrfs_node_key_to_cpu(path->nodes[level], &first_key,
                                              path->slots[level]);
                        eb = read_tree_block(fs_info, block_bytenr, gen,
                                             level - 1, &first_key);
                        if (IS_ERR(eb))
                                return PTR_ERR(eb);
                        if (!extent_buffer_uptodate(eb)) {
                                free_extent_buffer(eb);
                                return -EIO;
                        }
                        btrfs_tree_read_lock(eb);
                        btrfs_set_lock_blocking_read(eb);
                        path->nodes[level - 1] = eb;
                        path->slots[level - 1] = 0;
                        path->locks[level - 1] = BTRFS_READ_LOCK_BLOCKING;
                } else {
                        ret = process_leaf(root, path, bytenr, num_bytes);
                        if (ret)
                                break;
                }
                level--;
        }
        return ret;
}

/* Walk up to the next node that needs to be processed */
static int walk_up_tree(struct btrfs_path *path, int *level)
{
        int l;

        for (l = 0; l < BTRFS_MAX_LEVEL; l++) {
                if (!path->nodes[l])
                        continue;
                if (l) {
                        path->slots[l]++;
                        if (path->slots[l] <
                            btrfs_header_nritems(path->nodes[l])) {
                                *level = l;
                                return 0;
                        }
                }
                btrfs_tree_unlock_rw(path->nodes[l], path->locks[l]);
                free_extent_buffer(path->nodes[l]);
                path->nodes[l] = NULL;
                path->slots[l] = 0;
                path->locks[l] = 0;
        }

        return 1;
}
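
/*
 * Added note: btrfs_build_ref_tree() below alternates these two helpers:
 * walk_down_tree() descends to and processes a leaf, then walk_up_tree()
 * pops finished nodes and advances to the next slot, returning 1 once the
 * whole extent tree has been visited.
 */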

static void dump_ref_action(struct btrfs_fs_info *fs_info,
                            struct ref_action *ra)
{
        btrfs_err(fs_info,
"  Ref action %d, root %llu, ref_root %llu, parent %llu, owner %llu, offset %llu, num_refs %llu",
                  ra->action, ra->root, ra->ref.root_objectid, ra->ref.parent,
                  ra->ref.owner, ra->ref.offset, ra->ref.num_refs);
        __print_stack_trace(fs_info, ra);
}

/*
 * Dump all the information from the block entry to printk; it's going to be
 * awesome.
 */
static void dump_block_entry(struct btrfs_fs_info *fs_info,
                             struct block_entry *be)
{
        struct ref_entry *ref;
        struct root_entry *re;
        struct ref_action *ra;
        struct rb_node *n;

        btrfs_err(fs_info,
"dumping block entry [%llu %llu], num_refs %llu, metadata %d, from disk %d",
                  be->bytenr, be->len, be->num_refs, be->metadata,
                  be->from_disk);

        for (n = rb_first(&be->refs); n; n = rb_next(n)) {
                ref = rb_entry(n, struct ref_entry, node);
                btrfs_err(fs_info,
"  ref root %llu, parent %llu, owner %llu, offset %llu, num_refs %llu",
                          ref->root_objectid, ref->parent, ref->owner,
                          ref->offset, ref->num_refs);
        }

        for (n = rb_first(&be->roots); n; n = rb_next(n)) {
                re = rb_entry(n, struct root_entry, node);
                btrfs_err(fs_info, "  root entry %llu, num_refs %llu",
                          re->root_objectid, re->num_refs);
        }

        list_for_each_entry(ra, &be->actions, list)
                dump_ref_action(fs_info, ra);
}

/*
 * btrfs_ref_tree_mod: called when we modify a ref for a bytenr
 *
 * This will add an action item to the given bytenr and do sanity checks to
 * make sure we haven't messed something up.  If we are making a new allocation
 * and this block entry has history, we will delete all previous actions as
 * long as our sanity checks pass, since they are no longer needed.
 */
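/*
 * Added note: three delayed ref actions matter below.
 * BTRFS_ADD_DELAYED_EXTENT is a fresh allocation (the entry must end up with
 * exactly one ref), while BTRFS_ADD_DELAYED_REF and BTRFS_DROP_DELAYED_REF
 * adjust the counts on a block that must already have an entry.
 */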
int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
                       struct btrfs_ref *generic_ref)
{
        struct ref_entry *ref = NULL, *exist;
        struct ref_action *ra = NULL;
        struct block_entry *be = NULL;
        struct root_entry *re = NULL;
        int action = generic_ref->action;
        int ret = 0;
        bool metadata;
        u64 bytenr = generic_ref->bytenr;
        u64 num_bytes = generic_ref->len;
        u64 parent = generic_ref->parent;
        u64 ref_root;
        u64 owner;
        u64 offset;

        if (!btrfs_test_opt(fs_info, REF_VERIFY))
                return 0;

        if (generic_ref->type == BTRFS_REF_METADATA) {
                ref_root = generic_ref->tree_ref.root;
                owner = generic_ref->tree_ref.level;
                offset = 0;
        } else {
                ref_root = generic_ref->data_ref.ref_root;
                owner = generic_ref->data_ref.ino;
                offset = generic_ref->data_ref.offset;
        }
        metadata = owner < BTRFS_FIRST_FREE_OBJECTID;

        ref = kzalloc(sizeof(struct ref_entry), GFP_NOFS);
        ra = kmalloc(sizeof(struct ref_action), GFP_NOFS);
        if (!ra || !ref) {
                kfree(ref);
                kfree(ra);
                ret = -ENOMEM;
                goto out;
        }

        if (parent) {
                ref->parent = parent;
        } else {
                ref->root_objectid = ref_root;
                ref->owner = owner;
                ref->offset = offset;
        }
        ref->num_refs = (action == BTRFS_DROP_DELAYED_REF) ? -1 : 1;

        memcpy(&ra->ref, ref, sizeof(struct ref_entry));
        /*
         * Save the extra info from the delayed ref in the ref action to make
         * it easier to figure out what is happening.  The real refs we add to
         * the ref tree need to reflect what we save on disk so it matches any
         * on-disk refs we pre-loaded.
         */
        ra->ref.owner = owner;
        ra->ref.offset = offset;
        ra->ref.root_objectid = ref_root;
        __save_stack_trace(ra);

        INIT_LIST_HEAD(&ra->list);
        ra->action = action;
        ra->root = generic_ref->real_root;

        /*
         * This is an allocation; preallocate the block_entry in case we
         * haven't used it before.
         */
        ret = -EINVAL;
        if (action == BTRFS_ADD_DELAYED_EXTENT) {
                /*
                 * For subvol_create we'll just pass in whatever the parent
                 * root is and the new root objectid, so let's not treat the
                 * passed in root as if it really has a ref for this bytenr.
                 */
                be = add_block_entry(fs_info, bytenr, num_bytes, ref_root);
                if (IS_ERR(be)) {
                        kfree(ref);
                        kfree(ra);
                        ret = PTR_ERR(be);
                        goto out;
                }
                be->num_refs++;
                if (metadata)
                        be->metadata = 1;

                if (be->num_refs != 1) {
                        btrfs_err(fs_info,
                        "re-allocated a block that still has references to it!");
                        dump_block_entry(fs_info, be);
                        dump_ref_action(fs_info, ra);
                        kfree(ref);
                        kfree(ra);
                        goto out_unlock;
                }

                while (!list_empty(&be->actions)) {
                        struct ref_action *tmp;

                        tmp = list_first_entry(&be->actions, struct ref_action,
                                               list);
                        list_del(&tmp->list);
                        kfree(tmp);
                }
        } else {
                struct root_entry *tmp;

                if (!parent) {
                        re = kmalloc(sizeof(struct root_entry), GFP_NOFS);
                        if (!re) {
                                kfree(ref);
                                kfree(ra);
                                ret = -ENOMEM;
                                goto out;
                        }
                        /*
                         * This is the root that is modifying us, so it's the
                         * one we want to look up below when we modify
                         * re->num_refs.
                         */
                        ref_root = generic_ref->real_root;
                        re->root_objectid = generic_ref->real_root;
                        re->num_refs = 0;
                }

                spin_lock(&fs_info->ref_verify_lock);
                be = lookup_block_entry(&fs_info->block_tree, bytenr);
                if (!be) {
                        btrfs_err(fs_info,
"trying to do action %d to bytenr %llu num_bytes %llu but there is no existing entry!",
                                  action, (unsigned long long)bytenr,
                                  (unsigned long long)num_bytes);
                        dump_ref_action(fs_info, ra);
                        kfree(ref);
                        kfree(ra);
                        goto out_unlock;
                } else if (be->num_refs == 0) {
                        btrfs_err(fs_info,
                "trying to do action %d for a bytenr that has 0 total references",
                                action);
                        dump_block_entry(fs_info, be);
                        dump_ref_action(fs_info, ra);
                        kfree(ref);
                        kfree(ra);
                        goto out_unlock;
                }

                if (!parent) {
                        tmp = insert_root_entry(&be->roots, re);
                        if (tmp) {
                                kfree(re);
                                re = tmp;
                        }
                }
        }

        exist = insert_ref_entry(&be->refs, ref);
        if (exist) {
                if (action == BTRFS_DROP_DELAYED_REF) {
                        if (exist->num_refs == 0) {
                                btrfs_err(fs_info,
"dropping a ref for an existing root that doesn't have a ref on the block");
                                dump_block_entry(fs_info, be);
                                dump_ref_action(fs_info, ra);
                                kfree(ref);
                                kfree(ra);
                                goto out_unlock;
                        }
                        exist->num_refs--;
                        if (exist->num_refs == 0) {
                                rb_erase(&exist->node, &be->refs);
                                kfree(exist);
                        }
                } else if (!be->metadata) {
                        exist->num_refs++;
                } else {
                        btrfs_err(fs_info,
"attempting to add another ref for an existing ref on a tree block");
                        dump_block_entry(fs_info, be);
                        dump_ref_action(fs_info, ra);
                        kfree(ref);
                        kfree(ra);
                        goto out_unlock;
                }
                kfree(ref);
        } else {
                if (action == BTRFS_DROP_DELAYED_REF) {
                        btrfs_err(fs_info,
"dropping a ref for a root that doesn't have a ref on the block");
                        dump_block_entry(fs_info, be);
                        dump_ref_action(fs_info, ra);
                        kfree(ra);
                        goto out_unlock;
                }
        }

        if (!parent && !re) {
                re = lookup_root_entry(&be->roots, ref_root);
                if (!re) {
                        /*
                         * This shouldn't happen because we will add our re
                         * above when we look up the be with !parent, but catch
                         * this case just in case so we don't panic because of
                         * some other corner case I didn't think of.
                         */
                        btrfs_err(fs_info, "failed to find root %llu for %llu",
                                  generic_ref->real_root, be->bytenr);
                        dump_block_entry(fs_info, be);
                        dump_ref_action(fs_info, ra);
                        kfree(ra);
                        goto out_unlock;
                }
        }
        if (action == BTRFS_DROP_DELAYED_REF) {
                if (re)
                        re->num_refs--;
                be->num_refs--;
        } else if (action == BTRFS_ADD_DELAYED_REF) {
                be->num_refs++;
                if (re)
                        re->num_refs++;
        }
        list_add_tail(&ra->list, &be->actions);
        ret = 0;
out_unlock:
        spin_unlock(&fs_info->ref_verify_lock);
out:
        if (ret)
                btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
        return ret;
}

/* Free up the ref cache */
void btrfs_free_ref_cache(struct btrfs_fs_info *fs_info)
{
        struct block_entry *be;
        struct rb_node *n;

        if (!btrfs_test_opt(fs_info, REF_VERIFY))
                return;

        spin_lock(&fs_info->ref_verify_lock);
        while ((n = rb_first(&fs_info->block_tree))) {
                be = rb_entry(n, struct block_entry, node);
                rb_erase(&be->node, &fs_info->block_tree);
                free_block_entry(be);
                cond_resched_lock(&fs_info->ref_verify_lock);
        }
        spin_unlock(&fs_info->ref_verify_lock);
}

void btrfs_free_ref_tree_range(struct btrfs_fs_info *fs_info, u64 start,
                               u64 len)
{
        struct block_entry *be = NULL, *entry;
        struct rb_node *n;

        if (!btrfs_test_opt(fs_info, REF_VERIFY))
                return;

        spin_lock(&fs_info->ref_verify_lock);
        n = fs_info->block_tree.rb_node;
        while (n) {
                entry = rb_entry(n, struct block_entry, node);
                if (entry->bytenr < start) {
                        n = n->rb_right;
                } else if (entry->bytenr > start) {
                        n = n->rb_left;
                } else {
                        be = entry;
                        break;
                }
                /* We want to get as close to start as possible */
                if (be == NULL ||
                    (entry->bytenr < start && be->bytenr > start) ||
                    (entry->bytenr < start && entry->bytenr > be->bytenr))
                        be = entry;
        }

        /*
         * Could have an empty block group, maybe have something to check for
         * this case to verify we were actually empty?
         */
        if (!be) {
                spin_unlock(&fs_info->ref_verify_lock);
                return;
        }

        n = &be->node;
        while (n) {
                be = rb_entry(n, struct block_entry, node);
                n = rb_next(n);
                if (be->bytenr < start && be->bytenr + be->len > start) {
                        btrfs_err(fs_info,
                                "block entry overlaps a block group [%llu,%llu]!",
                                start, len);
                        dump_block_entry(fs_info, be);
                        continue;
                }
                if (be->bytenr < start)
                        continue;
                if (be->bytenr >= start + len)
                        break;
                if (be->bytenr + be->len > start + len) {
                        btrfs_err(fs_info,
                                "block entry overlaps a block group [%llu,%llu]!",
                                start, len);
                        dump_block_entry(fs_info, be);
                }
                rb_erase(&be->node, &fs_info->block_tree);
                free_block_entry(be);
        }
        spin_unlock(&fs_info->ref_verify_lock);
}

/* Walk down all roots and build the ref tree, meant to be called at mount */
int btrfs_build_ref_tree(struct btrfs_fs_info *fs_info)
{
        struct btrfs_path *path;
        struct extent_buffer *eb;
        u64 bytenr = 0, num_bytes = 0;
        int ret, level;

        if (!btrfs_test_opt(fs_info, REF_VERIFY))
                return 0;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        eb = btrfs_read_lock_root_node(fs_info->extent_root);
        btrfs_set_lock_blocking_read(eb);
        level = btrfs_header_level(eb);
        path->nodes[level] = eb;
        path->slots[level] = 0;
        path->locks[level] = BTRFS_READ_LOCK_BLOCKING;

        while (1) {
                /*
                 * We have to keep track of the bytenr/num_bytes we last hit
                 * because we could have run out of space for an inline ref,
                 * and would have had to add a ref key item which may appear
                 * on a different leaf from the original extent item.
                 */
                ret = walk_down_tree(fs_info->extent_root, path, level,
                                     &bytenr, &num_bytes);
                if (ret)
                        break;
                ret = walk_up_tree(path, &level);
                if (ret < 0)
                        break;
                if (ret > 0) {
                        ret = 0;
                        break;
                }
        }
        if (ret) {
                btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
                btrfs_free_ref_cache(fs_info);
        }
        btrfs_free_path(path);
        return ret;
}