// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "transaction.h"
#include "dev-replace.h"
#include "block-group.h"
/*
 * This is the implementation for the generic read ahead framework.
 *
 * To trigger a readahead, btrfs_reada_add must be called. It will start
 * a read ahead for the given range [start, end) on tree root. The returned
 * handle can either be used to wait on the readahead to finish
 * (btrfs_reada_wait), or to send it to the background (btrfs_reada_detach).
 *
 * The read ahead works as follows:
 * On btrfs_reada_add, the root of the tree is inserted into a radix_tree.
 * reada_start_machine will then search for extents to prefetch and trigger
 * some reads. When a read finishes for a node, all contained node/leaf
 * pointers that lie in the given range will also be enqueued. The reads will
 * be triggered in sequential order, thus giving a big win over a naive
 * enumeration. It will also make use of multi-device layouts. Each disk
 * will have its own read pointer and all disks will be utilized in parallel.
 * Also, no two disks will read both sides of a mirror simultaneously, as this
 * would waste seeking capacity. Instead both disks will read different parts
 * of the mirror.
 *
 * Any number of readaheads can be started in parallel. The read order will be
 * determined globally, i.e. 2 parallel readaheads will normally finish faster
 * than the same 2 started one after another.
 */
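/*
 * A minimal usage sketch of the interface described above (illustrative
 * only, error handling trimmed; a caller would run this in process context):
 *
 *	struct btrfs_key key_start = { 0 };
 *	struct btrfs_key key_end = { .objectid = (u64)-1,
 *				     .type = (u8)-1,
 *				     .offset = (u64)-1 };
 *	struct reada_control *rc;
 *
 *	rc = btrfs_reada_add(root, &key_start, &key_end);
 *	if (!IS_ERR(rc))
 *		btrfs_reada_wait(rc);	(or btrfs_reada_detach(rc))
 */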
#define MAX_IN_FLIGHT 6
struct reada_extctl {
	struct list_head	list;
	struct reada_control	*rc;
	u64			generation;
};

struct reada_extent {
	u64			logical;
	struct btrfs_key	top;
	struct list_head	extctl;
	int			refcnt;
	spinlock_t		lock;
	struct reada_zone	*zones[BTRFS_MAX_MIRRORS];
	int			nzones;
	int			scheduled;
};

struct reada_zone {
	u64			start;
	u64			end;
	u64			elems;
	struct list_head	list;
	spinlock_t		lock;
	int			locked;
	struct btrfs_device	*device;
	struct btrfs_device	*devs[BTRFS_MAX_MIRRORS]; /* full list, incl. self */
	int			ndevs;
	struct kref		refcnt;
};
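/*
 * Note on indexing, inferred from the lookups below: every radix tree in
 * this file is keyed by page index. Extents are keyed by their start,
 * zones by their end, so a gang lookup starting at a logical address
 * finds the zone that covers it:
 *
 *	index = logical >> PAGE_SHIFT;                     (extent key)
 *	index = (unsigned long)(zone->end >> PAGE_SHIFT);  (zone key)
 */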
struct reada_machine_work {
	struct btrfs_work	work;
	struct btrfs_fs_info	*fs_info;
};
static void reada_extent_put(struct btrfs_fs_info *, struct reada_extent *);
static void reada_control_release(struct kref *kref);
static void reada_zone_release(struct kref *kref);
static void reada_start_machine(struct btrfs_fs_info *fs_info);
static void __reada_start_machine(struct btrfs_fs_info *fs_info);

static int reada_add_block(struct reada_control *rc, u64 logical,
			   struct btrfs_key *top, u64 generation);
/* in case of err, eb might be NULL */
static void __readahead_hook(struct btrfs_fs_info *fs_info,
			     struct reada_extent *re, struct extent_buffer *eb,
			     int err)
{
	int nritems;
	int i;
	u64 bytenr;
	u64 generation;
	struct list_head list;

	spin_lock(&re->lock);
	/*
	 * Just take the full list from the extent; afterwards we
	 * don't need the lock anymore.
	 */
	list_replace_init(&re->extctl, &list);
	re->scheduled = 0;
	spin_unlock(&re->lock);
	/*
	 * This is the error case: the extent buffer has not been
	 * read correctly. We won't access anything from it and
	 * just clean up our data structures. Effectively this will
	 * cut the branch below this node from read ahead.
	 */
	if (err)
		goto cleanup;

	/*
	 * FIXME: currently we just set nritems to 0 if this is a leaf,
	 * effectively ignoring the content. As a next step we could
	 * trigger more readahead depending on the content, e.g.
	 * fetch the checksums for the extents in the leaf.
	 */
	if (!btrfs_header_level(eb))
		goto cleanup;

	nritems = btrfs_header_nritems(eb);
	generation = btrfs_header_generation(eb);
	for (i = 0; i < nritems; i++) {
		struct reada_extctl *rec;
		u64 n_gen;
		struct btrfs_key key;
		struct btrfs_key next_key;

		btrfs_node_key_to_cpu(eb, &key, i);
		if (i + 1 < nritems)
			btrfs_node_key_to_cpu(eb, &next_key, i + 1);
		else
			next_key = re->top;
		bytenr = btrfs_node_blockptr(eb, i);
		n_gen = btrfs_node_ptr_generation(eb, i);
		list_for_each_entry(rec, &list, list) {
			struct reada_control *rc = rec->rc;

			/*
			 * If the generation doesn't match, just ignore this
			 * extctl. This will probably cut off a branch from
			 * prefetch. Alternatively one could start a new (sub-)
			 * prefetch for this branch, starting again from root.
			 * FIXME: move the generation check out of this loop.
			 */
#ifdef DEBUG
			if (rec->generation != generation) {
				btrfs_debug(fs_info,
					    "generation mismatch for (%llu,%d,%llu) %llu != %llu",
					    key.objectid, key.type, key.offset,
					    rec->generation, generation);
			}
#endif
			if (rec->generation == generation &&
			    btrfs_comp_cpu_keys(&key, &rc->key_end) < 0 &&
			    btrfs_comp_cpu_keys(&next_key, &rc->key_start) > 0)
				reada_add_block(rc, bytenr, &next_key, n_gen);
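			/*
			 * Note (added for clarity, not in the original): the
			 * two key comparisons above implement an interval
			 * overlap test. Child i covers [key, next_key), and
			 * it is enqueued iff that interval intersects the
			 * requested [key_start, key_end]. E.g. for a request
			 * starting at objectid 257, a child spanning keys
			 * (200,..)..(300,..) is read ahead, while one spanning
			 * (100,..)..(200,..) is skipped.
			 */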
		}
	}

cleanup:
	/*
	 * free extctl records
	 */
	while (!list_empty(&list)) {
		struct reada_control *rc;
		struct reada_extctl *rec;

		rec = list_first_entry(&list, struct reada_extctl, list);
		list_del(&rec->list);
		rc = rec->rc;
		kfree(rec);

		kref_get(&rc->refcnt);
		if (atomic_dec_and_test(&rc->elems)) {
			kref_put(&rc->refcnt, reada_control_release);
			wake_up(&rc->wait);
		}
		kref_put(&rc->refcnt, reada_control_release);

		reada_extent_put(fs_info, re);	/* one ref for each entry */
	}
}
int btree_readahead_hook(struct extent_buffer *eb, int err)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	int ret = 0;
	struct reada_extent *re;

	/* find extent */
	spin_lock(&fs_info->reada_lock);
	re = radix_tree_lookup(&fs_info->reada_tree,
			       eb->start >> PAGE_SHIFT);
	if (re)
		re->refcnt++;
	spin_unlock(&fs_info->reada_lock);
	if (!re) {
		ret = -1;
		goto start_machine;
	}

	__readahead_hook(fs_info, re, eb, err);
	reada_extent_put(fs_info, re);	/* our ref */

start_machine:
	reada_start_machine(fs_info);
	return ret;
}
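/*
 * Note: the hook above has no caller in this file. A sketch of the assumed
 * call site in the btree read-completion path (hypothetical, based on the
 * EXTENT_BUFFER_READAHEAD flag set by reada_tree_block_flagged() below):
 *
 *	if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
 *		btree_readahead_hook(eb, ret);
 */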
static struct reada_zone *reada_find_zone(struct btrfs_device *dev, u64 logical,
					  struct btrfs_bio *bbio)
{
	struct btrfs_fs_info *fs_info = dev->fs_info;
	int ret;
	struct reada_zone *zone;
	struct btrfs_block_group_cache *cache = NULL;
	u64 start;
	u64 end;
	int i;

	zone = NULL;
	spin_lock(&fs_info->reada_lock);
	ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
				     logical >> PAGE_SHIFT, 1);
	if (ret == 1 && logical >= zone->start && logical <= zone->end) {
		kref_get(&zone->refcnt);
		spin_unlock(&fs_info->reada_lock);
		return zone;
	}

	spin_unlock(&fs_info->reada_lock);

	cache = btrfs_lookup_block_group(fs_info, logical);
	if (!cache)
		return NULL;

	start = cache->key.objectid;
	end = start + cache->key.offset - 1;
	btrfs_put_block_group(cache);

	zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	if (!zone)
		return NULL;

	ret = radix_tree_preload(GFP_KERNEL);
	if (ret) {
		kfree(zone);
		return NULL;
	}

	zone->start = start;
	zone->end = end;
	INIT_LIST_HEAD(&zone->list);
	spin_lock_init(&zone->lock);
	zone->locked = 0;
	kref_init(&zone->refcnt);
	zone->elems = 0;
	zone->device = dev; /* our device always sits at index 0 */
	for (i = 0; i < bbio->num_stripes; ++i) {
		/* bounds have already been checked */
		zone->devs[i] = bbio->stripes[i].dev;
	}
	zone->ndevs = bbio->num_stripes;

	spin_lock(&fs_info->reada_lock);
	ret = radix_tree_insert(&dev->reada_zones,
				(unsigned long)(zone->end >> PAGE_SHIFT),
				zone);

	if (ret == -EEXIST) {
		kfree(zone);
		ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
					     logical >> PAGE_SHIFT, 1);
		if (ret == 1 && logical >= zone->start && logical <= zone->end)
			kref_get(&zone->refcnt);
		else
			zone = NULL;
	}
	spin_unlock(&fs_info->reada_lock);
	radix_tree_preload_end();

	return zone;
}
static struct reada_extent *reada_find_extent(struct btrfs_fs_info *fs_info,
					      u64 logical,
					      struct btrfs_key *top)
{
	int ret;
	struct reada_extent *re = NULL;
	struct reada_extent *re_exist = NULL;
	struct btrfs_bio *bbio = NULL;
	struct btrfs_device *dev;
	struct btrfs_device *prev_dev;
	u64 length;
	int real_stripes;
	int nzones = 0;
	unsigned long index = logical >> PAGE_SHIFT;
	int dev_replace_is_ongoing;
	int have_zone = 0;

	spin_lock(&fs_info->reada_lock);
	re = radix_tree_lookup(&fs_info->reada_tree, index);
	if (re)
		re->refcnt++;
	spin_unlock(&fs_info->reada_lock);

	if (re)
		return re;

	re = kzalloc(sizeof(*re), GFP_KERNEL);
	if (!re)
		return NULL;

	re->logical = logical;
	re->top = *top;
	INIT_LIST_HEAD(&re->extctl);
	spin_lock_init(&re->lock);
	re->refcnt = 1;
	/*
	 * map block
	 */
	length = fs_info->nodesize;
	ret = btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
			      &length, &bbio, 0);
	if (ret || !bbio || length < fs_info->nodesize)
		goto error;

	if (bbio->num_stripes > BTRFS_MAX_MIRRORS) {
		btrfs_err(fs_info,
			  "readahead: more than %d copies not supported",
			  BTRFS_MAX_MIRRORS);
		goto error;
	}

	real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
	for (nzones = 0; nzones < real_stripes; ++nzones) {
		struct reada_zone *zone;

		dev = bbio->stripes[nzones].dev;

		/* cannot read ahead on missing device. */
		if (!dev->bdev)
			continue;

		zone = reada_find_zone(dev, logical, bbio);
		if (!zone)
			continue;

		re->zones[re->nzones++] = zone;
		spin_lock(&zone->lock);
		if (!zone->elems)
			kref_get(&zone->refcnt);
		++zone->elems;
		spin_unlock(&zone->lock);
		spin_lock(&fs_info->reada_lock);
		kref_put(&zone->refcnt, reada_zone_release);
		spin_unlock(&fs_info->reada_lock);
	}
	if (re->nzones == 0) {
		/* not a single zone found, error and out */
		goto error;
	}
	/* Insert extent in reada tree + all per-device trees, all or nothing */
	down_read(&fs_info->dev_replace.rwsem);
	ret = radix_tree_preload(GFP_KERNEL);
	if (ret) {
		up_read(&fs_info->dev_replace.rwsem);
		goto error;
	}

	spin_lock(&fs_info->reada_lock);
	ret = radix_tree_insert(&fs_info->reada_tree, index, re);
	if (ret == -EEXIST) {
		re_exist = radix_tree_lookup(&fs_info->reada_tree, index);
		re_exist->refcnt++;
		spin_unlock(&fs_info->reada_lock);
		radix_tree_preload_end();
		up_read(&fs_info->dev_replace.rwsem);
		goto error;
	}
	if (ret) {
		spin_unlock(&fs_info->reada_lock);
		radix_tree_preload_end();
		up_read(&fs_info->dev_replace.rwsem);
		goto error;
	}
	radix_tree_preload_end();
	prev_dev = NULL;
	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(
			&fs_info->dev_replace);
	for (nzones = 0; nzones < re->nzones; ++nzones) {
		dev = re->zones[nzones]->device;

		if (dev == prev_dev) {
			/*
			 * In case of DUP, just add the first zone. As both
			 * are on the same device, there's nothing to gain
			 * from adding both.
			 * Also, it wouldn't work, as the tree is per device
			 * and adding would fail with EEXIST.
			 */
			continue;
		}
		if (!dev->bdev)
			continue;

		if (dev_replace_is_ongoing &&
		    dev == fs_info->dev_replace.tgtdev) {
			/*
			 * As this device is selected for reading only as
			 * a last resort, skip it for read ahead.
			 */
			continue;
		}
		prev_dev = dev;
		ret = radix_tree_insert(&dev->reada_extents, index, re);
		if (ret) {
			while (--nzones >= 0) {
				dev = re->zones[nzones]->device;
				/* ignore whether the entry was inserted */
				radix_tree_delete(&dev->reada_extents, index);
			}
			radix_tree_delete(&fs_info->reada_tree, index);
			spin_unlock(&fs_info->reada_lock);
			up_read(&fs_info->dev_replace.rwsem);
			goto error;
		}
		have_zone = 1;
	}
	if (!have_zone)
		radix_tree_delete(&fs_info->reada_tree, index);
	spin_unlock(&fs_info->reada_lock);
	up_read(&fs_info->dev_replace.rwsem);
	if (!have_zone)
		goto error;

	btrfs_put_bbio(bbio);
	return re;

error:
	for (nzones = 0; nzones < re->nzones; ++nzones) {
		struct reada_zone *zone;

		zone = re->zones[nzones];
		kref_get(&zone->refcnt);
		spin_lock(&zone->lock);
		--zone->elems;
		if (zone->elems == 0) {
			/*
			 * No fs_info->reada_lock needed, as this can't be
			 * the last ref.
			 */
			kref_put(&zone->refcnt, reada_zone_release);
		}
		spin_unlock(&zone->lock);

		spin_lock(&fs_info->reada_lock);
		kref_put(&zone->refcnt, reada_zone_release);
		spin_unlock(&fs_info->reada_lock);
	}
	btrfs_put_bbio(bbio);
	kfree(re);
	return re_exist;
}
static void reada_extent_put(struct btrfs_fs_info *fs_info,
			     struct reada_extent *re)
{
	int i;
	unsigned long index = re->logical >> PAGE_SHIFT;

	spin_lock(&fs_info->reada_lock);
	if (--re->refcnt) {
		spin_unlock(&fs_info->reada_lock);
		return;
	}

	radix_tree_delete(&fs_info->reada_tree, index);
	for (i = 0; i < re->nzones; ++i) {
		struct reada_zone *zone = re->zones[i];

		radix_tree_delete(&zone->device->reada_extents, index);
	}

	spin_unlock(&fs_info->reada_lock);

	for (i = 0; i < re->nzones; ++i) {
		struct reada_zone *zone = re->zones[i];

		kref_get(&zone->refcnt);
		spin_lock(&zone->lock);
		--zone->elems;
		if (zone->elems == 0) {
			/*
			 * No fs_info->reada_lock needed, as this can't be
			 * the last ref.
			 */
			kref_put(&zone->refcnt, reada_zone_release);
		}
		spin_unlock(&zone->lock);

		spin_lock(&fs_info->reada_lock);
		kref_put(&zone->refcnt, reada_zone_release);
		spin_unlock(&fs_info->reada_lock);
	}

	kfree(re);
}
static void reada_zone_release(struct kref *kref)
{
	struct reada_zone *zone = container_of(kref, struct reada_zone, refcnt);

	radix_tree_delete(&zone->device->reada_zones,
			  zone->end >> PAGE_SHIFT);

	kfree(zone);
}
static void reada_control_release(struct kref *kref)
{
	struct reada_control *rc = container_of(kref, struct reada_control,
						refcnt);

	kfree(rc);
}
static int reada_add_block(struct reada_control *rc, u64 logical,
			   struct btrfs_key *top, u64 generation)
{
	struct btrfs_fs_info *fs_info = rc->fs_info;
	struct reada_extent *re;
	struct reada_extctl *rec;

	/* takes one ref */
	re = reada_find_extent(fs_info, logical, top);
	if (!re)
		return -1;

	rec = kzalloc(sizeof(*rec), GFP_KERNEL);
	if (!rec) {
		reada_extent_put(fs_info, re);
		return -ENOMEM;
	}

	rec->rc = rc;
	rec->generation = generation;
	atomic_inc(&rc->elems);

	spin_lock(&re->lock);
	list_add_tail(&rec->list, &re->extctl);
	spin_unlock(&re->lock);

	/* leave the ref on the extent */

	return 0;
}
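/*
 * Reference flow, summarized from the code above: reada_find_extent() hands
 * back one reference per extctl record queued here; __readahead_hook()'s
 * cleanup loop drops it again ("one ref for each entry") once the record has
 * been processed or discarded.
 */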
/*
 * called with fs_info->reada_lock held
 */
static void reada_peer_zones_set_lock(struct reada_zone *zone, int lock)
{
	int i;
	unsigned long index = zone->end >> PAGE_SHIFT;

	for (i = 0; i < zone->ndevs; ++i) {
		struct reada_zone *peer;
		peer = radix_tree_lookup(&zone->devs[i]->reada_zones, index);
		if (peer && peer->device != zone->device)
			peer->locked = lock;
	}
}
/*
 * called with fs_info->reada_lock held
 */
static int reada_pick_zone(struct btrfs_device *dev)
{
	struct reada_zone *top_zone = NULL;
	struct reada_zone *top_locked_zone = NULL;
	u64 top_elems = 0;
	u64 top_locked_elems = 0;
	unsigned long index = 0;
	int ret;

	if (dev->reada_curr_zone) {
		reada_peer_zones_set_lock(dev->reada_curr_zone, 0);
		kref_put(&dev->reada_curr_zone->refcnt, reada_zone_release);
		dev->reada_curr_zone = NULL;
	}
	/* pick the zone with the most elements */
	while (1) {
		struct reada_zone *zone;

		ret = radix_tree_gang_lookup(&dev->reada_zones,
					     (void **)&zone, index, 1);
		if (ret == 0)
			break;
		index = (zone->end >> PAGE_SHIFT) + 1;
		if (zone->locked) {
			if (zone->elems > top_locked_elems) {
				top_locked_elems = zone->elems;
				top_locked_zone = zone;
			}
		} else {
			if (zone->elems > top_elems) {
				top_elems = zone->elems;
				top_zone = zone;
			}
		}
	}
	if (top_zone)
		dev->reada_curr_zone = top_zone;
	else if (top_locked_zone)
		dev->reada_curr_zone = top_locked_zone;
	else
		return 0;

	dev->reada_next = dev->reada_curr_zone->start;
	kref_get(&dev->reada_curr_zone->refcnt);
	reada_peer_zones_set_lock(dev->reada_curr_zone, 1);

	return 1;
}
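/*
 * Illustration of the heuristic above (example values, not from the original
 * source): given zone A with elems=10 but locked by a peer device and zone B
 * with elems=4 and unlocked, B is picked even though A is busier. Only when
 * every zone is locked does the device fall back to the busiest locked zone
 * instead of idling.
 */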
static int reada_tree_block_flagged(struct btrfs_fs_info *fs_info, u64 bytenr,
				    int mirror_num, struct extent_buffer **eb)
{
	struct extent_buffer *buf = NULL;
	int ret;

	buf = btrfs_find_create_tree_block(fs_info, bytenr);
	if (IS_ERR(buf))
		return 0;

	set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);

	ret = read_extent_buffer_pages(buf, WAIT_PAGE_LOCK, mirror_num);
	if (ret) {
		free_extent_buffer_stale(buf);
		return ret;
	}

	if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
		free_extent_buffer_stale(buf);
		return -EIO;
	} else if (extent_buffer_uptodate(buf)) {
		*eb = buf;
	} else {
		free_extent_buffer(buf);
	}
	return 0;
}
static int reada_start_machine_dev(struct btrfs_device *dev)
{
	struct btrfs_fs_info *fs_info = dev->fs_info;
	struct reada_extent *re = NULL;
	int mirror_num = 0;
	struct extent_buffer *eb = NULL;
	u64 logical;
	int ret;
	int i;

	spin_lock(&fs_info->reada_lock);
	if (dev->reada_curr_zone == NULL) {
		ret = reada_pick_zone(dev);
		if (!ret) {
			spin_unlock(&fs_info->reada_lock);
			return 0;
		}
	}
	/*
	 * FIXME currently we issue the reads one extent at a time. If we have
	 * a contiguous block of extents, we could also coalesce them or use
	 * plugging to speed things up.
	 */
	ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
				     dev->reada_next >> PAGE_SHIFT, 1);
	if (ret == 0 || re->logical > dev->reada_curr_zone->end) {
		ret = reada_pick_zone(dev);
		if (!ret) {
			spin_unlock(&fs_info->reada_lock);
			return 0;
		}
		re = NULL;
		ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
					     dev->reada_next >> PAGE_SHIFT, 1);
	}
	if (ret == 0) {
		spin_unlock(&fs_info->reada_lock);
		return 0;
	}
	dev->reada_next = re->logical + fs_info->nodesize;
	re->refcnt++;

	spin_unlock(&fs_info->reada_lock);

	spin_lock(&re->lock);
	if (re->scheduled || list_empty(&re->extctl)) {
		spin_unlock(&re->lock);
		reada_extent_put(fs_info, re);
		return 0;
	}
	re->scheduled = 1;
	spin_unlock(&re->lock);

	/*
	 * find mirror num
	 */
	for (i = 0; i < re->nzones; ++i) {
		if (re->zones[i]->device == dev) {
			mirror_num = i + 1;
			break;
		}
	}
	logical = re->logical;

	atomic_inc(&dev->reada_in_flight);
	ret = reada_tree_block_flagged(fs_info, logical, mirror_num, &eb);
	if (ret)
		__readahead_hook(fs_info, re, NULL, ret);
	else if (eb)
		__readahead_hook(fs_info, re, eb, ret);

	if (eb)
		free_extent_buffer(eb);

	atomic_dec(&dev->reada_in_flight);
	reada_extent_put(fs_info, re);

	return 1;
}
static void reada_start_machine_worker(struct btrfs_work *work)
{
	struct reada_machine_work *rmw;
	struct btrfs_fs_info *fs_info;
	int old_ioprio;

	rmw = container_of(work, struct reada_machine_work, work);
	fs_info = rmw->fs_info;
	kfree(rmw);

	old_ioprio = IOPRIO_PRIO_VALUE(task_nice_ioclass(current),
				       task_nice_ioprio(current));
	set_task_ioprio(current, BTRFS_IOPRIO_READA);
	__reada_start_machine(fs_info);
	set_task_ioprio(current, old_ioprio);

	atomic_dec(&fs_info->reada_works_cnt);
}
static void __reada_start_machine(struct btrfs_fs_info *fs_info)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	u64 enqueued;
	u64 total = 0;
	int i;

again:
	do {
		enqueued = 0;
		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry(device, &fs_devices->devices, dev_list) {
			if (atomic_read(&device->reada_in_flight) <
			    MAX_IN_FLIGHT)
				enqueued += reada_start_machine_dev(device);
		}
		mutex_unlock(&fs_devices->device_list_mutex);
		total += enqueued;
	} while (enqueued && total < 10000);
	if (fs_devices->seed) {
		fs_devices = fs_devices->seed;
		goto again;
	}

	if (total)
		return;

	/*
	 * If everything is already in the cache, this is effectively single
	 * threaded. To (a) not hold the caller for too long and (b) utilize
	 * more cores, we broke the loop above after 10000 iterations and now
	 * enqueue to workers to finish it. This will distribute the load to
	 * the cores.
	 */
	for (i = 0; i < 2; ++i) {
		reada_start_machine(fs_info);
		if (atomic_read(&fs_info->reada_works_cnt) >
		    BTRFS_MAX_MIRRORS * 2)
			break;
	}
}
static void reada_start_machine(struct btrfs_fs_info *fs_info)
{
	struct reada_machine_work *rmw;

	rmw = kzalloc(sizeof(*rmw), GFP_KERNEL);
	if (!rmw) {
		/* FIXME we cannot handle this properly right now */
		BUG();
	}
	btrfs_init_work(&rmw->work, btrfs_readahead_helper,
			reada_start_machine_worker, NULL, NULL);
	rmw->fs_info = fs_info;

	btrfs_queue_work(fs_info->readahead_workers, &rmw->work);
	atomic_inc(&fs_info->reada_works_cnt);
}
#ifdef DEBUG
static void dump_devs(struct btrfs_fs_info *fs_info, int all)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	unsigned long index;
	int ret;
	int i;
	int j;
	int cnt;

	spin_lock(&fs_info->reada_lock);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		btrfs_debug(fs_info, "dev %lld has %d in flight", device->devid,
			    atomic_read(&device->reada_in_flight));
		index = 0;
		while (1) {
			struct reada_zone *zone;

			ret = radix_tree_gang_lookup(&device->reada_zones,
						     (void **)&zone, index, 1);
			if (ret == 0)
				break;
			pr_debug("  zone %llu-%llu elems %llu locked %d devs",
				 zone->start, zone->end, zone->elems,
				 zone->locked);
			for (j = 0; j < zone->ndevs; ++j) {
				pr_cont(" %lld",
					zone->devs[j]->devid);
			}
			if (device->reada_curr_zone == zone)
				pr_cont(" curr off %llu",
					device->reada_next - zone->start);
			pr_cont("\n");
			index = (zone->end >> PAGE_SHIFT) + 1;
		}
		cnt = 0;
		index = 0;
		while (all) {
			struct reada_extent *re = NULL;

			ret = radix_tree_gang_lookup(&device->reada_extents,
						     (void **)&re, index, 1);
			if (ret == 0)
				break;
			pr_debug("  re: logical %llu size %u empty %d scheduled %d",
				 re->logical, fs_info->nodesize,
				 list_empty(&re->extctl), re->scheduled);

			for (i = 0; i < re->nzones; ++i) {
				pr_cont(" zone %llu-%llu devs",
					re->zones[i]->start,
					re->zones[i]->end);
				for (j = 0; j < re->zones[i]->ndevs; ++j) {
					pr_cont(" %lld",
						re->zones[i]->devs[j]->devid);
				}
			}
			pr_cont("\n");
			index = (re->logical >> PAGE_SHIFT) + 1;
			if (++cnt > 15)
				break;
		}
	}

	index = 0;
	cnt = 0;
	while (all) {
		struct reada_extent *re = NULL;

		ret = radix_tree_gang_lookup(&fs_info->reada_tree, (void **)&re,
					     index, 1);
		if (ret == 0)
			break;
		if (!re->scheduled) {
			index = (re->logical >> PAGE_SHIFT) + 1;
			continue;
		}
		pr_debug("re: logical %llu size %u list empty %d scheduled %d",
			 re->logical, fs_info->nodesize,
			 list_empty(&re->extctl), re->scheduled);
		for (i = 0; i < re->nzones; ++i) {
			pr_cont(" zone %llu-%llu devs",
				re->zones[i]->start,
				re->zones[i]->end);
			for (j = 0; j < re->zones[i]->ndevs; ++j) {
				pr_cont(" %lld",
					re->zones[i]->devs[j]->devid);
			}
		}
		pr_cont("\n");
		index = (re->logical >> PAGE_SHIFT) + 1;
	}
	spin_unlock(&fs_info->reada_lock);
}
#endif
/*
 * interface
 */
struct reada_control *btrfs_reada_add(struct btrfs_root *root,
			struct btrfs_key *key_start, struct btrfs_key *key_end)
{
	struct reada_control *rc;
	u64 start;
	u64 generation;
	int ret;
	struct extent_buffer *node;
	static struct btrfs_key max_key = {
		.objectid = (u64)-1,
		.type = (u8)-1,
		.offset = (u64)-1
	};

	rc = kzalloc(sizeof(*rc), GFP_KERNEL);
	if (!rc)
		return ERR_PTR(-ENOMEM);

	rc->fs_info = root->fs_info;
	rc->key_start = *key_start;
	rc->key_end = *key_end;
	atomic_set(&rc->elems, 0);
	init_waitqueue_head(&rc->wait);
	kref_init(&rc->refcnt);
	kref_get(&rc->refcnt); /* one ref for having elements */

	node = btrfs_root_node(root);
	start = node->start;
	generation = btrfs_header_generation(node);
	free_extent_buffer(node);

	ret = reada_add_block(rc, start, &max_key, generation);
	if (ret) {
		kfree(rc);
		return ERR_PTR(ret);
	}

	reada_start_machine(root->fs_info);

	return rc;
}
#ifdef DEBUG
int btrfs_reada_wait(void *handle)
{
	struct reada_control *rc = handle;
	struct btrfs_fs_info *fs_info = rc->fs_info;

	while (atomic_read(&rc->elems)) {
		if (!atomic_read(&fs_info->reada_works_cnt))
			reada_start_machine(fs_info);
		wait_event_timeout(rc->wait, atomic_read(&rc->elems) == 0,
				   5 * HZ);
		dump_devs(fs_info, atomic_read(&rc->elems) < 10 ? 1 : 0);
	}

	dump_devs(fs_info, atomic_read(&rc->elems) < 10 ? 1 : 0);

	kref_put(&rc->refcnt, reada_control_release);

	return 0;
}
#else
int btrfs_reada_wait(void *handle)
{
	struct reada_control *rc = handle;
	struct btrfs_fs_info *fs_info = rc->fs_info;

	while (atomic_read(&rc->elems)) {
		if (!atomic_read(&fs_info->reada_works_cnt))
			reada_start_machine(fs_info);
		wait_event_timeout(rc->wait, atomic_read(&rc->elems) == 0,
				   (HZ + 9) / 10);
	}

	kref_put(&rc->refcnt, reada_control_release);

	return 0;
}
#endif
void btrfs_reada_detach(void *handle)
{
	struct reada_control *rc = handle;

	kref_put(&rc->refcnt, reada_control_release);
}