drivers/md/bcache/writeback.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * background writeback - scan btree for dirty data and write it to the backing
4  * device
5  *
6  * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
7  * Copyright 2012 Google, Inc.
8  */
9
10 #include "bcache.h"
11 #include "btree.h"
12 #include "debug.h"
13 #include "writeback.h"
14
15 #include <linux/delay.h>
16 #include <linux/kthread.h>
17 #include <linux/sched/clock.h>
18 #include <trace/events/bcache.h>
19
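/*
 * If the user enabled BCH_ENABLE_AUTO_GC and cache usage has reached
 * BCH_AUTO_GC_DIRTY_THRESHOLD, set BCH_DO_AUTO_GC so the writeback thread
 * triggers a garbage collection run once writeback finishes.
 */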
20 static void update_gc_after_writeback(struct cache_set *c)
21 {
22         if (c->gc_after_writeback != (BCH_ENABLE_AUTO_GC) ||
23             c->gc_stats.in_use < BCH_AUTO_GC_DIRTY_THRESHOLD)
24                 return;
25
26         c->gc_after_writeback |= BCH_DO_AUTO_GC;
27 }
28
29 /* Rate limiting */
30 static uint64_t __calc_target_rate(struct cached_dev *dc)
31 {
32         struct cache_set *c = dc->disk.c;
33
34         /*
35          * This is the size of the cache, minus the amount used for
36          * flash-only devices
37          */
38         uint64_t cache_sectors = c->nbuckets * c->cache->sb.bucket_size -
39                                 atomic_long_read(&c->flash_dev_dirty_sectors);
40
41         /*
42          * Unfortunately there is no control of global dirty data.  If the
43          * user states that they want 10% dirty data in the cache, and has,
44          * e.g., 5 backing volumes of equal size, we try and ensure each
45          * backing volume uses about 2% of the cache for dirty data.
46          */
47         uint32_t bdev_share =
48                 div64_u64(bdev_sectors(dc->bdev) << WRITEBACK_SHARE_SHIFT,
49                                 c->cached_dev_sectors);
50
51         uint64_t cache_dirty_target =
52                 div_u64(cache_sectors * dc->writeback_percent, 100);
53
54         /* Ensure each backing dev gets at least one dirty share */
55         if (bdev_share < 1)
56                 bdev_share = 1;
57
58         return (cache_dirty_target * bdev_share) >> WRITEBACK_SHARE_SHIFT;
59 }
60
61 static void __update_writeback_rate(struct cached_dev *dc)
62 {
63         /*
64          * PI controller:
65          * Figures out the amount that should be written per second.
66          *
67          * First, the error (number of sectors that are dirty beyond our
68          * target) is calculated.  The error is accumulated (numerically
69          * integrated).
70          *
71          * Then, the proportional value and integral value are scaled
72          * based on configured values.  These are stored as inverses to
73          * avoid fixed point math and to make configuration easy-- e.g.
74          * the default value of 40 for writeback_rate_p_term_inverse
75          * attempts to write at a rate that would retire all the dirty
76          * blocks in 40 seconds.
77          *
78          * The writeback_rate_i_inverse value of 10000 means that 1/10000th
79          * of the error is accumulated in the integral term per second.
80          * This acts as a slow, long-term average that is not subject to
81          * variations in usage like the p term.
82          */
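	/*
	 * Worked example with hypothetical numbers (not measured from any
	 * real workload): if the dirty target is 100 GiB and 120 GiB is
	 * actually dirty, the error is ~20 GiB, i.e. about 41943040
	 * sectors.  With the default p_term_inverse of 40, the proportional
	 * term alone requests ~1048576 sectors/s (512 MiB/s), enough to
	 * retire the excess in roughly 40 seconds, while the integral term
	 * adds about error/10000 sectors/s to the rate each second.
	 */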
83         int64_t target = __calc_target_rate(dc);
84         int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
85         int64_t error = dirty - target;
86         int64_t proportional_scaled =
87                 div_s64(error, dc->writeback_rate_p_term_inverse);
88         int64_t integral_scaled;
89         uint32_t new_rate;
90
91         if ((error < 0 && dc->writeback_rate_integral > 0) ||
92             (error > 0 && time_before64(local_clock(),
93                          dc->writeback_rate.next + NSEC_PER_MSEC))) {
94                 /*
95                  * Only decrease the integral term if it's more than
96                  * zero.  Only increase the integral term if the device
97                  * is keeping up.  (Don't wind up the integral
98                  * ineffectively in either case).
99                  *
100                  * It's necessary to scale this by
101                  * writeback_rate_update_seconds to keep the integral
102                  * term dimensioned properly.
103                  */
104                 dc->writeback_rate_integral += error *
105                         dc->writeback_rate_update_seconds;
106         }
107
108         integral_scaled = div_s64(dc->writeback_rate_integral,
109                         dc->writeback_rate_i_term_inverse);
110
111         new_rate = clamp_t(int32_t, (proportional_scaled + integral_scaled),
112                         dc->writeback_rate_minimum, NSEC_PER_SEC);
113
114         dc->writeback_rate_proportional = proportional_scaled;
115         dc->writeback_rate_integral_scaled = integral_scaled;
116         dc->writeback_rate_change = new_rate -
117                         atomic_long_read(&dc->writeback_rate.rate);
118         atomic_long_set(&dc->writeback_rate.rate, new_rate);
119         dc->writeback_rate_target = target;
120 }
121
122 static bool set_at_max_writeback_rate(struct cache_set *c,
123                                        struct cached_dev *dc)
124 {
125         /* Don't set max writeback rate if it is disabled */
126         if (!c->idle_max_writeback_rate_enabled)
127                 return false;
128
129         /* Don't set max writeback rate if gc is running */
130         if (!c->gc_mark_valid)
131                 return false;
132         /*
133          * idle_counter is increased every time update_writeback_rate() is
134          * called. If all backing devices attached to the same cache set have
135          * identical dc->writeback_rate_update_seconds values, it takes about
136          * 6 rounds of update_writeback_rate() on each backing device before
137          * c->at_max_writeback_rate is set to 1, and then the max writeback
138          * rate is set on each dc->writeback_rate.rate.
139          * In order to avoid extra locking cost for counting the exact number
140          * of dirty cached devices, c->attached_dev_nr is used to calculate
141          * the idle threshold. It might be bigger if not all cached devices
142          * are in writeback mode, but it still works well with a limited
143          * number of extra rounds of update_writeback_rate().
144          */
145         if (atomic_inc_return(&c->idle_counter) <
146             atomic_read(&c->attached_dev_nr) * 6)
147                 return false;
148
149         if (atomic_read(&c->at_max_writeback_rate) != 1)
150                 atomic_set(&c->at_max_writeback_rate, 1);
151
152         atomic_long_set(&dc->writeback_rate.rate, INT_MAX);
153
154         /* keep writeback_rate_target as existing value */
155         dc->writeback_rate_proportional = 0;
156         dc->writeback_rate_integral_scaled = 0;
157         dc->writeback_rate_change = 0;
158
159         /*
160          * Check c->idle_counter and c->at_max_writeback_rate again in case
161          * new I/O arrives before set_at_max_writeback_rate() returns. If it
162          * does, the writeback rate is set back to 1 and its new value should
163          * be decided via __update_writeback_rate().
164          */
165         if ((atomic_read(&c->idle_counter) <
166              atomic_read(&c->attached_dev_nr) * 6) ||
167             !atomic_read(&c->at_max_writeback_rate))
168                 return false;
169
170         return true;
171 }
172
173 static void update_writeback_rate(struct work_struct *work)
174 {
175         struct cached_dev *dc = container_of(to_delayed_work(work),
176                                              struct cached_dev,
177                                              writeback_rate_update);
178         struct cache_set *c = dc->disk.c;
179
180         /*
181          * should check BCACHE_DEV_RATE_DW_RUNNING before calling
182          * cancel_delayed_work_sync().
183          */
184         set_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
185         /* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
186         smp_mb__after_atomic();
187
188         /*
189          * CACHE_SET_IO_DISABLE might be set via sysfs interface,
190          * check it here too.
191          */
192         if (!test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags) ||
193             test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
194                 clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
195                 /* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
196                 smp_mb__after_atomic();
197                 return;
198         }
199
200         if (atomic_read(&dc->has_dirty) && dc->writeback_percent) {
201                 /*
202                  * If the whole cache set is idle, set_at_max_writeback_rate()
203                  * will set the writeback rate to a maximum number. Then it is
204                  * unnecessary to update the writeback rate for an idle cache
205                  * set that is already at the maximum writeback rate.
206                  */
207                 if (!set_at_max_writeback_rate(c, dc)) {
208                         down_read(&dc->writeback_lock);
209                         __update_writeback_rate(dc);
210                         update_gc_after_writeback(c);
211                         up_read(&dc->writeback_lock);
212                 }
213         }
214
215
216         /*
217          * CACHE_SET_IO_DISABLE might be set via sysfs interface,
218          * check it here too.
219          */
220         if (test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags) &&
221             !test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
222                 schedule_delayed_work(&dc->writeback_rate_update,
223                               dc->writeback_rate_update_seconds * HZ);
224         }
225
226         /*
227          * should check BCACHE_DEV_RATE_DW_RUNNING before calling
228          * cancel_delayed_work_sync().
229          */
230         clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
231         /* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
232         smp_mb__after_atomic();
233 }
234
235 static unsigned int writeback_delay(struct cached_dev *dc,
236                                     unsigned int sectors)
237 {
238         if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
239             !dc->writeback_percent)
240                 return 0;
241
242         return bch_next_delay(&dc->writeback_rate, sectors);
243 }
244
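/*
 * State for one dirty extent in flight: the closure driving the
 * read-from-cache then write-to-backing sequence, the owning cached_dev,
 * an ordering sequence number, and the bio (inline bio_vecs follow the
 * struct, sized by the key's sectors).
 */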
245 struct dirty_io {
246         struct closure          cl;
247         struct cached_dev       *dc;
248         uint16_t                sequence;
249         struct bio              bio;
250 };
251
252 static void dirty_init(struct keybuf_key *w)
253 {
254         struct dirty_io *io = w->private;
255         struct bio *bio = &io->bio;
256
257         bio_init(bio, bio->bi_inline_vecs,
258                  DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS));
259         if (!io->dc->writeback_percent)
260                 bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
261
262         bio->bi_iter.bi_size    = KEY_SIZE(&w->key) << 9;
263         bio->bi_private         = w;
264         bch_bio_map(bio, NULL);
265 }
266
267 static void dirty_io_destructor(struct closure *cl)
268 {
269         struct dirty_io *io = container_of(cl, struct dirty_io, cl);
270
271         kfree(io);
272 }
273
274 static void write_dirty_finish(struct closure *cl)
275 {
276         struct dirty_io *io = container_of(cl, struct dirty_io, cl);
277         struct keybuf_key *w = io->bio.bi_private;
278         struct cached_dev *dc = io->dc;
279
280         bio_free_pages(&io->bio);
281
282         /* This is kind of a dumb way of signalling errors. */
283         if (KEY_DIRTY(&w->key)) {
284                 int ret;
285                 unsigned int i;
286                 struct keylist keys;
287
288                 bch_keylist_init(&keys);
289
290                 bkey_copy(keys.top, &w->key);
291                 SET_KEY_DIRTY(keys.top, false);
292                 bch_keylist_push(&keys);
293
294                 for (i = 0; i < KEY_PTRS(&w->key); i++)
295                         atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);
296
297                 ret = bch_btree_insert(dc->disk.c, &keys, NULL, &w->key);
298
299                 if (ret)
300                         trace_bcache_writeback_collision(&w->key);
301
302                 atomic_long_inc(ret
303                                 ? &dc->disk.c->writeback_keys_failed
304                                 : &dc->disk.c->writeback_keys_done);
305         }
306
307         bch_keybuf_del(&dc->writeback_keys, w);
308         up(&dc->in_flight);
309
310         closure_return_with_destructor(cl, dirty_io_destructor);
311 }
312
313 static void dirty_endio(struct bio *bio)
314 {
315         struct keybuf_key *w = bio->bi_private;
316         struct dirty_io *io = w->private;
317
318         if (bio->bi_status) {
319                 SET_KEY_DIRTY(&w->key, false);
320                 bch_count_backing_io_errors(io->dc, bio);
321         }
322
323         closure_put(&io->cl);
324 }
325
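/*
 * Writes to the backing device are issued in the order the dirty keys were
 * collected: each dirty_io carries a sequence number, and write_dirty()
 * only proceeds when writeback_sequence_next matches it, otherwise it parks
 * on writeback_ordering_wait until the preceding write bumps the sequence.
 */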
326 static void write_dirty(struct closure *cl)
327 {
328         struct dirty_io *io = container_of(cl, struct dirty_io, cl);
329         struct keybuf_key *w = io->bio.bi_private;
330         struct cached_dev *dc = io->dc;
331
332         uint16_t next_sequence;
333
334         if (atomic_read(&dc->writeback_sequence_next) != io->sequence) {
335                 /* Not our turn to write; wait for a write to complete */
336                 closure_wait(&dc->writeback_ordering_wait, cl);
337
338                 if (atomic_read(&dc->writeback_sequence_next) == io->sequence) {
339                         /*
340                          * Edge case -- it happened in indeterminate order
341                          * relative to when we were added to the wait list.
342                          */
343                         closure_wake_up(&dc->writeback_ordering_wait);
344                 }
345
346                 continue_at(cl, write_dirty, io->dc->writeback_write_wq);
347                 return;
348         }
349
350         next_sequence = io->sequence + 1;
351
352         /*
353          * IO errors are signalled using the dirty bit on the key.
354          * If we failed to read, we should not attempt to write to the
355          * backing device.  Instead, immediately go to write_dirty_finish
356          * to clean up.
357          */
358         if (KEY_DIRTY(&w->key)) {
359                 dirty_init(w);
360                 bio_set_op_attrs(&io->bio, REQ_OP_WRITE, 0);
361                 io->bio.bi_iter.bi_sector = KEY_START(&w->key);
362                 bio_set_dev(&io->bio, io->dc->bdev);
363                 io->bio.bi_end_io       = dirty_endio;
364
365                 /* I/O request sent to backing device */
366                 closure_bio_submit(io->dc->disk.c, &io->bio, cl);
367         }
368
369         atomic_set(&dc->writeback_sequence_next, next_sequence);
370         closure_wake_up(&dc->writeback_ordering_wait);
371
372         continue_at(cl, write_dirty_finish, io->dc->writeback_write_wq);
373 }
374
375 static void read_dirty_endio(struct bio *bio)
376 {
377         struct keybuf_key *w = bio->bi_private;
378         struct dirty_io *io = w->private;
379
380         /* is_read = 1 */
381         bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
382                             bio->bi_status, 1,
383                             "reading dirty data from cache");
384
385         dirty_endio(bio);
386 }
387
388 static void read_dirty_submit(struct closure *cl)
389 {
390         struct dirty_io *io = container_of(cl, struct dirty_io, cl);
391
392         closure_bio_submit(io->dc->disk.c, &io->bio, cl);
393
394         continue_at(cl, write_dirty, io->dc->writeback_write_wq);
395 }
396
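/*
 * Read dirty extents from the cache and start the (ordered) writes to the
 * backing device.  Each pass batches up to MAX_WRITEBACKS_IN_PASS
 * contiguous keys or MAX_WRITESIZE_IN_PASS sectors, then sleeps according
 * to writeback_delay() to respect the rate limit.
 */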
397 static void read_dirty(struct cached_dev *dc)
398 {
399         unsigned int delay = 0;
400         struct keybuf_key *next, *keys[MAX_WRITEBACKS_IN_PASS], *w;
401         size_t size;
402         int nk, i;
403         struct dirty_io *io;
404         struct closure cl;
405         uint16_t sequence = 0;
406
407         BUG_ON(!llist_empty(&dc->writeback_ordering_wait.list));
408         atomic_set(&dc->writeback_sequence_next, sequence);
409         closure_init_stack(&cl);
410
411         /*
412          * XXX: if we error, background writeback just spins. Should use some
413          * mempools.
414          */
415
416         next = bch_keybuf_next(&dc->writeback_keys);
417
418         while (!kthread_should_stop() &&
419                !test_bit(CACHE_SET_IO_DISABLE, &dc->disk.c->flags) &&
420                next) {
421                 size = 0;
422                 nk = 0;
423
424                 do {
425                         BUG_ON(ptr_stale(dc->disk.c, &next->key, 0));
426
427                         /*
428                          * Don't combine too many operations, even if they
429                          * are all small.
430                          */
431                         if (nk >= MAX_WRITEBACKS_IN_PASS)
432                                 break;
433
434                         /*
435                          * If the current operation is very large, don't
436                          * further combine operations.
437                          */
438                         if (size >= MAX_WRITESIZE_IN_PASS)
439                                 break;
440
441                         /*
442                          * Operations are only eligible to be combined
443                          * if they are contiguous.
444                          *
445                          * TODO: add a heuristic willing to fire a
446                          * certain amount of non-contiguous IO per pass,
447                          * so that we can benefit from backing device
448                          * command queueing.
449                          */
450                         if ((nk != 0) && bkey_cmp(&keys[nk-1]->key,
451                                                 &START_KEY(&next->key)))
452                                 break;
453
454                         size += KEY_SIZE(&next->key);
455                         keys[nk++] = next;
456                 } while ((next = bch_keybuf_next(&dc->writeback_keys)));
457
458                 /* Now we have gathered a set of 1..5 keys to write back. */
459                 for (i = 0; i < nk; i++) {
460                         w = keys[i];
461
462                         io = kzalloc(struct_size(io, bio.bi_inline_vecs,
463                                                 DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS)),
464                                      GFP_KERNEL);
465                         if (!io)
466                                 goto err;
467
468                         w->private      = io;
469                         io->dc          = dc;
470                         io->sequence    = sequence++;
471
472                         dirty_init(w);
473                         bio_set_op_attrs(&io->bio, REQ_OP_READ, 0);
474                         io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
475                         bio_set_dev(&io->bio,
476                                     PTR_CACHE(dc->disk.c, &w->key, 0)->bdev);
477                         io->bio.bi_end_io       = read_dirty_endio;
478
479                         if (bch_bio_alloc_pages(&io->bio, GFP_KERNEL))
480                                 goto err_free;
481
482                         trace_bcache_writeback(&w->key);
483
484                         down(&dc->in_flight);
485
486                         /*
487                          * We've acquired a semaphore for the maximum
488                          * simultaneous number of writebacks; from here
489                          * everything happens asynchronously.
490                          */
491                         closure_call(&io->cl, read_dirty_submit, NULL, &cl);
492                 }
493
494                 delay = writeback_delay(dc, size);
495
496                 while (!kthread_should_stop() &&
497                        !test_bit(CACHE_SET_IO_DISABLE, &dc->disk.c->flags) &&
498                        delay) {
499                         schedule_timeout_interruptible(delay);
500                         delay = writeback_delay(dc, 0);
501                 }
502         }
503
504         if (0) {
505 err_free:
506                 kfree(w->private);
507 err:
508                 bch_keybuf_del(&dc->writeback_keys, w);
509         }
510
511         /*
512          * Wait for outstanding writeback IOs to finish (and keybuf slots to be
513          * freed) before refilling again
514          */
515         closure_sync(&cl);
516 }
517
518 /* Scan for dirty data */
519
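/*
 * Adjust the per-stripe dirty sector counters of device 'inode' by
 * nr_sectors (negative when sectors become clean), keeping the
 * full_dirty_stripes bitmap and, for flash-only volumes, the cache set's
 * flash_dev_dirty_sectors counter in sync.
 */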
520 void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode,
521                                   uint64_t offset, int nr_sectors)
522 {
523         struct bcache_device *d = c->devices[inode];
524         unsigned int stripe_offset, sectors_dirty;
525         int stripe;
526
527         if (!d)
528                 return;
529
530         stripe = offset_to_stripe(d, offset);
531         if (stripe < 0)
532                 return;
533
534         if (UUID_FLASH_ONLY(&c->uuids[inode]))
535                 atomic_long_add(nr_sectors, &c->flash_dev_dirty_sectors);
536
537         stripe_offset = offset & (d->stripe_size - 1);
538
539         while (nr_sectors) {
540                 int s = min_t(unsigned int, abs(nr_sectors),
541                               d->stripe_size - stripe_offset);
542
543                 if (nr_sectors < 0)
544                         s = -s;
545
546                 if (stripe >= d->nr_stripes)
547                         return;
548
549                 sectors_dirty = atomic_add_return(s,
550                                         d->stripe_sectors_dirty + stripe);
551                 if (sectors_dirty == d->stripe_size)
552                         set_bit(stripe, d->full_dirty_stripes);
553                 else
554                         clear_bit(stripe, d->full_dirty_stripes);
555
556                 nr_sectors -= s;
557                 stripe_offset = 0;
558                 stripe++;
559         }
560 }
561
562 static bool dirty_pred(struct keybuf *buf, struct bkey *k)
563 {
564         struct cached_dev *dc = container_of(buf,
565                                              struct cached_dev,
566                                              writeback_keys);
567
568         BUG_ON(KEY_INODE(k) != dc->disk.id);
569
570         return KEY_DIRTY(k);
571 }
572
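/*
 * When partial stripe writes are expensive, refill the writeback keybuf
 * preferentially from stripes that are completely dirty, scanning the
 * full_dirty_stripes bitmap circularly starting at last_scanned.
 */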
573 static void refill_full_stripes(struct cached_dev *dc)
574 {
575         struct keybuf *buf = &dc->writeback_keys;
576         unsigned int start_stripe, next_stripe;
577         int stripe;
578         bool wrapped = false;
579
580         stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned));
581         if (stripe < 0)
582                 stripe = 0;
583
584         start_stripe = stripe;
585
586         while (1) {
587                 stripe = find_next_bit(dc->disk.full_dirty_stripes,
588                                        dc->disk.nr_stripes, stripe);
589
590                 if (stripe == dc->disk.nr_stripes)
591                         goto next;
592
593                 next_stripe = find_next_zero_bit(dc->disk.full_dirty_stripes,
594                                                  dc->disk.nr_stripes, stripe);
595
596                 buf->last_scanned = KEY(dc->disk.id,
597                                         stripe * dc->disk.stripe_size, 0);
598
599                 bch_refill_keybuf(dc->disk.c, buf,
600                                   &KEY(dc->disk.id,
601                                        next_stripe * dc->disk.stripe_size, 0),
602                                   dirty_pred);
603
604                 if (array_freelist_empty(&buf->freelist))
605                         return;
606
607                 stripe = next_stripe;
608 next:
609                 if (wrapped && stripe > start_stripe)
610                         return;
611
612                 if (stripe == dc->disk.nr_stripes) {
613                         stripe = 0;
614                         wrapped = true;
615                 }
616         }
617 }
618
619 /*
620  * Returns true if we scanned the entire disk
621  */
622 static bool refill_dirty(struct cached_dev *dc)
623 {
624         struct keybuf *buf = &dc->writeback_keys;
625         struct bkey start = KEY(dc->disk.id, 0, 0);
626         struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
627         struct bkey start_pos;
628
629         /*
630          * make sure the keybuf position is inside the range for this disk -
631          * at bringup we might not be attached yet, so this disk's inode nr
632          * isn't initialized yet
633          */
634         if (bkey_cmp(&buf->last_scanned, &start) < 0 ||
635             bkey_cmp(&buf->last_scanned, &end) > 0)
636                 buf->last_scanned = start;
637
638         if (dc->partial_stripes_expensive) {
639                 refill_full_stripes(dc);
640                 if (array_freelist_empty(&buf->freelist))
641                         return false;
642         }
643
644         start_pos = buf->last_scanned;
645         bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);
646
647         if (bkey_cmp(&buf->last_scanned, &end) < 0)
648                 return false;
649
650         /*
651          * If we get to the end start scanning again from the beginning, and
652          * only scan up to where we initially started scanning from:
653          */
654         buf->last_scanned = start;
655         bch_refill_keybuf(dc->disk.c, buf, &start_pos, dirty_pred);
656
657         return bkey_cmp(&buf->last_scanned, &start_pos) >= 0;
658 }
659
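/*
 * Main writeback kthread: refill the keybuf with dirty keys, write them
 * back via read_dirty(), and once a full index scan finds nothing left
 * dirty, mark the backing device clean (and exit if it is detaching).
 */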
660 static int bch_writeback_thread(void *arg)
661 {
662         struct cached_dev *dc = arg;
663         struct cache_set *c = dc->disk.c;
664         bool searched_full_index;
665
666         bch_ratelimit_reset(&dc->writeback_rate);
667
668         while (!kthread_should_stop() &&
669                !test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
670                 down_write(&dc->writeback_lock);
671                 set_current_state(TASK_INTERRUPTIBLE);
672                 /*
673                  * If the bcache device is detaching, skip here and continue
674                  * to perform writeback. Otherwise, if there is no dirty data
675                  * in the cache, or there is dirty data but writeback is
676                  * disabled, the writeback thread should sleep here and wait
677                  * for others to wake it up.
678                  */
679                 if (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
680                     (!atomic_read(&dc->has_dirty) || !dc->writeback_running)) {
681                         up_write(&dc->writeback_lock);
682
683                         if (kthread_should_stop() ||
684                             test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
685                                 set_current_state(TASK_RUNNING);
686                                 break;
687                         }
688
689                         schedule();
690                         continue;
691                 }
692                 set_current_state(TASK_RUNNING);
693
694                 searched_full_index = refill_dirty(dc);
695
696                 if (searched_full_index &&
697                     RB_EMPTY_ROOT(&dc->writeback_keys.keys)) {
698                         atomic_set(&dc->has_dirty, 0);
699                         SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
700                         bch_write_bdev_super(dc, NULL);
701                         /*
702                          * If bcache device is detaching via sysfs interface,
703                          * writeback thread should stop after there is no dirty
704                          * data on cache. BCACHE_DEV_DETACHING flag is set in
705                          * bch_cached_dev_detach().
706                          */
707                         if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) {
708                                 struct closure cl;
709
710                                 closure_init_stack(&cl);
711                                 memset(&dc->sb.set_uuid, 0, 16);
712                                 SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE);
713
714                                 bch_write_bdev_super(dc, &cl);
715                                 closure_sync(&cl);
716
717                                 up_write(&dc->writeback_lock);
718                                 break;
719                         }
720
721                         /*
722                          * When the dirty data ratio is high (e.g. 50%+), there
723                          * might be heavy bucket fragmentation after writeback
724                          * finishes, which hurts subsequent write performance.
725                          * If users really care about write performance they
726                          * may set BCH_ENABLE_AUTO_GC via sysfs; then, when
727                          * BCH_DO_AUTO_GC is set, the garbage collection thread
728                          * is woken up here. After the moving gc, the shrunk
729                          * btree and the discarded free bucket space on the SSD
730                          * may help subsequent write requests.
731                          */
732                         if (c->gc_after_writeback ==
733                             (BCH_ENABLE_AUTO_GC|BCH_DO_AUTO_GC)) {
734                                 c->gc_after_writeback &= ~BCH_DO_AUTO_GC;
735                                 force_wake_up_gc(c);
736                         }
737                 }
738
739                 up_write(&dc->writeback_lock);
740
741                 read_dirty(dc);
742
743                 if (searched_full_index) {
744                         unsigned int delay = dc->writeback_delay * HZ;
745
746                         while (delay &&
747                                !kthread_should_stop() &&
748                                !test_bit(CACHE_SET_IO_DISABLE, &c->flags) &&
749                                !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
750                                 delay = schedule_timeout_interruptible(delay);
751
752                         bch_ratelimit_reset(&dc->writeback_rate);
753                 }
754         }
755
756         if (dc->writeback_write_wq) {
757                 flush_workqueue(dc->writeback_write_wq);
758                 destroy_workqueue(dc->writeback_write_wq);
759         }
760         cached_dev_put(dc);
761         wait_for_kthread_stop();
762
763         return 0;
764 }
765
766 /* Init */
767 #define INIT_KEYS_EACH_TIME     500000
768 #define INIT_KEYS_SLEEP_MS      100
769
770 struct sectors_dirty_init {
771         struct btree_op op;
772         unsigned int    inode;
773         size_t          count;
774         struct bkey     start;
775 };
776
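/*
 * Btree map callback: account every dirty key belonging to op->inode via
 * bcache_dev_sectors_dirty_add().  While foreground searches are in flight,
 * return -EAGAIN every INIT_KEYS_EACH_TIME keys so the scan can yield and
 * later restart from op->start.
 */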
777 static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
778                                  struct bkey *k)
779 {
780         struct sectors_dirty_init *op = container_of(_op,
781                                                 struct sectors_dirty_init, op);
782         if (KEY_INODE(k) > op->inode)
783                 return MAP_DONE;
784
785         if (KEY_DIRTY(k))
786                 bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
787                                              KEY_START(k), KEY_SIZE(k));
788
789         op->count++;
790         if (atomic_read(&b->c->search_inflight) &&
791             !(op->count % INIT_KEYS_EACH_TIME)) {
792                 bkey_copy_key(&op->start, k);
793                 return -EAGAIN;
794         }
795
796         return MAP_CONTINUE;
797 }
798
799 static int bch_root_node_dirty_init(struct cache_set *c,
800                                      struct bcache_device *d,
801                                      struct bkey *k)
802 {
803         struct sectors_dirty_init op;
804         int ret;
805
806         bch_btree_op_init(&op.op, -1);
807         op.inode = d->id;
808         op.count = 0;
809         op.start = KEY(op.inode, 0, 0);
810
811         do {
812                 ret = bcache_btree(map_keys_recurse,
813                                    k,
814                                    c->root,
815                                    &op.op,
816                                    &op.start,
817                                    sectors_dirty_init_fn,
818                                    0);
819                 if (ret == -EAGAIN)
820                         schedule_timeout_interruptible(
821                                 msecs_to_jiffies(INIT_KEYS_SLEEP_MS));
822                 else if (ret < 0) {
823                         pr_warn("sectors dirty init failed, ret=%d!\n", ret);
824                         break;
825                 }
826         } while (ret == -EAGAIN);
827
828         return ret;
829 }
830
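/*
 * Worker for multi-threaded dirty sector initialization: threads claim
 * top-level keys of the root node round-robin through state->key_idx and
 * each recurses into its claimed subtree via bch_root_node_dirty_init().
 */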
831 static int bch_dirty_init_thread(void *arg)
832 {
833         struct dirty_init_thrd_info *info = arg;
834         struct bch_dirty_init_state *state = info->state;
835         struct cache_set *c = state->c;
836         struct btree_iter iter;
837         struct bkey *k, *p;
838         int cur_idx, prev_idx, skip_nr;
839
840         k = p = NULL;
841         cur_idx = prev_idx = 0;
842
843         bch_btree_iter_init(&c->root->keys, &iter, NULL);
844         k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
845         BUG_ON(!k);
846
847         p = k;
848
849         while (k) {
850                 spin_lock(&state->idx_lock);
851                 cur_idx = state->key_idx;
852                 state->key_idx++;
853                 spin_unlock(&state->idx_lock);
854
855                 skip_nr = cur_idx - prev_idx;
856
857                 while (skip_nr) {
858                         k = bch_btree_iter_next_filter(&iter,
859                                                        &c->root->keys,
860                                                        bch_ptr_bad);
861                         if (k)
862                                 p = k;
863                         else {
864                                 atomic_set(&state->enough, 1);
865                                 /* Update state->enough earlier */
866                                 smp_mb__after_atomic();
867                                 goto out;
868                         }
869                         skip_nr--;
870                         cond_resched();
871                 }
872
873                 if (p) {
874                         if (bch_root_node_dirty_init(c, state->d, p) < 0)
875                                 goto out;
876                 }
877
878                 p = NULL;
879                 prev_idx = cur_idx;
880                 cond_resched();
881         }
882
883 out:
884         /* In order to wake up state->wait in time */
885         smp_mb__before_atomic();
886         if (atomic_dec_and_test(&state->started))
887                 wake_up(&state->wait);
888
889         return 0;
890 }
891
892 static int bch_btree_dirty_init_thread_nr(void)
893 {
894         int n = num_online_cpus()/2;
895
896         if (n == 0)
897                 n = 1;
898         else if (n > BCH_DIRTY_INIT_THRD_MAX)
899                 n = BCH_DIRTY_INIT_THRD_MAX;
900
901         return n;
902 }
903
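/*
 * Count the dirty sectors already present for device d at registration
 * time.  If the btree has only a root node the scan is done inline;
 * otherwise up to BCH_DIRTY_INIT_THRD_MAX threads (half the online CPUs)
 * scan root-node subtrees in parallel.
 */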
904 void bch_sectors_dirty_init(struct bcache_device *d)
905 {
906         int i;
907         struct bkey *k = NULL;
908         struct btree_iter iter;
909         struct sectors_dirty_init op;
910         struct cache_set *c = d->c;
911         struct bch_dirty_init_state *state;
912         char name[32];
913
914         /* Just count root keys if no leaf node */
915         if (c->root->level == 0) {
916                 bch_btree_op_init(&op.op, -1);
917                 op.inode = d->id;
918                 op.count = 0;
919                 op.start = KEY(op.inode, 0, 0);
920
921                 for_each_key_filter(&c->root->keys,
922                                     k, &iter, bch_ptr_invalid)
923                         sectors_dirty_init_fn(&op.op, c->root, k);
924                 return;
925         }
926
927         state = kzalloc(sizeof(struct bch_dirty_init_state), GFP_KERNEL);
928         if (!state) {
929                 pr_warn("sectors dirty init failed: cannot allocate memory\n");
930                 return;
931         }
932
933         state->c = c;
934         state->d = d;
935         state->total_threads = bch_btree_dirty_init_thread_nr();
936         state->key_idx = 0;
937         spin_lock_init(&state->idx_lock);
938         atomic_set(&state->started, 0);
939         atomic_set(&state->enough, 0);
940         init_waitqueue_head(&state->wait);
941
942         for (i = 0; i < state->total_threads; i++) {
943                 /* Fetch latest state->enough earlier */
944                 smp_mb__before_atomic();
945                 if (atomic_read(&state->enough))
946                         break;
947
948                 state->infos[i].state = state;
949                 atomic_inc(&state->started);
950                 snprintf(name, sizeof(name), "bch_dirty_init[%d]", i);
951
952                 state->infos[i].thread =
953                         kthread_run(bch_dirty_init_thread,
954                                     &state->infos[i],
955                                     name);
956                 if (IS_ERR(state->infos[i].thread)) {
957                         pr_err("failed to run thread bch_dirty_init[%d]\n", i);
958                         for (--i; i >= 0; i--)
959                                 kthread_stop(state->infos[i].thread);
960                         goto out;
961                 }
962         }
963
964         wait_event_interruptible(state->wait,
965                  atomic_read(&state->started) == 0 ||
966                  test_bit(CACHE_SET_IO_DISABLE, &c->flags));
967
968 out:
969         kfree(state);
970 }
971
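/*
 * Set the writeback defaults for a newly registered cached_dev: a 10%
 * dirty target, a 1024 sectors/s starting rate, and the default PI
 * controller constants.  The rate-update delayed work is armed later by
 * bch_cached_dev_writeback_start().
 */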
972 void bch_cached_dev_writeback_init(struct cached_dev *dc)
973 {
974         sema_init(&dc->in_flight, 64);
975         init_rwsem(&dc->writeback_lock);
976         bch_keybuf_init(&dc->writeback_keys);
977
978         dc->writeback_metadata          = true;
979         dc->writeback_running           = false;
980         dc->writeback_percent           = 10;
981         dc->writeback_delay             = 30;
982         atomic_long_set(&dc->writeback_rate.rate, 1024);
983         dc->writeback_rate_minimum      = 8;
984
985         dc->writeback_rate_update_seconds = WRITEBACK_RATE_UPDATE_SECS_DEFAULT;
986         dc->writeback_rate_p_term_inverse = 40;
987         dc->writeback_rate_i_term_inverse = 10000;
988
989         WARN_ON(test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags));
990         INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
991 }
992
993 int bch_cached_dev_writeback_start(struct cached_dev *dc)
994 {
995         dc->writeback_write_wq = alloc_workqueue("bcache_writeback_wq",
996                                                 WQ_MEM_RECLAIM, 0);
997         if (!dc->writeback_write_wq)
998                 return -ENOMEM;
999
1000         cached_dev_get(dc);
1001         dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
1002                                               "bcache_writeback");
1003         if (IS_ERR(dc->writeback_thread)) {
1004                 cached_dev_put(dc);
1005                 destroy_workqueue(dc->writeback_write_wq);
1006                 return PTR_ERR(dc->writeback_thread);
1007         }
1008         dc->writeback_running = true;
1009
1010         WARN_ON(test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags));
1011         schedule_delayed_work(&dc->writeback_rate_update,
1012                               dc->writeback_rate_update_seconds * HZ);
1013
1014         bch_writeback_queue(dc);
1015
1016         return 0;
1017 }