// SPDX-License-Identifier: GPL-2.0
/*
 * background writeback - scan btree for dirty data and write it to the backing
 * device
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "writeback.h"

#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/sched/clock.h>
#include <trace/events/bcache.h>

/* Rate limiting */
static uint64_t __calc_target_rate(struct cached_dev *dc)
{
        struct cache_set *c = dc->disk.c;

        /*
         * This is the size of the cache, minus the amount used for
         * flash-only devices
         */
        uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size -
                                atomic_long_read(&c->flash_dev_dirty_sectors);

        /*
         * Unfortunately there is no control of global dirty data.  If the
         * user states that they want 10% dirty data in the cache, and has,
         * e.g., 5 backing volumes of equal size, we try to ensure each
         * backing volume uses about 2% of the cache for dirty data.
         */
        uint32_t bdev_share =
                div64_u64(bdev_sectors(dc->bdev) << WRITEBACK_SHARE_SHIFT,
                                c->cached_dev_sectors);

        uint64_t cache_dirty_target =
                div_u64(cache_sectors * dc->writeback_percent, 100);

        /* Ensure each backing dev gets at least one dirty share */
        if (bdev_share < 1)
                bdev_share = 1;

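        /*
         * Worked example (hypothetical numbers): with cache_sectors
         * equivalent to 100GiB and writeback_percent 10,
         * cache_dirty_target is ~10GiB worth of sectors.  If this
         * backing device accounts for half of cached_dev_sectors,
         * bdev_share is ~(1 << (WRITEBACK_SHARE_SHIFT - 1)) and the
         * returned target is ~5GiB worth of sectors.
         */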
        return (cache_dirty_target * bdev_share) >> WRITEBACK_SHARE_SHIFT;
}

static void __update_writeback_rate(struct cached_dev *dc)
{
        /*
         * PI controller:
         * Figures out the amount that should be written per second.
         *
         * First, the error (number of sectors that are dirty beyond our
         * target) is calculated.  The error is accumulated (numerically
         * integrated).
         *
         * Then, the proportional value and integral value are scaled
         * based on configured values.  These are stored as inverses to
         * avoid fixed-point math and to make configuration easy; e.g.,
         * the default value of 40 for writeback_rate_p_term_inverse
         * attempts to write at a rate that would retire all the dirty
         * blocks in 40 seconds.
         *
         * The writeback_rate_i_term_inverse value of 10000 means that
         * 1/10000th of the error is accumulated in the integral term per
         * second.  This acts as a slow, long-term average that is not
         * subject to variations in usage like the p term.
         */
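        /*
         * Illustrative example (made-up numbers): if we are 40000
         * sectors over target, the default p_term_inverse of 40
         * yields proportional_scaled = 1000 sectors/sec, i.e. a rate
         * that would retire the excess in 40 seconds on its own.
         */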
        int64_t target = __calc_target_rate(dc);
        int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
        int64_t error = dirty - target;
        int64_t proportional_scaled =
                div_s64(error, dc->writeback_rate_p_term_inverse);
        int64_t integral_scaled;
        uint32_t new_rate;

        if ((error < 0 && dc->writeback_rate_integral > 0) ||
            (error > 0 && time_before64(local_clock(),
                         dc->writeback_rate.next + NSEC_PER_MSEC))) {
                /*
                 * Only decrease the integral term if it's more than
                 * zero.  Only increase the integral term if the device
                 * is keeping up.  (Don't wind up the integral
                 * ineffectively in either case).
                 *
                 * It's necessary to scale this by
                 * writeback_rate_update_seconds to keep the integral
                 * term dimensioned properly.
                 */
                dc->writeback_rate_integral += error *
                        dc->writeback_rate_update_seconds;
        }

        integral_scaled = div_s64(dc->writeback_rate_integral,
                        dc->writeback_rate_i_term_inverse);

        new_rate = clamp_t(int32_t, (proportional_scaled + integral_scaled),
                        dc->writeback_rate_minimum, NSEC_PER_SEC);

        dc->writeback_rate_proportional = proportional_scaled;
        dc->writeback_rate_integral_scaled = integral_scaled;
        dc->writeback_rate_change = new_rate - dc->writeback_rate.rate;
        dc->writeback_rate.rate = new_rate;
        dc->writeback_rate_target = target;
}

static void update_writeback_rate(struct work_struct *work)
{
        struct cached_dev *dc = container_of(to_delayed_work(work),
                                             struct cached_dev,
                                             writeback_rate_update);
        struct cache_set *c = dc->disk.c;

        /*
         * Callers should check BCACHE_DEV_RATE_DW_RUNNING before calling
         * cancel_delayed_work_sync().
         */
        set_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
        /* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
        smp_mb();

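        /*
         * While the work item runs, BCACHE_DEV_RATE_DW_RUNNING stays
         * set so that teardown paths can tell it is mid-execution
         * before cancelling it; note the work rearms itself via
         * schedule_delayed_work() further down.
         */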
        /*
         * CACHE_SET_IO_DISABLE might be set via sysfs interface,
         * check it here too.
         */
        if (!test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags) ||
            test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
                clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
                /* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
                smp_mb();
                return;
        }

        down_read(&dc->writeback_lock);

        if (atomic_read(&dc->has_dirty) &&
            dc->writeback_percent)
                __update_writeback_rate(dc);

        up_read(&dc->writeback_lock);

        /*
         * CACHE_SET_IO_DISABLE might be set via sysfs interface,
         * check it here too.
         */
        if (test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags) &&
            !test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
                schedule_delayed_work(&dc->writeback_rate_update,
                              dc->writeback_rate_update_seconds * HZ);
        }

        /*
         * Callers should check BCACHE_DEV_RATE_DW_RUNNING before calling
         * cancel_delayed_work_sync().
         */
        clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
        /* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
        smp_mb();
}

static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
{
        if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
            !dc->writeback_percent)
                return 0;

        return bch_next_delay(&dc->writeback_rate, sectors);
}

struct dirty_io {
        struct closure          cl;
        struct cached_dev       *dc;
        uint16_t                sequence;
        struct bio              bio;
};

static void dirty_init(struct keybuf_key *w)
{
        struct dirty_io *io = w->private;
        struct bio *bio = &io->bio;

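        /*
         * The bio's bvecs live inline, in the space allocated past
         * the end of struct dirty_io by the kzalloc() in read_dirty().
         */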
        bio_init(bio, bio->bi_inline_vecs,
                 DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS));
        if (!io->dc->writeback_percent)
                bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

        bio->bi_iter.bi_size    = KEY_SIZE(&w->key) << 9;
        bio->bi_private         = w;
        bch_bio_map(bio, NULL);
}

static void dirty_io_destructor(struct closure *cl)
{
        struct dirty_io *io = container_of(cl, struct dirty_io, cl);
        kfree(io);
}

static void write_dirty_finish(struct closure *cl)
{
        struct dirty_io *io = container_of(cl, struct dirty_io, cl);
        struct keybuf_key *w = io->bio.bi_private;
        struct cached_dev *dc = io->dc;

        bio_free_pages(&io->bio);

        /* This is kind of a dumb way of signalling errors. */
        if (KEY_DIRTY(&w->key)) {
                int ret;
                unsigned i;
                struct keylist keys;

                bch_keylist_init(&keys);

                bkey_copy(keys.top, &w->key);
                SET_KEY_DIRTY(keys.top, false);
                bch_keylist_push(&keys);

                for (i = 0; i < KEY_PTRS(&w->key); i++)
                        atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);

                ret = bch_btree_insert(dc->disk.c, &keys, NULL, &w->key);

                if (ret)
                        trace_bcache_writeback_collision(&w->key);

                atomic_long_inc(ret
                                ? &dc->disk.c->writeback_keys_failed
                                : &dc->disk.c->writeback_keys_done);
        }

        bch_keybuf_del(&dc->writeback_keys, w);
        up(&dc->in_flight);

        closure_return_with_destructor(cl, dirty_io_destructor);
}

static void dirty_endio(struct bio *bio)
{
        struct keybuf_key *w = bio->bi_private;
        struct dirty_io *io = w->private;

        if (bio->bi_status) {
                SET_KEY_DIRTY(&w->key, false);
                bch_count_backing_io_errors(io->dc, bio);
        }

        closure_put(&io->cl);
}

static void write_dirty(struct closure *cl)
{
        struct dirty_io *io = container_of(cl, struct dirty_io, cl);
        struct keybuf_key *w = io->bio.bi_private;
        struct cached_dev *dc = io->dc;

        uint16_t next_sequence;

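        /*
         * Writes are issued strictly in sequence order: each dirty_io
         * carries the sequence number assigned in read_dirty(), and a
         * closure whose turn it isn't yet parks itself on
         * writeback_ordering_wait below.
         */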
        if (atomic_read(&dc->writeback_sequence_next) != io->sequence) {
                /* Not our turn to write; wait for a write to complete */
                closure_wait(&dc->writeback_ordering_wait, cl);

                if (atomic_read(&dc->writeback_sequence_next) == io->sequence) {
                        /*
                         * Edge case: our sequence number came up in an
                         * indeterminate order relative to when we were
                         * added to the wait list, so wake everyone up
                         * again to avoid a missed wakeup.
                         */
                        closure_wake_up(&dc->writeback_ordering_wait);
                }

                continue_at(cl, write_dirty, io->dc->writeback_write_wq);
                return;
        }

        next_sequence = io->sequence + 1;

        /*
         * IO errors are signalled using the dirty bit on the key.
         * If we failed to read, we should not attempt to write to the
         * backing device.  Instead, immediately go to write_dirty_finish
         * to clean up.
         */
        if (KEY_DIRTY(&w->key)) {
                dirty_init(w);
                bio_set_op_attrs(&io->bio, REQ_OP_WRITE, 0);
                io->bio.bi_iter.bi_sector = KEY_START(&w->key);
                bio_set_dev(&io->bio, io->dc->bdev);
                io->bio.bi_end_io       = dirty_endio;

                /* I/O request sent to backing device */
                closure_bio_submit(io->dc->disk.c, &io->bio, cl);
        }

        atomic_set(&dc->writeback_sequence_next, next_sequence);
        closure_wake_up(&dc->writeback_ordering_wait);

        continue_at(cl, write_dirty_finish, io->dc->writeback_write_wq);
}

static void read_dirty_endio(struct bio *bio)
{
        struct keybuf_key *w = bio->bi_private;
        struct dirty_io *io = w->private;

        /* is_read = 1 */
        bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
                            bio->bi_status, 1,
                            "reading dirty data from cache");

        dirty_endio(bio);
}

static void read_dirty_submit(struct closure *cl)
{
        struct dirty_io *io = container_of(cl, struct dirty_io, cl);

        closure_bio_submit(io->dc->disk.c, &io->bio, cl);

        continue_at(cl, write_dirty, io->dc->writeback_write_wq);
}

static void read_dirty(struct cached_dev *dc)
{
        unsigned delay = 0;
        struct keybuf_key *next, *keys[MAX_WRITEBACKS_IN_PASS], *w;
        size_t size;
        int nk, i;
        struct dirty_io *io;
        struct closure cl;
        uint16_t sequence = 0;

        BUG_ON(!llist_empty(&dc->writeback_ordering_wait.list));
        atomic_set(&dc->writeback_sequence_next, sequence);
        closure_init_stack(&cl);

        /*
         * XXX: if we error, background writeback just spins. Should use some
         * mempools.
         */

        next = bch_keybuf_next(&dc->writeback_keys);

        while (!kthread_should_stop() &&
               !test_bit(CACHE_SET_IO_DISABLE, &dc->disk.c->flags) &&
               next) {
                size = 0;
                nk = 0;

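                /*
                 * Gather a batch of contiguous keys: stop at
                 * MAX_WRITEBACKS_IN_PASS keys or MAX_WRITESIZE_IN_PASS
                 * sectors, whichever comes first.
                 */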
                do {
                        BUG_ON(ptr_stale(dc->disk.c, &next->key, 0));

                        /*
                         * Don't combine too many operations, even if they
                         * are all small.
                         */
                        if (nk >= MAX_WRITEBACKS_IN_PASS)
                                break;

                        /*
                         * If the current operation is very large, don't
                         * further combine operations.
                         */
                        if (size >= MAX_WRITESIZE_IN_PASS)
                                break;

                        /*
                         * Operations are only eligible to be combined
                         * if they are contiguous.
                         *
                         * TODO: add a heuristic willing to fire a
                         * certain amount of non-contiguous IO per pass,
                         * so that we can benefit from backing device
                         * command queueing.
                         */
                        if ((nk != 0) && bkey_cmp(&keys[nk-1]->key,
                                                &START_KEY(&next->key)))
                                break;

                        size += KEY_SIZE(&next->key);
                        keys[nk++] = next;
                } while ((next = bch_keybuf_next(&dc->writeback_keys)));

                /*
                 * Now we have gathered a batch of 1..MAX_WRITEBACKS_IN_PASS
                 * keys to write back.
                 */
                for (i = 0; i < nk; i++) {
                        w = keys[i];

                        io = kzalloc(sizeof(struct dirty_io) +
                                     sizeof(struct bio_vec) *
                                     DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
                                     GFP_KERNEL);
                        if (!io)
                                goto err;

                        w->private      = io;
                        io->dc          = dc;
                        io->sequence    = sequence++;

                        dirty_init(w);
                        bio_set_op_attrs(&io->bio, REQ_OP_READ, 0);
                        io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
                        bio_set_dev(&io->bio,
                                    PTR_CACHE(dc->disk.c, &w->key, 0)->bdev);
                        io->bio.bi_end_io       = read_dirty_endio;

                        if (bch_bio_alloc_pages(&io->bio, GFP_KERNEL))
                                goto err_free;

                        trace_bcache_writeback(&w->key);

                        down(&dc->in_flight);

                        /*
                         * We've acquired a semaphore for the maximum
                         * simultaneous number of writebacks; from here
                         * everything happens asynchronously.
                         */
                        closure_call(&io->cl, read_dirty_submit, NULL, &cl);
                }

                delay = writeback_delay(dc, size);

                /*
                 * If the control system would wait for at least half a
                 * second, and there have been no requests hitting the
                 * backing disk for a while: use an alternate mode where
                 * we have at most one contiguous set of writebacks in
                 * flight at a time.  If someone wants to do IO it will
                 * be quick, as it will only have to contend with one
                 * operation in flight, and we'll be round-tripping data
                 * to the backing disk as quickly as it can accept it.
                 */
                if (delay >= HZ / 2) {
                        /*
                         * 3 means at least 1.5 seconds, up to 7.5 if we
                         * have slowed way down.
                         */
                        if (atomic_inc_return(&dc->backing_idle) >= 3) {
                                /* Wait for current I/Os to finish */
                                closure_sync(&cl);
                                /* And immediately launch a new set. */
                                delay = 0;
                        }
                }

                while (!kthread_should_stop() &&
                       !test_bit(CACHE_SET_IO_DISABLE, &dc->disk.c->flags) &&
                       delay) {
                        schedule_timeout_interruptible(delay);
                        delay = writeback_delay(dc, 0);
                }
        }

        if (0) {
err_free:
                kfree(w->private);
err:
                bch_keybuf_del(&dc->writeback_keys, w);
        }

        /*
         * Wait for outstanding writeback IOs to finish (and keybuf slots to be
         * freed) before refilling again
         */
        closure_sync(&cl);
}

/* Scan for dirty data */

void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
                                  uint64_t offset, int nr_sectors)
{
        struct bcache_device *d = c->devices[inode];
        unsigned stripe_offset, stripe, sectors_dirty;

        if (!d)
                return;

        if (UUID_FLASH_ONLY(&c->uuids[inode]))
                atomic_long_add(nr_sectors, &c->flash_dev_dirty_sectors);

        stripe = offset_to_stripe(d, offset);
        stripe_offset = offset & (d->stripe_size - 1);

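        /*
         * The mask above relies on stripe_size being a power of two.
         * Per-stripe dirty counts let writeback find fully dirty
         * stripes cheaply via the full_dirty_stripes bitmap.
         */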
        while (nr_sectors) {
                int s = min_t(unsigned, abs(nr_sectors),
                              d->stripe_size - stripe_offset);

                if (nr_sectors < 0)
                        s = -s;

                if (stripe >= d->nr_stripes)
                        return;

                sectors_dirty = atomic_add_return(s,
                                        d->stripe_sectors_dirty + stripe);
                if (sectors_dirty == d->stripe_size)
                        set_bit(stripe, d->full_dirty_stripes);
                else
                        clear_bit(stripe, d->full_dirty_stripes);

                nr_sectors -= s;
                stripe_offset = 0;
                stripe++;
        }
}

static bool dirty_pred(struct keybuf *buf, struct bkey *k)
{
        struct cached_dev *dc = container_of(buf, struct cached_dev, writeback_keys);

        BUG_ON(KEY_INODE(k) != dc->disk.id);

        return KEY_DIRTY(k);
}

static void refill_full_stripes(struct cached_dev *dc)
{
        struct keybuf *buf = &dc->writeback_keys;
        unsigned start_stripe, stripe, next_stripe;
        bool wrapped = false;

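        /*
         * Scan the full_dirty_stripes bitmap from the stripe holding
         * last_scanned, wrapping around at most once, and refill the
         * keybuf from each run of fully dirty stripes found.
         */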
        stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned));

        if (stripe >= dc->disk.nr_stripes)
                stripe = 0;

        start_stripe = stripe;

        while (1) {
                stripe = find_next_bit(dc->disk.full_dirty_stripes,
                                       dc->disk.nr_stripes, stripe);

                if (stripe == dc->disk.nr_stripes)
                        goto next;

                next_stripe = find_next_zero_bit(dc->disk.full_dirty_stripes,
                                                 dc->disk.nr_stripes, stripe);

                buf->last_scanned = KEY(dc->disk.id,
                                        stripe * dc->disk.stripe_size, 0);

                bch_refill_keybuf(dc->disk.c, buf,
                                  &KEY(dc->disk.id,
                                       next_stripe * dc->disk.stripe_size, 0),
                                  dirty_pred);

                if (array_freelist_empty(&buf->freelist))
                        return;

                stripe = next_stripe;
next:
                if (wrapped && stripe > start_stripe)
                        return;

                if (stripe == dc->disk.nr_stripes) {
                        stripe = 0;
                        wrapped = true;
                }
        }
}

/*
 * Returns true if we scanned the entire disk
 */
static bool refill_dirty(struct cached_dev *dc)
{
        struct keybuf *buf = &dc->writeback_keys;
        struct bkey start = KEY(dc->disk.id, 0, 0);
        struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
        struct bkey start_pos;

        /*
         * Make sure the keybuf position is inside the range for this disk;
         * at bringup we might not be attached yet, so this disk's inode nr
         * isn't initialized yet.
         */
        if (bkey_cmp(&buf->last_scanned, &start) < 0 ||
            bkey_cmp(&buf->last_scanned, &end) > 0)
                buf->last_scanned = start;

        if (dc->partial_stripes_expensive) {
                refill_full_stripes(dc);
                if (array_freelist_empty(&buf->freelist))
                        return false;
        }

        start_pos = buf->last_scanned;
        bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);

        if (bkey_cmp(&buf->last_scanned, &end) < 0)
                return false;

        /*
         * If we get to the end start scanning again from the beginning, and
         * only scan up to where we initially started scanning from:
         */
        buf->last_scanned = start;
        bch_refill_keybuf(dc->disk.c, buf, &start_pos, dirty_pred);

        return bkey_cmp(&buf->last_scanned, &start_pos) >= 0;
}

static int bch_writeback_thread(void *arg)
{
        struct cached_dev *dc = arg;
        struct cache_set *c = dc->disk.c;
        bool searched_full_index;

        bch_ratelimit_reset(&dc->writeback_rate);

        while (!kthread_should_stop() &&
               !test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
                down_write(&dc->writeback_lock);
                set_current_state(TASK_INTERRUPTIBLE);
                /*
                 * If the bcache device is detaching, skip here and continue
                 * to perform writeback. Otherwise, if there is no dirty data
                 * on the cache, or there is dirty data but writeback is
                 * disabled, the writeback thread should sleep here and wait
                 * for someone to wake it up.
                 */
                if (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
                    (!atomic_read(&dc->has_dirty) || !dc->writeback_running)) {
                        up_write(&dc->writeback_lock);

                        if (kthread_should_stop() ||
                            test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
                                set_current_state(TASK_RUNNING);
                                break;
                        }

                        schedule();
                        continue;
                }
                set_current_state(TASK_RUNNING);

                searched_full_index = refill_dirty(dc);

                if (searched_full_index &&
                    RB_EMPTY_ROOT(&dc->writeback_keys.keys)) {
                        atomic_set(&dc->has_dirty, 0);
                        SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
                        bch_write_bdev_super(dc, NULL);
                        /*
                         * If the bcache device is detaching via the sysfs
                         * interface, the writeback thread should stop once
                         * there is no dirty data on the cache.  The
                         * BCACHE_DEV_DETACHING flag is set in
                         * bch_cached_dev_detach().
                         */
                        if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
                                break;
                }

                up_write(&dc->writeback_lock);

                read_dirty(dc);

                if (searched_full_index) {
                        unsigned delay = dc->writeback_delay * HZ;

                        while (delay &&
                               !kthread_should_stop() &&
                               !test_bit(CACHE_SET_IO_DISABLE, &c->flags) &&
                               !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
                                delay = schedule_timeout_interruptible(delay);

                        bch_ratelimit_reset(&dc->writeback_rate);
                }
        }

        cached_dev_put(dc);
        wait_for_kthread_stop();

        return 0;
}

/* Init */
#define INIT_KEYS_EACH_TIME     500000
#define INIT_KEYS_SLEEP_MS      100
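
/*
 * sectors_dirty_init_fn() below yields for INIT_KEYS_SLEEP_MS every
 * INIT_KEYS_EACH_TIME keys while foreground searches are in flight,
 * so that initialization does not starve them.
 */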

struct sectors_dirty_init {
        struct btree_op op;
        unsigned        inode;
        size_t          count;
        struct bkey     start;
};

static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
                                 struct bkey *k)
{
        struct sectors_dirty_init *op = container_of(_op,
                                                struct sectors_dirty_init, op);
        if (KEY_INODE(k) > op->inode)
                return MAP_DONE;

        if (KEY_DIRTY(k))
                bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
                                             KEY_START(k), KEY_SIZE(k));

        op->count++;
        if (atomic_read(&b->c->search_inflight) &&
            !(op->count % INIT_KEYS_EACH_TIME)) {
                bkey_copy_key(&op->start, k);
                return -EAGAIN;
        }

        return MAP_CONTINUE;
}

void bch_sectors_dirty_init(struct bcache_device *d)
{
        struct sectors_dirty_init op;
        int ret;

        bch_btree_op_init(&op.op, -1);
        op.inode = d->id;
        op.count = 0;
        op.start = KEY(op.inode, 0, 0);

        do {
                ret = bch_btree_map_keys(&op.op, d->c, &op.start,
                                         sectors_dirty_init_fn, 0);
                if (ret == -EAGAIN)
                        schedule_timeout_interruptible(
                                msecs_to_jiffies(INIT_KEYS_SLEEP_MS));
                else if (ret < 0) {
                        pr_warn("sectors dirty init failed, ret=%d!", ret);
                        break;
                }
        } while (ret == -EAGAIN);
}

void bch_cached_dev_writeback_init(struct cached_dev *dc)
{
        sema_init(&dc->in_flight, 64);
        init_rwsem(&dc->writeback_lock);
        bch_keybuf_init(&dc->writeback_keys);

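        /*
         * Defaults: aim for at most 10% of the cache dirty, wait 30s
         * between passes once the full index has been scanned, and
         * start the rate limiter at 1024 sectors/sec with a floor of
         * 8 (writeback_rate is in sectors per second).
         */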
        dc->writeback_metadata          = true;
        dc->writeback_running           = true;
        dc->writeback_percent           = 10;
        dc->writeback_delay             = 30;
        dc->writeback_rate.rate         = 1024;
        dc->writeback_rate_minimum      = 8;

        dc->writeback_rate_update_seconds = WRITEBACK_RATE_UPDATE_SECS_DEFAULT;
        dc->writeback_rate_p_term_inverse = 40;
        dc->writeback_rate_i_term_inverse = 10000;

        WARN_ON(test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags));
        INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
}

int bch_cached_dev_writeback_start(struct cached_dev *dc)
{
        dc->writeback_write_wq = alloc_workqueue("bcache_writeback_wq",
                                                WQ_MEM_RECLAIM, 0);
        if (!dc->writeback_write_wq)
                return -ENOMEM;

        cached_dev_get(dc);
        dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
                                              "bcache_writeback");
        if (IS_ERR(dc->writeback_thread)) {
                cached_dev_put(dc);
                return PTR_ERR(dc->writeback_thread);
        }

        WARN_ON(test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags));
        schedule_delayed_work(&dc->writeback_rate_update,
                              dc->writeback_rate_update_seconds * HZ);

        bch_writeback_queue(dc);

        return 0;
}