// SPDX-License-Identifier: GPL-2.0
/*
 * Some low-level IO code, and hacks for various block layer limitations
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */
#include "bcache.h"
#include "bset.h"
#include "btree.h"

#include <linux/blkdev.h>
/* Bios with headers */
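/*
 * A "bio with a header": struct bbio (defined in bcache.h) bundles the
 * bio submitted to a cache device with the bkey it was built from and a
 * submission timestamp. The embedded bio is the struct's last member, so
 * the helpers below can convert between the two views with
 * container_of(bio, struct bbio, bio).
 */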
void bch_bbio_free(struct bio *bio, struct cache_set *c)
{
	struct bbio *b = container_of(bio, struct bbio, bio);

	mempool_free(b, &c->bio_meta);
}
struct bio *bch_bbio_alloc(struct cache_set *c)
{
	/*
	 * GFP_NOIO: this runs on the IO path, so the mempool allocation
	 * must not recurse into reclaim-driven IO.
	 */
	struct bbio *b = mempool_alloc(&c->bio_meta, GFP_NOIO);
	struct bio *bio = &b->bio;

	bio_init(bio, bio->bi_inline_vecs, bucket_pages(c));

	return bio;
}
void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
{
	struct bbio *b = container_of(bio, struct bbio, bio);

	/* Route the bio to the device and offset named by pointer 0 of the key: */
	bio->bi_iter.bi_sector	= PTR_OFFSET(&b->key, 0);
	bio_set_dev(bio, PTR_CACHE(c, &b->key, 0)->bdev);

	b->submit_time_us = local_clock_us();
	closure_bio_submit(c, bio, bio->bi_private);
}
void bch_submit_bbio(struct bio *bio, struct cache_set *c,
		     struct bkey *k, unsigned ptr)
{
	struct bbio *b = container_of(bio, struct bbio, bio);

	bch_bkey_copy_single_ptr(&b->key, k, ptr);
	__bch_submit_bbio(bio, c);
}
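/*
 * Typical call pattern, as a sketch (handler and closure names here are
 * hypothetical, not from this file):
 *
 *	struct bio *bio = bch_bbio_alloc(c);
 *
 *	bio->bi_end_io	= my_endio;	// typically calls bch_bbio_endio()
 *	bio->bi_private	= cl;		// closure owning this IO
 *	bch_submit_bbio(bio, c, k, 0);	// submit using pointer 0 of k
 */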
/* IO errors */

void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio)
{
	unsigned errors;

	WARN_ONCE(!dc, "NULL pointer of struct cached_dev");

	errors = atomic_add_return(1, &dc->io_errors);
	if (errors < dc->error_limit)
		pr_err("%s: IO error on backing device, unrecoverable",
			dc->backing_dev_name);
	else
		bch_cached_dev_error(dc);
}
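/*
 * Note the asymmetry in bch_count_backing_io_errors(): while the count
 * stays below dc->error_limit each failure is only logged; once it
 * reaches the limit, bch_cached_dev_error() stops the bcache device
 * rather than keep issuing IO to a failing backing device.
 */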
void bch_count_io_errors(struct cache *ca,
			 blk_status_t error,
			 int is_read,
			 const char *m)
{
	/*
	 * The half-life of an error is:
	 * log2(1/2)/log2(127/128) * refresh ~= 88 * refresh
	 */
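	/*
	 * Where 88 comes from: each time io_count wraps past error_decay
	 * the error count is rescaled by 127/128 (see below), so after n
	 * rescales a count E has decayed to E * (127/128)^n. Solving
	 * (127/128)^n = 1/2 gives n = log(1/2)/log(127/128) ~= 88.4, i.e.
	 * about 88 "refresh" periods halve the accumulated errors.
	 */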
	if (ca->set->error_decay) {
		unsigned count = atomic_inc_return(&ca->io_count);

		while (count > ca->set->error_decay) {
			unsigned errors;
			unsigned old = count;
			unsigned new = count - ca->set->error_decay;

			/*
			 * First we subtract refresh from count; each time we
			 * successfully do so, we rescale the errors once:
			 */

			count = atomic_cmpxchg(&ca->io_count, old, new);
			if (count == old) {
				count = new;

				errors = atomic_read(&ca->io_errors);
				do {
					old = errors;
					new = ((uint64_t) errors * 127) / 128;
					errors = atomic_cmpxchg(&ca->io_errors,
								old, new);
				} while (old != errors);
			}
		}
	}
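	/*
	 * Both loops above are the usual lock-free cmpxchg retry pattern:
	 * recompute from the freshest value whenever another CPU raced with
	 * us. Worked example with error_decay == 100: a CPU that observes
	 * io_count at 250 walks it 250 -> 150 -> 50, rescaling io_errors by
	 * 127/128 once for each successful subtraction.
	 */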
	if (error) {
		unsigned errors = atomic_add_return(1 << IO_ERROR_SHIFT,
						    &ca->io_errors);
		errors >>= IO_ERROR_SHIFT;

		if (errors < ca->set->error_limit)
			pr_err("%s: IO error on %s%s",
			       ca->cache_dev_name, m,
			       is_read ? ", recovering." : ".");
		else
			bch_cache_set_error(ca->set,
					    "%s: too many IO errors %s",
					    ca->cache_dev_name, m);
	}
}
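/*
 * bch_count_io_errors() keeps the error count in fixed point: each error
 * adds 1 << IO_ERROR_SHIFT, and the count is shifted back down before
 * being compared against error_limit. The 127/128 decay above therefore
 * erodes old errors gradually instead of forgetting them all at once.
 */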
void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
			      blk_status_t error, const char *m)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct cache *ca = PTR_CACHE(c, &b->key, 0);
	int is_read = (bio_data_dir(bio) == READ ? 1 : 0);
	unsigned threshold = op_is_write(bio_op(bio))
		? c->congested_write_threshold_us
		: c->congested_read_threshold_us;
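	/*
	 * These thresholds are the cache set's congestion tunables (exposed
	 * in sysfs as congested_read_threshold_us and
	 * congested_write_threshold_us); a threshold of 0 disables
	 * congestion tracking below.
	 */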
	if (threshold) {
		unsigned t = local_clock_us();
		int us = t - b->submit_time_us;
		int congested = atomic_read(&c->congested);

		if (us > (int) threshold) {
			int ms = us / 1024;

			c->congested_last_us = t;

			ms = min(ms, CONGESTED_MAX + congested);
			atomic_sub(ms, &c->congested);
		} else if (congested < 0)
			atomic_inc(&c->congested);
	}
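	/*
	 * c->congested behaves as a leaky counter: IOs slower than the
	 * threshold drive it negative (bounded by CONGESTED_MAX), and each
	 * fast IO while it is negative nudges it back toward zero, so other
	 * code can read "negative" as "this cache is congested".
	 */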
	bch_count_io_errors(ca, error, is_read, m);
}
void bch_bbio_endio(struct cache_set *c, struct bio *bio,
		    blk_status_t error, const char *m)
{
	struct closure *cl = bio->bi_private;

	bch_bbio_count_io_errors(c, bio, error, m);
	bio_put(bio);
	closure_put(cl);
}
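/*
 * Taken together: bch_bbio_alloc()/bch_bbio_free() manage bbios from the
 * cache set's mempool, bch_submit_bbio() routes one pointer of a bkey to
 * the right device and offset, and bch_bbio_endio() is the common
 * completion tail that accounts latency and errors before dropping the
 * bio and putting its owning closure.
 */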