// SPDX-License-Identifier: GPL-2.0
/*
 * Some low level IO code, and hacks for various block layer limitations
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "bset.h"
#include "debug.h"

#include <linux/blkdev.h>

/* Bios with headers */

void bch_bbio_free(struct bio *bio, struct cache_set *c)
{
	struct bbio *b = container_of(bio, struct bbio, bio);

	mempool_free(b, c->bio_meta);
}

struct bio *bch_bbio_alloc(struct cache_set *c)
{
	struct bbio *b = mempool_alloc(c->bio_meta, GFP_NOIO);
	struct bio *bio = &b->bio;

	bio_init(bio, bio->bi_inline_vecs, bucket_pages(c));

	return bio;
}

void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
{
	struct bbio *b = container_of(bio, struct bbio, bio);

	bio->bi_iter.bi_sector	= PTR_OFFSET(&b->key, 0);
	bio_set_dev(bio, PTR_CACHE(c, &b->key, 0)->bdev);

	b->submit_time_us = local_clock_us();
	closure_bio_submit(c, bio, bio->bi_private);
}

void bch_submit_bbio(struct bio *bio, struct cache_set *c,
		     struct bkey *k, unsigned ptr)
{
	struct bbio *b = container_of(bio, struct bbio, bio);

	bch_bkey_copy_single_ptr(&b->key, k, ptr);
	__bch_submit_bbio(bio, c);
}

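/*
 * Usage sketch (illustrative, not part of this file): callers such as
 * the btree node read path allocate a bbio, fill in the bio fields and
 * the parent closure, then submit against one of a key's pointers.
 * Names marked "hypothetical" below are ours, not bcache's:
 *
 *	struct bio *bio = bch_bbio_alloc(c);
 *
 *	bio->bi_iter.bi_size	= KEY_SIZE(k) << 9;
 *	bio->bi_end_io		= my_endio;	// hypothetical handler
 *	bio->bi_private		= cl;		// parent closure
 *
 *	bch_submit_bbio(bio, c, k, 0);	// copies ptr 0 of *k into b->key
 */
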
/* IO errors */

void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio)
{
	char buf[BDEVNAME_SIZE];
	unsigned errors;

	WARN_ONCE(!dc, "NULL pointer of struct cached_dev");

	errors = atomic_add_return(1, &dc->io_errors);
	if (errors < dc->error_limit)
		pr_err("%s: IO error on backing device, unrecoverable",
			bio_devname(bio, buf));
	else
		bch_cached_dev_error(dc);
}

void bch_count_io_errors(struct cache *ca,
			 blk_status_t error,
			 int is_read,
			 const char *m)
{
	/*
	 * The halflife of an error is:
	 * log2(1/2)/log2(127/128) * refresh ~= 88 * refresh
	 */

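	/*
	 * Worked out (illustrative): a rescale multiplies the error count
	 * by 127/128, and one rescale happens per error_decay ("refresh")
	 * IOs.  Halving therefore takes n rescales, where
	 *
	 *	(127/128)^n = 1/2  =>  n = log(1/2)/log(127/128) ~= 88.4
	 *
	 * so an error's halflife is roughly 88 * error_decay IOs.
	 */
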
	if (ca->set->error_decay) {
		unsigned count = atomic_inc_return(&ca->io_count);

		while (count > ca->set->error_decay) {
			unsigned errors;
			unsigned old = count;
			unsigned new = count - ca->set->error_decay;

			/*
			 * First we subtract refresh from count; each time we
			 * successfully do so, we rescale the errors once:
			 */

			count = atomic_cmpxchg(&ca->io_count, old, new);

			if (count == old) {
				count = new;

				errors = atomic_read(&ca->io_errors);
				do {
					old = errors;
					new = ((uint64_t) errors * 127) / 128;
					errors = atomic_cmpxchg(&ca->io_errors,
								old, new);
				} while (old != errors);
			}
		}
	}

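	/*
	 * Example trace (ours, not from the source): with error_decay =
	 * 100 and io_count incremented to 250, the loop above subtracts
	 * 100 twice (250 -> 150 -> 50), rescales io_errors by 127/128
	 * once per successful subtraction, then exits at count = 50.
	 */
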
	if (error) {
		char buf[BDEVNAME_SIZE];
		unsigned errors = atomic_add_return(1 << IO_ERROR_SHIFT,
						    &ca->io_errors);
		errors >>= IO_ERROR_SHIFT;

		if (errors < ca->set->error_limit)
			pr_err("%s: IO error on %s%s",
			       bdevname(ca->bdev, buf), m,
			       is_read ? ", recovering." : ".");
		else
			bch_cache_set_error(ca->set,
					    "%s: too many IO errors %s",
					    bdevname(ca->bdev, buf), m);
	}
}

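/*
 * Note on the fixed point format (our reading of the code above): each
 * error adds 1 << IO_ERROR_SHIFT to io_errors, and the decay path
 * multiplies the whole counter by 127/128, so the low IO_ERROR_SHIFT
 * bits carry the fractional part of decayed errors.  Shifting down
 * recovers the number of whole errors for the error_limit comparison.
 */
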
void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
			      blk_status_t error, const char *m)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct cache *ca = PTR_CACHE(c, &b->key, 0);
	int is_read = (bio_data_dir(bio) == READ ? 1 : 0);

	unsigned threshold = op_is_write(bio_op(bio))
		? c->congested_write_threshold_us
		: c->congested_read_threshold_us;

	if (threshold) {
		unsigned t = local_clock_us();

		int us = t - b->submit_time_us;
		int congested = atomic_read(&c->congested);

		if (us > (int) threshold) {
			int ms = us / 1024;

			c->congested_last_us = t;

			ms = min(ms, CONGESTED_MAX + congested);
			atomic_sub(ms, &c->congested);
		} else if (congested < 0)
			atomic_inc(&c->congested);
	}

	bch_count_io_errors(ca, error, is_read, m);
}

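/*
 * Worked numbers (ours, for illustration): with a read threshold of
 * 2000us, an IO completing in us = 10240 charges ms = us / 1024 = 10
 * (a cheap approximation of milliseconds) against c->congested; the
 * min(ms, CONGESTED_MAX + congested) clamp keeps the counter from
 * dropping below -CONGESTED_MAX.  IOs that finish under the threshold
 * then increment it back toward zero.
 */
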
void bch_bbio_endio(struct cache_set *c, struct bio *bio,
		    blk_status_t error, const char *m)
{
	struct closure *cl = bio->bi_private;

	bch_bbio_count_io_errors(c, bio, error, m);
	bio_put(bio);
	closure_put(cl);
}
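
/*
 * Usage sketch (illustrative): a bi_end_io handler for a bbio typically
 * just forwards to bch_bbio_endio(), which counts errors/congestion,
 * drops the bio, and releases the parent closure.  "my_endio" and the
 * message string below are hypothetical:
 *
 *	static void my_endio(struct bio *bio)
 *	{
 *		bch_bbio_endio(c, bio, bio->bi_status, "reading btree");
 *	}
 */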