/*
 * Functions related to generic helper functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"
/*
 * Chain the previous bio to a freshly allocated one and submit it, so a
 * caller can build an arbitrarily long chain while holding only the
 * newest bio.
 */
static struct bio *next_bio(struct bio *bio, unsigned int nr_pages,
		gfp_t gfp)
{
	struct bio *new = bio_alloc(gfp, nr_pages);

	if (bio) {
		bio_chain(bio, new);
		submit_bio(bio);
	}

	return new;
}
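/*
 * Illustrative sketch (not part of the original file): the calling
 * pattern next_bio() supports. The loop bound and the bio setup are
 * hypothetical; the helpers below fill in real ones. Only the final
 * bio is waited on, since bio_chain() ties completion of the earlier,
 * already-submitted bios to it.
 */
#if 0
static int example_chain(struct block_device *bdev, int nr)
{
	struct bio *bio = NULL;
	int ret = 0;

	while (nr--) {
		bio = next_bio(bio, 1, GFP_KERNEL);
		/* ... point bio at bdev and fill in op/sector/size ... */
	}
	if (bio) {
		ret = submit_bio_wait(bio);	/* waits for the whole chain */
		bio_put(bio);
	}
	return ret;
}
#endif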
int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int op_flags,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	unsigned int granularity;
	int alignment;

	if (!q)
		return -ENXIO;
	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;
	if ((op_flags & REQ_SECURE) && !blk_queue_secdiscard(q))
		return -EOPNOTSUPP;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);
	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;

	while (nr_sects) {
		unsigned int req_sects;
		sector_t end_sect, tmp;

		/* Make sure bi_size doesn't overflow */
		req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9);

		/*
		 * If splitting a request, and the next starting sector
		 * would be misaligned, stop the discard at the previous
		 * aligned sector.
		 */
		end_sect = sector + req_sects;
		tmp = end_sect;
		if (req_sects < nr_sects &&
		    sector_div(tmp, granularity) != alignment) {
			end_sect = end_sect - alignment;
			sector_div(end_sect, granularity);
			end_sect = end_sect * granularity + alignment;
			req_sects = end_sect - sector;
		}
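		/*
		 * Worked example (illustrative, not from the original
		 * source): with granularity = 8 and alignment = 2, a
		 * tentative end_sect of 21 rounds down to
		 * ((21 - 2) / 8) * 8 + 2 = 18, the largest sector at or
		 * below 21 that is congruent to the alignment modulo the
		 * granularity, so the next chunk starts aligned.
		 */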
		bio = next_bio(bio, 1, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;
		bio_set_op_attrs(bio, REQ_OP_DISCARD, op_flags);

		bio->bi_iter.bi_size = req_sects << 9;
		nr_sects -= req_sects;
		sector = end_sect;

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}

	*biop = bio;
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);
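/*
 * Illustrative sketch (not in the original file): batching two
 * discontiguous ranges through the *biop interface so they share one
 * bio chain, one plug, and one wait. The ranges and the function name
 * are hypothetical; the structure mirrors blkdev_issue_discard() below.
 */
#if 0
static int example_discard_two_ranges(struct block_device *bdev)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, 0, 1024, GFP_KERNEL, 0, &bio);
	if (!ret)
		ret = __blkdev_issue_discard(bdev, 4096, 1024, GFP_KERNEL,
					     0, &bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	return ret;
}
#endif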
/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_IFL_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	int op_flags = 0;
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	if (flags & BLKDEV_DISCARD_SECURE)
		op_flags |= REQ_SECURE;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, op_flags,
			&bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		if (ret == -EOPNOTSUPP)
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
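/*
 * Illustrative sketch (not in the original file): a caller issuing a
 * secure discard over a hypothetical range, which fails with
 * -EOPNOTSUPP unless the queue supports secure discard.
 */
#if 0
static int example_secure_discard(struct block_device *bdev)
{
	return blkdev_issue_discard(bdev, 0, 2048, GFP_KERNEL,
				    BLKDEV_DISCARD_SECURE);
}
#endif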
/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data to write
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
			    sector_t nr_sects, gfp_t gfp_mask,
			    struct page *page)
{
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio *bio = NULL;
	int ret = 0;

	if (!q)
		return -ENXIO;

	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
	max_write_same_sectors = UINT_MAX >> 9;

	while (nr_sects) {
		bio = next_bio(bio, 1, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_iter.bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
	}

	if (bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	return ret != -EOPNOTSUPP ? ret : 0;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
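/*
 * Illustrative sketch (not in the original file): zeroing a range by
 * writing the shared zero page with WRITE SAME, the same trick
 * blkdev_issue_zeroout() uses below.
 */
#if 0
static int example_write_same_zero(struct block_device *bdev,
				   sector_t sector, sector_t nr_sects)
{
	return blkdev_issue_write_same(bdev, sector, nr_sects, GFP_KERNEL,
				       ZERO_PAGE(0));
}
#endif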
/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *  Generate and issue a number of bios with zero-filled pages.
 */
static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
				  sector_t nr_sects, gfp_t gfp_mask)
{
	int ret;
	struct bio *bio = NULL;
	unsigned int sz;

	while (nr_sects != 0) {
		bio = next_bio(bio, min(nr_sects, (sector_t)BIO_MAX_PAGES),
				gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE >> 9, nr_sects);
			ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
			nr_sects -= ret >> 9;
			sector += ret >> 9;
			if (ret < (sz << 9))
				break;
		}
	}

	if (bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
		return ret;
	}
	return 0;
}
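/*
 * Sizing note (illustrative, assuming 4 KiB pages and BIO_MAX_PAGES of
 * 256): PAGE_SIZE >> 9 is 8 sectors per page, so each bio in the loop
 * above carries at most 256 * 4 KiB = 1 MiB of zeroes before a new bio
 * is chained.
 */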
/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @discard:	whether to discard the block range
 *
 * Description:
 *  Zero-fill a block range.  If the discard flag is set and the block
 *  device guarantees that subsequent READ operations to the block range
 *  in question will return zeroes, the blocks will be discarded.  Should
 *  the discard request fail, if the discard flag is not set, or if
 *  discard_zeroes_data is not supported, this function will resort to
 *  zeroing the blocks manually, thus provisioning (allocating,
 *  anchoring) them.  If the block device supports the WRITE SAME command
 *  blkdev_issue_zeroout() will use it to optimize the process of
 *  clearing the block range.  Otherwise the zeroing will be performed
 *  using regular WRITE calls.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			 sector_t nr_sects, gfp_t gfp_mask, bool discard)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (discard && blk_queue_discard(q) && q->limits.discard_zeroes_data &&
	    blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, 0) == 0)
		return 0;

	if (bdev_write_same(bdev) &&
	    blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
				    ZERO_PAGE(0)) == 0)
		return 0;

	return __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask);
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
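/*
 * Illustrative sketch (not in the original file): zeroing a hypothetical
 * metadata region, letting the helper pick discard, WRITE SAME, or plain
 * writes in that order of preference.
 */
#if 0
static int example_zero_metadata(struct block_device *bdev)
{
	return blkdev_issue_zeroout(bdev, 2048, 256, GFP_NOFS, true);
}
#endif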