/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/idr.h>
#include <linux/blk-mq.h>
#include <linux/part_stat.h>
#include <linux/blk-crypto.h>
#include <xen/xen.h>
#include "blk-crypto-internal.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)

extern struct dentry *blk_debugfs_root;
struct blk_flush_queue {
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	blk_status_t		rq_status;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	struct list_head	flush_data_in_flight;
	struct request		*flush_rq;

	struct lock_class_key	key;
	spinlock_t		mq_flush_lock;
};
extern struct kmem_cache *blk_requestq_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;

static inline struct blk_flush_queue *
blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
{
	return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq;
}

static inline void __blk_get_queue(struct request_queue *q)
{
	kobject_get(&q->kobj);
}

static inline bool
is_flush_rq(struct request *req, struct blk_mq_hw_ctx *hctx)
{
	return hctx->fq->flush_rq == req;
}

struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
					      gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);

void blk_freeze_queue(struct request_queue *q);
static inline bool biovec_phys_mergeable(struct request_queue *q,
		struct bio_vec *vec1, struct bio_vec *vec2)
{
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = page_to_phys(vec1->bv_page) + vec1->bv_offset;
	phys_addr_t addr2 = page_to_phys(vec2->bv_page) + vec2->bv_offset;

	if (addr1 + vec1->bv_len != addr2)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
		return false;
	if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
		return false;
	return true;
}
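
/*
 * Worked example (illustrative, not part of the original header): assume a
 * segment boundary mask of 0xffff (64K) and two physically contiguous vecs,
 * vec1 at 0xf000 with bv_len 0x1000 and vec2 at 0x10000 with bv_len 0x1000.
 * Contiguity holds (0xf000 + 0x1000 == 0x10000), but the boundary check
 * fails: (0xf000 | 0xffff) == 0xffff while (0x10fff | 0xffff) == 0x1ffff,
 * so the merged segment would straddle a 64K boundary and is rejected.
 */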
static inline bool __bvec_gap_to_prev(struct request_queue *q,
		struct bio_vec *bprv, unsigned int offset)
{
	return (offset & queue_virt_boundary(q)) ||
		((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(struct request_queue *q,
		struct bio_vec *bprv, unsigned int offset)
{
	if (!queue_virt_boundary(q))
		return false;
	return __bvec_gap_to_prev(q, bprv, offset);
}
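
/*
 * Worked example (illustrative, not part of the original header): with a
 * virt boundary of 0xfff (4K, e.g. NVMe), a previous vec with bv_offset 0
 * and bv_len 0x800 ends mid-page, so (0 + 0x800) & 0xfff != 0 and any
 * following vec creates a gap. Only when the previous vec ends on a 4K
 * boundary and the next one starts at a 4K-aligned offset do the vecs sit
 * gap-free in the SG list.
 */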
#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
bool __bio_integrity_endio(struct bio *);
void bio_integrity_free(struct bio *bio);
static inline bool bio_integrity_endio(struct bio *bio)
{
	if (bio_integrity(bio))
		return __bio_integrity_endio(bio);
	return true;
}

bool blk_integrity_merge_rq(struct request_queue *, struct request *,
		struct request *);
bool blk_integrity_merge_bio(struct request_queue *, struct request *,
		struct bio *);

static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

void blk_integrity_add(struct gendisk *);
void blk_integrity_del(struct gendisk *);
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
		struct request *r1, struct request *r2)
{
	return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
		struct request *r, struct bio *b)
{
	return true;
}
static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	return false;
}
static inline void blk_flush_integrity(void) { }
static inline bool bio_integrity_endio(struct bio *bio) { return true; }
static inline void bio_integrity_free(struct bio *bio) { }
static inline void blk_integrity_add(struct gendisk *disk) { }
static inline void blk_integrity_del(struct gendisk *disk) { }
#endif /* CONFIG_BLK_DEV_INTEGRITY */
unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);

bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs, struct request **same_queue_rq);
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
			struct bio *bio, unsigned int nr_segs);

void blk_account_io_start(struct request *req);
void blk_account_io_done(struct request *req, u64 now);

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

void blk_insert_flush(struct request *rq);

void elevator_init_mq(struct request_queue *q);
int elevator_switch_mq(struct request_queue *q,
		       struct elevator_type *new_e);
void __elevator_exit(struct request_queue *, struct elevator_queue *);
int elv_register_queue(struct request_queue *q, bool uevent);
void elv_unregister_queue(struct request_queue *q);

static inline void elevator_exit(struct request_queue *q,
		struct elevator_queue *e)
{
	lockdep_assert_held(&q->sysfs_lock);

	blk_mq_sched_free_requests(q);
	__elevator_exit(q, e);
}
struct hd_struct *__disk_get_part(struct gendisk *disk, int partno);

ssize_t part_size_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_stat_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);

void __blk_queue_split(struct bio **bio, unsigned int *nr_segs);
int ll_back_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs);
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
				struct request *next);
unsigned int blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

int blk_dev_init(void);
/*
 * Contribute to IO statistics IFF:
 *
 *	a) it's attached to a gendisk, and
 *	b) the queue had IO stats enabled when this request was started
 */
static inline bool blk_do_io_stat(struct request *rq)
{
	return rq->rq_disk && (rq->rq_flags & RQF_IO_STAT);
}
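
/*
 * Illustrative note (not part of the original header): RQF_IO_STAT is
 * captured when the request is started, so clearing
 * /sys/block/<dev>/queue/iostats only stops accounting for requests
 * started afterwards; requests already in flight still complete their
 * accounting. Requests without a gendisk attached are skipped entirely.
 */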
static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}

/*
 * The max size one bio can handle is UINT_MAX because bvec_iter.bi_size
 * is defined as 'unsigned int', and it also has to be aligned to the
 * logical block size, which is the minimum unit accepted by the hardware.
 */
static inline unsigned int bio_allowed_max_sectors(struct request_queue *q)
{
	return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9;
}
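
/*
 * Worked example (illustrative, not part of the original header): with a
 * 4096-byte logical block size, round_down(UINT_MAX, 4096) = 4294963200
 * bytes, so the cap is 4294963200 >> 9 = 8388600 sectors; with 512-byte
 * blocks, round_down(UINT_MAX, 512) >> 9 = 8388607 sectors.
 */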
/*
 * The max bio size which is aligned to q->limits.discard_granularity. This
 * is a hint for the generic block layer when splitting a large discard bio:
 * if the device driver then needs to split it into smaller bios, their
 * bi_size can very likely be aligned to the discard_granularity of the
 * device's queue.
 */
static inline unsigned int bio_aligned_discard_max_sectors(
					struct request_queue *q)
{
	return round_down(UINT_MAX, q->limits.discard_granularity) >>
			SECTOR_SHIFT;
}
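
/*
 * Worked example (illustrative, not part of the original header): with
 * discard_granularity = 1 MiB, round_down(UINT_MAX, 1 << 20) = 4095 MiB,
 * giving a split hint of 8386560 sectors; a discard bio that starts on a
 * granularity boundary and is split at that size keeps every child bio
 * 1 MiB aligned.
 */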
/*
 * Internal io_context interface
 */
void get_io_context(struct io_context *ioc);
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask);
void ioc_clear_queue(struct request_queue *q);

int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);

/*
 * Internal throttling interface
 */
#ifdef CONFIG_BLK_DEV_THROTTLING
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
extern void blk_throtl_register_queue(struct request_queue *q);
bool blk_throtl_bio(struct bio *bio);
#else /* CONFIG_BLK_DEV_THROTTLING */
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { }
static inline void blk_throtl_register_queue(struct request_queue *q) { }
static inline bool blk_throtl_bio(struct bio *bio) { return false; }
#endif /* CONFIG_BLK_DEV_THROTTLING */
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
extern ssize_t blk_throtl_sample_time_store(struct request_queue *q,
	const char *page, size_t count);
extern void blk_throtl_bio_endio(struct bio *bio);
extern void blk_throtl_stat_add(struct request *rq, u64 time);
#else
static inline void blk_throtl_bio_endio(struct bio *bio) { }
static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
#endif
#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */
#ifdef CONFIG_BLK_CGROUP_IOLATENCY
extern int blk_iolatency_init(struct request_queue *q);
#else
static inline int blk_iolatency_init(struct request_queue *q) { return 0; }
#endif

struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp);
#ifdef CONFIG_BLK_DEV_ZONED
void blk_queue_free_zone_bitmaps(struct request_queue *q);
#else
static inline void blk_queue_free_zone_bitmaps(struct request_queue *q) {}
#endif
struct hd_struct *disk_map_sector_rcu(struct gendisk *disk, sector_t sector);

int blk_alloc_devt(struct hd_struct *part, dev_t *devt);
void blk_free_devt(dev_t devt);
void blk_invalidate_devt(dev_t devt);
char *disk_name(struct gendisk *hd, int partno, char *buf);
#define ADDPART_FLAG_NONE	0
#define ADDPART_FLAG_RAID	1
#define ADDPART_FLAG_WHOLEDISK	2
void delete_partition(struct hd_struct *part);
int bdev_add_partition(struct block_device *bdev, int partno,
		sector_t start, sector_t length);
int bdev_del_partition(struct block_device *bdev, int partno);
int bdev_resize_partition(struct block_device *bdev, int partno,
		sector_t start, sector_t length);
int disk_expand_part_tbl(struct gendisk *disk, int target);
int hd_ref_init(struct hd_struct *part);
/* no need to get/put refcount of part0 */
static inline int hd_struct_try_get(struct hd_struct *part)
{
	if (part->partno)
		return percpu_ref_tryget_live(&part->ref);
	return 1;
}

static inline void hd_struct_put(struct hd_struct *part)
{
	if (part->partno)
		percpu_ref_put(&part->ref);
}
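
/*
 * Usage sketch (illustrative, not part of the original header):
 *
 *	if (hd_struct_try_get(part)) {
 *		... use *part; the percpu ref keeps it alive ...
 *		hd_struct_put(part);
 *	}
 *
 * part0 is embedded in the gendisk and shares its lifetime, which is why
 * the partno == 0 case skips the refcount entirely.
 */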
static inline void hd_free_part(struct hd_struct *part)
{
	free_percpu(part->dkstats);
	kfree(part->info);
	percpu_ref_exit(&part->ref);
}
/*
 * Any access of part->nr_sects which is not protected by partition
 * bd_mutex or gendisk bdev bd_mutex, should be done using this
 * accessor function.
 *
 * Code written along the lines of i_size_read() and i_size_write().
 * CONFIG_PREEMPTION case optimizes the case of UP kernel with preemption
 * on.
 */
static inline sector_t part_nr_sects_read(struct hd_struct *part)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
	sector_t nr_sects;
	unsigned seq;

	do {
		seq = read_seqcount_begin(&part->nr_sects_seq);
		nr_sects = part->nr_sects;
	} while (read_seqcount_retry(&part->nr_sects_seq, seq));
	return nr_sects;
#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
	sector_t nr_sects;

	preempt_disable();
	nr_sects = part->nr_sects;
	preempt_enable();
	return nr_sects;
#else
	return part->nr_sects;
#endif
}
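
/*
 * Illustrative note (not part of the original header): sector_t is 64 bits,
 * so on a 32-bit SMP kernel a plain read is two loads and could observe a
 * torn value while part_nr_sects_write() races it; the seqcount retry loop
 * rereads until it sees a stable value. On 64-bit kernels the single load
 * is atomic and no protection is needed.
 */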
/*
 * Should be called with the partition's mutex (typically bd_mutex) held, to
 * provide mutual exclusion among writers; otherwise the seqcount can be
 * left in an odd state, leaving readers spinning indefinitely.
 */
static inline void part_nr_sects_write(struct hd_struct *part, sector_t size)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
	preempt_disable();
	write_seqcount_begin(&part->nr_sects_seq);
	part->nr_sects = size;
	write_seqcount_end(&part->nr_sects_seq);
	preempt_enable();
#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
	preempt_disable();
	part->nr_sects = size;
	preempt_enable();
#else
	part->nr_sects = size;
#endif
}
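
/*
 * Usage sketch (illustrative, not part of the original header):
 *
 *	mutex_lock(&bdev->bd_mutex);
 *	part_nr_sects_write(part, new_nr_sects);
 *	mutex_unlock(&bdev->bd_mutex);
 *
 * Two unserialized writers could interleave write_seqcount_begin() calls
 * and leave the count odd, making part_nr_sects_read() spin forever.
 */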
int bio_add_hw_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset,
		unsigned int max_sectors, bool *same_page);

#endif /* BLK_INTERNAL_H */