/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/blk-mq.h>

struct blk_mq_ctxs {
	struct kobject kobj;
	struct blk_mq_ctx __percpu *queue_ctx;
};

/**
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 */
struct blk_mq_ctx {
	struct {
		spinlock_t lock;
		struct list_head rq_lists[HCTX_MAX_TYPES];
	} ____cacheline_aligned_in_smp;

	unsigned int cpu;
	unsigned short index_hw[HCTX_MAX_TYPES];
	struct blk_mq_hw_ctx *hctxs[HCTX_MAX_TYPES];

	struct request_queue *queue;
	struct blk_mq_ctxs *ctxs;
	struct kobject kobj;
} ____cacheline_aligned_in_smp;

enum {
	BLK_MQ_NO_TAG = -1U,
	BLK_MQ_TAG_MIN = 1,
	BLK_MQ_TAG_MAX = BLK_MQ_NO_TAG - 1,
};
void blk_mq_submit_bio(struct bio *bio);
int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
		unsigned int flags);
void blk_mq_exit_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
			     unsigned int);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
				bool kick_requeue_list);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start);
void blk_mq_put_rq_ref(struct request *rq);
/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
				unsigned int hctx_idx, unsigned int depth);
void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
			     struct blk_mq_tags *tags,
			     unsigned int hctx_idx);
/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
			     bool at_head);
void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
				  bool run_queue);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);
/*
 * blk_mq_map_queue_type() - map (hctx_type,cpu) to hardware queue
 * @q: request queue
 * @type: the hctx type index
 * @cpu: CPU
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
							   enum hctx_type type,
							   unsigned int cpu)
{
	return xa_load(&q->hctx_table, q->tag_set->map[type].mq_map[cpu]);
}
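/*
 * Illustrative sketch only, not part of blk-mq: look up the default-type
 * hardware queue that services a given CPU. The "example" name below is
 * hypothetical.
 */
static inline struct blk_mq_hw_ctx *
blk_mq_example_default_hctx(struct request_queue *q, unsigned int cpu)
{
	return blk_mq_map_queue_type(q, HCTX_TYPE_DEFAULT, cpu);
}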
static inline enum hctx_type blk_mq_get_hctx_type(blk_opf_t opf)
{
	enum hctx_type type = HCTX_TYPE_DEFAULT;

	/*
	 * The caller ensures that if REQ_POLLED is set, polling is enabled
	 * on this queue.
	 */
	if (opf & REQ_POLLED)
		type = HCTX_TYPE_POLL;
	else if ((opf & REQ_OP_MASK) == REQ_OP_READ)
		type = HCTX_TYPE_READ;

	return type;
}
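/*
 * For example (illustrative): plain writes stay on HCTX_TYPE_DEFAULT, reads
 * map to HCTX_TYPE_READ (whose map aliases the default one when no dedicated
 * read queues are configured), and any opf with REQ_POLLED set maps to
 * HCTX_TYPE_POLL.
 */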
/*
 * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
 * @q: request queue
 * @opf: operation type (REQ_OP_*) and flags (e.g. REQ_POLLED).
 * @ctx: software queue cpu ctx
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
						      blk_opf_t opf,
						      struct blk_mq_ctx *ctx)
{
	return ctx->hctxs[blk_mq_get_hctx_type(opf)];
}
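/*
 * Illustrative sketch only, not part of blk-mq: resolve the hardware queue
 * for a polled read submitted through the software queue @ctx. Assumes the
 * caller has already ensured the queue exposes poll queues, per the comment
 * in blk_mq_get_hctx_type(). The "example" name below is hypothetical.
 */
static inline struct blk_mq_hw_ctx *
blk_mq_example_poll_read_hctx(struct request_queue *q, struct blk_mq_ctx *ctx)
{
	return blk_mq_map_queue(q, REQ_OP_READ | REQ_POLLED, ctx);
}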
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
int blk_mq_sysfs_register(struct gendisk *disk);
void blk_mq_sysfs_unregister(struct gendisk *disk);
int blk_mq_sysfs_register_hctxs(struct request_queue *q);
void blk_mq_sysfs_unregister_hctxs(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
void blk_mq_free_plug_rqs(struct blk_plug *plug);
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_cancel_work_sync(struct request_queue *q);

void blk_mq_release(struct request_queue *q);
static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
						  unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queueing queues. They could be per-node
 * as well, for instance. For now this is hardcoded as-is. Note that we don't
 * care about preemption, since we know the ctx's are persistent. This does
 * mean that we can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, raw_smp_processor_id());
}
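/*
 * Usage sketch (illustrative only): a submission path typically grabs the
 * software queue once and passes it along, e.g.:
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
 *
 * Because the per-cpu ctx structures are persistent, it does not matter if
 * the task later migrates to another CPU; the request simply stays on the
 * ctx it was assigned to.
 */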
struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	blk_mq_req_flags_t flags;
	unsigned int shallow_depth;
	blk_opf_t cmd_flags;
	req_flags_t rq_flags;

	/* allocate multiple requests/tags in one go */
	unsigned int nr_tags;
	struct request **cached_rq;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};
struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags,
		unsigned int reserved_tags, int node, int alloc_policy);
void blk_mq_free_tags(struct blk_mq_tags *tags);
int blk_mq_init_bitmaps(struct sbitmap_queue *bitmap_tags,
		struct sbitmap_queue *breserved_tags, unsigned int queue_depth,
		unsigned int reserved, int node, int alloc_policy);

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data);
unsigned long blk_mq_get_tags(struct blk_mq_alloc_data *data, int nr_tags,
		unsigned int *offset);
void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
		unsigned int tag);
void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags);
int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
		struct blk_mq_tags **tags, unsigned int depth, bool can_grow);
void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set,
		unsigned int size);
void blk_mq_tag_update_sched_shared_tags(struct request_queue *q);

void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
		void *priv);
void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
		void *priv);
static inline struct sbq_wait_state *bt_wait_ptr(struct sbitmap_queue *bt,
						 struct blk_mq_hw_ctx *hctx)
{
	if (!hctx)
		return &bt->ws[0];
	return sbq_wait_ptr(bt, &hctx->wait_index);
}

void __blk_mq_tag_busy(struct blk_mq_hw_ctx *);
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *);
static inline void blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_tag_busy(hctx);
}

static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_tag_idle(hctx);
}

static inline bool blk_mq_tag_is_reserved(struct blk_mq_tags *tags,
					  unsigned int tag)
{
	return tag < tags->nr_reserved_tags;
}

static inline bool blk_mq_is_shared_tags(unsigned int flags)
{
	return flags & BLK_MQ_F_TAG_HCTX_SHARED;
}
static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (!(data->rq_flags & RQF_ELV))
		return data->hctx->tags;
	return data->hctx->sched_tags;
}

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

unsigned int blk_mq_in_flight(struct request_queue *q,
		struct block_device *part);
void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
		unsigned int inflight[2]);
static inline void blk_mq_put_dispatch_budget(struct request_queue *q,
					      int budget_token)
{
	if (q->mq_ops->put_budget)
		q->mq_ops->put_budget(q, budget_token);
}

static inline int blk_mq_get_dispatch_budget(struct request_queue *q)
{
	if (q->mq_ops->get_budget)
		return q->mq_ops->get_budget(q);
	return 0;
}

static inline void blk_mq_set_rq_budget_token(struct request *rq, int token)
{
	if (token < 0)
		return;

	if (rq->q->mq_ops->set_rq_budget_token)
		rq->q->mq_ops->set_rq_budget_token(rq, token);
}

static inline int blk_mq_get_rq_budget_token(struct request *rq)
{
	if (rq->q->mq_ops->get_rq_budget_token)
		return rq->q->mq_ops->get_rq_budget_token(rq);
	return -1;
}
static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		atomic_inc(&hctx->queue->nr_active_requests_shared_tags);
	else
		atomic_inc(&hctx->nr_active);
}

static inline void __blk_mq_sub_active_requests(struct blk_mq_hw_ctx *hctx,
		int val)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		atomic_sub(val, &hctx->queue->nr_active_requests_shared_tags);
	else
		atomic_sub(val, &hctx->nr_active);
}

static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
{
	__blk_mq_sub_active_requests(hctx, 1);
}

static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		return atomic_read(&hctx->queue->nr_active_requests_shared_tags);
	return atomic_read(&hctx->nr_active);
}
static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
					   struct request *rq)
{
	blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = BLK_MQ_NO_TAG;

	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
		__blk_mq_dec_active_requests(hctx);
	}
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
	if (rq->tag == BLK_MQ_NO_TAG || rq->internal_tag == BLK_MQ_NO_TAG)
		return;

	__blk_mq_put_driver_tag(rq->mq_hctx, rq);
}
bool __blk_mq_get_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq);

static inline bool blk_mq_get_driver_tag(struct request *rq)
{
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	if (rq->tag != BLK_MQ_NO_TAG &&
	    !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
		hctx->tags->rqs[rq->tag] = rq;
		return true;
	}

	return __blk_mq_get_driver_tag(hctx, rq);
}
static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
{
	int cpu;

	for_each_possible_cpu(cpu)
		qmap->mq_map[cpu] = 0;
}
/*
 * blk_mq_plug() - Get caller context plug
 * @bio : the bio being submitted by the caller context
 *
 * Plugging, by design, may delay the insertion of BIOs into the elevator in
 * order to increase BIO merging opportunities. This however can cause BIO
 * insertion order to change from the order in which submit_bio() is being
 * executed in the case of multiple contexts concurrently issuing BIOs to a
 * device, even if these contexts are synchronized to tightly control BIO
 * issuing order. While this is not a problem with regular block devices,
 * this ordering change can cause write BIO failures with zoned block devices
 * as these require sequential write patterns to zones. Prevent this from
 * happening by ignoring the plug state of a BIO issuing context if it is for
 * a zoned block device and the BIO to plug is a write operation.
 *
 * Return current->plug if the bio can be plugged and NULL otherwise
 */
static inline struct blk_plug *blk_mq_plug(struct bio *bio)
{
	/* Zoned block device write operation case: do not plug the BIO */
	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
	    bdev_op_is_zoned_write(bio->bi_bdev, bio_op(bio)))
		return NULL;

	/*
	 * For regular block devices or read operations, use the context plug
	 * which may be NULL if blk_start_plug() was not executed.
	 */
	return current->plug;
}
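/*
 * Usage sketch (illustrative only): the bio submission path fetches the plug
 * through this helper instead of reading current->plug directly, so zoned
 * writes automatically bypass plugging, roughly:
 *
 *	struct blk_plug *plug = blk_mq_plug(bio);
 *
 *	if (plug)
 *		append the new request to the plug for batched insertion;
 *	else
 *		insert or dispatch the request directly;
 */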
/* Free all requests on the list */
static inline void blk_mq_free_requests(struct list_head *list)
{
	while (!list_empty(list)) {
		struct request *rq = list_entry_rq(list->next);

		list_del_init(&rq->queuelist);
		blk_mq_free_request(rq);
	}
}
/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct sbitmap_queue *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
		return true;

	/*
	 * Don't try dividing an ant
	 */
	if (bt->sb.depth == 1)
		return true;

	if (blk_mq_is_shared_tags(hctx->flags)) {
		struct request_queue *q = hctx->queue;

		if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
			return true;
	} else {
		if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
			return true;
	}

	users = atomic_read(&hctx->tags->active_queues);
	if (!users)
		return true;

	/*
	 * Allow at least some tags
	 */
	depth = max((bt->sb.depth + users - 1) / users, 4U);
	return __blk_mq_active_requests(hctx) < depth;
}
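/*
 * Worked example (illustrative): with a shared bitmap depth of 128 and three
 * active queues, each queue may have up to
 * max((128 + 3 - 1) / 3, 4U) = max(43, 4) = 43 requests in flight; with 64
 * active queues the floor kicks in and each queue still gets
 * max((128 + 63) / 64, 4U) = max(2, 4) = 4 tags.
 */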
/* run the code block in @dispatch_ops with rcu/srcu read lock held */
#define __blk_mq_run_dispatch_ops(q, check_sleep, dispatch_ops)	\
do {									\
	if ((q)->tag_set->flags & BLK_MQ_F_BLOCKING) {			\
		int srcu_idx;						\
									\
		might_sleep_if(check_sleep);				\
		srcu_idx = srcu_read_lock((q)->tag_set->srcu);		\
		(dispatch_ops);						\
		srcu_read_unlock((q)->tag_set->srcu, srcu_idx);		\
	} else {							\
		rcu_read_lock();					\
		(dispatch_ops);						\
		rcu_read_unlock();					\
	}								\
} while (0)

#define blk_mq_run_dispatch_ops(q, dispatch_ops)			\
	__blk_mq_run_dispatch_ops(q, true, dispatch_ops)
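/*
 * Usage sketch (illustrative, based on how blk-mq.c wraps its dispatch
 * calls): the dispatch expression is passed in as @dispatch_ops and runs
 * under rcu_read_lock() or srcu_read_lock() depending on whether the tag set
 * is BLK_MQ_F_BLOCKING, e.g.:
 *
 *	blk_mq_run_dispatch_ops(hctx->queue,
 *			blk_mq_sched_dispatch_requests(hctx));
 */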