blk-mq: introduce a blk_mq_peek_cached_request helper
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Block multiqueue core code
4  *
5  * Copyright (C) 2013-2014 Jens Axboe
6  * Copyright (C) 2013-2014 Christoph Hellwig
7  */
8 #include <linux/kernel.h>
9 #include <linux/module.h>
10 #include <linux/backing-dev.h>
11 #include <linux/bio.h>
12 #include <linux/blkdev.h>
13 #include <linux/blk-integrity.h>
14 #include <linux/kmemleak.h>
15 #include <linux/mm.h>
16 #include <linux/init.h>
17 #include <linux/slab.h>
18 #include <linux/workqueue.h>
19 #include <linux/smp.h>
20 #include <linux/interrupt.h>
21 #include <linux/llist.h>
22 #include <linux/cpu.h>
23 #include <linux/cache.h>
24 #include <linux/sched/sysctl.h>
25 #include <linux/sched/topology.h>
26 #include <linux/sched/signal.h>
27 #include <linux/delay.h>
28 #include <linux/crash_dump.h>
29 #include <linux/prefetch.h>
30 #include <linux/blk-crypto.h>
31 #include <linux/part_stat.h>
32
33 #include <trace/events/block.h>
34
35 #include <linux/t10-pi.h>
36 #include "blk.h"
37 #include "blk-mq.h"
38 #include "blk-mq-debugfs.h"
39 #include "blk-pm.h"
40 #include "blk-stat.h"
41 #include "blk-mq-sched.h"
42 #include "blk-rq-qos.h"
43
44 static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
45 static DEFINE_PER_CPU(call_single_data_t, blk_cpu_csd);
46
47 static void blk_mq_insert_request(struct request *rq, blk_insert_t flags);
48 static void blk_mq_request_bypass_insert(struct request *rq,
49                 blk_insert_t flags);
50 static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
51                 struct list_head *list);
52 static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
53                          struct io_comp_batch *iob, unsigned int flags);
54
55 /*
56  * Check if any of the ctx, dispatch list or elevator
57  * have pending work in this hardware queue.
58  */
59 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
60 {
61         return !list_empty_careful(&hctx->dispatch) ||
62                 sbitmap_any_bit_set(&hctx->ctx_map) ||
63                         blk_mq_sched_has_work(hctx);
64 }
65
66 /*
67  * Mark this ctx as having pending work in this hardware queue
68  */
69 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
70                                      struct blk_mq_ctx *ctx)
71 {
72         const int bit = ctx->index_hw[hctx->type];
73
74         if (!sbitmap_test_bit(&hctx->ctx_map, bit))
75                 sbitmap_set_bit(&hctx->ctx_map, bit);
76 }
77
78 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
79                                       struct blk_mq_ctx *ctx)
80 {
81         const int bit = ctx->index_hw[hctx->type];
82
83         sbitmap_clear_bit(&hctx->ctx_map, bit);
84 }
85
86 struct mq_inflight {
87         struct block_device *part;
88         unsigned int inflight[2];
89 };
90
91 static bool blk_mq_check_inflight(struct request *rq, void *priv)
92 {
93         struct mq_inflight *mi = priv;
94
95         if (rq->part && blk_do_io_stat(rq) &&
96             (!mi->part->bd_partno || rq->part == mi->part) &&
97             blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
98                 mi->inflight[rq_data_dir(rq)]++;
99
100         return true;
101 }
102
103 unsigned int blk_mq_in_flight(struct request_queue *q,
104                 struct block_device *part)
105 {
106         struct mq_inflight mi = { .part = part };
107
108         blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
109
110         return mi.inflight[0] + mi.inflight[1];
111 }
112
113 void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
114                 unsigned int inflight[2])
115 {
116         struct mq_inflight mi = { .part = part };
117
118         blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
119         inflight[0] = mi.inflight[0];
120         inflight[1] = mi.inflight[1];
121 }
122
123 void blk_freeze_queue_start(struct request_queue *q)
124 {
125         mutex_lock(&q->mq_freeze_lock);
126         if (++q->mq_freeze_depth == 1) {
127                 percpu_ref_kill(&q->q_usage_counter);
128                 mutex_unlock(&q->mq_freeze_lock);
129                 if (queue_is_mq(q))
130                         blk_mq_run_hw_queues(q, false);
131         } else {
132                 mutex_unlock(&q->mq_freeze_lock);
133         }
134 }
135 EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
136
137 void blk_mq_freeze_queue_wait(struct request_queue *q)
138 {
139         wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
140 }
141 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);
142
143 int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
144                                      unsigned long timeout)
145 {
146         return wait_event_timeout(q->mq_freeze_wq,
147                                         percpu_ref_is_zero(&q->q_usage_counter),
148                                         timeout);
149 }
150 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);
151
152 /*
153  * Guarantee no request is in use, so we can change any data structure of
154  * the queue afterward.
155  */
156 void blk_freeze_queue(struct request_queue *q)
157 {
158         /*
159          * In the !blk_mq case we are only calling this to kill the
160          * q_usage_counter, otherwise this increases the freeze depth
161          * and waits for it to return to zero.  For this reason there is
162          * no blk_unfreeze_queue(), and blk_freeze_queue() is not
163          * exported to drivers as the only user for unfreeze is blk_mq.
164          */
165         blk_freeze_queue_start(q);
166         blk_mq_freeze_queue_wait(q);
167 }
168
169 void blk_mq_freeze_queue(struct request_queue *q)
170 {
171         /*
172          * ...just an alias to keep freeze and unfreeze actions balanced
173          * in the blk_mq_* namespace
174          */
175         blk_freeze_queue(q);
176 }
177 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
178
179 void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
180 {
181         mutex_lock(&q->mq_freeze_lock);
182         if (force_atomic)
183                 q->q_usage_counter.data->force_atomic = true;
184         q->mq_freeze_depth--;
185         WARN_ON_ONCE(q->mq_freeze_depth < 0);
186         if (!q->mq_freeze_depth) {
187                 percpu_ref_resurrect(&q->q_usage_counter);
188                 wake_up_all(&q->mq_freeze_wq);
189         }
190         mutex_unlock(&q->mq_freeze_lock);
191 }
192
193 void blk_mq_unfreeze_queue(struct request_queue *q)
194 {
195         __blk_mq_unfreeze_queue(q, false);
196 }
197 EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
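/*
 * Illustrative sketch (not part of this file): drivers typically bracket
 * updates to queue limits or other shared state with a freeze/unfreeze pair,
 * so that no request is in flight while the change is made.  The
 * update_queue_limits() helper below is hypothetical.
 *
 *	blk_mq_freeze_queue(q);
 *	update_queue_limits(q);		(no requests are in flight here)
 *	blk_mq_unfreeze_queue(q);
 */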
198
199 /*
200  * FIXME: replace the scsi_internal_device_*block_nowait() calls in the
201  * mpt3sas driver such that this function can be removed.
202  */
203 void blk_mq_quiesce_queue_nowait(struct request_queue *q)
204 {
205         unsigned long flags;
206
207         spin_lock_irqsave(&q->queue_lock, flags);
208         if (!q->quiesce_depth++)
209                 blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q);
210         spin_unlock_irqrestore(&q->queue_lock, flags);
211 }
212 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);
213
214 /**
215  * blk_mq_wait_quiesce_done() - wait until in-progress quiesce is done
216  * @set: tag_set to wait on
217  *
218  * Note: it is the driver's responsibility to make sure that quiesce has
219  * been started on one or more of the request_queues of the tag_set.  This
220  * function only waits for the quiesce on those request_queues that had
221  * the quiesce flag set using blk_mq_quiesce_queue_nowait.
222  */
223 void blk_mq_wait_quiesce_done(struct blk_mq_tag_set *set)
224 {
225         if (set->flags & BLK_MQ_F_BLOCKING)
226                 synchronize_srcu(set->srcu);
227         else
228                 synchronize_rcu();
229 }
230 EXPORT_SYMBOL_GPL(blk_mq_wait_quiesce_done);
231
232 /**
233  * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
234  * @q: request queue.
235  *
236  * Note: this function does not prevent the struct request end_io()
237  * callback from being invoked. Once this function returns, it is
238  * guaranteed that no dispatch can happen until the queue is unquiesced
239  * via blk_mq_unquiesce_queue().
240  */
241 void blk_mq_quiesce_queue(struct request_queue *q)
242 {
243         blk_mq_quiesce_queue_nowait(q);
244         /* nothing to wait for non-mq queues */
245         if (queue_is_mq(q))
246                 blk_mq_wait_quiesce_done(q->tag_set);
247 }
248 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
249
250 /*
251  * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue()
252  * @q: request queue.
253  *
254  * This function restores the queue to the state it was in before it was
255  * quiesced by blk_mq_quiesce_queue().
256  */
257 void blk_mq_unquiesce_queue(struct request_queue *q)
258 {
259         unsigned long flags;
260         bool run_queue = false;
261
262         spin_lock_irqsave(&q->queue_lock, flags);
263         if (WARN_ON_ONCE(q->quiesce_depth <= 0)) {
264                 ;
265         } else if (!--q->quiesce_depth) {
266                 blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
267                 run_queue = true;
268         }
269         spin_unlock_irqrestore(&q->queue_lock, flags);
270
271         /* dispatch requests which are inserted during quiescing */
272         if (run_queue)
273                 blk_mq_run_hw_queues(q, true);
274 }
275 EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);
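/*
 * Illustrative sketch (not part of this file): quiescing is used when a
 * driver must stop new ->queue_rq() invocations, e.g. around a controller
 * reset, without freezing the queue or waiting for outstanding requests to
 * finish.  reset_controller() is a hypothetical driver helper.
 *
 *	blk_mq_quiesce_queue(q);
 *	reset_controller(dev);		(no new dispatches can race with this)
 *	blk_mq_unquiesce_queue(q);	(resume dispatching pending requests)
 */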
276
277 void blk_mq_quiesce_tagset(struct blk_mq_tag_set *set)
278 {
279         struct request_queue *q;
280
281         mutex_lock(&set->tag_list_lock);
282         list_for_each_entry(q, &set->tag_list, tag_set_list) {
283                 if (!blk_queue_skip_tagset_quiesce(q))
284                         blk_mq_quiesce_queue_nowait(q);
285         }
286         blk_mq_wait_quiesce_done(set);
287         mutex_unlock(&set->tag_list_lock);
288 }
289 EXPORT_SYMBOL_GPL(blk_mq_quiesce_tagset);
290
291 void blk_mq_unquiesce_tagset(struct blk_mq_tag_set *set)
292 {
293         struct request_queue *q;
294
295         mutex_lock(&set->tag_list_lock);
296         list_for_each_entry(q, &set->tag_list, tag_set_list) {
297                 if (!blk_queue_skip_tagset_quiesce(q))
298                         blk_mq_unquiesce_queue(q);
299         }
300         mutex_unlock(&set->tag_list_lock);
301 }
302 EXPORT_SYMBOL_GPL(blk_mq_unquiesce_tagset);
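/*
 * Illustrative sketch (not part of this file): drivers whose request queues
 * all share one tag_set (e.g. nvme with many namespaces) can quiesce them in
 * one call rather than iterating over the queues themselves.
 *
 *	blk_mq_quiesce_tagset(set);	(stops dispatch on every queue of the set)
 *	... tear down or reconfigure the controller ...
 *	blk_mq_unquiesce_tagset(set);
 */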
303
304 void blk_mq_wake_waiters(struct request_queue *q)
305 {
306         struct blk_mq_hw_ctx *hctx;
307         unsigned long i;
308
309         queue_for_each_hw_ctx(q, hctx, i)
310                 if (blk_mq_hw_queue_mapped(hctx))
311                         blk_mq_tag_wakeup_all(hctx->tags, true);
312 }
313
314 void blk_rq_init(struct request_queue *q, struct request *rq)
315 {
316         memset(rq, 0, sizeof(*rq));
317
318         INIT_LIST_HEAD(&rq->queuelist);
319         rq->q = q;
320         rq->__sector = (sector_t) -1;
321         INIT_HLIST_NODE(&rq->hash);
322         RB_CLEAR_NODE(&rq->rb_node);
323         rq->tag = BLK_MQ_NO_TAG;
324         rq->internal_tag = BLK_MQ_NO_TAG;
325         rq->start_time_ns = ktime_get_ns();
326         rq->part = NULL;
327         blk_crypto_rq_set_defaults(rq);
328 }
329 EXPORT_SYMBOL(blk_rq_init);
330
331 /* Set start and alloc time when the allocated request is actually used */
332 static inline void blk_mq_rq_time_init(struct request *rq, u64 alloc_time_ns)
333 {
334         if (blk_mq_need_time_stamp(rq))
335                 rq->start_time_ns = ktime_get_ns();
336         else
337                 rq->start_time_ns = 0;
338
339 #ifdef CONFIG_BLK_RQ_ALLOC_TIME
340         if (blk_queue_rq_alloc_time(rq->q))
341                 rq->alloc_time_ns = alloc_time_ns ?: rq->start_time_ns;
342         else
343                 rq->alloc_time_ns = 0;
344 #endif
345 }
346
347 static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
348                 struct blk_mq_tags *tags, unsigned int tag)
349 {
350         struct blk_mq_ctx *ctx = data->ctx;
351         struct blk_mq_hw_ctx *hctx = data->hctx;
352         struct request_queue *q = data->q;
353         struct request *rq = tags->static_rqs[tag];
354
355         rq->q = q;
356         rq->mq_ctx = ctx;
357         rq->mq_hctx = hctx;
358         rq->cmd_flags = data->cmd_flags;
359
360         if (data->flags & BLK_MQ_REQ_PM)
361                 data->rq_flags |= RQF_PM;
362         if (blk_queue_io_stat(q))
363                 data->rq_flags |= RQF_IO_STAT;
364         rq->rq_flags = data->rq_flags;
365
366         if (data->rq_flags & RQF_SCHED_TAGS) {
367                 rq->tag = BLK_MQ_NO_TAG;
368                 rq->internal_tag = tag;
369         } else {
370                 rq->tag = tag;
371                 rq->internal_tag = BLK_MQ_NO_TAG;
372         }
373         rq->timeout = 0;
374
375         rq->part = NULL;
376         rq->io_start_time_ns = 0;
377         rq->stats_sectors = 0;
378         rq->nr_phys_segments = 0;
379 #if defined(CONFIG_BLK_DEV_INTEGRITY)
380         rq->nr_integrity_segments = 0;
381 #endif
382         rq->end_io = NULL;
383         rq->end_io_data = NULL;
384
385         blk_crypto_rq_set_defaults(rq);
386         INIT_LIST_HEAD(&rq->queuelist);
387         /* tag was already set */
388         WRITE_ONCE(rq->deadline, 0);
389         req_ref_set(rq, 1);
390
391         if (rq->rq_flags & RQF_USE_SCHED) {
392                 struct elevator_queue *e = data->q->elevator;
393
394                 INIT_HLIST_NODE(&rq->hash);
395                 RB_CLEAR_NODE(&rq->rb_node);
396
397                 if (e->type->ops.prepare_request)
398                         e->type->ops.prepare_request(rq);
399         }
400
401         return rq;
402 }
403
404 static inline struct request *
405 __blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data)
406 {
407         unsigned int tag, tag_offset;
408         struct blk_mq_tags *tags;
409         struct request *rq;
410         unsigned long tag_mask;
411         int i, nr = 0;
412
413         tag_mask = blk_mq_get_tags(data, data->nr_tags, &tag_offset);
414         if (unlikely(!tag_mask))
415                 return NULL;
416
417         tags = blk_mq_tags_from_data(data);
418         for (i = 0; tag_mask; i++) {
419                 if (!(tag_mask & (1UL << i)))
420                         continue;
421                 tag = tag_offset + i;
422                 prefetch(tags->static_rqs[tag]);
423                 tag_mask &= ~(1UL << i);
424                 rq = blk_mq_rq_ctx_init(data, tags, tag);
425                 rq_list_add(data->cached_rq, rq);
426                 nr++;
427         }
428         if (!(data->rq_flags & RQF_SCHED_TAGS))
429                 blk_mq_add_active_requests(data->hctx, nr);
430         /* caller already holds a reference, add for remainder */
431         percpu_ref_get_many(&data->q->q_usage_counter, nr - 1);
432         data->nr_tags -= nr;
433
434         return rq_list_pop(data->cached_rq);
435 }
436
437 static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
438 {
439         struct request_queue *q = data->q;
440         u64 alloc_time_ns = 0;
441         struct request *rq;
442         unsigned int tag;
443
444         /* alloc_time includes depth and tag waits */
445         if (blk_queue_rq_alloc_time(q))
446                 alloc_time_ns = ktime_get_ns();
447
448         if (data->cmd_flags & REQ_NOWAIT)
449                 data->flags |= BLK_MQ_REQ_NOWAIT;
450
451         if (q->elevator) {
452                 /*
453                  * All requests use scheduler tags when an I/O scheduler is
454                  * enabled for the queue.
455                  */
456                 data->rq_flags |= RQF_SCHED_TAGS;
457
458                 /*
459                  * Flush/passthrough requests are special and go directly to the
460                  * dispatch list.
461                  */
462                 if ((data->cmd_flags & REQ_OP_MASK) != REQ_OP_FLUSH &&
463                     !blk_op_is_passthrough(data->cmd_flags)) {
464                         struct elevator_mq_ops *ops = &q->elevator->type->ops;
465
466                         WARN_ON_ONCE(data->flags & BLK_MQ_REQ_RESERVED);
467
468                         data->rq_flags |= RQF_USE_SCHED;
469                         if (ops->limit_depth)
470                                 ops->limit_depth(data->cmd_flags, data);
471                 }
472         }
473
474 retry:
475         data->ctx = blk_mq_get_ctx(q);
476         data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
477         if (!(data->rq_flags & RQF_SCHED_TAGS))
478                 blk_mq_tag_busy(data->hctx);
479
480         if (data->flags & BLK_MQ_REQ_RESERVED)
481                 data->rq_flags |= RQF_RESV;
482
483         /*
484          * Try batched alloc if we want more than 1 tag.
485          */
486         if (data->nr_tags > 1) {
487                 rq = __blk_mq_alloc_requests_batch(data);
488                 if (rq) {
489                         blk_mq_rq_time_init(rq, alloc_time_ns);
490                         return rq;
491                 }
492                 data->nr_tags = 1;
493         }
494
495         /*
496          * Waiting allocations only fail because of an inactive hctx.  In that
497          * case just retry the hctx assignment and tag allocation as CPU hotplug
498          * should have migrated us to an online CPU by now.
499          */
500         tag = blk_mq_get_tag(data);
501         if (tag == BLK_MQ_NO_TAG) {
502                 if (data->flags & BLK_MQ_REQ_NOWAIT)
503                         return NULL;
504                 /*
505                  * Give up the CPU and sleep for a random short time to
506                  * ensure that threads using a realtime scheduling class
507                  * are migrated off the CPU, and thus off the hctx that
508                  * is going away.
509                  */
510                 msleep(3);
511                 goto retry;
512         }
513
514         if (!(data->rq_flags & RQF_SCHED_TAGS))
515                 blk_mq_inc_active_requests(data->hctx);
516         rq = blk_mq_rq_ctx_init(data, blk_mq_tags_from_data(data), tag);
517         blk_mq_rq_time_init(rq, alloc_time_ns);
518         return rq;
519 }
520
521 static struct request *blk_mq_rq_cache_fill(struct request_queue *q,
522                                             struct blk_plug *plug,
523                                             blk_opf_t opf,
524                                             blk_mq_req_flags_t flags)
525 {
526         struct blk_mq_alloc_data data = {
527                 .q              = q,
528                 .flags          = flags,
529                 .cmd_flags      = opf,
530                 .nr_tags        = plug->nr_ios,
531                 .cached_rq      = &plug->cached_rq,
532         };
533         struct request *rq;
534
535         if (blk_queue_enter(q, flags))
536                 return NULL;
537
538         plug->nr_ios = 1;
539
540         rq = __blk_mq_alloc_requests(&data);
541         if (unlikely(!rq))
542                 blk_queue_exit(q);
543         return rq;
544 }
545
546 static struct request *blk_mq_alloc_cached_request(struct request_queue *q,
547                                                    blk_opf_t opf,
548                                                    blk_mq_req_flags_t flags)
549 {
550         struct blk_plug *plug = current->plug;
551         struct request *rq;
552
553         if (!plug)
554                 return NULL;
555
556         if (rq_list_empty(plug->cached_rq)) {
557                 if (plug->nr_ios == 1)
558                         return NULL;
559                 rq = blk_mq_rq_cache_fill(q, plug, opf, flags);
560                 if (!rq)
561                         return NULL;
562         } else {
563                 rq = rq_list_peek(&plug->cached_rq);
564                 if (!rq || rq->q != q)
565                         return NULL;
566
567                 if (blk_mq_get_hctx_type(opf) != rq->mq_hctx->type)
568                         return NULL;
569                 if (op_is_flush(rq->cmd_flags) != op_is_flush(opf))
570                         return NULL;
571
572                 plug->cached_rq = rq_list_next(rq);
573                 blk_mq_rq_time_init(rq, 0);
574         }
575
576         rq->cmd_flags = opf;
577         INIT_LIST_HEAD(&rq->queuelist);
578         return rq;
579 }
580
581 struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
582                 blk_mq_req_flags_t flags)
583 {
584         struct request *rq;
585
586         rq = blk_mq_alloc_cached_request(q, opf, flags);
587         if (!rq) {
588                 struct blk_mq_alloc_data data = {
589                         .q              = q,
590                         .flags          = flags,
591                         .cmd_flags      = opf,
592                         .nr_tags        = 1,
593                 };
594                 int ret;
595
596                 ret = blk_queue_enter(q, flags);
597                 if (ret)
598                         return ERR_PTR(ret);
599
600                 rq = __blk_mq_alloc_requests(&data);
601                 if (!rq)
602                         goto out_queue_exit;
603         }
604         rq->__data_len = 0;
605         rq->__sector = (sector_t) -1;
606         rq->bio = rq->biotail = NULL;
607         return rq;
608 out_queue_exit:
609         blk_queue_exit(q);
610         return ERR_PTR(-EWOULDBLOCK);
611 }
612 EXPORT_SYMBOL(blk_mq_alloc_request);
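/*
 * Illustrative sketch (not part of this file): allocating a passthrough
 * request and handling the ERR_PTR-style return.  REQ_OP_DRV_IN and the
 * 30 second timeout are arbitrary example values.
 *
 *	struct request *rq;
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, BLK_MQ_REQ_NOWAIT);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);	(e.g. -EWOULDBLOCK when no tag is free)
 *	rq->timeout = 30 * HZ;
 *	... map data, then execute via blk_execute_rq() or blk_execute_rq_nowait()
 */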
613
614 struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
615         blk_opf_t opf, blk_mq_req_flags_t flags, unsigned int hctx_idx)
616 {
617         struct blk_mq_alloc_data data = {
618                 .q              = q,
619                 .flags          = flags,
620                 .cmd_flags      = opf,
621                 .nr_tags        = 1,
622         };
623         u64 alloc_time_ns = 0;
624         struct request *rq;
625         unsigned int cpu;
626         unsigned int tag;
627         int ret;
628
629         /* alloc_time includes depth and tag waits */
630         if (blk_queue_rq_alloc_time(q))
631                 alloc_time_ns = ktime_get_ns();
632
633         /*
634          * If the tag allocator sleeps we could get an allocation for a
635          * different hardware context.  There is no need to complicate the
636          * low level allocator for the rare use case of a command tied to
637          * a specific queue.
638          */
639         if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)) ||
640             WARN_ON_ONCE(!(flags & BLK_MQ_REQ_RESERVED)))
641                 return ERR_PTR(-EINVAL);
642
643         if (hctx_idx >= q->nr_hw_queues)
644                 return ERR_PTR(-EIO);
645
646         ret = blk_queue_enter(q, flags);
647         if (ret)
648                 return ERR_PTR(ret);
649
650         /*
651          * Check if the hardware context is actually mapped to anything.
652          * If not, tell the caller that it should skip this queue.
653          */
654         ret = -EXDEV;
655         data.hctx = xa_load(&q->hctx_table, hctx_idx);
656         if (!blk_mq_hw_queue_mapped(data.hctx))
657                 goto out_queue_exit;
658         cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
659         if (cpu >= nr_cpu_ids)
660                 goto out_queue_exit;
661         data.ctx = __blk_mq_get_ctx(q, cpu);
662
663         if (q->elevator)
664                 data.rq_flags |= RQF_SCHED_TAGS;
665         else
666                 blk_mq_tag_busy(data.hctx);
667
668         if (flags & BLK_MQ_REQ_RESERVED)
669                 data.rq_flags |= RQF_RESV;
670
671         ret = -EWOULDBLOCK;
672         tag = blk_mq_get_tag(&data);
673         if (tag == BLK_MQ_NO_TAG)
674                 goto out_queue_exit;
675         if (!(data.rq_flags & RQF_SCHED_TAGS))
676                 blk_mq_inc_active_requests(data.hctx);
677         rq = blk_mq_rq_ctx_init(&data, blk_mq_tags_from_data(&data), tag);
678         blk_mq_rq_time_init(rq, alloc_time_ns);
679         rq->__data_len = 0;
680         rq->__sector = (sector_t) -1;
681         rq->bio = rq->biotail = NULL;
682         return rq;
683
684 out_queue_exit:
685         blk_queue_exit(q);
686         return ERR_PTR(ret);
687 }
688 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
689
690 static void blk_mq_finish_request(struct request *rq)
691 {
692         struct request_queue *q = rq->q;
693
694         if (rq->rq_flags & RQF_USE_SCHED) {
695                 q->elevator->type->ops.finish_request(rq);
696                 /*
697                  * A postflush request may need to be completed
698                  * twice, so clear this flag to avoid calling
699                  * finish_request() twice on the rq.
700                  */
701                 rq->rq_flags &= ~RQF_USE_SCHED;
702         }
703 }
704
705 static void __blk_mq_free_request(struct request *rq)
706 {
707         struct request_queue *q = rq->q;
708         struct blk_mq_ctx *ctx = rq->mq_ctx;
709         struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
710         const int sched_tag = rq->internal_tag;
711
712         blk_crypto_free_request(rq);
713         blk_pm_mark_last_busy(rq);
714         rq->mq_hctx = NULL;
715
716         if (rq->tag != BLK_MQ_NO_TAG) {
717                 blk_mq_dec_active_requests(hctx);
718                 blk_mq_put_tag(hctx->tags, ctx, rq->tag);
719         }
720         if (sched_tag != BLK_MQ_NO_TAG)
721                 blk_mq_put_tag(hctx->sched_tags, ctx, sched_tag);
722         blk_mq_sched_restart(hctx);
723         blk_queue_exit(q);
724 }
725
726 void blk_mq_free_request(struct request *rq)
727 {
728         struct request_queue *q = rq->q;
729
730         blk_mq_finish_request(rq);
731
732         if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
733                 laptop_io_completion(q->disk->bdi);
734
735         rq_qos_done(q, rq);
736
737         WRITE_ONCE(rq->state, MQ_RQ_IDLE);
738         if (req_ref_put_and_test(rq))
739                 __blk_mq_free_request(rq);
740 }
741 EXPORT_SYMBOL_GPL(blk_mq_free_request);
742
743 void blk_mq_free_plug_rqs(struct blk_plug *plug)
744 {
745         struct request *rq;
746
747         while ((rq = rq_list_pop(&plug->cached_rq)) != NULL)
748                 blk_mq_free_request(rq);
749 }
750
751 void blk_dump_rq_flags(struct request *rq, char *msg)
752 {
753         printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
754                 rq->q->disk ? rq->q->disk->disk_name : "?",
755                 (__force unsigned long long) rq->cmd_flags);
756
757         printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
758                (unsigned long long)blk_rq_pos(rq),
759                blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
760         printk(KERN_INFO "  bio %p, biotail %p, len %u\n",
761                rq->bio, rq->biotail, blk_rq_bytes(rq));
762 }
763 EXPORT_SYMBOL(blk_dump_rq_flags);
764
765 static void req_bio_endio(struct request *rq, struct bio *bio,
766                           unsigned int nbytes, blk_status_t error)
767 {
768         if (unlikely(error)) {
769                 bio->bi_status = error;
770         } else if (req_op(rq) == REQ_OP_ZONE_APPEND) {
771                 /*
772                  * Partial zone append completions cannot be supported as the
773                  * BIO fragments may end up not being written sequentially.
774                  * In that case, force the completed nbytes to be equal to
775                  * the BIO size so that bio_advance() sets the BIO remaining
776                  * size to 0 and we end up calling bio_endio() before returning.
777                  */
778                 if (bio->bi_iter.bi_size != nbytes) {
779                         bio->bi_status = BLK_STS_IOERR;
780                         nbytes = bio->bi_iter.bi_size;
781                 } else {
782                         bio->bi_iter.bi_sector = rq->__sector;
783                 }
784         }
785
786         bio_advance(bio, nbytes);
787
788         if (unlikely(rq->rq_flags & RQF_QUIET))
789                 bio_set_flag(bio, BIO_QUIET);
790         /* don't actually finish bio if it's part of flush sequence */
791         if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
792                 bio_endio(bio);
793 }
794
795 static void blk_account_io_completion(struct request *req, unsigned int bytes)
796 {
797         if (req->part && blk_do_io_stat(req)) {
798                 const int sgrp = op_stat_group(req_op(req));
799
800                 part_stat_lock();
801                 part_stat_add(req->part, sectors[sgrp], bytes >> 9);
802                 part_stat_unlock();
803         }
804 }
805
806 static void blk_print_req_error(struct request *req, blk_status_t status)
807 {
808         printk_ratelimited(KERN_ERR
809                 "%s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x "
810                 "phys_seg %u prio class %u\n",
811                 blk_status_to_str(status),
812                 req->q->disk ? req->q->disk->disk_name : "?",
813                 blk_rq_pos(req), (__force u32)req_op(req),
814                 blk_op_str(req_op(req)),
815                 (__force u32)(req->cmd_flags & ~REQ_OP_MASK),
816                 req->nr_phys_segments,
817                 IOPRIO_PRIO_CLASS(req->ioprio));
818 }
819
820 /*
821  * Fully end IO on a request. Does not support partial completions, or
822  * errors.
823  */
824 static void blk_complete_request(struct request *req)
825 {
826         const bool is_flush = (req->rq_flags & RQF_FLUSH_SEQ) != 0;
827         int total_bytes = blk_rq_bytes(req);
828         struct bio *bio = req->bio;
829
830         trace_block_rq_complete(req, BLK_STS_OK, total_bytes);
831
832         if (!bio)
833                 return;
834
835 #ifdef CONFIG_BLK_DEV_INTEGRITY
836         if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ)
837                 req->q->integrity.profile->complete_fn(req, total_bytes);
838 #endif
839
840         /*
841          * Upper layers may call blk_crypto_evict_key() anytime after the last
842          * bio_endio().  Therefore, the keyslot must be released before that.
843          */
844         blk_crypto_rq_put_keyslot(req);
845
846         blk_account_io_completion(req, total_bytes);
847
848         do {
849                 struct bio *next = bio->bi_next;
850
851                 /* Completion has already been traced */
852                 bio_clear_flag(bio, BIO_TRACE_COMPLETION);
853
854                 if (req_op(req) == REQ_OP_ZONE_APPEND)
855                         bio->bi_iter.bi_sector = req->__sector;
856
857                 if (!is_flush)
858                         bio_endio(bio);
859                 bio = next;
860         } while (bio);
861
862         /*
863          * Reset counters so that the request stacking driver
864          * can find how many bytes remain in the request
865          * later.
866          */
867         if (!req->end_io) {
868                 req->bio = NULL;
869                 req->__data_len = 0;
870         }
871 }
872
873 /**
874  * blk_update_request - Complete multiple bytes without completing the request
875  * @req:      the request being processed
876  * @error:    block status code
877  * @nr_bytes: number of bytes to complete for @req
878  *
879  * Description:
880  *     Ends I/O on a number of bytes attached to @req, but doesn't complete
881  *     the request structure even if @req doesn't have leftover.
882  *     If @req has leftover, sets it up for the next range of segments.
883  *
884  *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees
885  *     %false return from this function.
886  *
887  * Note:
888  *      The RQF_SPECIAL_PAYLOAD flag is ignored on purpose in this function
889  *      except in the consistency check at the end of this function.
890  *
891  * Return:
892  *     %false - this request doesn't have any more data
893  *     %true  - this request has more data
894  **/
895 bool blk_update_request(struct request *req, blk_status_t error,
896                 unsigned int nr_bytes)
897 {
898         int total_bytes;
899
900         trace_block_rq_complete(req, error, nr_bytes);
901
902         if (!req->bio)
903                 return false;
904
905 #ifdef CONFIG_BLK_DEV_INTEGRITY
906         if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
907             error == BLK_STS_OK)
908                 req->q->integrity.profile->complete_fn(req, nr_bytes);
909 #endif
910
911         /*
912          * Upper layers may call blk_crypto_evict_key() anytime after the last
913          * bio_endio().  Therefore, the keyslot must be released before that.
914          */
915         if (blk_crypto_rq_has_keyslot(req) && nr_bytes >= blk_rq_bytes(req))
916                 __blk_crypto_rq_put_keyslot(req);
917
918         if (unlikely(error && !blk_rq_is_passthrough(req) &&
919                      !(req->rq_flags & RQF_QUIET)) &&
920                      !test_bit(GD_DEAD, &req->q->disk->state)) {
921                 blk_print_req_error(req, error);
922                 trace_block_rq_error(req, error, nr_bytes);
923         }
924
925         blk_account_io_completion(req, nr_bytes);
926
927         total_bytes = 0;
928         while (req->bio) {
929                 struct bio *bio = req->bio;
930                 unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
931
932                 if (bio_bytes == bio->bi_iter.bi_size)
933                         req->bio = bio->bi_next;
934
935                 /* Completion has already been traced */
936                 bio_clear_flag(bio, BIO_TRACE_COMPLETION);
937                 req_bio_endio(req, bio, bio_bytes, error);
938
939                 total_bytes += bio_bytes;
940                 nr_bytes -= bio_bytes;
941
942                 if (!nr_bytes)
943                         break;
944         }
945
946         /*
947          * completely done
948          */
949         if (!req->bio) {
950                 /*
951                  * Reset counters so that the request stacking driver
952                  * can find how many bytes remain in the request
953                  * later.
954                  */
955                 req->__data_len = 0;
956                 return false;
957         }
958
959         req->__data_len -= total_bytes;
960
961         /* update sector only for requests with clear definition of sector */
962         if (!blk_rq_is_passthrough(req))
963                 req->__sector += total_bytes >> 9;
964
965         /* mixed attributes always follow the first bio */
966         if (req->rq_flags & RQF_MIXED_MERGE) {
967                 req->cmd_flags &= ~REQ_FAILFAST_MASK;
968                 req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
969         }
970
971         if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) {
972                 /*
973                  * If total number of sectors is less than the first segment
974                  * size, something has gone terribly wrong.
975                  */
976                 if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
977                         blk_dump_rq_flags(req, "request botched");
978                         req->__data_len = blk_rq_cur_bytes(req);
979                 }
980
981                 /* recalculate the number of segments */
982                 req->nr_phys_segments = blk_recalc_rq_segments(req);
983         }
984
985         return true;
986 }
987 EXPORT_SYMBOL_GPL(blk_update_request);
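/*
 * Illustrative sketch (not part of this file): how a SCSI-like driver uses
 * blk_update_request() for partial completions.  "done" stands for the
 * number of bytes the hardware actually finished and is hypothetical.
 *
 *	if (!blk_update_request(rq, BLK_STS_OK, done)) {
 *		(everything completed, finish the request)
 *		__blk_mq_end_request(rq, BLK_STS_OK);
 *	} else {
 *		(bytes remain: rq now describes the unfinished tail,
 *		 so re-prepare and resubmit it)
 *		blk_mq_requeue_request(rq, true);
 *	}
 */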
988
989 static inline void blk_account_io_done(struct request *req, u64 now)
990 {
991         trace_block_io_done(req);
992
993         /*
994          * Account IO completion.  flush_rq isn't accounted as normal
995          * IO on either queueing or completion.  Accounting the
996          * containing request is enough.
997          */
998         if (blk_do_io_stat(req) && req->part &&
999             !(req->rq_flags & RQF_FLUSH_SEQ)) {
1000                 const int sgrp = op_stat_group(req_op(req));
1001
1002                 part_stat_lock();
1003                 update_io_ticks(req->part, jiffies, true);
1004                 part_stat_inc(req->part, ios[sgrp]);
1005                 part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns);
1006                 part_stat_unlock();
1007         }
1008 }
1009
1010 static inline void blk_account_io_start(struct request *req)
1011 {
1012         trace_block_io_start(req);
1013
1014         if (blk_do_io_stat(req)) {
1015                 /*
1016                  * All non-passthrough requests are created from a bio with one
1017                  * exception: when a flush command that is part of a flush sequence
1018                  * generated by the state machine in blk-flush.c is cloned onto the
1019                  * lower device by dm-multipath we can get here without a bio.
1020                  */
1021                 if (req->bio)
1022                         req->part = req->bio->bi_bdev;
1023                 else
1024                         req->part = req->q->disk->part0;
1025
1026                 part_stat_lock();
1027                 update_io_ticks(req->part, jiffies, false);
1028                 part_stat_unlock();
1029         }
1030 }
1031
1032 static inline void __blk_mq_end_request_acct(struct request *rq, u64 now)
1033 {
1034         if (rq->rq_flags & RQF_STATS)
1035                 blk_stat_add(rq, now);
1036
1037         blk_mq_sched_completed_request(rq, now);
1038         blk_account_io_done(rq, now);
1039 }
1040
1041 inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
1042 {
1043         if (blk_mq_need_time_stamp(rq))
1044                 __blk_mq_end_request_acct(rq, ktime_get_ns());
1045
1046         blk_mq_finish_request(rq);
1047
1048         if (rq->end_io) {
1049                 rq_qos_done(rq->q, rq);
1050                 if (rq->end_io(rq, error) == RQ_END_IO_FREE)
1051                         blk_mq_free_request(rq);
1052         } else {
1053                 blk_mq_free_request(rq);
1054         }
1055 }
1056 EXPORT_SYMBOL(__blk_mq_end_request);
1057
1058 void blk_mq_end_request(struct request *rq, blk_status_t error)
1059 {
1060         if (blk_update_request(rq, error, blk_rq_bytes(rq)))
1061                 BUG();
1062         __blk_mq_end_request(rq, error);
1063 }
1064 EXPORT_SYMBOL(blk_mq_end_request);
1065
1066 #define TAG_COMP_BATCH          32
1067
1068 static inline void blk_mq_flush_tag_batch(struct blk_mq_hw_ctx *hctx,
1069                                           int *tag_array, int nr_tags)
1070 {
1071         struct request_queue *q = hctx->queue;
1072
1073         blk_mq_sub_active_requests(hctx, nr_tags);
1074
1075         blk_mq_put_tags(hctx->tags, tag_array, nr_tags);
1076         percpu_ref_put_many(&q->q_usage_counter, nr_tags);
1077 }
1078
1079 void blk_mq_end_request_batch(struct io_comp_batch *iob)
1080 {
1081         int tags[TAG_COMP_BATCH], nr_tags = 0;
1082         struct blk_mq_hw_ctx *cur_hctx = NULL;
1083         struct request *rq;
1084         u64 now = 0;
1085
1086         if (iob->need_ts)
1087                 now = ktime_get_ns();
1088
1089         while ((rq = rq_list_pop(&iob->req_list)) != NULL) {
1090                 prefetch(rq->bio);
1091                 prefetch(rq->rq_next);
1092
1093                 blk_complete_request(rq);
1094                 if (iob->need_ts)
1095                         __blk_mq_end_request_acct(rq, now);
1096
1097                 blk_mq_finish_request(rq);
1098
1099                 rq_qos_done(rq->q, rq);
1100
1101                 /*
1102                  * If end_io handler returns NONE, then it still has
1103                  * ownership of the request.
1104                  */
1105                 if (rq->end_io && rq->end_io(rq, 0) == RQ_END_IO_NONE)
1106                         continue;
1107
1108                 WRITE_ONCE(rq->state, MQ_RQ_IDLE);
1109                 if (!req_ref_put_and_test(rq))
1110                         continue;
1111
1112                 blk_crypto_free_request(rq);
1113                 blk_pm_mark_last_busy(rq);
1114
1115                 if (nr_tags == TAG_COMP_BATCH || cur_hctx != rq->mq_hctx) {
1116                         if (cur_hctx)
1117                                 blk_mq_flush_tag_batch(cur_hctx, tags, nr_tags);
1118                         nr_tags = 0;
1119                         cur_hctx = rq->mq_hctx;
1120                 }
1121                 tags[nr_tags++] = rq->tag;
1122         }
1123
1124         if (nr_tags)
1125                 blk_mq_flush_tag_batch(cur_hctx, tags, nr_tags);
1126 }
1127 EXPORT_SYMBOL_GPL(blk_mq_end_request_batch);
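/*
 * Illustrative sketch (not part of this file): a driver's completion path can
 * gather finished requests into an io_comp_batch and flush them in one go.
 * This assumes the DEFINE_IO_COMP_BATCH() and blk_mq_add_to_batch() helpers
 * from the block headers; my_next_completed_rq() and my_complete_batch() are
 * hypothetical driver functions.
 *
 *	DEFINE_IO_COMP_BATCH(iob);
 *
 *	while ((rq = my_next_completed_rq(dev)) != NULL) {
 *		if (!blk_mq_add_to_batch(rq, &iob, 0, my_complete_batch))
 *			blk_mq_complete_request(rq);	(one-by-one fallback)
 *	}
 *	if (!rq_list_empty(iob.req_list))
 *		blk_mq_end_request_batch(&iob);
 */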
1128
1129 static void blk_complete_reqs(struct llist_head *list)
1130 {
1131         struct llist_node *entry = llist_reverse_order(llist_del_all(list));
1132         struct request *rq, *next;
1133
1134         llist_for_each_entry_safe(rq, next, entry, ipi_list)
1135                 rq->q->mq_ops->complete(rq);
1136 }
1137
1138 static __latent_entropy void blk_done_softirq(struct softirq_action *h)
1139 {
1140         blk_complete_reqs(this_cpu_ptr(&blk_cpu_done));
1141 }
1142
1143 static int blk_softirq_cpu_dead(unsigned int cpu)
1144 {
1145         blk_complete_reqs(&per_cpu(blk_cpu_done, cpu));
1146         return 0;
1147 }
1148
1149 static void __blk_mq_complete_request_remote(void *data)
1150 {
1151         __raise_softirq_irqoff(BLOCK_SOFTIRQ);
1152 }
1153
1154 static inline bool blk_mq_complete_need_ipi(struct request *rq)
1155 {
1156         int cpu = raw_smp_processor_id();
1157
1158         if (!IS_ENABLED(CONFIG_SMP) ||
1159             !test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags))
1160                 return false;
1161         /*
1162          * With force threaded interrupts enabled, raising softirq from an SMP
1163          * function call will always result in waking the ksoftirqd thread.
1164          * This is probably worse than completing the request on a different
1165          * cache domain.
1166          */
1167         if (force_irqthreads())
1168                 return false;
1169
1170         /* same CPU or cache domain?  Complete locally */
1171         if (cpu == rq->mq_ctx->cpu ||
1172             (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags) &&
1173              cpus_share_cache(cpu, rq->mq_ctx->cpu)))
1174                 return false;
1175
1176         /* don't try to IPI to an offline CPU */
1177         return cpu_online(rq->mq_ctx->cpu);
1178 }
1179
1180 static void blk_mq_complete_send_ipi(struct request *rq)
1181 {
1182         unsigned int cpu;
1183
1184         cpu = rq->mq_ctx->cpu;
1185         if (llist_add(&rq->ipi_list, &per_cpu(blk_cpu_done, cpu)))
1186                 smp_call_function_single_async(cpu, &per_cpu(blk_cpu_csd, cpu));
1187 }
1188
1189 static void blk_mq_raise_softirq(struct request *rq)
1190 {
1191         struct llist_head *list;
1192
1193         preempt_disable();
1194         list = this_cpu_ptr(&blk_cpu_done);
1195         if (llist_add(&rq->ipi_list, list))
1196                 raise_softirq(BLOCK_SOFTIRQ);
1197         preempt_enable();
1198 }
1199
1200 bool blk_mq_complete_request_remote(struct request *rq)
1201 {
1202         WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
1203
1204         /*
1205          * For a request whose hctx has only one ctx mapping,
1206          * or for a polled request, always complete locally;
1207          * it is pointless to redirect the completion.
1208          */
1209         if ((rq->mq_hctx->nr_ctx == 1 &&
1210              rq->mq_ctx->cpu == raw_smp_processor_id()) ||
1211              rq->cmd_flags & REQ_POLLED)
1212                 return false;
1213
1214         if (blk_mq_complete_need_ipi(rq)) {
1215                 blk_mq_complete_send_ipi(rq);
1216                 return true;
1217         }
1218
1219         if (rq->q->nr_hw_queues == 1) {
1220                 blk_mq_raise_softirq(rq);
1221                 return true;
1222         }
1223         return false;
1224 }
1225 EXPORT_SYMBOL_GPL(blk_mq_complete_request_remote);
1226
1227 /**
1228  * blk_mq_complete_request - end I/O on a request
1229  * @rq:         the request being processed
1230  *
1231  * Description:
1232  *      Complete a request by scheduling the ->complete operation.
1233  **/
1234 void blk_mq_complete_request(struct request *rq)
1235 {
1236         if (!blk_mq_complete_request_remote(rq))
1237                 rq->q->mq_ops->complete(rq);
1238 }
1239 EXPORT_SYMBOL(blk_mq_complete_request);
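/*
 * Illustrative sketch (not part of this file): the usual two-stage completion.
 * The driver's interrupt handler only marks the request as complete; the real
 * end-of-request work runs in the ->complete() callback of its blk_mq_ops,
 * possibly on another CPU.  my_complete() and my_rq_status() are hypothetical.
 *
 *	static void my_complete(struct request *rq)
 *	{
 *		blk_mq_end_request(rq, my_rq_status(rq));
 *	}
 *
 *	(in the interrupt handler:)
 *	blk_mq_complete_request(rq);	(may redirect to mq_ops->complete())
 */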
1240
1241 /**
1242  * blk_mq_start_request - Start processing a request
1243  * @rq: Pointer to request to be started
1244  *
1245  * Function used by device drivers to notify the block layer that a request
1246  * is going to be processed now, so the block layer can do the proper
1247  * initializations such as starting the timeout timer.
1248  */
1249 void blk_mq_start_request(struct request *rq)
1250 {
1251         struct request_queue *q = rq->q;
1252
1253         trace_block_rq_issue(rq);
1254
1255         if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags) &&
1256             !blk_rq_is_passthrough(rq)) {
1257                 rq->io_start_time_ns = ktime_get_ns();
1258                 rq->stats_sectors = blk_rq_sectors(rq);
1259                 rq->rq_flags |= RQF_STATS;
1260                 rq_qos_issue(q, rq);
1261         }
1262
1263         WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE);
1264
1265         blk_add_timer(rq);
1266         WRITE_ONCE(rq->state, MQ_RQ_IN_FLIGHT);
1267         rq->mq_hctx->tags->rqs[rq->tag] = rq;
1268
1269 #ifdef CONFIG_BLK_DEV_INTEGRITY
1270         if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE)
1271                 q->integrity.profile->prepare_fn(rq);
1272 #endif
1273         if (rq->bio && rq->bio->bi_opf & REQ_POLLED)
1274                 WRITE_ONCE(rq->bio->bi_cookie, rq->mq_hctx->queue_num);
1275 }
1276 EXPORT_SYMBOL(blk_mq_start_request);
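/*
 * Illustrative sketch (not part of this file): a minimal ->queue_rq()
 * implementation calls blk_mq_start_request() before handing the request to
 * the hardware and returns BLK_STS_RESOURCE when the device is temporarily
 * busy so that blk-mq retries later.  my_hw_queue_full() and my_issue() are
 * hypothetical driver helpers.
 *
 *	static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					const struct blk_mq_queue_data *bd)
 *	{
 *		struct request *rq = bd->rq;
 *
 *		if (my_hw_queue_full(hctx))
 *			return BLK_STS_RESOURCE;
 *		blk_mq_start_request(rq);
 *		my_issue(rq);
 *		return BLK_STS_OK;
 *	}
 */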
1277
1278 /*
1279  * Allow 2x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
1280  * queues. This is important for md arrays to benefit from merging
1281  * requests.
1282  */
1283 static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
1284 {
1285         if (plug->multiple_queues)
1286                 return BLK_MAX_REQUEST_COUNT * 2;
1287         return BLK_MAX_REQUEST_COUNT;
1288 }
1289
1290 static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
1291 {
1292         struct request *last = rq_list_peek(&plug->mq_list);
1293
1294         if (!plug->rq_count) {
1295                 trace_block_plug(rq->q);
1296         } else if (plug->rq_count >= blk_plug_max_rq_count(plug) ||
1297                    (!blk_queue_nomerges(rq->q) &&
1298                     blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
1299                 blk_mq_flush_plug_list(plug, false);
1300                 last = NULL;
1301                 trace_block_plug(rq->q);
1302         }
1303
1304         if (!plug->multiple_queues && last && last->q != rq->q)
1305                 plug->multiple_queues = true;
1306         /*
1307          * Any request allocated from sched tags can't be issued to
1308          * ->queue_rqs() directly
1309          */
1310         if (!plug->has_elevator && (rq->rq_flags & RQF_SCHED_TAGS))
1311                 plug->has_elevator = true;
1312         rq->rq_next = NULL;
1313         rq_list_add(&plug->mq_list, rq);
1314         plug->rq_count++;
1315 }
1316
1317 /**
1318  * blk_execute_rq_nowait - insert a request to I/O scheduler for execution
1319  * @rq:         request to insert
1320  * @at_head:    insert request at head or tail of queue
1321  *
1322  * Description:
1323  *    Insert a fully prepared request at the back of the I/O scheduler queue
1324  *    for execution.  Don't wait for completion.
1325  *
1326  * Note:
1327  *    This function will invoke rq->end_io() directly if the queue is dead.
1328  */
1329 void blk_execute_rq_nowait(struct request *rq, bool at_head)
1330 {
1331         struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
1332
1333         WARN_ON(irqs_disabled());
1334         WARN_ON(!blk_rq_is_passthrough(rq));
1335
1336         blk_account_io_start(rq);
1337
1338         /*
1339          * As plugging can be enabled for passthrough requests on a zoned
1340          * device, directly accessing the plug instead of using blk_mq_plug()
1341          * should not have any consequences.
1342          */
1343         if (current->plug && !at_head) {
1344                 blk_add_rq_to_plug(current->plug, rq);
1345                 return;
1346         }
1347
1348         blk_mq_insert_request(rq, at_head ? BLK_MQ_INSERT_AT_HEAD : 0);
1349         blk_mq_run_hw_queue(hctx, hctx->flags & BLK_MQ_F_BLOCKING);
1350 }
1351 EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
1352
1353 struct blk_rq_wait {
1354         struct completion done;
1355         blk_status_t ret;
1356 };
1357
1358 static enum rq_end_io_ret blk_end_sync_rq(struct request *rq, blk_status_t ret)
1359 {
1360         struct blk_rq_wait *wait = rq->end_io_data;
1361
1362         wait->ret = ret;
1363         complete(&wait->done);
1364         return RQ_END_IO_NONE;
1365 }
1366
1367 bool blk_rq_is_poll(struct request *rq)
1368 {
1369         if (!rq->mq_hctx)
1370                 return false;
1371         if (rq->mq_hctx->type != HCTX_TYPE_POLL)
1372                 return false;
1373         return true;
1374 }
1375 EXPORT_SYMBOL_GPL(blk_rq_is_poll);
1376
1377 static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
1378 {
1379         do {
1380                 blk_hctx_poll(rq->q, rq->mq_hctx, NULL, 0);
1381                 cond_resched();
1382         } while (!completion_done(wait));
1383 }
1384
1385 /**
1386  * blk_execute_rq - insert a request into queue for execution
1387  * @rq:         request to insert
1388  * @at_head:    insert request at head or tail of queue
1389  *
1390  * Description:
1391  *    Insert a fully prepared request at the back of the I/O scheduler queue
1392  *    for execution and wait for completion.
1393  * Return: The blk_status_t result provided to blk_mq_end_request().
1394  */
1395 blk_status_t blk_execute_rq(struct request *rq, bool at_head)
1396 {
1397         struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
1398         struct blk_rq_wait wait = {
1399                 .done = COMPLETION_INITIALIZER_ONSTACK(wait.done),
1400         };
1401
1402         WARN_ON(irqs_disabled());
1403         WARN_ON(!blk_rq_is_passthrough(rq));
1404
1405         rq->end_io_data = &wait;
1406         rq->end_io = blk_end_sync_rq;
1407
1408         blk_account_io_start(rq);
1409         blk_mq_insert_request(rq, at_head ? BLK_MQ_INSERT_AT_HEAD : 0);
1410         blk_mq_run_hw_queue(hctx, false);
1411
1412         if (blk_rq_is_poll(rq)) {
1413                 blk_rq_poll_completion(rq, &wait.done);
1414         } else {
1415                 /*
1416                  * Prevent hang_check timer from firing at us during very long
1417                  * I/O
1418                  */
1419                 unsigned long hang_check = sysctl_hung_task_timeout_secs;
1420
1421                 if (hang_check)
1422                         while (!wait_for_completion_io_timeout(&wait.done,
1423                                         hang_check * (HZ/2)))
1424                                 ;
1425                 else
1426                         wait_for_completion_io(&wait.done);
1427         }
1428
1429         return wait.ret;
1430 }
1431 EXPORT_SYMBOL(blk_execute_rq);
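/*
 * Illustrative sketch (not part of this file): synchronously executing a
 * passthrough request allocated with blk_mq_alloc_request() and translating
 * the returned block status into an errno.
 *
 *	blk_status_t status;
 *
 *	status = blk_execute_rq(rq, false);	(insert at tail, wait for completion)
 *	if (status != BLK_STS_OK)
 *		ret = blk_status_to_errno(status);
 *	blk_mq_free_request(rq);
 */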
1432
1433 static void __blk_mq_requeue_request(struct request *rq)
1434 {
1435         struct request_queue *q = rq->q;
1436
1437         blk_mq_put_driver_tag(rq);
1438
1439         trace_block_rq_requeue(rq);
1440         rq_qos_requeue(q, rq);
1441
1442         if (blk_mq_request_started(rq)) {
1443                 WRITE_ONCE(rq->state, MQ_RQ_IDLE);
1444                 rq->rq_flags &= ~RQF_TIMED_OUT;
1445         }
1446 }
1447
1448 void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
1449 {
1450         struct request_queue *q = rq->q;
1451         unsigned long flags;
1452
1453         __blk_mq_requeue_request(rq);
1454
1455         /* this request will be re-inserted into the I/O scheduler queue */
1456         blk_mq_sched_requeue_request(rq);
1457
1458         spin_lock_irqsave(&q->requeue_lock, flags);
1459         list_add_tail(&rq->queuelist, &q->requeue_list);
1460         spin_unlock_irqrestore(&q->requeue_lock, flags);
1461
1462         if (kick_requeue_list)
1463                 blk_mq_kick_requeue_list(q);
1464 }
1465 EXPORT_SYMBOL(blk_mq_requeue_request);
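/*
 * Illustrative sketch (not part of this file): a driver that hits a transient
 * device-busy condition after a request was started can hand it back to the
 * block layer and have the requeue list kicked immediately.
 * my_device_busy() is a hypothetical driver helper.
 *
 *	if (my_device_busy(dev)) {
 *		blk_mq_requeue_request(rq, true);	(kick the requeue list now)
 *		return;
 *	}
 */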
1466
1467 static void blk_mq_requeue_work(struct work_struct *work)
1468 {
1469         struct request_queue *q =
1470                 container_of(work, struct request_queue, requeue_work.work);
1471         LIST_HEAD(rq_list);
1472         LIST_HEAD(flush_list);
1473         struct request *rq;
1474
1475         spin_lock_irq(&q->requeue_lock);
1476         list_splice_init(&q->requeue_list, &rq_list);
1477         list_splice_init(&q->flush_list, &flush_list);
1478         spin_unlock_irq(&q->requeue_lock);
1479
1480         while (!list_empty(&rq_list)) {
1481                 rq = list_entry(rq_list.next, struct request, queuelist);
1482                 /*
1483                  * If RQF_DONTPREP is set, the request has already been
1484                  * started by the driver and might have driver-specific
1485                  * data allocated.  Insert it into the hctx dispatch list
1486                  * to avoid block layer merges for the request.
1487                  */
1488                 if (rq->rq_flags & RQF_DONTPREP) {
1489                         list_del_init(&rq->queuelist);
1490                         blk_mq_request_bypass_insert(rq, 0);
1491                 } else {
1492                         list_del_init(&rq->queuelist);
1493                         blk_mq_insert_request(rq, BLK_MQ_INSERT_AT_HEAD);
1494                 }
1495         }
1496
1497         while (!list_empty(&flush_list)) {
1498                 rq = list_entry(flush_list.next, struct request, queuelist);
1499                 list_del_init(&rq->queuelist);
1500                 blk_mq_insert_request(rq, 0);
1501         }
1502
1503         blk_mq_run_hw_queues(q, false);
1504 }
1505
1506 void blk_mq_kick_requeue_list(struct request_queue *q)
1507 {
1508         kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 0);
1509 }
1510 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
1511
1512 void blk_mq_delay_kick_requeue_list(struct request_queue *q,
1513                                     unsigned long msecs)
1514 {
1515         kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
1516                                     msecs_to_jiffies(msecs));
1517 }
1518 EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
1519
1520 static bool blk_is_flush_data_rq(struct request *rq)
1521 {
1522         return (rq->rq_flags & RQF_FLUSH_SEQ) && !is_flush_rq(rq);
1523 }
1524
1525 static bool blk_mq_rq_inflight(struct request *rq, void *priv)
1526 {
1527         /*
1528          * If we find a request that isn't idle, we know the queue is busy
1529          * as it's checked in the iter.
1530          * Return false to stop the iteration.
1531          *
1532          * If the queue is quiesced, don't count a completed flush data
1533          * request as inflight: the flush sequence is suspended and the
1534          * original flush data request is invisible to the driver, just
1535          * like other pending requests held back by the quiesce.
1536          */
1537         if (blk_mq_request_started(rq) && !(blk_queue_quiesced(rq->q) &&
1538                                 blk_is_flush_data_rq(rq) &&
1539                                 blk_mq_request_completed(rq))) {
1540                 bool *busy = priv;
1541
1542                 *busy = true;
1543                 return false;
1544         }
1545
1546         return true;
1547 }
1548
1549 bool blk_mq_queue_inflight(struct request_queue *q)
1550 {
1551         bool busy = false;
1552
1553         blk_mq_queue_tag_busy_iter(q, blk_mq_rq_inflight, &busy);
1554         return busy;
1555 }
1556 EXPORT_SYMBOL_GPL(blk_mq_queue_inflight);
1557
1558 static void blk_mq_rq_timed_out(struct request *req)
1559 {
1560         req->rq_flags |= RQF_TIMED_OUT;
1561         if (req->q->mq_ops->timeout) {
1562                 enum blk_eh_timer_return ret;
1563
1564                 ret = req->q->mq_ops->timeout(req);
1565                 if (ret == BLK_EH_DONE)
1566                         return;
1567                 WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER);
1568         }
1569
1570         blk_add_timer(req);
1571 }
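
/*
 * Hedged sketch of the driver side of the above (illustrative only): a
 * ->timeout callback either takes over the request and returns BLK_EH_DONE,
 * or asks for more time with BLK_EH_RESET_TIMER, in which case
 * blk_mq_rq_timed_out() re-arms the timer via blk_add_timer().  The abort
 * helper below is a hypothetical driver function.
 */
#if 0	/* example only */
static enum blk_eh_timer_return example_timeout(struct request *rq)
{
        if (example_abort_command(rq)) {
                /* Command aborted; the driver will complete rq itself. */
                return BLK_EH_DONE;
        }
        /* Still making progress, give the command another timeout period. */
        return BLK_EH_RESET_TIMER;
}
#endif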
1572
1573 struct blk_expired_data {
1574         bool has_timedout_rq;
1575         unsigned long next;
1576         unsigned long timeout_start;
1577 };
1578
1579 static bool blk_mq_req_expired(struct request *rq, struct blk_expired_data *expired)
1580 {
1581         unsigned long deadline;
1582
1583         if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT)
1584                 return false;
1585         if (rq->rq_flags & RQF_TIMED_OUT)
1586                 return false;
1587
1588         deadline = READ_ONCE(rq->deadline);
1589         if (time_after_eq(expired->timeout_start, deadline))
1590                 return true;
1591
1592         if (expired->next == 0)
1593                 expired->next = deadline;
1594         else if (time_after(expired->next, deadline))
1595                 expired->next = deadline;
1596         return false;
1597 }
1598
1599 void blk_mq_put_rq_ref(struct request *rq)
1600 {
1601         if (is_flush_rq(rq)) {
1602                 if (rq->end_io(rq, 0) == RQ_END_IO_FREE)
1603                         blk_mq_free_request(rq);
1604         } else if (req_ref_put_and_test(rq)) {
1605                 __blk_mq_free_request(rq);
1606         }
1607 }
1608
1609 static bool blk_mq_check_expired(struct request *rq, void *priv)
1610 {
1611         struct blk_expired_data *expired = priv;
1612
1613         /*
1614          * blk_mq_queue_tag_busy_iter() has locked the request, so it cannot
1615          * be reallocated underneath the timeout handler's processing, which
1616          * makes the expiry check reliable. If the request is not expired, it
1617          * may still be completed and reallocated as a new request after
1618          * blk_mq_check_expired() returns.
1619          */
1620         if (blk_mq_req_expired(rq, expired)) {
1621                 expired->has_timedout_rq = true;
1622                 return false;
1623         }
1624         return true;
1625 }
1626
1627 static bool blk_mq_handle_expired(struct request *rq, void *priv)
1628 {
1629         struct blk_expired_data *expired = priv;
1630
1631         if (blk_mq_req_expired(rq, expired))
1632                 blk_mq_rq_timed_out(rq);
1633         return true;
1634 }
1635
1636 static void blk_mq_timeout_work(struct work_struct *work)
1637 {
1638         struct request_queue *q =
1639                 container_of(work, struct request_queue, timeout_work);
1640         struct blk_expired_data expired = {
1641                 .timeout_start = jiffies,
1642         };
1643         struct blk_mq_hw_ctx *hctx;
1644         unsigned long i;
1645
1646         /* A deadlock might occur if a request is stuck requiring a
1647          * timeout at the same time a queue freeze is waiting for
1648          * completion, since the timeout code would not be able to
1649          * acquire the queue reference here.
1650          *
1651          * That's why we don't use blk_queue_enter here; instead, we use
1652          * percpu_ref_tryget directly, because we need to be able to
1653          * obtain a reference even in the short window between the queue
1654          * starting to freeze, by dropping the first reference in
1655          * blk_freeze_queue_start, and the moment the last request is
1656          * consumed, marked by the instant q_usage_counter reaches
1657          * zero.
1658          */
1659         if (!percpu_ref_tryget(&q->q_usage_counter))
1660                 return;
1661
1662         /* check if there is any timed-out request */
1663         blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &expired);
1664         if (expired.has_timedout_rq) {
1665                 /*
1666                  * Before walking tags, we must ensure any submit started
1667                  * before the current time has finished. Since the submit
1668                  * uses srcu or rcu, wait for a synchronization point to
1669                  * ensure all running submits have finished
1670                  * ensure all running submits have finished.
1671                 blk_mq_wait_quiesce_done(q->tag_set);
1672
1673                 expired.next = 0;
1674                 blk_mq_queue_tag_busy_iter(q, blk_mq_handle_expired, &expired);
1675         }
1676
1677         if (expired.next != 0) {
1678                 mod_timer(&q->timeout, expired.next);
1679         } else {
1680                 /*
1681                  * Request timeouts are handled as a forward rolling timer. If
1682                  * we end up here it means that no requests are pending and
1683                  * also that no request has been pending for a while. Mark
1684                  * each hctx as idle.
1685                  */
1686                 queue_for_each_hw_ctx(q, hctx, i) {
1687                         /* the hctx may be unmapped, so check it here */
1688                         if (blk_mq_hw_queue_mapped(hctx))
1689                                 blk_mq_tag_idle(hctx);
1690                 }
1691         }
1692         blk_queue_exit(q);
1693 }
1694
1695 struct flush_busy_ctx_data {
1696         struct blk_mq_hw_ctx *hctx;
1697         struct list_head *list;
1698 };
1699
1700 static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
1701 {
1702         struct flush_busy_ctx_data *flush_data = data;
1703         struct blk_mq_hw_ctx *hctx = flush_data->hctx;
1704         struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
1705         enum hctx_type type = hctx->type;
1706
1707         spin_lock(&ctx->lock);
1708         list_splice_tail_init(&ctx->rq_lists[type], flush_data->list);
1709         sbitmap_clear_bit(sb, bitnr);
1710         spin_unlock(&ctx->lock);
1711         return true;
1712 }
1713
1714 /*
1715  * Process software queues that have been marked busy, splicing them
1716  * to the for-dispatch list.
1717  */
1718 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
1719 {
1720         struct flush_busy_ctx_data data = {
1721                 .hctx = hctx,
1722                 .list = list,
1723         };
1724
1725         sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
1726 }
1727 EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);
1728
1729 struct dispatch_rq_data {
1730         struct blk_mq_hw_ctx *hctx;
1731         struct request *rq;
1732 };
1733
1734 static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr,
1735                 void *data)
1736 {
1737         struct dispatch_rq_data *dispatch_data = data;
1738         struct blk_mq_hw_ctx *hctx = dispatch_data->hctx;
1739         struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
1740         enum hctx_type type = hctx->type;
1741
1742         spin_lock(&ctx->lock);
1743         if (!list_empty(&ctx->rq_lists[type])) {
1744                 dispatch_data->rq = list_entry_rq(ctx->rq_lists[type].next);
1745                 list_del_init(&dispatch_data->rq->queuelist);
1746                 if (list_empty(&ctx->rq_lists[type]))
1747                         sbitmap_clear_bit(sb, bitnr);
1748         }
1749         spin_unlock(&ctx->lock);
1750
1751         return !dispatch_data->rq;
1752 }
1753
1754 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
1755                                         struct blk_mq_ctx *start)
1756 {
1757         unsigned off = start ? start->index_hw[hctx->type] : 0;
1758         struct dispatch_rq_data data = {
1759                 .hctx = hctx,
1760                 .rq   = NULL,
1761         };
1762
1763         __sbitmap_for_each_set(&hctx->ctx_map, off,
1764                                dispatch_rq_from_ctx, &data);
1765
1766         return data.rq;
1767 }
1768
1769 bool __blk_mq_alloc_driver_tag(struct request *rq)
1770 {
1771         struct sbitmap_queue *bt = &rq->mq_hctx->tags->bitmap_tags;
1772         unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags;
1773         int tag;
1774
1775         blk_mq_tag_busy(rq->mq_hctx);
1776
1777         if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) {
1778                 bt = &rq->mq_hctx->tags->breserved_tags;
1779                 tag_offset = 0;
1780         } else {
1781                 if (!hctx_may_queue(rq->mq_hctx, bt))
1782                         return false;
1783         }
1784
1785         tag = __sbitmap_queue_get(bt);
1786         if (tag == BLK_MQ_NO_TAG)
1787                 return false;
1788
1789         rq->tag = tag + tag_offset;
1790         blk_mq_inc_active_requests(rq->mq_hctx);
1791         return true;
1792 }
1793
1794 static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
1795                                 int flags, void *key)
1796 {
1797         struct blk_mq_hw_ctx *hctx;
1798
1799         hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
1800
1801         spin_lock(&hctx->dispatch_wait_lock);
1802         if (!list_empty(&wait->entry)) {
1803                 struct sbitmap_queue *sbq;
1804
1805                 list_del_init(&wait->entry);
1806                 sbq = &hctx->tags->bitmap_tags;
1807                 atomic_dec(&sbq->ws_active);
1808         }
1809         spin_unlock(&hctx->dispatch_wait_lock);
1810
1811         blk_mq_run_hw_queue(hctx, true);
1812         return 1;
1813 }
1814
1815 /*
1816  * Mark us waiting for a tag. For shared tags, this involves hooking us into
1817  * the tag wakeups. For non-shared tags, we can simply mark ourselves as
1818  * needing a restart. In both cases, take care to check the condition again
1819  * after marking us as waiting.
1820  */
1821 static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
1822                                  struct request *rq)
1823 {
1824         struct sbitmap_queue *sbq;
1825         struct wait_queue_head *wq;
1826         wait_queue_entry_t *wait;
1827         bool ret;
1828
1829         if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) &&
1830             !(blk_mq_is_shared_tags(hctx->flags))) {
1831                 blk_mq_sched_mark_restart_hctx(hctx);
1832
1833                 /*
1834                  * It's possible that a tag was freed in the window between the
1835                  * allocation failure and adding the hardware queue to the wait
1836                  * queue.
1837                  *
1838                  * Don't clear RESTART here, someone else could have set it.
1839                  * At most this will cost an extra queue run.
1840                  */
1841                 return blk_mq_get_driver_tag(rq);
1842         }
1843
1844         wait = &hctx->dispatch_wait;
1845         if (!list_empty_careful(&wait->entry))
1846                 return false;
1847
1848         if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag))
1849                 sbq = &hctx->tags->breserved_tags;
1850         else
1851                 sbq = &hctx->tags->bitmap_tags;
1852         wq = &bt_wait_ptr(sbq, hctx)->wait;
1853
1854         spin_lock_irq(&wq->lock);
1855         spin_lock(&hctx->dispatch_wait_lock);
1856         if (!list_empty(&wait->entry)) {
1857                 spin_unlock(&hctx->dispatch_wait_lock);
1858                 spin_unlock_irq(&wq->lock);
1859                 return false;
1860         }
1861
1862         atomic_inc(&sbq->ws_active);
1863         wait->flags &= ~WQ_FLAG_EXCLUSIVE;
1864         __add_wait_queue(wq, wait);
1865
1866         /*
1867          * Add an explicit barrier since blk_mq_get_driver_tag() may
1868          * not imply a barrier in case of failure.
1869          *
1870          * This orders adding us to the wait queue and allocating the driver tag.
1871          *
1872          * The pairing barrier is the one implied in sbitmap_queue_wake_up(),
1873          * which orders clearing the sbitmap tag bits and waitqueue_active() in
1874          * __sbitmap_queue_wake_up(), since waitqueue_active() is lockless.
1875          *
1876          * Otherwise, reordering of adding to the wait queue and getting the
1877          * driver tag may cause __sbitmap_queue_wake_up() to wake up nothing
1878          * because waitqueue_active() may not observe us on the wait queue.
1879          */
1880         smp_mb();
1881
1882         /*
1883          * It's possible that a tag was freed in the window between the
1884          * allocation failure and adding the hardware queue to the wait
1885          * queue.
1886          */
1887         ret = blk_mq_get_driver_tag(rq);
1888         if (!ret) {
1889                 spin_unlock(&hctx->dispatch_wait_lock);
1890                 spin_unlock_irq(&wq->lock);
1891                 return false;
1892         }
1893
1894         /*
1895          * We got a tag, remove ourselves from the wait queue to ensure
1896          * someone else gets the wakeup.
1897          */
1898         list_del_init(&wait->entry);
1899         atomic_dec(&sbq->ws_active);
1900         spin_unlock(&hctx->dispatch_wait_lock);
1901         spin_unlock_irq(&wq->lock);
1902
1903         return true;
1904 }
1905
1906 #define BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT  8
1907 #define BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR  4
1908 /*
1909  * Update dispatch_busy with the Exponential Weighted Moving Average (EWMA):
1910  * - EWMA is a simple way to compute a running average value
1911  * - the 7/8 and 1/8 weights make old samples decay exponentially
1912  * - a factor of 4 scales the value up to avoid it rounding down to 0;
1913  *   the exact factor doesn't matter because the EWMA decays exponentially
1914  */
1915 static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy)
1916 {
1917         unsigned int ewma;
1918
1919         ewma = hctx->dispatch_busy;
1920
1921         if (!ewma && !busy)
1922                 return;
1923
1924         ewma *= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT - 1;
1925         if (busy)
1926                 ewma += 1 << BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR;
1927         ewma /= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT;
1928
1929         hctx->dispatch_busy = ewma;
1930 }
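
/*
 * Worked example of the EWMA above (weight 8, factor 4), stepping the
 * integer math by hand: starting from an idle hctx (dispatch_busy == 0),
 * consecutive busy updates climb 0 -> 2 -> 3 -> 4 -> ... -> 8 -> 9 and then
 * hold at 9 under integer division.  Once dispatch stops reporting busy,
 * each update multiplies by 7/8 so the value decays 9 -> 7 -> 6 -> ... ->
 * 1 -> 0, at which point callers such as blk_mq_insert_requests() start
 * attempting direct issue again.
 */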
1931
1932 #define BLK_MQ_RESOURCE_DELAY   3               /* ms units */
1933
1934 static void blk_mq_handle_dev_resource(struct request *rq,
1935                                        struct list_head *list)
1936 {
1937         list_add(&rq->queuelist, list);
1938         __blk_mq_requeue_request(rq);
1939 }
1940
1941 static void blk_mq_handle_zone_resource(struct request *rq,
1942                                         struct list_head *zone_list)
1943 {
1944         /*
1945          * If we end up here it is because we cannot dispatch a request to a
1946          * specific zone due to LLD-level zone-write locking or some other
1947          * zone-related resource not being available. In this case, set the request
1948          * aside in zone_list for retrying it later.
1949          */
1950         list_add(&rq->queuelist, zone_list);
1951         __blk_mq_requeue_request(rq);
1952 }
1953
1954 enum prep_dispatch {
1955         PREP_DISPATCH_OK,
1956         PREP_DISPATCH_NO_TAG,
1957         PREP_DISPATCH_NO_BUDGET,
1958 };
1959
1960 static enum prep_dispatch blk_mq_prep_dispatch_rq(struct request *rq,
1961                                                   bool need_budget)
1962 {
1963         struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
1964         int budget_token = -1;
1965
1966         if (need_budget) {
1967                 budget_token = blk_mq_get_dispatch_budget(rq->q);
1968                 if (budget_token < 0) {
1969                         blk_mq_put_driver_tag(rq);
1970                         return PREP_DISPATCH_NO_BUDGET;
1971                 }
1972                 blk_mq_set_rq_budget_token(rq, budget_token);
1973         }
1974
1975         if (!blk_mq_get_driver_tag(rq)) {
1976                 /*
1977                  * The initial allocation attempt failed, so we need to
1978                  * rerun the hardware queue when a tag is freed. The
1979                  * waitqueue takes care of that. If the queue is run
1980                  * before we add this entry back on the dispatch list,
1981                  * we'll re-run it below.
1982                  */
1983                 if (!blk_mq_mark_tag_wait(hctx, rq)) {
1984                         /*
1985                          * Budgets that were not acquired by this function are
1986                          * released together when handling the partial dispatch
1987                          */
1988                         if (need_budget)
1989                                 blk_mq_put_dispatch_budget(rq->q, budget_token);
1990                         return PREP_DISPATCH_NO_TAG;
1991                 }
1992         }
1993
1994         return PREP_DISPATCH_OK;
1995 }
1996
1997 /* release all allocated budgets before calling blk_mq_dispatch_rq_list */
1998 static void blk_mq_release_budgets(struct request_queue *q,
1999                 struct list_head *list)
2000 {
2001         struct request *rq;
2002
2003         list_for_each_entry(rq, list, queuelist) {
2004                 int budget_token = blk_mq_get_rq_budget_token(rq);
2005
2006                 if (budget_token >= 0)
2007                         blk_mq_put_dispatch_budget(q, budget_token);
2008         }
2009 }
2010
2011 /*
2012  * blk_mq_commit_rqs() notifies the driver, like a bd->last flag would, that
2013  * there are no more requests coming. (See the comment on commit_rqs in
2014  * struct blk_mq_ops for details.)
2015  * We must call this explicitly in the unusual cases where:
2016  *  1) we did not queue everything initially scheduled to queue
2017  *  2) the last attempt to queue a request failed
2018  */
2019 static void blk_mq_commit_rqs(struct blk_mq_hw_ctx *hctx, int queued,
2020                               bool from_schedule)
2021 {
2022         if (hctx->queue->mq_ops->commit_rqs && queued) {
2023                 trace_block_unplug(hctx->queue, queued, !from_schedule);
2024                 hctx->queue->mq_ops->commit_rqs(hctx);
2025         }
2026 }
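
/*
 * Hedged driver-side sketch (illustrative only) of how ->queue_rq() and
 * ->commit_rqs() pair up: the driver defers its doorbell while bd->last is
 * false, and ->commit_rqs() provides the fallback kick for the unusual
 * cases listed above where no request with bd->last set ever reached the
 * driver.  The post/doorbell helpers are hypothetical.
 */
#if 0	/* example only */
static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
                                     const struct blk_mq_queue_data *bd)
{
        example_post_to_ring(hctx->driver_data, bd->rq);
        if (bd->last)
                example_ring_doorbell(hctx->driver_data);
        return BLK_STS_OK;
}

static void example_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
        example_ring_doorbell(hctx->driver_data);
}
#endif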
2027
2028 /*
2029  * Returns true if we did some work AND can potentially do more.
2030  */
2031 bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
2032                              unsigned int nr_budgets)
2033 {
2034         enum prep_dispatch prep;
2035         struct request_queue *q = hctx->queue;
2036         struct request *rq;
2037         int queued;
2038         blk_status_t ret = BLK_STS_OK;
2039         LIST_HEAD(zone_list);
2040         bool needs_resource = false;
2041
2042         if (list_empty(list))
2043                 return false;
2044
2045         /*
2046          * Now process all the entries, sending them to the driver.
2047          */
2048         queued = 0;
2049         do {
2050                 struct blk_mq_queue_data bd;
2051
2052                 rq = list_first_entry(list, struct request, queuelist);
2053
2054                 WARN_ON_ONCE(hctx != rq->mq_hctx);
2055                 prep = blk_mq_prep_dispatch_rq(rq, !nr_budgets);
2056                 if (prep != PREP_DISPATCH_OK)
2057                         break;
2058
2059                 list_del_init(&rq->queuelist);
2060
2061                 bd.rq = rq;
2062                 bd.last = list_empty(list);
2063
2064                 /*
2065                  * Once the request is queued to the LLD, there is no
2066                  * need to cover the budget any more.
2067                  */
2068                 if (nr_budgets)
2069                         nr_budgets--;
2070                 ret = q->mq_ops->queue_rq(hctx, &bd);
2071                 switch (ret) {
2072                 case BLK_STS_OK:
2073                         queued++;
2074                         break;
2075                 case BLK_STS_RESOURCE:
2076                         needs_resource = true;
2077                         fallthrough;
2078                 case BLK_STS_DEV_RESOURCE:
2079                         blk_mq_handle_dev_resource(rq, list);
2080                         goto out;
2081                 case BLK_STS_ZONE_RESOURCE:
2082                         /*
2083                          * Move the request to zone_list and keep going through
2084                          * the dispatch list to find more requests the drive can
2085                          * accept.
2086                          */
2087                         blk_mq_handle_zone_resource(rq, &zone_list);
2088                         needs_resource = true;
2089                         break;
2090                 default:
2091                         blk_mq_end_request(rq, ret);
2092                 }
2093         } while (!list_empty(list));
2094 out:
2095         if (!list_empty(&zone_list))
2096                 list_splice_tail_init(&zone_list, list);
2097
2098         /* If we didn't flush the entire list, we could have told the driver
2099          * there was more coming, but that turned out to be a lie.
2100          */
2101         if (!list_empty(list) || ret != BLK_STS_OK)
2102                 blk_mq_commit_rqs(hctx, queued, false);
2103
2104         /*
2105          * Any items that need requeuing? Stuff them into hctx->dispatch;
2106          * that is where we will continue on the next queue run.
2107          */
2108         if (!list_empty(list)) {
2109                 bool needs_restart;
2110                 /* For non-shared tags, the RESTART check will suffice */
2111                 bool no_tag = prep == PREP_DISPATCH_NO_TAG &&
2112                         ((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) ||
2113                         blk_mq_is_shared_tags(hctx->flags));
2114
2115                 if (nr_budgets)
2116                         blk_mq_release_budgets(q, list);
2117
2118                 spin_lock(&hctx->lock);
2119                 list_splice_tail_init(list, &hctx->dispatch);
2120                 spin_unlock(&hctx->lock);
2121
2122                 /*
2123                  * Order adding requests to hctx->dispatch against checking the
2124                  * SCHED_RESTART flag. The pairing smp_mb() is the one in
2125                  * blk_mq_sched_restart(). This keeps the restart code path from
2126                  * missing the newly added requests on hctx->dispatch while
2127                  * SCHED_RESTART is observed here.
2128                  */
2129                 smp_mb();
2130
2131                 /*
2132                  * If SCHED_RESTART was set by the caller of this function and
2133                  * it is no longer set that means that it was cleared by another
2134                  * thread and hence that a queue rerun is needed.
2135                  *
2136                  * If 'no_tag' is set, that means that we failed getting
2137                  * a driver tag with an I/O scheduler attached. If our dispatch
2138                  * waitqueue is no longer active, ensure that we run the queue
2139                  * AFTER adding our entries back to the list.
2140                  *
2141                  * If no I/O scheduler has been configured it is possible that
2142                  * the hardware queue got stopped and restarted before requests
2143                  * were pushed back onto the dispatch list. Rerun the queue to
2144                  * avoid starvation. Notes:
2145                  * - blk_mq_run_hw_queue() checks whether or not a queue has
2146                  *   been stopped before rerunning a queue.
2147                  * - Some but not all block drivers stop a queue before
2148                  *   returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
2149                  *   and dm-rq.
2150                  *
2151                  * If the driver returns BLK_STS_RESOURCE and the SCHED_RESTART
2152                  * bit is set, run the queue after a delay to avoid IO stalls
2153                  * that could otherwise occur if the queue is idle.  We do the
2154                  * same if we couldn't get a budget or couldn't lock a zone and
2155                  * SCHED_RESTART is set.
2156                  */
2157                 needs_restart = blk_mq_sched_needs_restart(hctx);
2158                 if (prep == PREP_DISPATCH_NO_BUDGET)
2159                         needs_resource = true;
2160                 if (!needs_restart ||
2161                     (no_tag && list_empty_careful(&hctx->dispatch_wait.entry)))
2162                         blk_mq_run_hw_queue(hctx, true);
2163                 else if (needs_resource)
2164                         blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);
2165
2166                 blk_mq_update_dispatch_busy(hctx, true);
2167                 return false;
2168         }
2169
2170         blk_mq_update_dispatch_busy(hctx, false);
2171         return true;
2172 }
2173
2174 static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
2175 {
2176         int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);
2177
2178         if (cpu >= nr_cpu_ids)
2179                 cpu = cpumask_first(hctx->cpumask);
2180         return cpu;
2181 }
2182
2183 /*
2184  * It'd be great if the workqueue API had a way to pass
2185  * in a mask and had some smarts for more clever placement.
2186  * For now we just round-robin here, switching for every
2187  * BLK_MQ_CPU_WORK_BATCH queued items.
2188  */
2189 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
2190 {
2191         bool tried = false;
2192         int next_cpu = hctx->next_cpu;
2193
2194         if (hctx->queue->nr_hw_queues == 1)
2195                 return WORK_CPU_UNBOUND;
2196
2197         if (--hctx->next_cpu_batch <= 0) {
2198 select_cpu:
2199                 next_cpu = cpumask_next_and(next_cpu, hctx->cpumask,
2200                                 cpu_online_mask);
2201                 if (next_cpu >= nr_cpu_ids)
2202                         next_cpu = blk_mq_first_mapped_cpu(hctx);
2203                 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
2204         }
2205
2206         /*
2207          * Do an unbound schedule if we can't find an online CPU for this hctx,
2208          * which should only happen in the CPU DEAD handling path.
2209          */
2210         if (!cpu_online(next_cpu)) {
2211                 if (!tried) {
2212                         tried = true;
2213                         goto select_cpu;
2214                 }
2215
2216                 /*
2217                  * Make sure to re-select the CPU next time once CPUs in
2218                  * hctx->cpumask come online again.
2219                  */
2220                 hctx->next_cpu = next_cpu;
2221                 hctx->next_cpu_batch = 1;
2222                 return WORK_CPU_UNBOUND;
2223         }
2224
2225         hctx->next_cpu = next_cpu;
2226         return next_cpu;
2227 }
2228
2229 /**
2230  * blk_mq_delay_run_hw_queue - Run a hardware queue asynchronously.
2231  * @hctx: Pointer to the hardware queue to run.
2232  * @msecs: Milliseconds of delay to wait before running the queue.
2233  *
2234  * Run a hardware queue asynchronously with a delay of @msecs.
2235  */
2236 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
2237 {
2238         if (unlikely(blk_mq_hctx_stopped(hctx)))
2239                 return;
2240         kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work,
2241                                     msecs_to_jiffies(msecs));
2242 }
2243 EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
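
/*
 * Hedged usage sketch (illustrative only): a driver that could not make
 * progress in ->queue_rq() and has no completion event to re-trigger
 * dispatch may poke the queue again after a short delay from the context
 * where the shortage was detected.  The helper name is an assumption for
 * the example; the 3ms delay mirrors BLK_MQ_RESOURCE_DELAY used by the
 * dispatch code above.
 */
#if 0	/* example only */
static void example_kick_hctx_later(struct blk_mq_hw_ctx *hctx)
{
        blk_mq_delay_run_hw_queue(hctx, 3);
}
#endif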
2244
2245 /**
2246  * blk_mq_run_hw_queue - Start to run a hardware queue.
2247  * @hctx: Pointer to the hardware queue to run.
2248  * @async: If we want to run the queue asynchronously.
2249  *
2250  * Check if the request queue is not in a quiesced state and if there are
2251  * pending requests to be sent. If this is true, run the queue to send requests
2252  * to hardware.
2253  */
2254 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
2255 {
2256         bool need_run;
2257
2258         /*
2259          * We can't run the queue inline with interrupts disabled.
2260          */
2261         WARN_ON_ONCE(!async && in_interrupt());
2262
2263         might_sleep_if(!async && hctx->flags & BLK_MQ_F_BLOCKING);
2264
2265         /*
2266          * When the queue is quiesced, we may be switching the io scheduler,
2267          * updating nr_hw_queues, or doing other things, and the queue can't be
2268          * run any more; even __blk_mq_hctx_has_pending() can't be called safely.
2269          *
2270          * The queue will be rerun in blk_mq_unquiesce_queue() if it is
2271          * quiesced.
2272          */
2273         __blk_mq_run_dispatch_ops(hctx->queue, false,
2274                 need_run = !blk_queue_quiesced(hctx->queue) &&
2275                 blk_mq_hctx_has_pending(hctx));
2276
2277         if (!need_run)
2278                 return;
2279
2280         if (async || !cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)) {
2281                 blk_mq_delay_run_hw_queue(hctx, 0);
2282                 return;
2283         }
2284
2285         blk_mq_run_dispatch_ops(hctx->queue,
2286                                 blk_mq_sched_dispatch_requests(hctx));
2287 }
2288 EXPORT_SYMBOL(blk_mq_run_hw_queue);
2289
2290 /*
2291  * Return the preferred queue to dispatch from (if any) for a non-mq-aware
2292  * IO scheduler.
2293  */
2294 static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q)
2295 {
2296         struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
2297         /*
2298          * If the IO scheduler does not respect hardware queues when
2299          * dispatching, we just don't bother with multiple HW queues and
2300          * dispatch from the hctx for the current CPU, since running multiple queues
2301          * just causes lock contention inside the scheduler and pointless cache
2302          * bouncing.
2303          */
2304         struct blk_mq_hw_ctx *hctx = ctx->hctxs[HCTX_TYPE_DEFAULT];
2305
2306         if (!blk_mq_hctx_stopped(hctx))
2307                 return hctx;
2308         return NULL;
2309 }
2310
2311 /**
2312  * blk_mq_run_hw_queues - Run all hardware queues in a request queue.
2313  * @q: Pointer to the request queue to run.
2314  * @async: If we want to run the queue asynchronously.
2315  */
2316 void blk_mq_run_hw_queues(struct request_queue *q, bool async)
2317 {
2318         struct blk_mq_hw_ctx *hctx, *sq_hctx;
2319         unsigned long i;
2320
2321         sq_hctx = NULL;
2322         if (blk_queue_sq_sched(q))
2323                 sq_hctx = blk_mq_get_sq_hctx(q);
2324         queue_for_each_hw_ctx(q, hctx, i) {
2325                 if (blk_mq_hctx_stopped(hctx))
2326                         continue;
2327                 /*
2328                  * Dispatch from this hctx either if there's no hctx preferred
2329                  * by IO scheduler or if it has requests that bypass the
2330                  * scheduler.
2331                  */
2332                 if (!sq_hctx || sq_hctx == hctx ||
2333                     !list_empty_careful(&hctx->dispatch))
2334                         blk_mq_run_hw_queue(hctx, async);
2335         }
2336 }
2337 EXPORT_SYMBOL(blk_mq_run_hw_queues);
2338
2339 /**
2340  * blk_mq_delay_run_hw_queues - Run all hardware queues asynchronously.
2341  * @q: Pointer to the request queue to run.
2342  * @msecs: Milliseconds of delay to wait before running the queues.
2343  */
2344 void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs)
2345 {
2346         struct blk_mq_hw_ctx *hctx, *sq_hctx;
2347         unsigned long i;
2348
2349         sq_hctx = NULL;
2350         if (blk_queue_sq_sched(q))
2351                 sq_hctx = blk_mq_get_sq_hctx(q);
2352         queue_for_each_hw_ctx(q, hctx, i) {
2353                 if (blk_mq_hctx_stopped(hctx))
2354                         continue;
2355                 /*
2356                  * If there is already a run_work pending, leave the
2357                  * pending delay untouched. Otherwise, a hctx can stall
2358                  * if another hctx is re-delaying the other's work
2359                  * before the work executes.
2360                  */
2361                 if (delayed_work_pending(&hctx->run_work))
2362                         continue;
2363                 /*
2364                  * Dispatch from this hctx either if there's no hctx preferred
2365                  * by IO scheduler or if it has requests that bypass the
2366                  * scheduler.
2367                  */
2368                 if (!sq_hctx || sq_hctx == hctx ||
2369                     !list_empty_careful(&hctx->dispatch))
2370                         blk_mq_delay_run_hw_queue(hctx, msecs);
2371         }
2372 }
2373 EXPORT_SYMBOL(blk_mq_delay_run_hw_queues);
2374
2375 /*
2376  * This function is often used by drivers to pause .queue_rq() when there
2377  * aren't enough resources or some condition isn't satisfied, in which
2378  * case BLK_STS_RESOURCE is usually returned.
2379  *
2380  * We do not guarantee that dispatch can be drained or blocked
2381  * after blk_mq_stop_hw_queue() returns. Please use
2382  * blk_mq_quiesce_queue() for that requirement.
2383  */
2384 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
2385 {
2386         cancel_delayed_work(&hctx->run_work);
2387
2388         set_bit(BLK_MQ_S_STOPPED, &hctx->state);
2389 }
2390 EXPORT_SYMBOL(blk_mq_stop_hw_queue);
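
/*
 * Hedged driver-side sketch (illustrative only) of the stop/start pairing
 * described above: ->queue_rq() stops the hw queue when the device ring is
 * full and returns BLK_STS_DEV_RESOURCE (since the driver guarantees the
 * restart below), and the completion path restarts any stopped queues once
 * space frees up.  The ring-full/post helpers are hypothetical.
 */
#if 0	/* example only */
static blk_status_t example_queue_rq_full(struct blk_mq_hw_ctx *hctx,
                                          const struct blk_mq_queue_data *bd)
{
        if (example_ring_full(hctx->driver_data)) {
                blk_mq_stop_hw_queue(hctx);
                return BLK_STS_DEV_RESOURCE;
        }
        example_post_to_ring(hctx->driver_data, bd->rq);
        return BLK_STS_OK;
}

static void example_complete_irq(struct request_queue *q)
{
        /* Space is available again; restart queues we stopped earlier. */
        blk_mq_start_stopped_hw_queues(q, true);
}
#endif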
2391
2392 /*
2393  * This function is often used by drivers to pause .queue_rq() when there
2394  * aren't enough resources or some condition isn't satisfied, in which
2395  * case BLK_STS_RESOURCE is usually returned.
2396  *
2397  * We do not guarantee that dispatch can be drained or blocked
2398  * after blk_mq_stop_hw_queues() returns. Please use
2399  * blk_mq_quiesce_queue() for that requirement.
2400  */
2401 void blk_mq_stop_hw_queues(struct request_queue *q)
2402 {
2403         struct blk_mq_hw_ctx *hctx;
2404         unsigned long i;
2405
2406         queue_for_each_hw_ctx(q, hctx, i)
2407                 blk_mq_stop_hw_queue(hctx);
2408 }
2409 EXPORT_SYMBOL(blk_mq_stop_hw_queues);
2410
2411 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
2412 {
2413         clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
2414
2415         blk_mq_run_hw_queue(hctx, hctx->flags & BLK_MQ_F_BLOCKING);
2416 }
2417 EXPORT_SYMBOL(blk_mq_start_hw_queue);
2418
2419 void blk_mq_start_hw_queues(struct request_queue *q)
2420 {
2421         struct blk_mq_hw_ctx *hctx;
2422         unsigned long i;
2423
2424         queue_for_each_hw_ctx(q, hctx, i)
2425                 blk_mq_start_hw_queue(hctx);
2426 }
2427 EXPORT_SYMBOL(blk_mq_start_hw_queues);
2428
2429 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
2430 {
2431         if (!blk_mq_hctx_stopped(hctx))
2432                 return;
2433
2434         clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
2435         blk_mq_run_hw_queue(hctx, async);
2436 }
2437 EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);
2438
2439 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
2440 {
2441         struct blk_mq_hw_ctx *hctx;
2442         unsigned long i;
2443
2444         queue_for_each_hw_ctx(q, hctx, i)
2445                 blk_mq_start_stopped_hw_queue(hctx, async ||
2446                                         (hctx->flags & BLK_MQ_F_BLOCKING));
2447 }
2448 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
2449
2450 static void blk_mq_run_work_fn(struct work_struct *work)
2451 {
2452         struct blk_mq_hw_ctx *hctx =
2453                 container_of(work, struct blk_mq_hw_ctx, run_work.work);
2454
2455         blk_mq_run_dispatch_ops(hctx->queue,
2456                                 blk_mq_sched_dispatch_requests(hctx));
2457 }
2458
2459 /**
2460  * blk_mq_request_bypass_insert - Insert a request into the dispatch list.
2461  * @rq: Pointer to request to be inserted.
2462  * @flags: BLK_MQ_INSERT_*
2463  *
2464  * Should only be used carefully, when the caller knows we want to
2465  * bypass a potential IO scheduler on the target device.
2466  */
2467 static void blk_mq_request_bypass_insert(struct request *rq, blk_insert_t flags)
2468 {
2469         struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
2470
2471         spin_lock(&hctx->lock);
2472         if (flags & BLK_MQ_INSERT_AT_HEAD)
2473                 list_add(&rq->queuelist, &hctx->dispatch);
2474         else
2475                 list_add_tail(&rq->queuelist, &hctx->dispatch);
2476         spin_unlock(&hctx->lock);
2477 }
2478
2479 static void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx,
2480                 struct blk_mq_ctx *ctx, struct list_head *list,
2481                 bool run_queue_async)
2482 {
2483         struct request *rq;
2484         enum hctx_type type = hctx->type;
2485
2486         /*
2487          * Try to issue requests directly if the hw queue isn't busy to save an
2488          * extra enqueue & dequeue to the sw queue.
2489          */
2490         if (!hctx->dispatch_busy && !run_queue_async) {
2491                 blk_mq_run_dispatch_ops(hctx->queue,
2492                         blk_mq_try_issue_list_directly(hctx, list));
2493                 if (list_empty(list))
2494                         goto out;
2495         }
2496
2497         /*
2498          * preemption doesn't flush plug list, so it's possible ctx->cpu is
2499          * offline now
2500          */
2501         list_for_each_entry(rq, list, queuelist) {
2502                 BUG_ON(rq->mq_ctx != ctx);
2503                 trace_block_rq_insert(rq);
2504                 if (rq->cmd_flags & REQ_NOWAIT)
2505                         run_queue_async = true;
2506         }
2507
2508         spin_lock(&ctx->lock);
2509         list_splice_tail_init(list, &ctx->rq_lists[type]);
2510         blk_mq_hctx_mark_pending(hctx, ctx);
2511         spin_unlock(&ctx->lock);
2512 out:
2513         blk_mq_run_hw_queue(hctx, run_queue_async);
2514 }
2515
2516 static void blk_mq_insert_request(struct request *rq, blk_insert_t flags)
2517 {
2518         struct request_queue *q = rq->q;
2519         struct blk_mq_ctx *ctx = rq->mq_ctx;
2520         struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
2521
2522         if (blk_rq_is_passthrough(rq)) {
2523                 /*
2524                  * Passthrough requests have to be added to hctx->dispatch
2525                  * directly.  The device may be in a situation where it can't
2526                  * handle FS requests and always returns BLK_STS_RESOURCE for
2527                  * them, which gets them added to hctx->dispatch.
2528                  *
2529                  * If a passthrough request is required to unblock the queues
2530                  * and it is added to the scheduler queue instead, there is no
2531                  * chance to dispatch it, given we prioritize requests in hctx->dispatch.
2532                  */
2533                 blk_mq_request_bypass_insert(rq, flags);
2534         } else if (req_op(rq) == REQ_OP_FLUSH) {
2535                 /*
2536                  * Firstly, normal IO requests are inserted into the scheduler
2537                  * queue or sw queue, while flush requests are added directly to
2538                  * the dispatch queue (hctx->dispatch).  Since there is at most
2539                  * one in-flight flush request per hw queue, it doesn't matter
2540                  * whether the flush request goes to the head or the tail of the
2541                  * dispatch queue.
2542                  *
2543                  * Secondly, in the NCQ case a flush request is a non-NCQ command
2544                  * and queueing it fails while any normal IO request (NCQ command)
2545                  * is in flight.  Adding the flush rq to the front of hctx->dispatch
2546                  * makes it more likely that S_SCHED_RESTART adds extra time to the
2547                  * flush rq's latency compared with adding it to the tail; that wait
2548                  * increases the chance of a flush merge, so fewer flush requests are
2549                  * issued to the controller.  It is observed that ~10% of the time is
2550                  * saved in blktests block/004 on a disk attached to an AHCI/NCQ
2551                  * drive when the flush rq is added to the front of hctx->dispatch.
2552                  *
2553                  * Simply queue the flush rq at the front of hctx->dispatch so that
2554                  * flush-intensive workloads can benefit in the NCQ case.
2555                  */
2556                 blk_mq_request_bypass_insert(rq, BLK_MQ_INSERT_AT_HEAD);
2557         } else if (q->elevator) {
2558                 LIST_HEAD(list);
2559
2560                 WARN_ON_ONCE(rq->tag != BLK_MQ_NO_TAG);
2561
2562                 list_add(&rq->queuelist, &list);
2563                 q->elevator->type->ops.insert_requests(hctx, &list, flags);
2564         } else {
2565                 trace_block_rq_insert(rq);
2566
2567                 spin_lock(&ctx->lock);
2568                 if (flags & BLK_MQ_INSERT_AT_HEAD)
2569                         list_add(&rq->queuelist, &ctx->rq_lists[hctx->type]);
2570                 else
2571                         list_add_tail(&rq->queuelist,
2572                                       &ctx->rq_lists[hctx->type]);
2573                 blk_mq_hctx_mark_pending(hctx, ctx);
2574                 spin_unlock(&ctx->lock);
2575         }
2576 }
2577
2578 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
2579                 unsigned int nr_segs)
2580 {
2581         int err;
2582
2583         if (bio->bi_opf & REQ_RAHEAD)
2584                 rq->cmd_flags |= REQ_FAILFAST_MASK;
2585
2586         rq->__sector = bio->bi_iter.bi_sector;
2587         blk_rq_bio_prep(rq, bio, nr_segs);
2588
2589         /* This can't fail, since GFP_NOIO includes __GFP_DIRECT_RECLAIM. */
2590         err = blk_crypto_rq_bio_prep(rq, bio, GFP_NOIO);
2591         WARN_ON_ONCE(err);
2592
2593         blk_account_io_start(rq);
2594 }
2595
2596 static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
2597                                             struct request *rq, bool last)
2598 {
2599         struct request_queue *q = rq->q;
2600         struct blk_mq_queue_data bd = {
2601                 .rq = rq,
2602                 .last = last,
2603         };
2604         blk_status_t ret;
2605
2606         /*
2607          * If queueing is OK, we are done. On error, the caller may kill
2608          * the request. For any other result (busy), just add it back to
2609          * our list as we previously would have done.
2610          */
2611         ret = q->mq_ops->queue_rq(hctx, &bd);
2612         switch (ret) {
2613         case BLK_STS_OK:
2614                 blk_mq_update_dispatch_busy(hctx, false);
2615                 break;
2616         case BLK_STS_RESOURCE:
2617         case BLK_STS_DEV_RESOURCE:
2618                 blk_mq_update_dispatch_busy(hctx, true);
2619                 __blk_mq_requeue_request(rq);
2620                 break;
2621         default:
2622                 blk_mq_update_dispatch_busy(hctx, false);
2623                 break;
2624         }
2625
2626         return ret;
2627 }
2628
2629 static bool blk_mq_get_budget_and_tag(struct request *rq)
2630 {
2631         int budget_token;
2632
2633         budget_token = blk_mq_get_dispatch_budget(rq->q);
2634         if (budget_token < 0)
2635                 return false;
2636         blk_mq_set_rq_budget_token(rq, budget_token);
2637         if (!blk_mq_get_driver_tag(rq)) {
2638                 blk_mq_put_dispatch_budget(rq->q, budget_token);
2639                 return false;
2640         }
2641         return true;
2642 }
2643
2644 /**
2645  * blk_mq_try_issue_directly - Try to send a request directly to device driver.
2646  * @hctx: Pointer of the associated hardware queue.
2647  * @rq: Pointer to request to be sent.
2648  *
2649  * If the device has enough resources to accept a new request now, send the
2650  * request directly to the device driver. Else, insert it into the
2651  * hctx->dispatch queue so we can try to send it again in the future.
2652  * Requests inserted into this queue have higher priority.
2653  */
2654 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
2655                 struct request *rq)
2656 {
2657         blk_status_t ret;
2658
2659         if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) {
2660                 blk_mq_insert_request(rq, 0);
2661                 return;
2662         }
2663
2664         if ((rq->rq_flags & RQF_USE_SCHED) || !blk_mq_get_budget_and_tag(rq)) {
2665                 blk_mq_insert_request(rq, 0);
2666                 blk_mq_run_hw_queue(hctx, rq->cmd_flags & REQ_NOWAIT);
2667                 return;
2668         }
2669
2670         ret = __blk_mq_issue_directly(hctx, rq, true);
2671         switch (ret) {
2672         case BLK_STS_OK:
2673                 break;
2674         case BLK_STS_RESOURCE:
2675         case BLK_STS_DEV_RESOURCE:
2676                 blk_mq_request_bypass_insert(rq, 0);
2677                 blk_mq_run_hw_queue(hctx, false);
2678                 break;
2679         default:
2680                 blk_mq_end_request(rq, ret);
2681                 break;
2682         }
2683 }
2684
2685 static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
2686 {
2687         struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
2688
2689         if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) {
2690                 blk_mq_insert_request(rq, 0);
2691                 return BLK_STS_OK;
2692         }
2693
2694         if (!blk_mq_get_budget_and_tag(rq))
2695                 return BLK_STS_RESOURCE;
2696         return __blk_mq_issue_directly(hctx, rq, last);
2697 }
2698
2699 static void blk_mq_plug_issue_direct(struct blk_plug *plug)
2700 {
2701         struct blk_mq_hw_ctx *hctx = NULL;
2702         struct request *rq;
2703         int queued = 0;
2704         blk_status_t ret = BLK_STS_OK;
2705
2706         while ((rq = rq_list_pop(&plug->mq_list))) {
2707                 bool last = rq_list_empty(plug->mq_list);
2708
2709                 if (hctx != rq->mq_hctx) {
2710                         if (hctx) {
2711                                 blk_mq_commit_rqs(hctx, queued, false);
2712                                 queued = 0;
2713                         }
2714                         hctx = rq->mq_hctx;
2715                 }
2716
2717                 ret = blk_mq_request_issue_directly(rq, last);
2718                 switch (ret) {
2719                 case BLK_STS_OK:
2720                         queued++;
2721                         break;
2722                 case BLK_STS_RESOURCE:
2723                 case BLK_STS_DEV_RESOURCE:
2724                         blk_mq_request_bypass_insert(rq, 0);
2725                         blk_mq_run_hw_queue(hctx, false);
2726                         goto out;
2727                 default:
2728                         blk_mq_end_request(rq, ret);
2729                         break;
2730                 }
2731         }
2732
2733 out:
2734         if (ret != BLK_STS_OK)
2735                 blk_mq_commit_rqs(hctx, queued, false);
2736 }
2737
2738 static void __blk_mq_flush_plug_list(struct request_queue *q,
2739                                      struct blk_plug *plug)
2740 {
2741         if (blk_queue_quiesced(q))
2742                 return;
2743         q->mq_ops->queue_rqs(&plug->mq_list);
2744 }
2745
2746 static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
2747 {
2748         struct blk_mq_hw_ctx *this_hctx = NULL;
2749         struct blk_mq_ctx *this_ctx = NULL;
2750         struct request *requeue_list = NULL;
2751         struct request **requeue_lastp = &requeue_list;
2752         unsigned int depth = 0;
2753         bool is_passthrough = false;
2754         LIST_HEAD(list);
2755
2756         do {
2757                 struct request *rq = rq_list_pop(&plug->mq_list);
2758
2759                 if (!this_hctx) {
2760                         this_hctx = rq->mq_hctx;
2761                         this_ctx = rq->mq_ctx;
2762                         is_passthrough = blk_rq_is_passthrough(rq);
2763                 } else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx ||
2764                            is_passthrough != blk_rq_is_passthrough(rq)) {
2765                         rq_list_add_tail(&requeue_lastp, rq);
2766                         continue;
2767                 }
2768                 list_add(&rq->queuelist, &list);
2769                 depth++;
2770         } while (!rq_list_empty(plug->mq_list));
2771
2772         plug->mq_list = requeue_list;
2773         trace_block_unplug(this_hctx->queue, depth, !from_sched);
2774
2775         percpu_ref_get(&this_hctx->queue->q_usage_counter);
2776         /* passthrough requests should never be issued to the I/O scheduler */
2777         if (is_passthrough) {
2778                 spin_lock(&this_hctx->lock);
2779                 list_splice_tail_init(&list, &this_hctx->dispatch);
2780                 spin_unlock(&this_hctx->lock);
2781                 blk_mq_run_hw_queue(this_hctx, from_sched);
2782         } else if (this_hctx->queue->elevator) {
2783                 this_hctx->queue->elevator->type->ops.insert_requests(this_hctx,
2784                                 &list, 0);
2785                 blk_mq_run_hw_queue(this_hctx, from_sched);
2786         } else {
2787                 blk_mq_insert_requests(this_hctx, this_ctx, &list, from_sched);
2788         }
2789         percpu_ref_put(&this_hctx->queue->q_usage_counter);
2790 }
2791
2792 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
2793 {
2794         struct request *rq;
2795
2796         /*
2797          * We may have been called recursively midway through handling
2798          * plug->mq_list via a schedule() in the driver's queue_rq() callback.
2799          * To avoid mq_list changing under our feet, clear rq_count early and
2800          * bail out specifically if rq_count is 0 rather than checking
2801          * whether the mq_list is empty.
2802          */
2803         if (plug->rq_count == 0)
2804                 return;
2805         plug->rq_count = 0;
2806
2807         if (!plug->multiple_queues && !plug->has_elevator && !from_schedule) {
2808                 struct request_queue *q;
2809
2810                 rq = rq_list_peek(&plug->mq_list);
2811                 q = rq->q;
2812
2813                 /*
2814                  * Peek at the first request and see if we have a ->queue_rqs()
2815                  * hook. If we do, we can dispatch the whole plug list in one go.
2816                  * We already know at this point that all requests belong to the
2817                  * same queue; the caller must ensure that's the case.
2818                  */
2819                 if (q->mq_ops->queue_rqs) {
2820                         blk_mq_run_dispatch_ops(q,
2821                                 __blk_mq_flush_plug_list(q, plug));
2822                         if (rq_list_empty(plug->mq_list))
2823                                 return;
2824                 }
2825
2826                 blk_mq_run_dispatch_ops(q,
2827                                 blk_mq_plug_issue_direct(plug));
2828                 if (rq_list_empty(plug->mq_list))
2829                         return;
2830         }
2831
2832         do {
2833                 blk_mq_dispatch_plug_list(plug, from_schedule);
2834         } while (!rq_list_empty(plug->mq_list));
2835 }
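
/*
 * Hedged sketch (illustrative only) of a driver-side ->queue_rqs()
 * implementation matching the batch path above: pop each request off the
 * plug list, post it, and ring the doorbell once when the list drains.
 * For brevity this assumes all requests map to one hardware queue; the
 * post/doorbell helpers are hypothetical.
 */
#if 0	/* example only */
static void example_queue_rqs(struct request **rqlist)
{
        struct request *rq;

        while ((rq = rq_list_pop(rqlist))) {
                example_post_to_ring(rq->mq_hctx->driver_data, rq);
                if (rq_list_empty(*rqlist))
                        example_ring_doorbell(rq->mq_hctx->driver_data);
        }
}
#endif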
2836
2837 static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
2838                 struct list_head *list)
2839 {
2840         int queued = 0;
2841         blk_status_t ret = BLK_STS_OK;
2842
2843         while (!list_empty(list)) {
2844                 struct request *rq = list_first_entry(list, struct request,
2845                                 queuelist);
2846
2847                 list_del_init(&rq->queuelist);
2848                 ret = blk_mq_request_issue_directly(rq, list_empty(list));
2849                 switch (ret) {
2850                 case BLK_STS_OK:
2851                         queued++;
2852                         break;
2853                 case BLK_STS_RESOURCE:
2854                 case BLK_STS_DEV_RESOURCE:
2855                         blk_mq_request_bypass_insert(rq, 0);
2856                         if (list_empty(list))
2857                                 blk_mq_run_hw_queue(hctx, false);
2858                         goto out;
2859                 default:
2860                         blk_mq_end_request(rq, ret);
2861                         break;
2862                 }
2863         }
2864
2865 out:
2866         if (ret != BLK_STS_OK)
2867                 blk_mq_commit_rqs(hctx, queued, false);
2868 }
2869
2870 static bool blk_mq_attempt_bio_merge(struct request_queue *q,
2871                                      struct bio *bio, unsigned int nr_segs)
2872 {
2873         if (!blk_queue_nomerges(q) && bio_mergeable(bio)) {
2874                 if (blk_attempt_plug_merge(q, bio, nr_segs))
2875                         return true;
2876                 if (blk_mq_sched_bio_merge(q, bio, nr_segs))
2877                         return true;
2878         }
2879         return false;
2880 }
2881
2882 static struct request *blk_mq_get_new_requests(struct request_queue *q,
2883                                                struct blk_plug *plug,
2884                                                struct bio *bio,
2885                                                unsigned int nsegs)
2886 {
2887         struct blk_mq_alloc_data data = {
2888                 .q              = q,
2889                 .nr_tags        = 1,
2890                 .cmd_flags      = bio->bi_opf,
2891         };
2892         struct request *rq;
2893
2894         rq_qos_throttle(q, bio);
2895
2896         if (plug) {
2897                 data.nr_tags = plug->nr_ios;
2898                 plug->nr_ios = 1;
2899                 data.cached_rq = &plug->cached_rq;
2900         }
2901
2902         rq = __blk_mq_alloc_requests(&data);
2903         if (rq)
2904                 return rq;
2905         rq_qos_cleanup(q, bio);
2906         if (bio->bi_opf & REQ_NOWAIT)
2907                 bio_wouldblock_error(bio);
2908         return NULL;
2909 }
2910
2911 /*
2912  * Check if there is a suitable cached request and return it.
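 *
 * The request is not removed from plug->cached_rq here; the caller is
 * expected to finish any bio-level work that may still fail or consume the
 * bio (splitting, integrity prep, merge attempts) and only then take
 * ownership of the request via blk_mq_use_cached_rq(), as
 * blk_mq_submit_bio() does below.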
2913  */
2914 static struct request *blk_mq_peek_cached_request(struct blk_plug *plug,
2915                 struct request_queue *q, blk_opf_t opf)
2916 {
2917         enum hctx_type type = blk_mq_get_hctx_type(opf);
2918         struct request *rq;
2919
2920         if (!plug)
2921                 return NULL;
2922         rq = rq_list_peek(&plug->cached_rq);
2923         if (!rq || rq->q != q)
2924                 return NULL;
2925         if (type != rq->mq_hctx->type &&
2926             (type != HCTX_TYPE_READ || rq->mq_hctx->type != HCTX_TYPE_DEFAULT))
2927                 return NULL;
2928         if (op_is_flush(rq->cmd_flags) != op_is_flush(opf))
2929                 return NULL;
2930         return rq;
2931 }
2932
2933 static void blk_mq_use_cached_rq(struct request *rq, struct blk_plug *plug,
2934                 struct bio *bio)
2935 {
2936         WARN_ON_ONCE(rq_list_peek(&plug->cached_rq) != rq);
2937
2938         /*
2939          * If any qos ->throttle() ends up blocking, we will have flushed the
2940          * plug and hence killed the cached_rq list as well. Pop this entry
2941          * before we throttle.
2942          */
2943         plug->cached_rq = rq_list_next(rq);
2944         rq_qos_throttle(rq->q, bio);
2945
2946         blk_mq_rq_time_init(rq, 0);
2947         rq->cmd_flags = bio->bi_opf;
2948         INIT_LIST_HEAD(&rq->queuelist);
2949 }
2950
2951 /**
2952  * blk_mq_submit_bio - Create and send a request to block device.
2953  * @bio: Bio pointer.
2954  *
2955  * Builds up a request structure from @q and @bio and sends it to the device. The
2956  * request may not be queued directly to hardware if:
2957  * * This request can be merged with another one
2958  * * We want to place request at plug queue for possible future merging
2959  * * There is an IO scheduler active at this queue
2960  *
2961  * It will not queue the request if there is an error with the bio or during
2962  * request creation.
2963  */
2964 void blk_mq_submit_bio(struct bio *bio)
2965 {
2966         struct request_queue *q = bdev_get_queue(bio->bi_bdev);
2967         struct blk_plug *plug = blk_mq_plug(bio);
2968         const int is_sync = op_is_sync(bio->bi_opf);
2969         struct blk_mq_hw_ctx *hctx;
2970         struct request *rq = NULL;
2971         unsigned int nr_segs = 1;
2972         blk_status_t ret;
2973
2974         bio = blk_queue_bounce(bio, q);
2975
2976         rq = blk_mq_peek_cached_request(plug, q, bio->bi_opf);
2977         if (rq) {
2978                 if (unlikely(bio_may_exceed_limits(bio, &q->limits))) {
2979                         bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
2980                         if (!bio)
2981                                 return;
2982                 }
2983                 if (!bio_integrity_prep(bio))
2984                         return;
2985                 if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
2986                         return;
2987                 blk_mq_use_cached_rq(rq, plug, bio);
2988                 goto done;
2989         }
2990
2991         if (unlikely(bio_queue_enter(bio)))
2992                 return;
2993         if (unlikely(bio_may_exceed_limits(bio, &q->limits))) {
2994                 bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
2995                 if (!bio)
2996                         goto queue_exit;
2997         }
2998         if (!bio_integrity_prep(bio))
2999                 goto queue_exit;
3000
3001         if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
3002                 goto queue_exit;
3003
3004         rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
3005         if (unlikely(!rq))
3006                 goto queue_exit;
3007
3008 done:
3009         trace_block_getrq(bio);
3010
3011         rq_qos_track(q, rq, bio);
3012
3013         blk_mq_bio_to_request(rq, bio, nr_segs);
3014
3015         ret = blk_crypto_rq_get_keyslot(rq);
3016         if (ret != BLK_STS_OK) {
3017                 bio->bi_status = ret;
3018                 bio_endio(bio);
3019                 blk_mq_free_request(rq);
3020                 return;
3021         }
3022
3023         if (op_is_flush(bio->bi_opf) && blk_insert_flush(rq))
3024                 return;
3025
3026         if (plug) {
3027                 blk_add_rq_to_plug(plug, rq);
3028                 return;
3029         }
3030
3031         hctx = rq->mq_hctx;
3032         if ((rq->rq_flags & RQF_USE_SCHED) ||
3033             (hctx->dispatch_busy && (q->nr_hw_queues == 1 || !is_sync))) {
3034                 blk_mq_insert_request(rq, 0);
3035                 blk_mq_run_hw_queue(hctx, true);
3036         } else {
3037                 blk_mq_run_dispatch_ops(q, blk_mq_try_issue_directly(hctx, rq));
3038         }
3039         return;
3040
3041 queue_exit:
3042         blk_queue_exit(q);
3043 }
3044
3045 #ifdef CONFIG_BLK_MQ_STACKING
3046 /**
3047  * blk_insert_cloned_request - Helper for stacking drivers to submit a request
3048  * @rq: the request being queued
3049  */
3050 blk_status_t blk_insert_cloned_request(struct request *rq)
3051 {
3052         struct request_queue *q = rq->q;
3053         unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq));
3054         unsigned int max_segments = blk_rq_get_max_segments(rq);
3055         blk_status_t ret;
3056
3057         if (blk_rq_sectors(rq) > max_sectors) {
3058                 /*
3059                  * A SCSI device has no good way to report whether
3060                  * Write Same/Zero is actually supported. If a device rejects
3061                  * a non-read/write command (discard, write same, etc.) the
3062                  * low-level device driver will set the relevant queue limit to
3063                  * 0 to prevent blk-lib from issuing more of the offending
3064                  * operations. Commands queued prior to the queue limit being
3065                  * reset need to be completed with BLK_STS_NOTSUPP to avoid I/O
3066                  * errors being propagated to upper layers.
3067                  */
3068                 if (max_sectors == 0)
3069                         return BLK_STS_NOTSUPP;
3070
3071                 printk(KERN_ERR "%s: over max size limit. (%u > %u)\n",
3072                         __func__, blk_rq_sectors(rq), max_sectors);
3073                 return BLK_STS_IOERR;
3074         }
3075
3076         /*
3077          * The queue settings related to segment counting may differ from the
3078          * original queue.
3079          */
3080         rq->nr_phys_segments = blk_recalc_rq_segments(rq);
3081         if (rq->nr_phys_segments > max_segments) {
3082                 printk(KERN_ERR "%s: over max segments limit. (%u > %u)\n",
3083                         __func__, rq->nr_phys_segments, max_segments);
3084                 return BLK_STS_IOERR;
3085         }
3086
3087         if (q->disk && should_fail_request(q->disk->part0, blk_rq_bytes(rq)))
3088                 return BLK_STS_IOERR;
3089
3090         ret = blk_crypto_rq_get_keyslot(rq);
3091         if (ret != BLK_STS_OK)
3092                 return ret;
3093
3094         blk_account_io_start(rq);
3095
3096         /*
3097          * Since we have a scheduler attached to the top device,
3098          * bypass a potential scheduler on the bottom device for
3099          * insert.
3100          */
3101         blk_mq_run_dispatch_ops(q,
3102                         ret = blk_mq_request_issue_directly(rq, true));
3103         if (ret)
3104                 blk_account_io_done(rq, ktime_get_ns());
3105         return ret;
3106 }
3107 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
3108
3109 /**
3110  * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
3111  * @rq: the clone request to be cleaned up
3112  *
3113  * Description:
3114  *     Free all bios in @rq for a cloned request.
3115  */
3116 void blk_rq_unprep_clone(struct request *rq)
3117 {
3118         struct bio *bio;
3119
3120         while ((bio = rq->bio) != NULL) {
3121                 rq->bio = bio->bi_next;
3122
3123                 bio_put(bio);
3124         }
3125 }
3126 EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
3127
3128 /**
3129  * blk_rq_prep_clone - Helper function to setup clone request
3130  * @rq: the request to be setup
3131  * @rq_src: original request to be cloned
3132  * @bs: bio_set that bios for clone are allocated from
3133  * @gfp_mask: memory allocation mask for bio
3134  * @bio_ctr: setup function to be called for each clone bio.
3135  *           Returns %0 for success, non %0 for failure.
3136  * @data: private data to be passed to @bio_ctr
3137  *
3138  * Description:
3139  *     Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
3140  *     Also, pages which the original bios are pointing to are not copied
3141  *     and the cloned bios just point to the same pages.
3142  *     So cloned bios must be completed before original bios, which means
3143  *     the caller must complete @rq before @rq_src.
3144  */
3145 int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
3146                       struct bio_set *bs, gfp_t gfp_mask,
3147                       int (*bio_ctr)(struct bio *, struct bio *, void *),
3148                       void *data)
3149 {
3150         struct bio *bio, *bio_src;
3151
3152         if (!bs)
3153                 bs = &fs_bio_set;
3154
3155         __rq_for_each_bio(bio_src, rq_src) {
3156                 bio = bio_alloc_clone(rq->q->disk->part0, bio_src, gfp_mask,
3157                                       bs);
3158                 if (!bio)
3159                         goto free_and_out;
3160
3161                 if (bio_ctr && bio_ctr(bio, bio_src, data))
3162                         goto free_and_out;
3163
3164                 if (rq->bio) {
3165                         rq->biotail->bi_next = bio;
3166                         rq->biotail = bio;
3167                 } else {
3168                         rq->bio = rq->biotail = bio;
3169                 }
3170                 bio = NULL;
3171         }
3172
3173         /* Copy attributes of the original request to the clone request. */
3174         rq->__sector = blk_rq_pos(rq_src);
3175         rq->__data_len = blk_rq_bytes(rq_src);
3176         if (rq_src->rq_flags & RQF_SPECIAL_PAYLOAD) {
3177                 rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
3178                 rq->special_vec = rq_src->special_vec;
3179         }
3180         rq->nr_phys_segments = rq_src->nr_phys_segments;
3181         rq->ioprio = rq_src->ioprio;
3182
3183         if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0)
3184                 goto free_and_out;
3185
3186         return 0;
3187
3188 free_and_out:
3189         if (bio)
3190                 bio_put(bio);
3191         blk_rq_unprep_clone(rq);
3192
3193         return -ENOMEM;
3194 }
3195 EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
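/*
 * Illustrative sketch, not part of this file (identifiers such as "clone"
 * and "bs" stand for whatever the caller uses): a request-based stacking
 * driver typically clones an incoming request, submits the clone to the
 * lower device and releases the clone's bios on completion:
 *
 *	if (blk_rq_prep_clone(clone, rq, bs, GFP_ATOMIC, NULL, NULL))
 *		return BLK_STS_RESOURCE;
 *	ret = blk_insert_cloned_request(clone);
 *	...
 *	blk_rq_unprep_clone(clone);
 *
 * The request-based device-mapper path (drivers/md/dm-rq.c) uses these
 * helpers in this way.
 */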
3196 #endif /* CONFIG_BLK_MQ_STACKING */
3197
3198 /*
3199  * Steal bios from a request and add them to a bio list.
3200  * The request must not have been partially completed before.
3201  */
3202 void blk_steal_bios(struct bio_list *list, struct request *rq)
3203 {
3204         if (rq->bio) {
3205                 if (list->tail)
3206                         list->tail->bi_next = rq->bio;
3207                 else
3208                         list->head = rq->bio;
3209                 list->tail = rq->biotail;
3210
3211                 rq->bio = NULL;
3212                 rq->biotail = NULL;
3213         }
3214
3215         rq->__data_len = 0;
3216 }
3217 EXPORT_SYMBOL_GPL(blk_steal_bios);
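/*
 * Illustrative sketch, not part of this file: a caller that wants to retry
 * the I/O on another path can steal the bios, complete the request and
 * resubmit the bios later, roughly:
 *
 *	struct bio_list list;
 *	struct bio *bio;
 *
 *	bio_list_init(&list);
 *	blk_steal_bios(&list, rq);
 *	blk_mq_end_request(rq, BLK_STS_OK);
 *	while ((bio = bio_list_pop(&list)))
 *		submit_bio_noacct(bio);
 *
 * This mirrors how multipath-style failover (e.g. nvme) reuses the bios of
 * a failed request.
 */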
3218
3219 static size_t order_to_size(unsigned int order)
3220 {
3221         return (size_t)PAGE_SIZE << order;
3222 }
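/*
 * E.g. with 4K pages order_to_size(0) == 4K and order_to_size(4) == 64K;
 * blk_mq_alloc_rqs() below starts at order 4 and backs off to smaller
 * orders when memory is tight.
 */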
3223
3224 /* called before freeing request pool in @tags */
3225 static void blk_mq_clear_rq_mapping(struct blk_mq_tags *drv_tags,
3226                                     struct blk_mq_tags *tags)
3227 {
3228         struct page *page;
3229         unsigned long flags;
3230
3231         /*
3232          * There is no need to clear the mapping if driver tags are not
3233          * initialized or the mapping belongs to the driver tags.
3234          */
3235         if (!drv_tags || drv_tags == tags)
3236                 return;
3237
3238         list_for_each_entry(page, &tags->page_list, lru) {
3239                 unsigned long start = (unsigned long)page_address(page);
3240                 unsigned long end = start + order_to_size(page->private);
3241                 int i;
3242
3243                 for (i = 0; i < drv_tags->nr_tags; i++) {
3244                         struct request *rq = drv_tags->rqs[i];
3245                         unsigned long rq_addr = (unsigned long)rq;
3246
3247                         if (rq_addr >= start && rq_addr < end) {
3248                                 WARN_ON_ONCE(req_ref_read(rq) != 0);
3249                                 cmpxchg(&drv_tags->rqs[i], rq, NULL);
3250                         }
3251                 }
3252         }
3253
3254         /*
3255          * Wait until all pending iterations are done.
3256          *
3257          * Request reference is cleared and it is guaranteed to be observed
3258          * after the ->lock is released.
3259          */
3260         spin_lock_irqsave(&drv_tags->lock, flags);
3261         spin_unlock_irqrestore(&drv_tags->lock, flags);
3262 }
3263
3264 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
3265                      unsigned int hctx_idx)
3266 {
3267         struct blk_mq_tags *drv_tags;
3268         struct page *page;
3269
3270         if (list_empty(&tags->page_list))
3271                 return;
3272
3273         if (blk_mq_is_shared_tags(set->flags))
3274                 drv_tags = set->shared_tags;
3275         else
3276                 drv_tags = set->tags[hctx_idx];
3277
3278         if (tags->static_rqs && set->ops->exit_request) {
3279                 int i;
3280
3281                 for (i = 0; i < tags->nr_tags; i++) {
3282                         struct request *rq = tags->static_rqs[i];
3283
3284                         if (!rq)
3285                                 continue;
3286                         set->ops->exit_request(set, rq, hctx_idx);
3287                         tags->static_rqs[i] = NULL;
3288                 }
3289         }
3290
3291         blk_mq_clear_rq_mapping(drv_tags, tags);
3292
3293         while (!list_empty(&tags->page_list)) {
3294                 page = list_first_entry(&tags->page_list, struct page, lru);
3295                 list_del_init(&page->lru);
3296                 /*
3297                  * Remove kmemleak object previously allocated in
3298                  * blk_mq_alloc_rqs().
3299                  */
3300                 kmemleak_free(page_address(page));
3301                 __free_pages(page, page->private);
3302         }
3303 }
3304
3305 void blk_mq_free_rq_map(struct blk_mq_tags *tags)
3306 {
3307         kfree(tags->rqs);
3308         tags->rqs = NULL;
3309         kfree(tags->static_rqs);
3310         tags->static_rqs = NULL;
3311
3312         blk_mq_free_tags(tags);
3313 }
3314
3315 static enum hctx_type hctx_idx_to_type(struct blk_mq_tag_set *set,
3316                 unsigned int hctx_idx)
3317 {
3318         int i;
3319
3320         for (i = 0; i < set->nr_maps; i++) {
3321                 unsigned int start = set->map[i].queue_offset;
3322                 unsigned int end = start + set->map[i].nr_queues;
3323
3324                 if (hctx_idx >= start && hctx_idx < end)
3325                         break;
3326         }
3327
3328         if (i >= set->nr_maps)
3329                 i = HCTX_TYPE_DEFAULT;
3330
3331         return i;
3332 }
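/*
 * Worked example (illustrative layout): with three maps set up as
 * default = {offset 0, 4 queues}, read = {offset 4, 2 queues} and
 * poll = {offset 6, 2 queues}, hctx_idx 5 falls into the read range and
 * this returns HCTX_TYPE_READ; an index outside every range falls back
 * to HCTX_TYPE_DEFAULT.
 */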
3333
3334 static int blk_mq_get_hctx_node(struct blk_mq_tag_set *set,
3335                 unsigned int hctx_idx)
3336 {
3337         enum hctx_type type = hctx_idx_to_type(set, hctx_idx);
3338
3339         return blk_mq_hw_queue_to_node(&set->map[type], hctx_idx);
3340 }
3341
3342 static struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
3343                                                unsigned int hctx_idx,
3344                                                unsigned int nr_tags,
3345                                                unsigned int reserved_tags)
3346 {
3347         int node = blk_mq_get_hctx_node(set, hctx_idx);
3348         struct blk_mq_tags *tags;
3349
3350         if (node == NUMA_NO_NODE)
3351                 node = set->numa_node;
3352
3353         tags = blk_mq_init_tags(nr_tags, reserved_tags, node,
3354                                 BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
3355         if (!tags)
3356                 return NULL;
3357
3358         tags->rqs = kcalloc_node(nr_tags, sizeof(struct request *),
3359                                  GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
3360                                  node);
3361         if (!tags->rqs)
3362                 goto err_free_tags;
3363
3364         tags->static_rqs = kcalloc_node(nr_tags, sizeof(struct request *),
3365                                         GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
3366                                         node);
3367         if (!tags->static_rqs)
3368                 goto err_free_rqs;
3369
3370         return tags;
3371
3372 err_free_rqs:
3373         kfree(tags->rqs);
3374 err_free_tags:
3375         blk_mq_free_tags(tags);
3376         return NULL;
3377 }
3378
3379 static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
3380                                unsigned int hctx_idx, int node)
3381 {
3382         int ret;
3383
3384         if (set->ops->init_request) {
3385                 ret = set->ops->init_request(set, rq, hctx_idx, node);
3386                 if (ret)
3387                         return ret;
3388         }
3389
3390         WRITE_ONCE(rq->state, MQ_RQ_IDLE);
3391         return 0;
3392 }
3393
3394 static int blk_mq_alloc_rqs(struct blk_mq_tag_set *set,
3395                             struct blk_mq_tags *tags,
3396                             unsigned int hctx_idx, unsigned int depth)
3397 {
3398         unsigned int i, j, entries_per_page, max_order = 4;
3399         int node = blk_mq_get_hctx_node(set, hctx_idx);
3400         size_t rq_size, left;
3401
3402         if (node == NUMA_NO_NODE)
3403                 node = set->numa_node;
3404
3405         INIT_LIST_HEAD(&tags->page_list);
3406
3407         /*
3408          * rq_size is the size of the request plus driver payload, rounded
3409          * to the cacheline size
3410          */
3411         rq_size = round_up(sizeof(struct request) + set->cmd_size,
3412                                 cache_line_size());
3413         left = rq_size * depth;
3414
3415         for (i = 0; i < depth; ) {
3416                 int this_order = max_order;
3417                 struct page *page;
3418                 int to_do;
3419                 void *p;
3420
3421                 while (this_order && left < order_to_size(this_order - 1))
3422                         this_order--;
3423
3424                 do {
3425                         page = alloc_pages_node(node,
3426                                 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
3427                                 this_order);
3428                         if (page)
3429                                 break;
3430                         if (!this_order--)
3431                                 break;
3432                         if (order_to_size(this_order) < rq_size)
3433                                 break;
3434                 } while (1);
3435
3436                 if (!page)
3437                         goto fail;
3438
3439                 page->private = this_order;
3440                 list_add_tail(&page->lru, &tags->page_list);
3441
3442                 p = page_address(page);
3443                 /*
3444                  * Allow kmemleak to scan these pages as they contain pointers
3445                  * to additional allocations, e.g. made via ops->init_request().
3446                  */
3447                 kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
3448                 entries_per_page = order_to_size(this_order) / rq_size;
3449                 to_do = min(entries_per_page, depth - i);
3450                 left -= to_do * rq_size;
3451                 for (j = 0; j < to_do; j++) {
3452                         struct request *rq = p;
3453
3454                         tags->static_rqs[i] = rq;
3455                         if (blk_mq_init_request(set, rq, hctx_idx, node)) {
3456                                 tags->static_rqs[i] = NULL;
3457                                 goto fail;
3458                         }
3459
3460                         p += rq_size;
3461                         i++;
3462                 }
3463         }
3464         return 0;
3465
3466 fail:
3467         blk_mq_free_rqs(set, tags, hctx_idx);
3468         return -ENOMEM;
3469 }
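/*
 * Worked example (illustrative numbers): with 4K pages, an rq_size that
 * rounds up to 512 bytes and a depth of 256, "left" starts at 128K, so the
 * loop above allocates two order-4 (64K) chunks of 128 requests each.  If
 * an order-4 allocation fails it retries at progressively smaller orders,
 * as long as a chunk can still hold at least one request.
 */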
3470
3471 struct rq_iter_data {
3472         struct blk_mq_hw_ctx *hctx;
3473         bool has_rq;
3474 };
3475
3476 static bool blk_mq_has_request(struct request *rq, void *data)
3477 {
3478         struct rq_iter_data *iter_data = data;
3479
3480         if (rq->mq_hctx != iter_data->hctx)
3481                 return true;
3482         iter_data->has_rq = true;
3483         return false;
3484 }
3485
3486 static bool blk_mq_hctx_has_requests(struct blk_mq_hw_ctx *hctx)
3487 {
3488         struct blk_mq_tags *tags = hctx->sched_tags ?
3489                         hctx->sched_tags : hctx->tags;
3490         struct rq_iter_data data = {
3491                 .hctx   = hctx,
3492         };
3493
3494         blk_mq_all_tag_iter(tags, blk_mq_has_request, &data);
3495         return data.has_rq;
3496 }
3497
3498 static inline bool blk_mq_last_cpu_in_hctx(unsigned int cpu,
3499                 struct blk_mq_hw_ctx *hctx)
3500 {
3501         if (cpumask_first_and(hctx->cpumask, cpu_online_mask) != cpu)
3502                 return false;
3503         if (cpumask_next_and(cpu, hctx->cpumask, cpu_online_mask) < nr_cpu_ids)
3504                 return false;
3505         return true;
3506 }
3507
3508 static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node)
3509 {
3510         struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
3511                         struct blk_mq_hw_ctx, cpuhp_online);
3512
3513         if (!cpumask_test_cpu(cpu, hctx->cpumask) ||
3514             !blk_mq_last_cpu_in_hctx(cpu, hctx))
3515                 return 0;
3516
3517         /*
3518          * Prevent new requests from being allocated on the current hctx.
3519          *
3520          * The smp_mb__after_atomic() pairs with the implied barrier in
3521          * test_and_set_bit_lock() in sbitmap_get().  It ensures the inactive
3522          * flag is seen once we return from the tag allocator.
3523          */
3524         set_bit(BLK_MQ_S_INACTIVE, &hctx->state);
3525         smp_mb__after_atomic();
3526
3527         /*
3528          * Try to grab a reference to the queue and wait for any outstanding
3529          * requests.  If we could not grab a reference the queue has been
3530          * frozen and there are no requests.
3531          */
3532         if (percpu_ref_tryget(&hctx->queue->q_usage_counter)) {
3533                 while (blk_mq_hctx_has_requests(hctx))
3534                         msleep(5);
3535                 percpu_ref_put(&hctx->queue->q_usage_counter);
3536         }
3537
3538         return 0;
3539 }
3540
3541 static int blk_mq_hctx_notify_online(unsigned int cpu, struct hlist_node *node)
3542 {
3543         struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
3544                         struct blk_mq_hw_ctx, cpuhp_online);
3545
3546         if (cpumask_test_cpu(cpu, hctx->cpumask))
3547                 clear_bit(BLK_MQ_S_INACTIVE, &hctx->state);
3548         return 0;
3549 }
3550
3551 /*
3552  * 'cpu' is going away. Splice any existing rq_list entries from this
3553  * software queue to the hw queue dispatch list, and ensure that it
3554  * gets run.
3555  */
3556 static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
3557 {
3558         struct blk_mq_hw_ctx *hctx;
3559         struct blk_mq_ctx *ctx;
3560         LIST_HEAD(tmp);
3561         enum hctx_type type;
3562
3563         hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
3564         if (!cpumask_test_cpu(cpu, hctx->cpumask))
3565                 return 0;
3566
3567         ctx = __blk_mq_get_ctx(hctx->queue, cpu);
3568         type = hctx->type;
3569
3570         spin_lock(&ctx->lock);
3571         if (!list_empty(&ctx->rq_lists[type])) {
3572                 list_splice_init(&ctx->rq_lists[type], &tmp);
3573                 blk_mq_hctx_clear_pending(hctx, ctx);
3574         }
3575         spin_unlock(&ctx->lock);
3576
3577         if (list_empty(&tmp))
3578                 return 0;
3579
3580         spin_lock(&hctx->lock);
3581         list_splice_tail_init(&tmp, &hctx->dispatch);
3582         spin_unlock(&hctx->lock);
3583
3584         blk_mq_run_hw_queue(hctx, true);
3585         return 0;
3586 }
3587
3588 static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
3589 {
3590         if (!(hctx->flags & BLK_MQ_F_STACKING))
3591                 cpuhp_state_remove_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
3592                                                     &hctx->cpuhp_online);
3593         cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
3594                                             &hctx->cpuhp_dead);
3595 }
3596
3597 /*
3598  * Before freeing the hw queue, clear the flush request reference in
3599  * tags->rqs[] to avoid a potential use-after-free (UAF).
3600  */
3601 static void blk_mq_clear_flush_rq_mapping(struct blk_mq_tags *tags,
3602                 unsigned int queue_depth, struct request *flush_rq)
3603 {
3604         int i;
3605         unsigned long flags;
3606
3607         /* The hw queue may not be mapped yet */
3608         if (!tags)
3609                 return;
3610
3611         WARN_ON_ONCE(req_ref_read(flush_rq) != 0);
3612
3613         for (i = 0; i < queue_depth; i++)
3614                 cmpxchg(&tags->rqs[i], flush_rq, NULL);
3615
3616         /*
3617          * Wait until all pending iterations are done.
3618          *
3619          * Request reference is cleared and it is guaranteed to be observed
3620          * after the ->lock is released.
3621          */
3622         spin_lock_irqsave(&tags->lock, flags);
3623         spin_unlock_irqrestore(&tags->lock, flags);
3624 }
3625
3626 /* hctx->ctxs will be freed in queue's release handler */
3627 static void blk_mq_exit_hctx(struct request_queue *q,
3628                 struct blk_mq_tag_set *set,
3629                 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
3630 {
3631         struct request *flush_rq = hctx->fq->flush_rq;
3632
3633         if (blk_mq_hw_queue_mapped(hctx))
3634                 blk_mq_tag_idle(hctx);
3635
3636         if (blk_queue_init_done(q))
3637                 blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx],
3638                                 set->queue_depth, flush_rq);
3639         if (set->ops->exit_request)
3640                 set->ops->exit_request(set, flush_rq, hctx_idx);
3641
3642         if (set->ops->exit_hctx)
3643                 set->ops->exit_hctx(hctx, hctx_idx);
3644
3645         blk_mq_remove_cpuhp(hctx);
3646
3647         xa_erase(&q->hctx_table, hctx_idx);
3648
3649         spin_lock(&q->unused_hctx_lock);
3650         list_add(&hctx->hctx_list, &q->unused_hctx_list);
3651         spin_unlock(&q->unused_hctx_lock);
3652 }
3653
3654 static void blk_mq_exit_hw_queues(struct request_queue *q,
3655                 struct blk_mq_tag_set *set, int nr_queue)
3656 {
3657         struct blk_mq_hw_ctx *hctx;
3658         unsigned long i;
3659
3660         queue_for_each_hw_ctx(q, hctx, i) {
3661                 if (i == nr_queue)
3662                         break;
3663                 blk_mq_exit_hctx(q, set, hctx, i);
3664         }
3665 }
3666
3667 static int blk_mq_init_hctx(struct request_queue *q,
3668                 struct blk_mq_tag_set *set,
3669                 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
3670 {
3671         hctx->queue_num = hctx_idx;
3672
3673         if (!(hctx->flags & BLK_MQ_F_STACKING))
3674                 cpuhp_state_add_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
3675                                 &hctx->cpuhp_online);
3676         cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
3677
3678         hctx->tags = set->tags[hctx_idx];
3679
3680         if (set->ops->init_hctx &&
3681             set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
3682                 goto unregister_cpu_notifier;
3683
3684         if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx,
3685                                 hctx->numa_node))
3686                 goto exit_hctx;
3687
3688         if (xa_insert(&q->hctx_table, hctx_idx, hctx, GFP_KERNEL))
3689                 goto exit_flush_rq;
3690
3691         return 0;
3692
3693  exit_flush_rq:
3694         if (set->ops->exit_request)
3695                 set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
3696  exit_hctx:
3697         if (set->ops->exit_hctx)
3698                 set->ops->exit_hctx(hctx, hctx_idx);
3699  unregister_cpu_notifier:
3700         blk_mq_remove_cpuhp(hctx);
3701         return -1;
3702 }
3703
3704 static struct blk_mq_hw_ctx *
3705 blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
3706                 int node)
3707 {
3708         struct blk_mq_hw_ctx *hctx;
3709         gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY;
3710
3711         hctx = kzalloc_node(sizeof(struct blk_mq_hw_ctx), gfp, node);
3712         if (!hctx)
3713                 goto fail_alloc_hctx;
3714
3715         if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node))
3716                 goto free_hctx;
3717
3718         atomic_set(&hctx->nr_active, 0);
3719         if (node == NUMA_NO_NODE)
3720                 node = set->numa_node;
3721         hctx->numa_node = node;
3722
3723         INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
3724         spin_lock_init(&hctx->lock);
3725         INIT_LIST_HEAD(&hctx->dispatch);
3726         hctx->queue = q;
3727         hctx->flags = set->flags & ~BLK_MQ_F_TAG_QUEUE_SHARED;
3728
3729         INIT_LIST_HEAD(&hctx->hctx_list);
3730
3731         /*
3732          * Allocate space for all possible cpus to avoid allocation at
3733          * runtime
3734          */
3735         hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *),
3736                         gfp, node);
3737         if (!hctx->ctxs)
3738                 goto free_cpumask;
3739
3740         if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8),
3741                                 gfp, node, false, false))
3742                 goto free_ctxs;
3743         hctx->nr_ctx = 0;
3744
3745         spin_lock_init(&hctx->dispatch_wait_lock);
3746         init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
3747         INIT_LIST_HEAD(&hctx->dispatch_wait.entry);
3748
3749         hctx->fq = blk_alloc_flush_queue(hctx->numa_node, set->cmd_size, gfp);
3750         if (!hctx->fq)
3751                 goto free_bitmap;
3752
3753         blk_mq_hctx_kobj_init(hctx);
3754
3755         return hctx;
3756
3757  free_bitmap:
3758         sbitmap_free(&hctx->ctx_map);
3759  free_ctxs:
3760         kfree(hctx->ctxs);
3761  free_cpumask:
3762         free_cpumask_var(hctx->cpumask);
3763  free_hctx:
3764         kfree(hctx);
3765  fail_alloc_hctx:
3766         return NULL;
3767 }
3768
3769 static void blk_mq_init_cpu_queues(struct request_queue *q,
3770                                    unsigned int nr_hw_queues)
3771 {
3772         struct blk_mq_tag_set *set = q->tag_set;
3773         unsigned int i, j;
3774
3775         for_each_possible_cpu(i) {
3776                 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
3777                 struct blk_mq_hw_ctx *hctx;
3778                 int k;
3779
3780                 __ctx->cpu = i;
3781                 spin_lock_init(&__ctx->lock);
3782                 for (k = HCTX_TYPE_DEFAULT; k < HCTX_MAX_TYPES; k++)
3783                         INIT_LIST_HEAD(&__ctx->rq_lists[k]);
3784
3785                 __ctx->queue = q;
3786
3787                 /*
3788                  * Set local node, IFF we have more than one hw queue. If
3789                  * not, we remain on the home node of the device
3790                  */
3791                 for (j = 0; j < set->nr_maps; j++) {
3792                         hctx = blk_mq_map_queue_type(q, j, i);
3793                         if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
3794                                 hctx->numa_node = cpu_to_node(i);
3795                 }
3796         }
3797 }
3798
3799 struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
3800                                              unsigned int hctx_idx,
3801                                              unsigned int depth)
3802 {
3803         struct blk_mq_tags *tags;
3804         int ret;
3805
3806         tags = blk_mq_alloc_rq_map(set, hctx_idx, depth, set->reserved_tags);
3807         if (!tags)
3808                 return NULL;
3809
3810         ret = blk_mq_alloc_rqs(set, tags, hctx_idx, depth);
3811         if (ret) {
3812                 blk_mq_free_rq_map(tags);
3813                 return NULL;
3814         }
3815
3816         return tags;
3817 }
3818
3819 static bool __blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
3820                                        int hctx_idx)
3821 {
3822         if (blk_mq_is_shared_tags(set->flags)) {
3823                 set->tags[hctx_idx] = set->shared_tags;
3824
3825                 return true;
3826         }
3827
3828         set->tags[hctx_idx] = blk_mq_alloc_map_and_rqs(set, hctx_idx,
3829                                                        set->queue_depth);
3830
3831         return set->tags[hctx_idx];
3832 }
3833
3834 void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
3835                              struct blk_mq_tags *tags,
3836                              unsigned int hctx_idx)
3837 {
3838         if (tags) {
3839                 blk_mq_free_rqs(set, tags, hctx_idx);
3840                 blk_mq_free_rq_map(tags);
3841         }
3842 }
3843
3844 static void __blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
3845                                       unsigned int hctx_idx)
3846 {
3847         if (!blk_mq_is_shared_tags(set->flags))
3848                 blk_mq_free_map_and_rqs(set, set->tags[hctx_idx], hctx_idx);
3849
3850         set->tags[hctx_idx] = NULL;
3851 }
3852
3853 static void blk_mq_map_swqueue(struct request_queue *q)
3854 {
3855         unsigned int j, hctx_idx;
3856         unsigned long i;
3857         struct blk_mq_hw_ctx *hctx;
3858         struct blk_mq_ctx *ctx;
3859         struct blk_mq_tag_set *set = q->tag_set;
3860
3861         queue_for_each_hw_ctx(q, hctx, i) {
3862                 cpumask_clear(hctx->cpumask);
3863                 hctx->nr_ctx = 0;
3864                 hctx->dispatch_from = NULL;
3865         }
3866
3867         /*
3868          * Map software to hardware queues.
3869          *
3870          * If the cpu isn't present, the cpu is mapped to the first hctx.
3871          */
3872         for_each_possible_cpu(i) {
3873
3874                 ctx = per_cpu_ptr(q->queue_ctx, i);
3875                 for (j = 0; j < set->nr_maps; j++) {
3876                         if (!set->map[j].nr_queues) {
3877                                 ctx->hctxs[j] = blk_mq_map_queue_type(q,
3878                                                 HCTX_TYPE_DEFAULT, i);
3879                                 continue;
3880                         }
3881                         hctx_idx = set->map[j].mq_map[i];
3882                         /* an unmapped hw queue can be remapped after the CPU topology changes */
3883                         if (!set->tags[hctx_idx] &&
3884                             !__blk_mq_alloc_map_and_rqs(set, hctx_idx)) {
3885                                 /*
3886                                  * If tags initialization fails for some hctx,
3887                                  * that hctx won't be brought online.  In this
3888                                  * case, remap the current ctx to hctx[0] which
3889                                  * is guaranteed to always have tags allocated
3890                                  */
3891                                 set->map[j].mq_map[i] = 0;
3892                         }
3893
3894                         hctx = blk_mq_map_queue_type(q, j, i);
3895                         ctx->hctxs[j] = hctx;
3896                         /*
3897                          * If the CPU is already set in the mask, then we've
3898                          * mapped this one already. This can happen if
3899                          * devices share queues across queue maps.
3900                          */
3901                         if (cpumask_test_cpu(i, hctx->cpumask))
3902                                 continue;
3903
3904                         cpumask_set_cpu(i, hctx->cpumask);
3905                         hctx->type = j;
3906                         ctx->index_hw[hctx->type] = hctx->nr_ctx;
3907                         hctx->ctxs[hctx->nr_ctx++] = ctx;
3908
3909                         /*
3910                          * If the nr_ctx type overflows, we have exceeded the
3911                          * number of sw queues we can support.
3912                          */
3913                         BUG_ON(!hctx->nr_ctx);
3914                 }
3915
3916                 for (; j < HCTX_MAX_TYPES; j++)
3917                         ctx->hctxs[j] = blk_mq_map_queue_type(q,
3918                                         HCTX_TYPE_DEFAULT, i);
3919         }
3920
3921         queue_for_each_hw_ctx(q, hctx, i) {
3922                 /*
3923                  * If no software queues are mapped to this hardware queue,
3924                  * disable it and free the request entries.
3925                  */
3926                 if (!hctx->nr_ctx) {
3927                         /* Never unmap queue 0.  We need it as a
3928                          * fallback in case allocation fails during
3929                          * a new remap.
3930                          */
3931                         if (i)
3932                                 __blk_mq_free_map_and_rqs(set, i);
3933
3934                         hctx->tags = NULL;
3935                         continue;
3936                 }
3937
3938                 hctx->tags = set->tags[i];
3939                 WARN_ON(!hctx->tags);
3940
3941                 /*
3942                  * Set the map size to the number of mapped software queues.
3943                  * This is more accurate and more efficient than looping
3944                  * over all possibly mapped software queues.
3945                  */
3946                 sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);
3947
3948                 /*
3949                  * Initialize batch roundrobin counts
3950                  */
3951                 hctx->next_cpu = blk_mq_first_mapped_cpu(hctx);
3952                 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
3953         }
3954 }
3955
3956 /*
3957  * Caller needs to ensure that we're either frozen/quiesced, or that
3958  * the queue isn't live yet.
3959  */
3960 static void queue_set_hctx_shared(struct request_queue *q, bool shared)
3961 {
3962         struct blk_mq_hw_ctx *hctx;
3963         unsigned long i;
3964
3965         queue_for_each_hw_ctx(q, hctx, i) {
3966                 if (shared) {
3967                         hctx->flags |= BLK_MQ_F_TAG_QUEUE_SHARED;
3968                 } else {
3969                         blk_mq_tag_idle(hctx);
3970                         hctx->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED;
3971                 }
3972         }
3973 }
3974
3975 static void blk_mq_update_tag_set_shared(struct blk_mq_tag_set *set,
3976                                          bool shared)
3977 {
3978         struct request_queue *q;
3979
3980         lockdep_assert_held(&set->tag_list_lock);
3981
3982         list_for_each_entry(q, &set->tag_list, tag_set_list) {
3983                 blk_mq_freeze_queue(q);
3984                 queue_set_hctx_shared(q, shared);
3985                 blk_mq_unfreeze_queue(q);
3986         }
3987 }
3988
3989 static void blk_mq_del_queue_tag_set(struct request_queue *q)
3990 {
3991         struct blk_mq_tag_set *set = q->tag_set;
3992
3993         mutex_lock(&set->tag_list_lock);
3994         list_del(&q->tag_set_list);
3995         if (list_is_singular(&set->tag_list)) {
3996                 /* just transitioned to unshared */
3997                 set->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED;
3998                 /* update existing queue */
3999                 blk_mq_update_tag_set_shared(set, false);
4000         }
4001         mutex_unlock(&set->tag_list_lock);
4002         INIT_LIST_HEAD(&q->tag_set_list);
4003 }
4004
4005 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
4006                                      struct request_queue *q)
4007 {
4008         mutex_lock(&set->tag_list_lock);
4009
4010         /*
4011          * Check to see if we're transitioning to shared (from 1 to 2 queues).
4012          */
4013         if (!list_empty(&set->tag_list) &&
4014             !(set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
4015                 set->flags |= BLK_MQ_F_TAG_QUEUE_SHARED;
4016                 /* update existing queue */
4017                 blk_mq_update_tag_set_shared(set, true);
4018         }
4019         if (set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
4020                 queue_set_hctx_shared(q, true);
4021         list_add_tail(&q->tag_set_list, &set->tag_list);
4022
4023         mutex_unlock(&set->tag_list_lock);
4024 }
4025
4026 /* All allocations will be freed in release handler of q->mq_kobj */
4027 static int blk_mq_alloc_ctxs(struct request_queue *q)
4028 {
4029         struct blk_mq_ctxs *ctxs;
4030         int cpu;
4031
4032         ctxs = kzalloc(sizeof(*ctxs), GFP_KERNEL);
4033         if (!ctxs)
4034                 return -ENOMEM;
4035
4036         ctxs->queue_ctx = alloc_percpu(struct blk_mq_ctx);
4037         if (!ctxs->queue_ctx)
4038                 goto fail;
4039
4040         for_each_possible_cpu(cpu) {
4041                 struct blk_mq_ctx *ctx = per_cpu_ptr(ctxs->queue_ctx, cpu);
4042                 ctx->ctxs = ctxs;
4043         }
4044
4045         q->mq_kobj = &ctxs->kobj;
4046         q->queue_ctx = ctxs->queue_ctx;
4047
4048         return 0;
4049  fail:
4050         kfree(ctxs);
4051         return -ENOMEM;
4052 }
4053
4054 /*
4055  * This is the actual release handler for mq, but we call it from the
4056  * request queue's release handler to avoid use-after-free headaches,
4057  * because q->mq_kobj shouldn't have been introduced in the first place,
4058  * but we can't group the ctx/hctx kobjects without it.
4059  */
4060 void blk_mq_release(struct request_queue *q)
4061 {
4062         struct blk_mq_hw_ctx *hctx, *next;
4063         unsigned long i;
4064
4065         queue_for_each_hw_ctx(q, hctx, i)
4066                 WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list));
4067
4068         /* all hctx are in .unused_hctx_list now */
4069         list_for_each_entry_safe(hctx, next, &q->unused_hctx_list, hctx_list) {
4070                 list_del_init(&hctx->hctx_list);
4071                 kobject_put(&hctx->kobj);
4072         }
4073
4074         xa_destroy(&q->hctx_table);
4075
4076         /*
4077          * release .mq_kobj and the sw queues' kobjects now because
4078          * both share their lifetime with the request queue.
4079          */
4080         blk_mq_sysfs_deinit(q);
4081 }
4082
4083 static struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
4084                 void *queuedata)
4085 {
4086         struct request_queue *q;
4087         int ret;
4088
4089         q = blk_alloc_queue(set->numa_node);
4090         if (!q)
4091                 return ERR_PTR(-ENOMEM);
4092         q->queuedata = queuedata;
4093         ret = blk_mq_init_allocated_queue(set, q);
4094         if (ret) {
4095                 blk_put_queue(q);
4096                 return ERR_PTR(ret);
4097         }
4098         return q;
4099 }
4100
4101 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
4102 {
4103         return blk_mq_init_queue_data(set, NULL);
4104 }
4105 EXPORT_SYMBOL(blk_mq_init_queue);
4106
4107 /**
4108  * blk_mq_destroy_queue - shutdown a request queue
4109  * @q: request queue to shutdown
4110  *
4111  * This shuts down a request queue allocated by blk_mq_init_queue(). All future
4112  * requests will be failed with -ENODEV. The caller is responsible for dropping
4113  * the reference from blk_mq_init_queue() by calling blk_put_queue().
4114  *
4115  * Context: can sleep
4116  */
4117 void blk_mq_destroy_queue(struct request_queue *q)
4118 {
4119         WARN_ON_ONCE(!queue_is_mq(q));
4120         WARN_ON_ONCE(blk_queue_registered(q));
4121
4122         might_sleep();
4123
4124         blk_queue_flag_set(QUEUE_FLAG_DYING, q);
4125         blk_queue_start_drain(q);
4126         blk_mq_freeze_queue_wait(q);
4127
4128         blk_sync_queue(q);
4129         blk_mq_cancel_work_sync(q);
4130         blk_mq_exit_queue(q);
4131 }
4132 EXPORT_SYMBOL(blk_mq_destroy_queue);
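/*
 * Illustrative sketch, not part of this file: a driver that allocated the
 * queue with blk_mq_init_queue() tears it down and drops its reference
 * with:
 *
 *	blk_mq_destroy_queue(q);
 *	blk_put_queue(q);
 */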
4133
4134 struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
4135                 struct lock_class_key *lkclass)
4136 {
4137         struct request_queue *q;
4138         struct gendisk *disk;
4139
4140         q = blk_mq_init_queue_data(set, queuedata);
4141         if (IS_ERR(q))
4142                 return ERR_CAST(q);
4143
4144         disk = __alloc_disk_node(q, set->numa_node, lkclass);
4145         if (!disk) {
4146                 blk_mq_destroy_queue(q);
4147                 blk_put_queue(q);
4148                 return ERR_PTR(-ENOMEM);
4149         }
4150         set_bit(GD_OWNS_QUEUE, &disk->state);
4151         return disk;
4152 }
4153 EXPORT_SYMBOL(__blk_mq_alloc_disk);
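/*
 * Illustrative sketch, not part of this file ("my_*" names are
 * hypothetical): drivers normally go through the blk_mq_alloc_disk()
 * wrapper, which supplies the lock class key, roughly:
 *
 *	disk = blk_mq_alloc_disk(&my_tag_set, my_driver_data);
 *	if (IS_ERR(disk))
 *		return PTR_ERR(disk);
 *	disk->fops = &my_block_ops;
 *	set_capacity(disk, my_nr_sectors);
 *	err = add_disk(disk);
 */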
4154
4155 struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
4156                 struct lock_class_key *lkclass)
4157 {
4158         struct gendisk *disk;
4159
4160         if (!blk_get_queue(q))
4161                 return NULL;
4162         disk = __alloc_disk_node(q, NUMA_NO_NODE, lkclass);
4163         if (!disk)
4164                 blk_put_queue(q);
4165         return disk;
4166 }
4167 EXPORT_SYMBOL(blk_mq_alloc_disk_for_queue);
4168
4169 static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
4170                 struct blk_mq_tag_set *set, struct request_queue *q,
4171                 int hctx_idx, int node)
4172 {
4173         struct blk_mq_hw_ctx *hctx = NULL, *tmp;
4174
4175         /* reuse dead hctx first */
4176         spin_lock(&q->unused_hctx_lock);
4177         list_for_each_entry(tmp, &q->unused_hctx_list, hctx_list) {
4178                 if (tmp->numa_node == node) {
4179                         hctx = tmp;
4180                         break;
4181                 }
4182         }
4183         if (hctx)
4184                 list_del_init(&hctx->hctx_list);
4185         spin_unlock(&q->unused_hctx_lock);
4186
4187         if (!hctx)
4188                 hctx = blk_mq_alloc_hctx(q, set, node);
4189         if (!hctx)
4190                 goto fail;
4191
4192         if (blk_mq_init_hctx(q, set, hctx, hctx_idx))
4193                 goto free_hctx;
4194
4195         return hctx;
4196
4197  free_hctx:
4198         kobject_put(&hctx->kobj);
4199  fail:
4200         return NULL;
4201 }
4202
4203 static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
4204                                                 struct request_queue *q)
4205 {
4206         struct blk_mq_hw_ctx *hctx;
4207         unsigned long i, j;
4208
4209         /* protect against switching io scheduler  */
4210         mutex_lock(&q->sysfs_lock);
4211         for (i = 0; i < set->nr_hw_queues; i++) {
4212                 int old_node;
4213                 int node = blk_mq_get_hctx_node(set, i);
4214                 struct blk_mq_hw_ctx *old_hctx = xa_load(&q->hctx_table, i);
4215
4216                 if (old_hctx) {
4217                         old_node = old_hctx->numa_node;
4218                         blk_mq_exit_hctx(q, set, old_hctx, i);
4219                 }
4220
4221                 if (!blk_mq_alloc_and_init_hctx(set, q, i, node)) {
4222                         if (!old_hctx)
4223                                 break;
4224                         pr_warn("Allocate new hctx on node %d fails, fallback to previous one on node %d\n",
4225                                         node, old_node);
4226                         hctx = blk_mq_alloc_and_init_hctx(set, q, i, old_node);
4227                         WARN_ON_ONCE(!hctx);
4228                 }
4229         }
4230         /*
4231          * If increasing nr_hw_queues failed, free the newly allocated
4232          * hctxs and keep the previous q->nr_hw_queues.
4233          */
4234         if (i != set->nr_hw_queues) {
4235                 j = q->nr_hw_queues;
4236         } else {
4237                 j = i;
4238                 q->nr_hw_queues = set->nr_hw_queues;
4239         }
4240
4241         xa_for_each_start(&q->hctx_table, j, hctx, j)
4242                 blk_mq_exit_hctx(q, set, hctx, j);
4243         mutex_unlock(&q->sysfs_lock);
4244 }
4245
4246 static void blk_mq_update_poll_flag(struct request_queue *q)
4247 {
4248         struct blk_mq_tag_set *set = q->tag_set;
4249
4250         if (set->nr_maps > HCTX_TYPE_POLL &&
4251             set->map[HCTX_TYPE_POLL].nr_queues)
4252                 blk_queue_flag_set(QUEUE_FLAG_POLL, q);
4253         else
4254                 blk_queue_flag_clear(QUEUE_FLAG_POLL, q);
4255 }
4256
4257 int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
4258                 struct request_queue *q)
4259 {
4260         /* mark the queue as mq asap */
4261         q->mq_ops = set->ops;
4262
4263         if (blk_mq_alloc_ctxs(q))
4264                 goto err_exit;
4265
4266         /* init q->mq_kobj and sw queues' kobjects */
4267         blk_mq_sysfs_init(q);
4268
4269         INIT_LIST_HEAD(&q->unused_hctx_list);
4270         spin_lock_init(&q->unused_hctx_lock);
4271
4272         xa_init(&q->hctx_table);
4273
4274         blk_mq_realloc_hw_ctxs(set, q);
4275         if (!q->nr_hw_queues)
4276                 goto err_hctxs;
4277
4278         INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
4279         blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
4280
4281         q->tag_set = set;
4282
4283         q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
4284         blk_mq_update_poll_flag(q);
4285
4286         INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
4287         INIT_LIST_HEAD(&q->flush_list);
4288         INIT_LIST_HEAD(&q->requeue_list);
4289         spin_lock_init(&q->requeue_lock);
4290
4291         q->nr_requests = set->queue_depth;
4292
4293         blk_mq_init_cpu_queues(q, set->nr_hw_queues);
4294         blk_mq_add_queue_tag_set(set, q);
4295         blk_mq_map_swqueue(q);
4296         return 0;
4297
4298 err_hctxs:
4299         blk_mq_release(q);
4300 err_exit:
4301         q->mq_ops = NULL;
4302         return -ENOMEM;
4303 }
4304 EXPORT_SYMBOL(blk_mq_init_allocated_queue);
4305
4306 /* tags can _not_ be used after returning from blk_mq_exit_queue */
4307 void blk_mq_exit_queue(struct request_queue *q)
4308 {
4309         struct blk_mq_tag_set *set = q->tag_set;
4310
4311         /* Checks hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED. */
4312         blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
4313         /* May clear BLK_MQ_F_TAG_QUEUE_SHARED in hctx->flags. */
4314         blk_mq_del_queue_tag_set(q);
4315 }
4316
4317 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
4318 {
4319         int i;
4320
4321         if (blk_mq_is_shared_tags(set->flags)) {
4322                 set->shared_tags = blk_mq_alloc_map_and_rqs(set,
4323                                                 BLK_MQ_NO_HCTX_IDX,
4324                                                 set->queue_depth);
4325                 if (!set->shared_tags)
4326                         return -ENOMEM;
4327         }
4328
4329         for (i = 0; i < set->nr_hw_queues; i++) {
4330                 if (!__blk_mq_alloc_map_and_rqs(set, i))
4331                         goto out_unwind;
4332                 cond_resched();
4333         }
4334
4335         return 0;
4336
4337 out_unwind:
4338         while (--i >= 0)
4339                 __blk_mq_free_map_and_rqs(set, i);
4340
4341         if (blk_mq_is_shared_tags(set->flags)) {
4342                 blk_mq_free_map_and_rqs(set, set->shared_tags,
4343                                         BLK_MQ_NO_HCTX_IDX);
4344         }
4345
4346         return -ENOMEM;
4347 }
4348
4349 /*
4350  * Allocate the request maps associated with this tag_set. Note that this
4351  * may reduce the depth asked for, if memory is tight. set->queue_depth
4352  * will be updated to reflect the allocated depth.
4353  */
4354 static int blk_mq_alloc_set_map_and_rqs(struct blk_mq_tag_set *set)
4355 {
4356         unsigned int depth;
4357         int err;
4358
4359         depth = set->queue_depth;
4360         do {
4361                 err = __blk_mq_alloc_rq_maps(set);
4362                 if (!err)
4363                         break;
4364
4365                 set->queue_depth >>= 1;
4366                 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
4367                         err = -ENOMEM;
4368                         break;
4369                 }
4370         } while (set->queue_depth);
4371
4372         if (!set->queue_depth || err) {
4373                 pr_err("blk-mq: failed to allocate request map\n");
4374                 return -ENOMEM;
4375         }
4376
4377         if (depth != set->queue_depth)
4378                 pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
4379                                                 depth, set->queue_depth);
4380
4381         return 0;
4382 }
4383
4384 static void blk_mq_update_queue_map(struct blk_mq_tag_set *set)
4385 {
4386         /*
4387          * blk_mq_map_queues() and multiple .map_queues() implementations
4388          * expect that set->map[HCTX_TYPE_DEFAULT].nr_queues is set to the
4389          * number of hardware queues.
4390          */
4391         if (set->nr_maps == 1)
4392                 set->map[HCTX_TYPE_DEFAULT].nr_queues = set->nr_hw_queues;
4393
4394         if (set->ops->map_queues && !is_kdump_kernel()) {
4395                 int i;
4396
4397                 /*
4398                  * transport .map_queues is usually done in the following
4399                  * way:
4400                  *
4401                  * for (queue = 0; queue < set->nr_hw_queues; queue++) {
4402                  *      mask = get_cpu_mask(queue)
4403                  *      for_each_cpu(cpu, mask)
4404                  *              set->map[x].mq_map[cpu] = queue;
4405                  * }
4406                  *
4407                  * When we need to remap, the table has to be cleared to
4408                  * kill stale mappings, since one CPU may not be mapped
4409                  * to any hw queue.
4410                  */
4411                 for (i = 0; i < set->nr_maps; i++)
4412                         blk_mq_clear_mq_map(&set->map[i]);
4413
4414                 set->ops->map_queues(set);
4415         } else {
4416                 BUG_ON(set->nr_maps > 1);
4417                 blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
4418         }
4419 }
4420
4421 static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set,
4422                                        int new_nr_hw_queues)
4423 {
4424         struct blk_mq_tags **new_tags;
4425         int i;
4426
4427         if (set->nr_hw_queues >= new_nr_hw_queues)
4428                 goto done;
4429
4430         new_tags = kcalloc_node(new_nr_hw_queues, sizeof(struct blk_mq_tags *),
4431                                 GFP_KERNEL, set->numa_node);
4432         if (!new_tags)
4433                 return -ENOMEM;
4434
4435         if (set->tags)
4436                 memcpy(new_tags, set->tags, set->nr_hw_queues *
4437                        sizeof(*set->tags));
4438         kfree(set->tags);
4439         set->tags = new_tags;
4440
4441         for (i = set->nr_hw_queues; i < new_nr_hw_queues; i++) {
4442                 if (!__blk_mq_alloc_map_and_rqs(set, i)) {
4443                         while (--i >= set->nr_hw_queues)
4444                                 __blk_mq_free_map_and_rqs(set, i);
4445                         return -ENOMEM;
4446                 }
4447                 cond_resched();
4448         }
4449
4450 done:
4451         set->nr_hw_queues = new_nr_hw_queues;
4452         return 0;
4453 }
4454
4455 /*
4456  * Alloc a tag set to be associated with one or more request queues.
4457  * May fail with EINVAL for various error conditions. May adjust the
4458  * requested depth down, if it's too large. In that case, the set
4459  * value will be stored in set->queue_depth.
4460  */
4461 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
4462 {
4463         int i, ret;
4464
4465         BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
4466
4467         if (!set->nr_hw_queues)
4468                 return -EINVAL;
4469         if (!set->queue_depth)
4470                 return -EINVAL;
4471         if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
4472                 return -EINVAL;
4473
4474         if (!set->ops->queue_rq)
4475                 return -EINVAL;
4476
4477         if (!set->ops->get_budget ^ !set->ops->put_budget)
4478                 return -EINVAL;
4479
4480         if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
4481                 pr_info("blk-mq: reduced tag depth to %u\n",
4482                         BLK_MQ_MAX_DEPTH);
4483                 set->queue_depth = BLK_MQ_MAX_DEPTH;
4484         }
4485
4486         if (!set->nr_maps)
4487                 set->nr_maps = 1;
4488         else if (set->nr_maps > HCTX_MAX_TYPES)
4489                 return -EINVAL;
4490
4491         /*
4492          * If a crashdump is active, then we are potentially in a very
4493          * memory constrained environment. Limit us to 1 queue and
4494          * 64 tags to prevent using too much memory.
4495          */
4496         if (is_kdump_kernel()) {
4497                 set->nr_hw_queues = 1;
4498                 set->nr_maps = 1;
4499                 set->queue_depth = min(64U, set->queue_depth);
4500         }
4501         /*
4502          * There is no use for more h/w queues than cpus if we just have
4503          * a single map
4504          */
4505         if (set->nr_maps == 1 && set->nr_hw_queues > nr_cpu_ids)
4506                 set->nr_hw_queues = nr_cpu_ids;
4507
4508         if (set->flags & BLK_MQ_F_BLOCKING) {
4509                 set->srcu = kmalloc(sizeof(*set->srcu), GFP_KERNEL);
4510                 if (!set->srcu)
4511                         return -ENOMEM;
4512                 ret = init_srcu_struct(set->srcu);
4513                 if (ret)
4514                         goto out_free_srcu;
4515         }
4516
4517         ret = -ENOMEM;
4518         set->tags = kcalloc_node(set->nr_hw_queues,
4519                                  sizeof(struct blk_mq_tags *), GFP_KERNEL,
4520                                  set->numa_node);
4521         if (!set->tags)
4522                 goto out_cleanup_srcu;
4523
4524         for (i = 0; i < set->nr_maps; i++) {
4525                 set->map[i].mq_map = kcalloc_node(nr_cpu_ids,
4526                                                   sizeof(set->map[i].mq_map[0]),
4527                                                   GFP_KERNEL, set->numa_node);
4528                 if (!set->map[i].mq_map)
4529                         goto out_free_mq_map;
4530                 set->map[i].nr_queues = is_kdump_kernel() ? 1 : set->nr_hw_queues;
4531         }
4532
4533         blk_mq_update_queue_map(set);
4534
4535         ret = blk_mq_alloc_set_map_and_rqs(set);
4536         if (ret)
4537                 goto out_free_mq_map;
4538
4539         mutex_init(&set->tag_list_lock);
4540         INIT_LIST_HEAD(&set->tag_list);
4541
4542         return 0;
4543
4544 out_free_mq_map:
4545         for (i = 0; i < set->nr_maps; i++) {
4546                 kfree(set->map[i].mq_map);
4547                 set->map[i].mq_map = NULL;
4548         }
4549         kfree(set->tags);
4550         set->tags = NULL;
4551 out_cleanup_srcu:
4552         if (set->flags & BLK_MQ_F_BLOCKING)
4553                 cleanup_srcu_struct(set->srcu);
4554 out_free_srcu:
4555         if (set->flags & BLK_MQ_F_BLOCKING)
4556                 kfree(set->srcu);
4557         return ret;
4558 }
4559 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
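/*
 * Illustrative sketch (not part of this file): a minimal driver typically
 * embeds a tag set in its device structure and sets it up along these lines
 * before creating request queues against it.  "mydev" and "my_mq_ops" are
 * hypothetical; the ops must provide at least .queue_rq:
 *
 *	struct blk_mq_tag_set *set = &mydev->tag_set;
 *
 *	memset(set, 0, sizeof(*set));
 *	set->ops = &my_mq_ops;
 *	set->nr_hw_queues = 4;
 *	set->nr_maps = 1;
 *	set->queue_depth = 128;
 *	set->numa_node = NUMA_NO_NODE;
 *	set->flags = BLK_MQ_F_SHOULD_MERGE;
 *	ret = blk_mq_alloc_tag_set(set);
 *	if (ret)
 *		return ret;
 *
 * A successful allocation must eventually be balanced by a call to
 * blk_mq_free_tag_set(), after all queues using the set have been torn down.
 */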
4560
4561 /* allocate and initialize a tagset for a simple single-queue device */
4562 int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
4563                 const struct blk_mq_ops *ops, unsigned int queue_depth,
4564                 unsigned int set_flags)
4565 {
4566         memset(set, 0, sizeof(*set));
4567         set->ops = ops;
4568         set->nr_hw_queues = 1;
4569         set->nr_maps = 1;
4570         set->queue_depth = queue_depth;
4571         set->numa_node = NUMA_NO_NODE;
4572         set->flags = set_flags;
4573         return blk_mq_alloc_tag_set(set);
4574 }
4575 EXPORT_SYMBOL_GPL(blk_mq_alloc_sq_tag_set);
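/*
 * Illustrative sketch (not part of this file): a simple single-queue driver
 * can use the helper above instead of filling in the tag set by hand.  Here
 * "mydev" and "my_mq_ops" are hypothetical and the queue depth is 16:
 *
 *	ret = blk_mq_alloc_sq_tag_set(&mydev->tag_set, &my_mq_ops, 16,
 *				      BLK_MQ_F_SHOULD_MERGE);
 *	if (ret)
 *		return ret;
 */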
4576
4577 void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
4578 {
4579         int i, j;
4580
4581         for (i = 0; i < set->nr_hw_queues; i++)
4582                 __blk_mq_free_map_and_rqs(set, i);
4583
4584         if (blk_mq_is_shared_tags(set->flags)) {
4585                 blk_mq_free_map_and_rqs(set, set->shared_tags,
4586                                         BLK_MQ_NO_HCTX_IDX);
4587         }
4588
4589         for (j = 0; j < set->nr_maps; j++) {
4590                 kfree(set->map[j].mq_map);
4591                 set->map[j].mq_map = NULL;
4592         }
4593
4594         kfree(set->tags);
4595         set->tags = NULL;
4596         if (set->flags & BLK_MQ_F_BLOCKING) {
4597                 cleanup_srcu_struct(set->srcu);
4598                 kfree(set->srcu);
4599         }
4600 }
4601 EXPORT_SYMBOL(blk_mq_free_tag_set);
4602
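/*
 * Update the number of requests (tag depth) available to each hardware queue
 * of @q, e.g. in response to a write to the queue's sysfs "nr_requests"
 * attribute.  With an I/O scheduler attached only the scheduler tag depth is
 * resized; otherwise the driver tags themselves are resized.  The queue is
 * frozen and quiesced for the duration of the update.
 */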
4603 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
4604 {
4605         struct blk_mq_tag_set *set = q->tag_set;
4606         struct blk_mq_hw_ctx *hctx;
4607         int ret;
4608         unsigned long i;
4609
4610         if (!set)
4611                 return -EINVAL;
4612
4613         if (q->nr_requests == nr)
4614                 return 0;
4615
4616         blk_mq_freeze_queue(q);
4617         blk_mq_quiesce_queue(q);
4618
4619         ret = 0;
4620         queue_for_each_hw_ctx(q, hctx, i) {
4621                 if (!hctx->tags)
4622                         continue;
4623                 /*
4624                  * If we're using an MQ scheduler, just update the scheduler
4625                  * queue depth, similar to what the legacy request path did.
4626                  */
4627                 if (hctx->sched_tags) {
4628                         ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
4629                                                       nr, true);
4630                 } else {
4631                         ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr,
4632                                                       false);
4633                 }
4634                 if (ret)
4635                         break;
4636                 if (q->elevator && q->elevator->type->ops.depth_updated)
4637                         q->elevator->type->ops.depth_updated(hctx);
4638         }
4639         if (!ret) {
4640                 q->nr_requests = nr;
4641                 if (blk_mq_is_shared_tags(set->flags)) {
4642                         if (q->elevator)
4643                                 blk_mq_tag_update_sched_shared_tags(q);
4644                         else
4645                                 blk_mq_tag_resize_shared_tags(set, nr);
4646                 }
4647         }
4648
4649         blk_mq_unquiesce_queue(q);
4650         blk_mq_unfreeze_queue(q);
4651
4652         return ret;
4653 }
4654
4655 /*
4656  * request_queue and elevator_type pair.
4657  * It is just used by __blk_mq_update_nr_hw_queues to cache
4658  * the elevator_type associated with a request_queue.
4659  */
4660 struct blk_mq_qe_pair {
4661         struct list_head node;
4662         struct request_queue *q;
4663         struct elevator_type *type;
4664 };
4665
4666 /*
4667  * Cache the elevator_type in the qe pair list and switch the
4668  * I/O scheduler to 'none'.
4669  */
4670 static bool blk_mq_elv_switch_none(struct list_head *head,
4671                 struct request_queue *q)
4672 {
4673         struct blk_mq_qe_pair *qe;
4674
4675         qe = kmalloc(sizeof(*qe), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
4676         if (!qe)
4677                 return false;
4678
4679         /* q->elevator is protected by ->sysfs_lock */
4680         mutex_lock(&q->sysfs_lock);
4681
4682         /* the check has to be done while holding sysfs_lock */
4683         if (!q->elevator) {
4684                 kfree(qe);
4685                 goto unlock;
4686         }
4687
4688         INIT_LIST_HEAD(&qe->node);
4689         qe->q = q;
4690         qe->type = q->elevator->type;
4691         /* keep a reference to the elevator module as we'll switch back */
4692         __elevator_get(qe->type);
4693         list_add(&qe->node, head);
4694         elevator_disable(q);
4695 unlock:
4696         mutex_unlock(&q->sysfs_lock);
4697
4698         return true;
4699 }
4700
4701 static struct blk_mq_qe_pair *blk_lookup_qe_pair(struct list_head *head,
4702                                                 struct request_queue *q)
4703 {
4704         struct blk_mq_qe_pair *qe;
4705
4706         list_for_each_entry(qe, head, node)
4707                 if (qe->q == q)
4708                         return qe;
4709
4710         return NULL;
4711 }
4712
4713 static void blk_mq_elv_switch_back(struct list_head *head,
4714                                   struct request_queue *q)
4715 {
4716         struct blk_mq_qe_pair *qe;
4717         struct elevator_type *t;
4718
4719         qe = blk_lookup_qe_pair(head, q);
4720         if (!qe)
4721                 return;
4722         t = qe->type;
4723         list_del(&qe->node);
4724         kfree(qe);
4725
4726         mutex_lock(&q->sysfs_lock);
4727         elevator_switch(q, t);
4728         /* drop the reference acquired in blk_mq_elv_switch_none */
4729         elevator_put(t);
4730         mutex_unlock(&q->sysfs_lock);
4731 }
4732
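/*
 * Re-map software to hardware queues for every request queue sharing this tag
 * set.  All queues are frozen and their I/O schedulers are temporarily
 * switched to 'none' while the hardware contexts are reallocated; if growing
 * the set fails, the previous nr_hw_queues is restored and used instead.
 */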
4733 static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
4734                                                         int nr_hw_queues)
4735 {
4736         struct request_queue *q;
4737         LIST_HEAD(head);
4738         int prev_nr_hw_queues = set->nr_hw_queues;
4739         int i;
4740
4741         lockdep_assert_held(&set->tag_list_lock);
4742
4743         if (set->nr_maps == 1 && nr_hw_queues > nr_cpu_ids)
4744                 nr_hw_queues = nr_cpu_ids;
4745         if (nr_hw_queues < 1)
4746                 return;
4747         if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues)
4748                 return;
4749
4750         list_for_each_entry(q, &set->tag_list, tag_set_list)
4751                 blk_mq_freeze_queue(q);
4752         /*
4753          * Switch IO scheduler to 'none', cleaning up the data associated
4754          * with the previous scheduler. We will switch back once we are done
4755          * updating the new sw to hw queue mappings.
4756          */
4757         list_for_each_entry(q, &set->tag_list, tag_set_list)
4758                 if (!blk_mq_elv_switch_none(&head, q))
4759                         goto switch_back;
4760
4761         list_for_each_entry(q, &set->tag_list, tag_set_list) {
4762                 blk_mq_debugfs_unregister_hctxs(q);
4763                 blk_mq_sysfs_unregister_hctxs(q);
4764         }
4765
4766         if (blk_mq_realloc_tag_set_tags(set, nr_hw_queues) < 0)
4767                 goto reregister;
4768
4769 fallback:
4770         blk_mq_update_queue_map(set);
4771         list_for_each_entry(q, &set->tag_list, tag_set_list) {
4772                 blk_mq_realloc_hw_ctxs(set, q);
4773                 blk_mq_update_poll_flag(q);
4774                 if (q->nr_hw_queues != set->nr_hw_queues) {
4775                         int i = prev_nr_hw_queues;
4776
4777                         pr_warn("Increasing nr_hw_queues to %d failed, falling back to %d\n",
4778                                         nr_hw_queues, prev_nr_hw_queues);
4779                         for (; i < set->nr_hw_queues; i++)
4780                                 __blk_mq_free_map_and_rqs(set, i);
4781
4782                         set->nr_hw_queues = prev_nr_hw_queues;
4783                         goto fallback;
4784                 }
4785                 blk_mq_map_swqueue(q);
4786         }
4787
4788 reregister:
4789         list_for_each_entry(q, &set->tag_list, tag_set_list) {
4790                 blk_mq_sysfs_register_hctxs(q);
4791                 blk_mq_debugfs_register_hctxs(q);
4792         }
4793
4794 switch_back:
4795         list_for_each_entry(q, &set->tag_list, tag_set_list)
4796                 blk_mq_elv_switch_back(&head, q);
4797
4798         list_for_each_entry(q, &set->tag_list, tag_set_list)
4799                 blk_mq_unfreeze_queue(q);
4800
4801         /* Free the excess tags when nr_hw_queues shrinks. */
4802         for (i = set->nr_hw_queues; i < prev_nr_hw_queues; i++)
4803                 __blk_mq_free_map_and_rqs(set, i);
4804 }
4805
4806 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
4807 {
4808         mutex_lock(&set->tag_list_lock);
4809         __blk_mq_update_nr_hw_queues(set, nr_hw_queues);
4810         mutex_unlock(&set->tag_list_lock);
4811 }
4812 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
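/*
 * Illustrative sketch (not part of this file): drivers whose number of usable
 * hardware queues can change at runtime (e.g. after a controller reset or a
 * CPU hotplug event) re-map their queues with the helper above.  Assuming a
 * hypothetical driver that has just discovered "nr_io_queues" usable queues:
 *
 *	blk_mq_update_nr_hw_queues(&mydev->tag_set, nr_io_queues);
 *
 * The call freezes every request queue attached to the tag set while the
 * mappings are rebuilt, so it must only be made from a context that may
 * block.
 */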
4813
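/*
 * Busy-poll the driver's ->poll() handler for one hardware queue.  Returns a
 * positive value as soon as completions are found or the polling task has
 * been woken (e.g. by a signal), and 0 if polling stopped because the driver
 * reported an error, BLK_POLL_ONESHOT was set, or the task needs to
 * reschedule.
 */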
4814 static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
4815                          struct io_comp_batch *iob, unsigned int flags)
4816 {
4817         long state = get_current_state();
4818         int ret;
4819
4820         do {
4821                 ret = q->mq_ops->poll(hctx, iob);
4822                 if (ret > 0) {
4823                         __set_current_state(TASK_RUNNING);
4824                         return ret;
4825                 }
4826
4827                 if (signal_pending_state(state, current))
4828                         __set_current_state(TASK_RUNNING);
4829                 if (task_is_running(current))
4830                         return 1;
4831
4832                 if (ret < 0 || (flags & BLK_POLL_ONESHOT))
4833                         break;
4834                 cpu_relax();
4835         } while (!need_resched());
4836
4837         __set_current_state(TASK_RUNNING);
4838         return 0;
4839 }
4840
4841 int blk_mq_poll(struct request_queue *q, blk_qc_t cookie,
4842                 struct io_comp_batch *iob, unsigned int flags)
4843 {
4844         struct blk_mq_hw_ctx *hctx = xa_load(&q->hctx_table, cookie);
4845
4846         return blk_hctx_poll(q, hctx, iob, flags);
4847 }
4848
4849 int blk_rq_poll(struct request *rq, struct io_comp_batch *iob,
4850                 unsigned int poll_flags)
4851 {
4852         struct request_queue *q = rq->q;
4853         int ret;
4854
4855         if (!blk_rq_is_poll(rq))
4856                 return 0;
4857         if (!percpu_ref_tryget(&q->q_usage_counter))
4858                 return 0;
4859
4860         ret = blk_hctx_poll(q, rq->mq_hctx, iob, poll_flags);
4861         blk_queue_exit(q);
4862
4863         return ret;
4864 }
4865 EXPORT_SYMBOL_GPL(blk_rq_poll);
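/*
 * Illustrative sketch (not part of this file): a caller that issued a polled
 * request and wants to busy-wait for it might, assuming "rq" was allocated
 * for a polled (HIPRI) operation and "done" is set by its end_io handler,
 * do something like:
 *
 *	while (!READ_ONCE(done))
 *		blk_rq_poll(rq, NULL, 0);
 */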
4866
4867 unsigned int blk_mq_rq_cpu(struct request *rq)
4868 {
4869         return rq->mq_ctx->cpu;
4870 }
4871 EXPORT_SYMBOL(blk_mq_rq_cpu);
4872
4873 void blk_mq_cancel_work_sync(struct request_queue *q)
4874 {
4875         struct blk_mq_hw_ctx *hctx;
4876         unsigned long i;
4877
4878         cancel_delayed_work_sync(&q->requeue_work);
4879
4880         queue_for_each_hw_ctx(q, hctx, i)
4881                 cancel_delayed_work_sync(&hctx->run_work);
4882 }
4883
4884 static int __init blk_mq_init(void)
4885 {
4886         int i;
4887
4888         for_each_possible_cpu(i)
4889                 init_llist_head(&per_cpu(blk_cpu_done, i));
4890         for_each_possible_cpu(i)
4891                 INIT_CSD(&per_cpu(blk_cpu_csd, i),
4892                          __blk_mq_complete_request_remote, NULL);
4893         open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
4894
4895         cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD,
4896                                   "block/softirq:dead", NULL,
4897                                   blk_softirq_cpu_dead);
4898         cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
4899                                 blk_mq_hctx_notify_dead);
4900         cpuhp_setup_state_multi(CPUHP_AP_BLK_MQ_ONLINE, "block/mq:online",
4901                                 blk_mq_hctx_notify_online,
4902                                 blk_mq_hctx_notify_offline);
4903         return 0;
4904 }
4905 subsys_initcall(blk_mq_init);