// SPDX-License-Identifier: GPL-2.0
/*
 * Block multiqueue core code
 *
 * Copyright (C) 2013-2014 Jens Axboe
 * Copyright (C) 2013-2014 Christoph Hellwig
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/kmemleak.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/llist.h>
#include <linux/cpu.h>
#include <linux/cache.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/topology.h>
#include <linux/sched/signal.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
#include <linux/prefetch.h>
#include <linux/blk-crypto.h>
#include <linux/part_stat.h>

#include <trace/events/block.h>

#include <linux/blk-mq.h>
#include <linux/t10-pi.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-pm.h"
#include "blk-stat.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"

static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);

static void blk_mq_poll_stats_start(struct request_queue *q);
static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);

static int blk_mq_poll_stats_bkt(const struct request *rq)
{
	int ddir, sectors, bucket;

	ddir = rq_data_dir(rq);
	sectors = blk_rq_stats_sectors(rq);

	bucket = ddir + 2 * ilog2(sectors);

	if (bucket < 0)
		return -1;
	else if (bucket >= BLK_MQ_POLL_STATS_BKTS)
		return ddir + BLK_MQ_POLL_STATS_BKTS - 2;

	return bucket;
}
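
/*
 * Worked example of the bucketing above (editor's note): a 4 KiB read is
 * 8 sectors, so ilog2(8) = 3 and the request lands in bucket
 * READ + 2 * 3 = 6; a write of the same size lands in bucket 7.
 */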

#define BLK_QC_T_SHIFT		16
#define BLK_QC_T_INTERNAL	(1U << 31)
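
/*
 * Layout of the polling cookie built by blk_rq_to_qc() below, as implied
 * by the shift and mask above (editor's note):
 *
 *   bit  31     : set when the tag is a scheduler ("internal") tag
 *   bits 30..16 : hardware queue index (queue_num)
 *   bits 15..0  : driver or internal tag
 */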

static inline struct blk_mq_hw_ctx *blk_qc_to_hctx(struct request_queue *q,
		blk_qc_t qc)
{
	return q->queue_hw_ctx[(qc & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT];
}

static inline struct request *blk_qc_to_rq(struct blk_mq_hw_ctx *hctx,
		blk_qc_t qc)
{
	unsigned int tag = qc & ((1U << BLK_QC_T_SHIFT) - 1);

	if (qc & BLK_QC_T_INTERNAL)
		return blk_mq_tag_to_rq(hctx->sched_tags, tag);
	return blk_mq_tag_to_rq(hctx->tags, tag);
}

static inline blk_qc_t blk_rq_to_qc(struct request *rq)
{
	return (rq->mq_hctx->queue_num << BLK_QC_T_SHIFT) |
		(rq->tag != -1 ?
		 rq->tag : (rq->internal_tag | BLK_QC_T_INTERNAL));
}

/*
 * Check if any of the ctx, dispatch list or elevator has pending work in
 * this hardware queue.
 */
static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
	return !list_empty_careful(&hctx->dispatch) ||
		sbitmap_any_bit_set(&hctx->ctx_map) ||
			blk_mq_sched_has_work(hctx);
}

/*
 * Mark this ctx as having pending work in this hardware queue
 */
static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
				     struct blk_mq_ctx *ctx)
{
	const int bit = ctx->index_hw[hctx->type];

	if (!sbitmap_test_bit(&hctx->ctx_map, bit))
		sbitmap_set_bit(&hctx->ctx_map, bit);
}

static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
				      struct blk_mq_ctx *ctx)
{
	const int bit = ctx->index_hw[hctx->type];

	sbitmap_clear_bit(&hctx->ctx_map, bit);
}

struct mq_inflight {
	struct block_device *part;
	unsigned int inflight[2];
};

static bool blk_mq_check_inflight(struct request *rq, void *priv,
				  bool reserved)
{
	struct mq_inflight *mi = priv;

	if ((!mi->part->bd_partno || rq->part == mi->part) &&
	    blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
		mi->inflight[rq_data_dir(rq)]++;

	return true;
}

unsigned int blk_mq_in_flight(struct request_queue *q,
		struct block_device *part)
{
	struct mq_inflight mi = { .part = part };

	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);

	return mi.inflight[0] + mi.inflight[1];
}

void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
		unsigned int inflight[2])
{
	struct mq_inflight mi = { .part = part };

	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
	inflight[0] = mi.inflight[0];
	inflight[1] = mi.inflight[1];
}

void blk_freeze_queue_start(struct request_queue *q)
{
	mutex_lock(&q->mq_freeze_lock);
	if (++q->mq_freeze_depth == 1) {
		percpu_ref_kill(&q->q_usage_counter);
		mutex_unlock(&q->mq_freeze_lock);
		if (queue_is_mq(q))
			blk_mq_run_hw_queues(q, false);
	} else {
		mutex_unlock(&q->mq_freeze_lock);
	}
}
EXPORT_SYMBOL_GPL(blk_freeze_queue_start);

void blk_mq_freeze_queue_wait(struct request_queue *q)
{
	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);

int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
				     unsigned long timeout)
{
	return wait_event_timeout(q->mq_freeze_wq,
					percpu_ref_is_zero(&q->q_usage_counter),
					timeout);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);

/*
 * Guarantee no request is in use, so we can change any data structure of
 * the queue afterward.
 */
void blk_freeze_queue(struct request_queue *q)
{
	/*
	 * In the !blk_mq case we are only calling this to kill the
	 * q_usage_counter; otherwise this increases the freeze depth
	 * and waits for it to return to zero.  For this reason there is
	 * no blk_unfreeze_queue(), and blk_freeze_queue() is not
	 * exported to drivers, since blk_mq is the only user of unfreeze.
	 */
	blk_freeze_queue_start(q);
	blk_mq_freeze_queue_wait(q);
}

void blk_mq_freeze_queue(struct request_queue *q)
{
	/*
	 * ...just an alias to keep freeze and unfreeze actions balanced
	 * in the blk_mq_* namespace
	 */
	blk_freeze_queue(q);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);

void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
{
	mutex_lock(&q->mq_freeze_lock);
	if (force_atomic)
		q->q_usage_counter.data->force_atomic = true;
	q->mq_freeze_depth--;
	WARN_ON_ONCE(q->mq_freeze_depth < 0);
	if (!q->mq_freeze_depth) {
		percpu_ref_resurrect(&q->q_usage_counter);
		wake_up_all(&q->mq_freeze_wq);
	}
	mutex_unlock(&q->mq_freeze_lock);
}

void blk_mq_unfreeze_queue(struct request_queue *q)
{
	__blk_mq_unfreeze_queue(q, false);
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
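
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): a driver updating queue state typically brackets the update with
 * a freeze/unfreeze pair so no requests are in flight while it works:
 *
 *	blk_mq_freeze_queue(q);
 *	... update queue / tag set state ...
 *	blk_mq_unfreeze_queue(q);
 */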

/*
 * FIXME: replace the scsi_internal_device_*block_nowait() calls in the
 * mpt3sas driver such that this function can be removed.
 */
void blk_mq_quiesce_queue_nowait(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->queue_lock, flags);
	if (!q->quiesce_depth++)
		blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q);
	spin_unlock_irqrestore(&q->queue_lock, flags);
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);

/**
 * blk_mq_wait_quiesce_done() - wait until in-progress quiesce is done
 * @q: request queue.
 *
 * Note: it is the driver's responsibility to make sure that quiesce has
 * been started.
 */
void blk_mq_wait_quiesce_done(struct request_queue *q)
{
	if (blk_queue_has_srcu(q))
		synchronize_srcu(q->srcu);
	else
		synchronize_rcu();
}
EXPORT_SYMBOL_GPL(blk_mq_wait_quiesce_done);

/**
 * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
 * @q: request queue.
 *
 * Note: this function does not prevent the struct request end_io()
 * callback from being invoked. Once this function returns, it is
 * guaranteed that no dispatch can happen until the queue is unquiesced
 * via blk_mq_unquiesce_queue().
 */
void blk_mq_quiesce_queue(struct request_queue *q)
{
	blk_mq_quiesce_queue_nowait(q);
	blk_mq_wait_quiesce_done(q);
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
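
/*
 * Editor's note: quiescing is weaker than freezing. It only guarantees
 * that no new dispatches happen until blk_mq_unquiesce_queue(); it does
 * not drain q_usage_counter or wait for already-issued requests to
 * complete the way blk_mq_freeze_queue() does.
 */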

/*
 * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue()
 * @q: request queue.
 *
 * This function restores the queue to the state it was in before
 * blk_mq_quiesce_queue() was called.
 */
void blk_mq_unquiesce_queue(struct request_queue *q)
{
	unsigned long flags;
	bool run_queue = false;

	spin_lock_irqsave(&q->queue_lock, flags);
	if (WARN_ON_ONCE(q->quiesce_depth <= 0)) {
		;
	} else if (!--q->quiesce_depth) {
		blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
		run_queue = true;
	}
	spin_unlock_irqrestore(&q->queue_lock, flags);

	/* dispatch requests which are inserted during quiescing */
	if (run_queue)
		blk_mq_run_hw_queues(q, true);
}
EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);

void blk_mq_wake_waiters(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i)
		if (blk_mq_hw_queue_mapped(hctx))
			blk_mq_tag_wakeup_all(hctx->tags, true);
}

void blk_rq_init(struct request_queue *q, struct request *rq)
{
	memset(rq, 0, sizeof(*rq));

	INIT_LIST_HEAD(&rq->queuelist);
	rq->q = q;
	rq->__sector = (sector_t) -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->tag = BLK_MQ_NO_TAG;
	rq->internal_tag = BLK_MQ_NO_TAG;
	rq->start_time_ns = ktime_get_ns();
	rq->part = NULL;
	blk_crypto_rq_set_defaults(rq);
}
EXPORT_SYMBOL(blk_rq_init);

static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
		struct blk_mq_tags *tags, unsigned int tag, u64 alloc_time_ns)
{
	struct blk_mq_ctx *ctx = data->ctx;
	struct blk_mq_hw_ctx *hctx = data->hctx;
	struct request_queue *q = data->q;
	struct request *rq = tags->static_rqs[tag];

	rq->q = q;
	rq->mq_ctx = ctx;
	rq->mq_hctx = hctx;
	rq->cmd_flags = data->cmd_flags;

	if (data->flags & BLK_MQ_REQ_PM)
		data->rq_flags |= RQF_PM;
	if (blk_queue_io_stat(q))
		data->rq_flags |= RQF_IO_STAT;
	rq->rq_flags = data->rq_flags;

	if (!(data->rq_flags & RQF_ELV)) {
		rq->tag = tag;
		rq->internal_tag = BLK_MQ_NO_TAG;
	} else {
		rq->tag = BLK_MQ_NO_TAG;
		rq->internal_tag = tag;
	}
	rq->timeout = 0;

	if (blk_mq_need_time_stamp(rq))
		rq->start_time_ns = ktime_get_ns();
	else
		rq->start_time_ns = 0;
	rq->part = NULL;
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
	rq->alloc_time_ns = alloc_time_ns;
#endif
	rq->io_start_time_ns = 0;
	rq->stats_sectors = 0;
	rq->nr_phys_segments = 0;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	rq->nr_integrity_segments = 0;
#endif
	rq->end_io = NULL;
	rq->end_io_data = NULL;

	blk_crypto_rq_set_defaults(rq);
	INIT_LIST_HEAD(&rq->queuelist);
	/* tag was already set */
	WRITE_ONCE(rq->deadline, 0);
	req_ref_set(rq, 1);

	if (rq->rq_flags & RQF_ELV) {
		struct elevator_queue *e = data->q->elevator;

		INIT_HLIST_NODE(&rq->hash);
		RB_CLEAR_NODE(&rq->rb_node);

		if (!op_is_flush(data->cmd_flags) &&
		    e->type->ops.prepare_request) {
			e->type->ops.prepare_request(rq);
			rq->rq_flags |= RQF_ELVPRIV;
		}
	}

	return rq;
}

static inline struct request *
__blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data,
		u64 alloc_time_ns)
{
	unsigned int tag, tag_offset;
	struct blk_mq_tags *tags;
	struct request *rq;
	unsigned long tag_mask;
	int i, nr = 0;

	tag_mask = blk_mq_get_tags(data, data->nr_tags, &tag_offset);
	if (unlikely(!tag_mask))
		return NULL;

	tags = blk_mq_tags_from_data(data);
	for (i = 0; tag_mask; i++) {
		if (!(tag_mask & (1UL << i)))
			continue;
		tag = tag_offset + i;
		prefetch(tags->static_rqs[tag]);
		tag_mask &= ~(1UL << i);
		rq = blk_mq_rq_ctx_init(data, tags, tag, alloc_time_ns);
		rq_list_add(data->cached_rq, rq);
		nr++;
	}
	/* caller already holds a reference, add for remainder */
	percpu_ref_get_many(&data->q->q_usage_counter, nr - 1);
	data->nr_tags -= nr;

	return rq_list_pop(data->cached_rq);
}

static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
{
	struct request_queue *q = data->q;
	u64 alloc_time_ns = 0;
	struct request *rq;
	unsigned int tag;

	/* alloc_time includes depth and tag waits */
	if (blk_queue_rq_alloc_time(q))
		alloc_time_ns = ktime_get_ns();

	if (data->cmd_flags & REQ_NOWAIT)
		data->flags |= BLK_MQ_REQ_NOWAIT;

	if (q->elevator) {
		struct elevator_queue *e = q->elevator;

		data->rq_flags |= RQF_ELV;

		/*
		 * Flush/passthrough requests are special and go directly to the
		 * dispatch list. Don't include reserved tags in the
		 * limiting, as it isn't useful.
		 */
		if (!op_is_flush(data->cmd_flags) &&
		    !blk_op_is_passthrough(data->cmd_flags) &&
		    e->type->ops.limit_depth &&
		    !(data->flags & BLK_MQ_REQ_RESERVED))
			e->type->ops.limit_depth(data->cmd_flags, data);
	}

retry:
	data->ctx = blk_mq_get_ctx(q);
	data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
	if (!(data->rq_flags & RQF_ELV))
		blk_mq_tag_busy(data->hctx);

	/*
	 * Try batched alloc if we want more than 1 tag.
	 */
	if (data->nr_tags > 1) {
		rq = __blk_mq_alloc_requests_batch(data, alloc_time_ns);
		if (rq)
			return rq;
		data->nr_tags = 1;
	}

	/*
	 * Waiting allocations only fail because of an inactive hctx.  In that
	 * case just retry the hctx assignment and tag allocation as CPU hotplug
	 * should have migrated us to an online CPU by now.
	 */
	tag = blk_mq_get_tag(data);
	if (tag == BLK_MQ_NO_TAG) {
		if (data->flags & BLK_MQ_REQ_NOWAIT)
			return NULL;
		/*
		 * Give up the CPU and sleep for a short time to ensure that
		 * threads using a realtime scheduling class are migrated off
		 * the CPU, and thus off the hctx that is going away.
		 */
		msleep(3);
		goto retry;
	}

	return blk_mq_rq_ctx_init(data, blk_mq_tags_from_data(data), tag,
					alloc_time_ns);
}

struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
		blk_mq_req_flags_t flags)
{
	struct blk_mq_alloc_data data = {
		.q		= q,
		.flags		= flags,
		.cmd_flags	= op,
		.nr_tags	= 1,
	};
	struct request *rq;
	int ret;

	ret = blk_queue_enter(q, flags);
	if (ret)
		return ERR_PTR(ret);

	rq = __blk_mq_alloc_requests(&data);
	if (!rq)
		goto out_queue_exit;
	rq->__data_len = 0;
	rq->__sector = (sector_t) -1;
	rq->bio = rq->biotail = NULL;
	return rq;
out_queue_exit:
	blk_queue_exit(q);
	return ERR_PTR(-EWOULDBLOCK);
}
EXPORT_SYMBOL(blk_mq_alloc_request);
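
/*
 * Illustrative usage sketch (editor's addition): a passthrough caller
 * would typically pair this with blk_execute_rq() (defined later in this
 * file) and blk_mq_free_request():
 *
 *	struct request *rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
 *
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	... attach data and driver payload to rq ...
 *	status = blk_execute_rq(rq, false);
 *	blk_mq_free_request(rq);
 */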

struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
	unsigned int op, blk_mq_req_flags_t flags, unsigned int hctx_idx)
{
	struct blk_mq_alloc_data data = {
		.q		= q,
		.flags		= flags,
		.cmd_flags	= op,
		.nr_tags	= 1,
	};
	u64 alloc_time_ns = 0;
	unsigned int cpu;
	unsigned int tag;
	int ret;

	/* alloc_time includes depth and tag waits */
	if (blk_queue_rq_alloc_time(q))
		alloc_time_ns = ktime_get_ns();

	/*
	 * If the tag allocator sleeps we could get an allocation for a
	 * different hardware context.  No need to complicate the low level
	 * allocator for this for the rare use case of a command tied to
	 * a specific queue.
	 */
	if (WARN_ON_ONCE(!(flags & (BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED))))
		return ERR_PTR(-EINVAL);

	if (hctx_idx >= q->nr_hw_queues)
		return ERR_PTR(-EIO);

	ret = blk_queue_enter(q, flags);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * Check if the hardware context is actually mapped to anything.
	 * If not tell the caller that it should skip this queue.
	 */
	ret = -EXDEV;
	data.hctx = q->queue_hw_ctx[hctx_idx];
	if (!blk_mq_hw_queue_mapped(data.hctx))
		goto out_queue_exit;
	cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
	data.ctx = __blk_mq_get_ctx(q, cpu);

	if (!q->elevator)
		blk_mq_tag_busy(data.hctx);
	else
		data.rq_flags |= RQF_ELV;

	ret = -EWOULDBLOCK;
	tag = blk_mq_get_tag(&data);
	if (tag == BLK_MQ_NO_TAG)
		goto out_queue_exit;
	return blk_mq_rq_ctx_init(&data, blk_mq_tags_from_data(&data), tag,
					alloc_time_ns);

out_queue_exit:
	blk_queue_exit(q);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);

static void __blk_mq_free_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
	const int sched_tag = rq->internal_tag;

	blk_crypto_free_request(rq);
	blk_pm_mark_last_busy(rq);
	rq->mq_hctx = NULL;
	if (rq->tag != BLK_MQ_NO_TAG)
		blk_mq_put_tag(hctx->tags, ctx, rq->tag);
	if (sched_tag != BLK_MQ_NO_TAG)
		blk_mq_put_tag(hctx->sched_tags, ctx, sched_tag);
	blk_mq_sched_restart(hctx);
	blk_queue_exit(q);
}

void blk_mq_free_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	if ((rq->rq_flags & RQF_ELVPRIV) &&
	    q->elevator->type->ops.finish_request)
		q->elevator->type->ops.finish_request(rq);

	if (rq->rq_flags & RQF_MQ_INFLIGHT)
		__blk_mq_dec_active_requests(hctx);

	if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
		laptop_io_completion(q->disk->bdi);

	rq_qos_done(q, rq);

	WRITE_ONCE(rq->state, MQ_RQ_IDLE);
	if (req_ref_put_and_test(rq))
		__blk_mq_free_request(rq);
}
EXPORT_SYMBOL_GPL(blk_mq_free_request);

void blk_mq_free_plug_rqs(struct blk_plug *plug)
{
	struct request *rq;

	while ((rq = rq_list_pop(&plug->cached_rq)) != NULL)
		blk_mq_free_request(rq);
}

void blk_dump_rq_flags(struct request *rq, char *msg)
{
	printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
		rq->q->disk ? rq->q->disk->disk_name : "?",
		(unsigned long long) rq->cmd_flags);

	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
	       (unsigned long long)blk_rq_pos(rq),
	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
	printk(KERN_INFO "  bio %p, biotail %p, len %u\n",
	       rq->bio, rq->biotail, blk_rq_bytes(rq));
}
EXPORT_SYMBOL(blk_dump_rq_flags);

static void req_bio_endio(struct request *rq, struct bio *bio,
			  unsigned int nbytes, blk_status_t error)
{
	if (unlikely(error)) {
		bio->bi_status = error;
	} else if (req_op(rq) == REQ_OP_ZONE_APPEND) {
		/*
		 * Partial zone append completions cannot be supported as the
		 * BIO fragments may end up not being written sequentially.
		 */
		if (bio->bi_iter.bi_size != nbytes)
			bio->bi_status = BLK_STS_IOERR;
		else
			bio->bi_iter.bi_sector = rq->__sector;
	}

	bio_advance(bio, nbytes);

	if (unlikely(rq->rq_flags & RQF_QUIET))
		bio_set_flag(bio, BIO_QUIET);
	/* don't actually finish bio if it's part of flush sequence */
	if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
		bio_endio(bio);
}

static void blk_account_io_completion(struct request *req, unsigned int bytes)
{
	if (req->part && blk_do_io_stat(req)) {
		const int sgrp = op_stat_group(req_op(req));

		part_stat_lock();
		part_stat_add(req->part, sectors[sgrp], bytes >> 9);
		part_stat_unlock();
	}
}

static void blk_print_req_error(struct request *req, blk_status_t status)
{
	printk_ratelimited(KERN_ERR
		"%s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x "
		"phys_seg %u prio class %u\n",
		blk_status_to_str(status),
		req->q->disk ? req->q->disk->disk_name : "?",
		blk_rq_pos(req), req_op(req), blk_op_str(req_op(req)),
		req->cmd_flags & ~REQ_OP_MASK,
		req->nr_phys_segments,
		IOPRIO_PRIO_CLASS(req->ioprio));
}

/*
 * Fully end IO on a request. Does not support partial completions, or
 * errors.
 */
static void blk_complete_request(struct request *req)
{
	const bool is_flush = (req->rq_flags & RQF_FLUSH_SEQ) != 0;
	int total_bytes = blk_rq_bytes(req);
	struct bio *bio = req->bio;

	trace_block_rq_complete(req, BLK_STS_OK, total_bytes);

	if (!bio)
		return;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ)
		req->q->integrity.profile->complete_fn(req, total_bytes);
#endif

	blk_account_io_completion(req, total_bytes);

	do {
		struct bio *next = bio->bi_next;

		/* Completion has already been traced */
		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
		if (!is_flush)
			bio_endio(bio);
		bio = next;
	} while (bio);

	/*
	 * Reset counters so that the request stacking driver
	 * can find how many bytes remain in the request
	 * later.
	 */
	req->bio = NULL;
	req->__data_len = 0;
}

/**
 * blk_update_request - Complete multiple bytes without completing the request
 * @req:      the request being processed
 * @error:    block status code
 * @nr_bytes: number of bytes to complete for @req
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @req, but doesn't complete
 *     the request structure even if @req doesn't have leftover.
 *     If @req has leftover, sets it up for the next range of segments.
 *
 *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees
 *     %false return from this function.
 *
 * Note:
 *	The RQF_SPECIAL_PAYLOAD flag is ignored on purpose in this function
 *	except in the consistency check at the end of this function.
 *
 * Return:
 *     %false - this request doesn't have any more data
 *     %true  - this request has more data
 **/
bool blk_update_request(struct request *req, blk_status_t error,
		unsigned int nr_bytes)
{
	int total_bytes;

	trace_block_rq_complete(req, error, nr_bytes);

	if (!req->bio)
		return false;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
	    error == BLK_STS_OK)
		req->q->integrity.profile->complete_fn(req, nr_bytes);
#endif

	if (unlikely(error && !blk_rq_is_passthrough(req) &&
		     !(req->rq_flags & RQF_QUIET)))
		blk_print_req_error(req, error);

	blk_account_io_completion(req, nr_bytes);

	total_bytes = 0;
	while (req->bio) {
		struct bio *bio = req->bio;
		unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);

		if (bio_bytes == bio->bi_iter.bi_size)
			req->bio = bio->bi_next;

		/* Completion has already been traced */
		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
		req_bio_endio(req, bio, bio_bytes, error);

		total_bytes += bio_bytes;
		nr_bytes -= bio_bytes;

		if (!nr_bytes)
			break;
	}

	/*
	 * completely done
	 */
	if (!req->bio) {
		/*
		 * Reset counters so that the request stacking driver
		 * can find how many bytes remain in the request
		 * later.
		 */
		req->__data_len = 0;
		return false;
	}

	req->__data_len -= total_bytes;

	/* update sector only for requests with clear definition of sector */
	if (!blk_rq_is_passthrough(req))
		req->__sector += total_bytes >> 9;

	/* mixed attributes always follow the first bio */
	if (req->rq_flags & RQF_MIXED_MERGE) {
		req->cmd_flags &= ~REQ_FAILFAST_MASK;
		req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
	}

	if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) {
		/*
		 * If total number of sectors is less than the first segment
		 * size, something has gone terribly wrong.
		 */
		if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
			blk_dump_rq_flags(req, "request botched");
			req->__data_len = blk_rq_cur_bytes(req);
		}

		/* recalculate the number of segments */
		req->nr_phys_segments = blk_recalc_rq_segments(req);
	}

	return true;
}
EXPORT_SYMBOL_GPL(blk_update_request);

static void __blk_account_io_done(struct request *req, u64 now)
{
	const int sgrp = op_stat_group(req_op(req));

	part_stat_lock();
	update_io_ticks(req->part, jiffies, true);
	part_stat_inc(req->part, ios[sgrp]);
	part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns);
	part_stat_unlock();
}

static inline void blk_account_io_done(struct request *req, u64 now)
{
	/*
	 * Account IO completion.  The flush_rq is accounted as neither a
	 * normal IO on queueing nor on completion; accounting the
	 * containing request is enough.
	 */
	if (blk_do_io_stat(req) && req->part &&
	    !(req->rq_flags & RQF_FLUSH_SEQ))
		__blk_account_io_done(req, now);
}

static void __blk_account_io_start(struct request *rq)
{
	/* passthrough requests can hold bios that do not have ->bi_bdev set */
	if (rq->bio && rq->bio->bi_bdev)
		rq->part = rq->bio->bi_bdev;
	else if (rq->q->disk)
		rq->part = rq->q->disk->part0;

	part_stat_lock();
	update_io_ticks(rq->part, jiffies, false);
	part_stat_unlock();
}

static inline void blk_account_io_start(struct request *req)
{
	if (blk_do_io_stat(req))
		__blk_account_io_start(req);
}

static inline void __blk_mq_end_request_acct(struct request *rq, u64 now)
{
	if (rq->rq_flags & RQF_STATS) {
		blk_mq_poll_stats_start(rq->q);
		blk_stat_add(rq, now);
	}

	blk_mq_sched_completed_request(rq, now);
	blk_account_io_done(rq, now);
}

inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
{
	if (blk_mq_need_time_stamp(rq))
		__blk_mq_end_request_acct(rq, ktime_get_ns());

	if (rq->end_io) {
		rq_qos_done(rq->q, rq);
		rq->end_io(rq, error);
	} else {
		blk_mq_free_request(rq);
	}
}
EXPORT_SYMBOL(__blk_mq_end_request);

void blk_mq_end_request(struct request *rq, blk_status_t error)
{
	if (blk_update_request(rq, error, blk_rq_bytes(rq)))
		BUG();
	__blk_mq_end_request(rq, error);
}
EXPORT_SYMBOL(blk_mq_end_request);

#define TAG_COMP_BATCH		32

static inline void blk_mq_flush_tag_batch(struct blk_mq_hw_ctx *hctx,
					  int *tag_array, int nr_tags)
{
	struct request_queue *q = hctx->queue;

	/*
	 * All requests should have been marked as RQF_MQ_INFLIGHT, so
	 * update hctx->nr_active in batch
	 */
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_sub_active_requests(hctx, nr_tags);

	blk_mq_put_tags(hctx->tags, tag_array, nr_tags);
	percpu_ref_put_many(&q->q_usage_counter, nr_tags);
}

void blk_mq_end_request_batch(struct io_comp_batch *iob)
{
	int tags[TAG_COMP_BATCH], nr_tags = 0;
	struct blk_mq_hw_ctx *cur_hctx = NULL;
	struct request *rq;
	u64 now = 0;

	if (iob->need_ts)
		now = ktime_get_ns();

	while ((rq = rq_list_pop(&iob->req_list)) != NULL) {
		prefetch(rq->bio);
		prefetch(rq->rq_next);

		blk_complete_request(rq);
		if (iob->need_ts)
			__blk_mq_end_request_acct(rq, now);

		rq_qos_done(rq->q, rq);

		WRITE_ONCE(rq->state, MQ_RQ_IDLE);
		if (!req_ref_put_and_test(rq))
			continue;

		blk_crypto_free_request(rq);
		blk_pm_mark_last_busy(rq);

		if (nr_tags == TAG_COMP_BATCH || cur_hctx != rq->mq_hctx) {
			if (cur_hctx)
				blk_mq_flush_tag_batch(cur_hctx, tags, nr_tags);
			nr_tags = 0;
			cur_hctx = rq->mq_hctx;
		}
		tags[nr_tags++] = rq->tag;
	}

	if (nr_tags)
		blk_mq_flush_tag_batch(cur_hctx, tags, nr_tags);
}
EXPORT_SYMBOL_GPL(blk_mq_end_request_batch);
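
/*
 * Editor's note: callers accumulate completed requests on iob->req_list
 * (setting iob->need_ts when timestamps are wanted) and then call
 * blk_mq_end_request_batch() once, so the tag frees and q_usage_counter
 * puts are amortized in blk_mq_flush_tag_batch() above.
 */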

static void blk_complete_reqs(struct llist_head *list)
{
	struct llist_node *entry = llist_reverse_order(llist_del_all(list));
	struct request *rq, *next;

	llist_for_each_entry_safe(rq, next, entry, ipi_list)
		rq->q->mq_ops->complete(rq);
}

static __latent_entropy void blk_done_softirq(struct softirq_action *h)
{
	blk_complete_reqs(this_cpu_ptr(&blk_cpu_done));
}

static int blk_softirq_cpu_dead(unsigned int cpu)
{
	blk_complete_reqs(&per_cpu(blk_cpu_done, cpu));
	return 0;
}

static void __blk_mq_complete_request_remote(void *data)
{
	__raise_softirq_irqoff(BLOCK_SOFTIRQ);
}

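/*
 * Editor's summary of the checks below: complete locally on UP kernels,
 * when QUEUE_FLAG_SAME_COMP is clear, under force-threaded interrupts,
 * or when the completing CPU is the submitting CPU (or shares its cache
 * and QUEUE_FLAG_SAME_FORCE is clear); otherwise punt to the submitting
 * CPU via IPI, provided that CPU is still online.
 */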
static inline bool blk_mq_complete_need_ipi(struct request *rq)
{
	int cpu = raw_smp_processor_id();

	if (!IS_ENABLED(CONFIG_SMP) ||
	    !test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags))
		return false;
	/*
	 * With force threaded interrupts enabled, raising softirq from an SMP
	 * function call will always result in waking the ksoftirqd thread.
	 * This is probably worse than completing the request on a different
	 * cache domain.
	 */
	if (force_irqthreads())
		return false;

	/* same CPU or cache domain?  Complete locally */
	if (cpu == rq->mq_ctx->cpu ||
	    (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags) &&
	     cpus_share_cache(cpu, rq->mq_ctx->cpu)))
		return false;

	/* don't try to IPI to an offline CPU */
	return cpu_online(rq->mq_ctx->cpu);
}

static void blk_mq_complete_send_ipi(struct request *rq)
{
	struct llist_head *list;
	unsigned int cpu;

	cpu = rq->mq_ctx->cpu;
	list = &per_cpu(blk_cpu_done, cpu);
	if (llist_add(&rq->ipi_list, list)) {
		INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
		smp_call_function_single_async(cpu, &rq->csd);
	}
}

static void blk_mq_raise_softirq(struct request *rq)
{
	struct llist_head *list;

	preempt_disable();
	list = this_cpu_ptr(&blk_cpu_done);
	if (llist_add(&rq->ipi_list, list))
		raise_softirq(BLOCK_SOFTIRQ);
	preempt_enable();
}

bool blk_mq_complete_request_remote(struct request *rq)
{
	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);

	/*
	 * For a polled request, always complete locally; it's pointless
	 * to redirect the completion.
	 */
	if (rq->cmd_flags & REQ_POLLED)
		return false;

	if (blk_mq_complete_need_ipi(rq)) {
		blk_mq_complete_send_ipi(rq);
		return true;
	}

	if (rq->q->nr_hw_queues == 1) {
		blk_mq_raise_softirq(rq);
		return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(blk_mq_complete_request_remote);

/**
 * blk_mq_complete_request - end I/O on a request
 * @rq:		the request being processed
 *
 * Description:
 *	Complete a request by scheduling the ->complete_rq operation.
 **/
void blk_mq_complete_request(struct request *rq)
{
	if (!blk_mq_complete_request_remote(rq))
		rq->q->mq_ops->complete(rq);
}
EXPORT_SYMBOL(blk_mq_complete_request);

/**
 * blk_mq_start_request - Start processing a request
 * @rq: Pointer to request to be started
 *
 * Function used by device drivers to notify the block layer that a request
 * is going to be processed now, so the block layer can do proper
 * initialization, such as starting the timeout timer.
 */
void blk_mq_start_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	trace_block_rq_issue(rq);

	if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
		u64 start_time;
#ifdef CONFIG_BLK_CGROUP
		if (rq->bio)
			start_time = bio_issue_time(&rq->bio->bi_issue);
		else
#endif
			start_time = ktime_get_ns();
		rq->io_start_time_ns = start_time;
		rq->stats_sectors = blk_rq_sectors(rq);
		rq->rq_flags |= RQF_STATS;
		rq_qos_issue(q, rq);
	}

	WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE);

	blk_add_timer(rq);
	WRITE_ONCE(rq->state, MQ_RQ_IN_FLIGHT);

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE)
		q->integrity.profile->prepare_fn(rq);
#endif
	if (rq->bio && rq->bio->bi_opf & REQ_POLLED)
		WRITE_ONCE(rq->bio->bi_cookie, blk_rq_to_qc(rq));
}
EXPORT_SYMBOL(blk_mq_start_request);

/**
 * blk_end_sync_rq - executes a completion event on a request
 * @rq: request to complete
 * @error: end I/O status of the request
 */
static void blk_end_sync_rq(struct request *rq, blk_status_t error)
{
	struct completion *waiting = rq->end_io_data;

	rq->end_io_data = (void *)(uintptr_t)error;

	/*
	 * complete last, if this is a stack request the process (and thus
	 * the rq pointer) could be invalid right after this complete()
	 */
	complete(waiting);
}

/**
 * blk_execute_rq_nowait - insert a request to I/O scheduler for execution
 * @rq:		request to insert
 * @at_head:    insert request at head or tail of queue
 * @done:	I/O completion handler
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution.  Don't wait for completion.
 *
 * Note:
 *    This function will invoke @done directly if the queue is dead.
 */
void blk_execute_rq_nowait(struct request *rq, bool at_head, rq_end_io_fn *done)
{
	WARN_ON(irqs_disabled());
	WARN_ON(!blk_rq_is_passthrough(rq));

	rq->end_io = done;

	blk_account_io_start(rq);

	/*
	 * don't check dying flag for MQ because the request won't
	 * be reused after dying flag is set
	 */
	blk_mq_sched_insert_request(rq, at_head, true, false);
}
EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);

static bool blk_rq_is_poll(struct request *rq)
{
	if (!rq->mq_hctx)
		return false;
	if (rq->mq_hctx->type != HCTX_TYPE_POLL)
		return false;
	if (WARN_ON_ONCE(!rq->bio))
		return false;
	return true;
}

static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
{
	do {
		bio_poll(rq->bio, NULL, 0);
		cond_resched();
	} while (!completion_done(wait));
}

/**
 * blk_execute_rq - insert a request into queue for execution
 * @rq:		request to insert
 * @at_head:    insert request at head or tail of queue
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution and wait for completion.
 * Return: The blk_status_t result provided to blk_mq_end_request().
 */
blk_status_t blk_execute_rq(struct request *rq, bool at_head)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long hang_check;

	rq->end_io_data = &wait;
	blk_execute_rq_nowait(rq, at_head, blk_end_sync_rq);

	/* Prevent hang_check timer from firing at us during very long I/O */
	hang_check = sysctl_hung_task_timeout_secs;

	if (blk_rq_is_poll(rq))
		blk_rq_poll_completion(rq, &wait);
	else if (hang_check)
		while (!wait_for_completion_io_timeout(&wait,
				hang_check * (HZ/2)))
			;
	else
		wait_for_completion_io(&wait);

	return (blk_status_t)(uintptr_t)rq->end_io_data;
}
EXPORT_SYMBOL(blk_execute_rq);
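
/*
 * Editor's note: blk_execute_rq() is the synchronous counterpart of
 * blk_execute_rq_nowait() above. It waits on an on-stack completion,
 * polling via bio_poll() for requests on a HCTX_TYPE_POLL queue, and
 * returns the blk_status_t that blk_end_sync_rq() stashed in
 * rq->end_io_data.
 */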

static void __blk_mq_requeue_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	blk_mq_put_driver_tag(rq);

	trace_block_rq_requeue(rq);
	rq_qos_requeue(q, rq);

	if (blk_mq_request_started(rq)) {
		WRITE_ONCE(rq->state, MQ_RQ_IDLE);
		rq->rq_flags &= ~RQF_TIMED_OUT;
	}
}

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
{
	__blk_mq_requeue_request(rq);

	/* this request will be re-inserted to io scheduler queue */
	blk_mq_sched_requeue_request(rq);

	blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
}
EXPORT_SYMBOL(blk_mq_requeue_request);

static void blk_mq_requeue_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, requeue_work.work);
	LIST_HEAD(rq_list);
	struct request *rq, *next;

	spin_lock_irq(&q->requeue_lock);
	list_splice_init(&q->requeue_list, &rq_list);
	spin_unlock_irq(&q->requeue_lock);

	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
		if (!(rq->rq_flags & (RQF_SOFTBARRIER | RQF_DONTPREP)))
			continue;

		rq->rq_flags &= ~RQF_SOFTBARRIER;
		list_del_init(&rq->queuelist);
		/*
		 * If RQF_DONTPREP, rq has contained some driver specific
		 * data, so insert it to hctx dispatch list to avoid any
		 * merge.
		 */
		if (rq->rq_flags & RQF_DONTPREP)
			blk_mq_request_bypass_insert(rq, false, false);
		else
			blk_mq_sched_insert_request(rq, true, false, false);
	}

	while (!list_empty(&rq_list)) {
		rq = list_entry(rq_list.next, struct request, queuelist);
		list_del_init(&rq->queuelist);
		blk_mq_sched_insert_request(rq, false, false, false);
	}

	blk_mq_run_hw_queues(q, false);
}

void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
				bool kick_requeue_list)
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	/*
	 * We abuse this flag that is otherwise used by the I/O scheduler to
	 * request head insertion from the workqueue.
	 */
	BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);

	spin_lock_irqsave(&q->requeue_lock, flags);
	if (at_head) {
		rq->rq_flags |= RQF_SOFTBARRIER;
		list_add(&rq->queuelist, &q->requeue_list);
	} else {
		list_add_tail(&rq->queuelist, &q->requeue_list);
	}
	spin_unlock_irqrestore(&q->requeue_lock, flags);

	if (kick_requeue_list)
		blk_mq_kick_requeue_list(q);
}

void blk_mq_kick_requeue_list(struct request_queue *q)
{
	kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 0);
}
EXPORT_SYMBOL(blk_mq_kick_requeue_list);

void blk_mq_delay_kick_requeue_list(struct request_queue *q,
				    unsigned long msecs)
{
	kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
				    msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);

static bool blk_mq_rq_inflight(struct request *rq, void *priv,
			       bool reserved)
{
	/*
	 * If we find a request that isn't idle, we know the queue is busy
	 * (the iterator has already matched the queue). Return false to
	 * stop the iteration.
	 */
	if (blk_mq_request_started(rq)) {
		bool *busy = priv;

		*busy = true;
		return false;
	}

	return true;
}

bool blk_mq_queue_inflight(struct request_queue *q)
{
	bool busy = false;

	blk_mq_queue_tag_busy_iter(q, blk_mq_rq_inflight, &busy);
	return busy;
}
EXPORT_SYMBOL_GPL(blk_mq_queue_inflight);

static void blk_mq_rq_timed_out(struct request *req, bool reserved)
{
	req->rq_flags |= RQF_TIMED_OUT;
	if (req->q->mq_ops->timeout) {
		enum blk_eh_timer_return ret;

		ret = req->q->mq_ops->timeout(req, reserved);
		if (ret == BLK_EH_DONE)
			return;
		WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER);
	}

	blk_add_timer(req);
}

static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
{
	unsigned long deadline;

	if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT)
		return false;
	if (rq->rq_flags & RQF_TIMED_OUT)
		return false;

	deadline = READ_ONCE(rq->deadline);
	if (time_after_eq(jiffies, deadline))
		return true;

	if (*next == 0)
		*next = deadline;
	else if (time_after(*next, deadline))
		*next = deadline;
	return false;
}

void blk_mq_put_rq_ref(struct request *rq)
{
	if (is_flush_rq(rq))
		rq->end_io(rq, 0);
	else if (req_ref_put_and_test(rq))
		__blk_mq_free_request(rq);
}

static bool blk_mq_check_expired(struct request *rq, void *priv, bool reserved)
{
	unsigned long *next = priv;

	/*
	 * blk_mq_queue_tag_busy_iter() has locked the request, so it cannot
	 * be reallocated underneath the timeout handler's processing, then
	 * the expire check is reliable. If the request is not expired, then
	 * it was completed and reallocated as a new request after returning
	 * from blk_mq_check_expired().
	 */
	if (blk_mq_req_expired(rq, next))
		blk_mq_rq_timed_out(rq, reserved);
	return true;
}

static void blk_mq_timeout_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, timeout_work);
	unsigned long next = 0;
	struct blk_mq_hw_ctx *hctx;
	int i;

	/* A deadlock might occur if a request is stuck requiring a
	 * timeout at the same time a queue freeze is waiting
	 * completion, since the timeout code would not be able to
	 * acquire the queue reference here.
	 *
	 * That's why we don't use blk_queue_enter here; instead, we use
	 * percpu_ref_tryget directly, because we need to be able to
	 * obtain a reference even in the short window between the queue
	 * starting to freeze, by dropping the first reference in
	 * blk_freeze_queue_start, and the moment the last request is
	 * consumed, marked by the instant q_usage_counter reaches
	 * zero.
	 */
	if (!percpu_ref_tryget(&q->q_usage_counter))
		return;

	blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &next);

	if (next != 0) {
		mod_timer(&q->timeout, next);
	} else {
		/*
		 * Request timeouts are handled as a forward rolling timer. If
		 * we end up here it means that no requests are pending and
		 * also that no request has been pending for a while. Mark
		 * each hctx as idle.
		 */
		queue_for_each_hw_ctx(q, hctx, i) {
			/* the hctx may be unmapped, so check it here */
			if (blk_mq_hw_queue_mapped(hctx))
				blk_mq_tag_idle(hctx);
		}
	}
	blk_queue_exit(q);
}

struct flush_busy_ctx_data {
	struct blk_mq_hw_ctx *hctx;
	struct list_head *list;
};

static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
{
	struct flush_busy_ctx_data *flush_data = data;
	struct blk_mq_hw_ctx *hctx = flush_data->hctx;
	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
	enum hctx_type type = hctx->type;

	spin_lock(&ctx->lock);
	list_splice_tail_init(&ctx->rq_lists[type], flush_data->list);
	sbitmap_clear_bit(sb, bitnr);
	spin_unlock(&ctx->lock);
	return true;
}

/*
 * Process software queues that have been marked busy, splicing them
 * to the for-dispatch list.
1503  */
1504 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
1505 {
1506         struct flush_busy_ctx_data data = {
1507                 .hctx = hctx,
1508                 .list = list,
1509         };
1510
1511         sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
1512 }
1513 EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);
1514
1515 struct dispatch_rq_data {
1516         struct blk_mq_hw_ctx *hctx;
1517         struct request *rq;
1518 };
1519
1520 static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr,
1521                 void *data)
1522 {
1523         struct dispatch_rq_data *dispatch_data = data;
1524         struct blk_mq_hw_ctx *hctx = dispatch_data->hctx;
1525         struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
1526         enum hctx_type type = hctx->type;
1527
1528         spin_lock(&ctx->lock);
1529         if (!list_empty(&ctx->rq_lists[type])) {
1530                 dispatch_data->rq = list_entry_rq(ctx->rq_lists[type].next);
1531                 list_del_init(&dispatch_data->rq->queuelist);
1532                 if (list_empty(&ctx->rq_lists[type]))
1533                         sbitmap_clear_bit(sb, bitnr);
1534         }
1535         spin_unlock(&ctx->lock);
1536
1537         return !dispatch_data->rq;
1538 }
1539
1540 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
1541                                         struct blk_mq_ctx *start)
1542 {
1543         unsigned off = start ? start->index_hw[hctx->type] : 0;
1544         struct dispatch_rq_data data = {
1545                 .hctx = hctx,
1546                 .rq   = NULL,
1547         };
1548
1549         __sbitmap_for_each_set(&hctx->ctx_map, off,
1550                                dispatch_rq_from_ctx, &data);
1551
1552         return data.rq;
1553 }
1554
1555 static bool __blk_mq_alloc_driver_tag(struct request *rq)
1556 {
1557         struct sbitmap_queue *bt = &rq->mq_hctx->tags->bitmap_tags;
1558         unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags;
1559         int tag;
1560
1561         blk_mq_tag_busy(rq->mq_hctx);
1562
1563         if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) {
1564                 bt = &rq->mq_hctx->tags->breserved_tags;
1565                 tag_offset = 0;
1566         } else {
1567                 if (!hctx_may_queue(rq->mq_hctx, bt))
1568                         return false;
1569         }
1570
1571         tag = __sbitmap_queue_get(bt);
1572         if (tag == BLK_MQ_NO_TAG)
1573                 return false;
1574
1575         rq->tag = tag + tag_offset;
1576         return true;
1577 }
1578
1579 bool __blk_mq_get_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq)
1580 {
1581         if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_alloc_driver_tag(rq))
1582                 return false;
1583
1584         if ((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) &&
1585                         !(rq->rq_flags & RQF_MQ_INFLIGHT)) {
1586                 rq->rq_flags |= RQF_MQ_INFLIGHT;
1587                 __blk_mq_inc_active_requests(hctx);
1588         }
1589         hctx->tags->rqs[rq->tag] = rq;
1590         return true;
1591 }
1592
1593 static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
1594                                 int flags, void *key)
1595 {
1596         struct blk_mq_hw_ctx *hctx;
1597
1598         hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
1599
1600         spin_lock(&hctx->dispatch_wait_lock);
1601         if (!list_empty(&wait->entry)) {
1602                 struct sbitmap_queue *sbq;
1603
1604                 list_del_init(&wait->entry);
1605                 sbq = &hctx->tags->bitmap_tags;
1606                 atomic_dec(&sbq->ws_active);
1607         }
1608         spin_unlock(&hctx->dispatch_wait_lock);
1609
1610         blk_mq_run_hw_queue(hctx, true);
1611         return 1;
1612 }
1613
1614 /*
1615  * Mark us waiting for a tag. For shared tags, this involves hooking us into
1616  * the tag wakeups. For non-shared tags, we can simply mark ourselves as
1617  * needing a restart. In both cases, take care to check the condition again
1618  * after marking ourselves as waiting.
1619  */
1620 static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
1621                                  struct request *rq)
1622 {
1623         struct sbitmap_queue *sbq = &hctx->tags->bitmap_tags;
1624         struct wait_queue_head *wq;
1625         wait_queue_entry_t *wait;
1626         bool ret;
1627
1628         if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
1629                 blk_mq_sched_mark_restart_hctx(hctx);
1630
1631                 /*
1632                  * It's possible that a tag was freed in the window between the
1633                  * allocation failure and adding the hardware queue to the wait
1634                  * queue.
1635                  *
1636                  * Don't clear RESTART here, someone else could have set it.
1637                  * At most this will cost an extra queue run.
1638                  */
1639                 return blk_mq_get_driver_tag(rq);
1640         }
1641
1642         wait = &hctx->dispatch_wait;
1643         if (!list_empty_careful(&wait->entry))
1644                 return false;
1645
1646         wq = &bt_wait_ptr(sbq, hctx)->wait;
1647
1648         spin_lock_irq(&wq->lock);
1649         spin_lock(&hctx->dispatch_wait_lock);
1650         if (!list_empty(&wait->entry)) {
1651                 spin_unlock(&hctx->dispatch_wait_lock);
1652                 spin_unlock_irq(&wq->lock);
1653                 return false;
1654         }
1655
1656         atomic_inc(&sbq->ws_active);
1657         wait->flags &= ~WQ_FLAG_EXCLUSIVE;
1658         __add_wait_queue(wq, wait);
1659
1660         /*
1661          * It's possible that a tag was freed in the window between the
1662          * allocation failure and adding the hardware queue to the wait
1663          * queue.
1664          */
1665         ret = blk_mq_get_driver_tag(rq);
1666         if (!ret) {
1667                 spin_unlock(&hctx->dispatch_wait_lock);
1668                 spin_unlock_irq(&wq->lock);
1669                 return false;
1670         }
1671
1672         /*
1673          * We got a tag, remove ourselves from the wait queue to ensure
1674          * someone else gets the wakeup.
1675          */
1676         list_del_init(&wait->entry);
1677         atomic_dec(&sbq->ws_active);
1678         spin_unlock(&hctx->dispatch_wait_lock);
1679         spin_unlock_irq(&wq->lock);
1680
1681         return true;
1682 }
1683
1684 #define BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT  8
1685 #define BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR  4
1686 /*
1687  * Update dispatch busy with the Exponential Weighted Moving Average (EWMA):
1688  * - EWMA is a simple way to compute a running average value
1689  * - weights of 7/8 and 1/8 are applied so that the value decays exponentially
1690  * - a factor of 4 avoids results that are too small (i.e. 0); its exact
1691  *   value barely matters because the EWMA decays exponentially anyway
1692  */
1693 static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy)
1694 {
1695         unsigned int ewma;
1696
1697         ewma = hctx->dispatch_busy;
1698
1699         if (!ewma && !busy)
1700                 return;
1701
1702         ewma *= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT - 1;
1703         if (busy)
1704                 ewma += 1 << BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR;
1705         ewma /= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT;
1706
1707         hctx->dispatch_busy = ewma;
1708 }
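
/*
 * Worked example of the update above (illustrative note, not part of
 * the original file): with WEIGHT == 8 and FACTOR == 4, a busy dispatch
 * adds 1 << 4 == 16 before the divide, so starting from ewma == 0:
 *
 *        busy: ewma = (0 * 7 + 16) / 8 = 2
 *        busy: ewma = (2 * 7 + 16) / 8 = 3
 *        idle: ewma = (3 * 7) / 8 = 2
 *        idle: ewma = (2 * 7) / 8 = 1
 *
 * Repeated idle dispatches decay hctx->dispatch_busy back towards 0,
 * while the early return keeps a fully idle hctx from doing updates.
 */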
1709
1710 #define BLK_MQ_RESOURCE_DELAY   3               /* ms units */
1711
1712 static void blk_mq_handle_dev_resource(struct request *rq,
1713                                        struct list_head *list)
1714 {
1715         struct request *next =
1716                 list_first_entry_or_null(list, struct request, queuelist);
1717
1718         /*
1719          * If an I/O scheduler has been configured and we got a driver tag for
1720          * the next request already, free it.
1721          */
1722         if (next)
1723                 blk_mq_put_driver_tag(next);
1724
1725         list_add(&rq->queuelist, list);
1726         __blk_mq_requeue_request(rq);
1727 }
1728
1729 static void blk_mq_handle_zone_resource(struct request *rq,
1730                                         struct list_head *zone_list)
1731 {
1732         /*
1733          * If we end up here it is because we cannot dispatch a request to a
1734          * specific zone due to LLD-level zone-write locking or another
1735          * zone-related resource not being available. In this case, set the request
1736          * aside in zone_list for retrying it later.
1737          */
1738         list_add(&rq->queuelist, zone_list);
1739         __blk_mq_requeue_request(rq);
1740 }
1741
1742 enum prep_dispatch {
1743         PREP_DISPATCH_OK,
1744         PREP_DISPATCH_NO_TAG,
1745         PREP_DISPATCH_NO_BUDGET,
1746 };
1747
1748 static enum prep_dispatch blk_mq_prep_dispatch_rq(struct request *rq,
1749                                                   bool need_budget)
1750 {
1751         struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
1752         int budget_token = -1;
1753
1754         if (need_budget) {
1755                 budget_token = blk_mq_get_dispatch_budget(rq->q);
1756                 if (budget_token < 0) {
1757                         blk_mq_put_driver_tag(rq);
1758                         return PREP_DISPATCH_NO_BUDGET;
1759                 }
1760                 blk_mq_set_rq_budget_token(rq, budget_token);
1761         }
1762
1763         if (!blk_mq_get_driver_tag(rq)) {
1764                 /*
1765                  * The initial allocation attempt failed, so we need to
1766                  * rerun the hardware queue when a tag is freed. The
1767                  * waitqueue takes care of that. If the queue is run
1768                  * before we add this entry back on the dispatch list,
1769                  * we'll re-run it below.
1770                  */
1771                 if (!blk_mq_mark_tag_wait(hctx, rq)) {
1772                         /*
1773                          * All budgets not obtained by this function will be
1774                          * put back together when handling the partial dispatch.
1775                          */
1776                         if (need_budget)
1777                                 blk_mq_put_dispatch_budget(rq->q, budget_token);
1778                         return PREP_DISPATCH_NO_TAG;
1779                 }
1780         }
1781
1782         return PREP_DISPATCH_OK;
1783 }
1784
1785 /* Release all allocated budgets before calling blk_mq_dispatch_rq_list() */
1786 static void blk_mq_release_budgets(struct request_queue *q,
1787                 struct list_head *list)
1788 {
1789         struct request *rq;
1790
1791         list_for_each_entry(rq, list, queuelist) {
1792                 int budget_token = blk_mq_get_rq_budget_token(rq);
1793
1794                 if (budget_token >= 0)
1795                         blk_mq_put_dispatch_budget(q, budget_token);
1796         }
1797 }
1798
1799 /*
1800  * Returns true if we did some work AND can potentially do more.
1801  */
1802 bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
1803                              unsigned int nr_budgets)
1804 {
1805         enum prep_dispatch prep;
1806         struct request_queue *q = hctx->queue;
1807         struct request *rq, *nxt;
1808         int errors, queued;
1809         blk_status_t ret = BLK_STS_OK;
1810         LIST_HEAD(zone_list);
1811         bool needs_resource = false;
1812
1813         if (list_empty(list))
1814                 return false;
1815
1816         /*
1817          * Now process all the entries, sending them to the driver.
1818          */
1819         errors = queued = 0;
1820         do {
1821                 struct blk_mq_queue_data bd;
1822
1823                 rq = list_first_entry(list, struct request, queuelist);
1824
1825                 WARN_ON_ONCE(hctx != rq->mq_hctx);
1826                 prep = blk_mq_prep_dispatch_rq(rq, !nr_budgets);
1827                 if (prep != PREP_DISPATCH_OK)
1828                         break;
1829
1830                 list_del_init(&rq->queuelist);
1831
1832                 bd.rq = rq;
1833
1834                 /*
1835                  * Flag last if we have no more requests, or if we have more
1836                  * but can't assign a driver tag to it.
1837                  */
1838                 if (list_empty(list))
1839                         bd.last = true;
1840                 else {
1841                         nxt = list_first_entry(list, struct request, queuelist);
1842                         bd.last = !blk_mq_get_driver_tag(nxt);
1843                 }
1844
1845                 /*
1846                  * Once the request is queued to the LLD, there is no need
1847                  * to cover the budget any more.
1848                  */
1849                 if (nr_budgets)
1850                         nr_budgets--;
1851                 ret = q->mq_ops->queue_rq(hctx, &bd);
1852                 switch (ret) {
1853                 case BLK_STS_OK:
1854                         queued++;
1855                         break;
1856                 case BLK_STS_RESOURCE:
1857                         needs_resource = true;
1858                         fallthrough;
1859                 case BLK_STS_DEV_RESOURCE:
1860                         blk_mq_handle_dev_resource(rq, list);
1861                         goto out;
1862                 case BLK_STS_ZONE_RESOURCE:
1863                         /*
1864                          * Move the request to zone_list and keep going through
1865                          * the dispatch list to find more requests the drive can
1866                          * accept.
1867                          */
1868                         blk_mq_handle_zone_resource(rq, &zone_list);
1869                         needs_resource = true;
1870                         break;
1871                 default:
1872                         errors++;
1873                         blk_mq_end_request(rq, ret);
1874                 }
1875         } while (!list_empty(list));
1876 out:
1877         if (!list_empty(&zone_list))
1878                 list_splice_tail_init(&zone_list, list);
1879
1880         /* If we didn't flush the entire list, we could have told the driver
1881          * there was more coming, but that turned out to be a lie.
1882          */
1883         if ((!list_empty(list) || errors) && q->mq_ops->commit_rqs && queued)
1884                 q->mq_ops->commit_rqs(hctx);
1885         /*
1886          * Any items that need requeuing? Stuff them into hctx->dispatch;
1887          * that is where we will continue on the next queue run.
1888          */
1889         if (!list_empty(list)) {
1890                 bool needs_restart;
1891                 /* For non-shared tags, the RESTART check will suffice */
1892                 bool no_tag = prep == PREP_DISPATCH_NO_TAG &&
1893                         (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED);
1894
1895                 if (nr_budgets)
1896                         blk_mq_release_budgets(q, list);
1897
1898                 spin_lock(&hctx->lock);
1899                 list_splice_tail_init(list, &hctx->dispatch);
1900                 spin_unlock(&hctx->lock);
1901
1902                 /*
1903                  * Order adding requests to hctx->dispatch against checking
1904                  * the SCHED_RESTART flag. This smp_mb() pairs with the one in
1905                  * blk_mq_sched_restart(), and prevents the restart code path
1906                  * from missing requests newly added to hctx->dispatch while
1907                  * SCHED_RESTART is observed here.
1908                  */
1909                 smp_mb();
1910
1911                 /*
1912                  * If SCHED_RESTART was set by the caller of this function and
1913                  * it is no longer set that means that it was cleared by another
1914                  * thread and hence that a queue rerun is needed.
1915                  *
1916                  * If 'no_tag' is set, that means that we failed getting
1917                  * a driver tag with an I/O scheduler attached. If our dispatch
1918                  * waitqueue is no longer active, ensure that we run the queue
1919                  * AFTER adding our entries back to the list.
1920                  *
1921                  * If no I/O scheduler has been configured it is possible that
1922                  * the hardware queue got stopped and restarted before requests
1923                  * were pushed back onto the dispatch list. Rerun the queue to
1924                  * avoid starvation. Notes:
1925                  * - blk_mq_run_hw_queue() checks whether or not a queue has
1926                  *   been stopped before rerunning a queue.
1927                  * - Some but not all block drivers stop a queue before
1928                  *   returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
1929                  *   and dm-rq.
1930                  *
1931                  * If driver returns BLK_STS_RESOURCE and SCHED_RESTART
1932                  * bit is set, run queue after a delay to avoid IO stalls
1933                  * that could otherwise occur if the queue is idle.  We'll do
1934                  * similar if we couldn't get budget or couldn't lock a zone
1935                  * and SCHED_RESTART is set.
1936                  */
1937                 needs_restart = blk_mq_sched_needs_restart(hctx);
1938                 if (prep == PREP_DISPATCH_NO_BUDGET)
1939                         needs_resource = true;
1940                 if (!needs_restart ||
1941                     (no_tag && list_empty_careful(&hctx->dispatch_wait.entry)))
1942                         blk_mq_run_hw_queue(hctx, true);
1943                 else if (needs_restart && needs_resource)
1944                         blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);
1945
1946                 blk_mq_update_dispatch_busy(hctx, true);
1947                 return false;
1948         } else
1949                 blk_mq_update_dispatch_busy(hctx, false);
1950
1951         return (queued + errors) != 0;
1952 }
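
/*
 * Driver-side view of the dispatch status codes consumed above (hedged
 * sketch, not taken from any real driver; the mydrv_* names are
 * hypothetical). BLK_STS_OK means the request was queued;
 * BLK_STS_RESOURCE asks blk-mq itself to requeue and rerun the queue,
 * possibly after BLK_MQ_RESOURCE_DELAY; BLK_STS_DEV_RESOURCE is
 * similar, but the driver promises the queue will be rerun once the
 * device resource becomes available again:
 *
 *        static blk_status_t mydrv_queue_rq(struct blk_mq_hw_ctx *hctx,
 *                                           const struct blk_mq_queue_data *bd)
 *        {
 *                struct mydrv_queue *mq = hctx->driver_data;
 *
 *                if (!mydrv_reserve_slot(mq))
 *                        return BLK_STS_DEV_RESOURCE;
 *                mydrv_submit(mq, bd->rq, bd->last);
 *                return BLK_STS_OK;
 *        }
 */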
1953
1954 /**
1955  * __blk_mq_run_hw_queue - Run a hardware queue.
1956  * @hctx: Pointer to the hardware queue to run.
1957  *
1958  * Send pending requests to the hardware.
1959  */
1960 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
1961 {
1962         /*
1963          * We can't run the queue inline with ints disabled. Ensure that
1964          * we catch bad users of this early.
1965          */
1966         WARN_ON_ONCE(in_interrupt());
1967
1968         blk_mq_run_dispatch_ops(hctx->queue,
1969                         blk_mq_sched_dispatch_requests(hctx));
1970 }
1971
1972 static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
1973 {
1974         int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);
1975
1976         if (cpu >= nr_cpu_ids)
1977                 cpu = cpumask_first(hctx->cpumask);
1978         return cpu;
1979 }
1980
1981 /*
1982  * It'd be great if the workqueue API had a way to pass
1983  * in a mask and had some smarts for more clever placement.
1984  * For now we just round-robin here, switching after every
1985  * BLK_MQ_CPU_WORK_BATCH queued items.
1986  */
1987 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
1988 {
1989         bool tried = false;
1990         int next_cpu = hctx->next_cpu;
1991
1992         if (hctx->queue->nr_hw_queues == 1)
1993                 return WORK_CPU_UNBOUND;
1994
1995         if (--hctx->next_cpu_batch <= 0) {
1996 select_cpu:
1997                 next_cpu = cpumask_next_and(next_cpu, hctx->cpumask,
1998                                 cpu_online_mask);
1999                 if (next_cpu >= nr_cpu_ids)
2000                         next_cpu = blk_mq_first_mapped_cpu(hctx);
2001                 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
2002         }
2003
2004         /*
2005          * Do unbound schedule if we can't find an online CPU for this hctx,
2006          * which should only happen in the path of handling CPU DEAD.
2007          */
2008         if (!cpu_online(next_cpu)) {
2009                 if (!tried) {
2010                         tried = true;
2011                         goto select_cpu;
2012                 }
2013
2014                 /*
2015                  * Make sure to re-select the CPU next time, once CPUs
2016                  * in hctx->cpumask come online again.
2017                  */
2018                 hctx->next_cpu = next_cpu;
2019                 hctx->next_cpu_batch = 1;
2020                 return WORK_CPU_UNBOUND;
2021         }
2022
2023         hctx->next_cpu = next_cpu;
2024         return next_cpu;
2025 }
2026
2027 /**
2028  * __blk_mq_delay_run_hw_queue - Run (or schedule to run) a hardware queue.
2029  * @hctx: Pointer to the hardware queue to run.
2030  * @async: If we want to run the queue asynchronously.
2031  * @msecs: Milliseconds of delay to wait before running the queue.
2032  *
2033  * If !@async, try to run the queue now. Else, run the queue asynchronously
2034  * with a delay of @msecs.
2035  */
2036 static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
2037                                         unsigned long msecs)
2038 {
2039         if (unlikely(blk_mq_hctx_stopped(hctx)))
2040                 return;
2041
2042         if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
2043                 int cpu = get_cpu();
2044                 if (cpumask_test_cpu(cpu, hctx->cpumask)) {
2045                         __blk_mq_run_hw_queue(hctx);
2046                         put_cpu();
2047                         return;
2048                 }
2049
2050                 put_cpu();
2051         }
2052
2053         kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work,
2054                                     msecs_to_jiffies(msecs));
2055 }
2056
2057 /**
2058  * blk_mq_delay_run_hw_queue - Run a hardware queue asynchronously.
2059  * @hctx: Pointer to the hardware queue to run.
2060  * @msecs: Milliseconds of delay to wait before running the queue.
2061  *
2062  * Run a hardware queue asynchronously with a delay of @msecs.
2063  */
2064 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
2065 {
2066         __blk_mq_delay_run_hw_queue(hctx, true, msecs);
2067 }
2068 EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
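
/*
 * Hypothetical usage (sketch; the mydrv_* name is invented): a driver
 * that cannot make progress right now can ask for an asynchronous retry
 * with a back-off instead of spinning on the queue:
 *
 *        if (mydrv_controller_busy(mq))
 *                blk_mq_delay_run_hw_queue(hctx, 100);
 */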
2069
2070 /**
2071  * blk_mq_run_hw_queue - Start to run a hardware queue.
2072  * @hctx: Pointer to the hardware queue to run.
2073  * @async: If we want to run the queue asynchronously.
2074  *
2075  * Check if the request queue is not in a quiesced state and if there are
2076  * pending requests to be sent. If this is true, run the queue to send requests
2077  * to hardware.
2078  */
2079 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
2080 {
2081         bool need_run;
2082
2083         /*
2084          * When the queue is quiesced, we may be switching the io scheduler,
2085          * updating nr_hw_queues, or doing other things, and we can't run the
2086          * queue any more; even blk_mq_hctx_has_pending() can't be called safely.
2087          *
2088          * The queue will be rerun in blk_mq_unquiesce_queue() if it is
2089          * quiesced.
2090          */
2091         __blk_mq_run_dispatch_ops(hctx->queue, false,
2092                 need_run = !blk_queue_quiesced(hctx->queue) &&
2093                 blk_mq_hctx_has_pending(hctx));
2094
2095         if (need_run)
2096                 __blk_mq_delay_run_hw_queue(hctx, async, 0);
2097 }
2098 EXPORT_SYMBOL(blk_mq_run_hw_queue);
2099
2100 /*
2101  * Is the request queue handled by an IO scheduler that does not respect
2102  * hardware queues when dispatching?
2103  */
2104 static bool blk_mq_has_sqsched(struct request_queue *q)
2105 {
2106         struct elevator_queue *e = q->elevator;
2107
2108         if (e && e->type->ops.dispatch_request &&
2109             !(e->type->elevator_features & ELEVATOR_F_MQ_AWARE))
2110                 return true;
2111         return false;
2112 }
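
/*
 * An IO scheduler opts out of this single-queue treatment by marking
 * itself multi-queue aware in its elevator type (hedged illustration;
 * every field other than the feature flag is omitted):
 *
 *        static struct elevator_type mysched = {
 *                ...
 *                .elevator_features = ELEVATOR_F_MQ_AWARE,
 *        };
 */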
2113
2114 /*
2115  * Return the preferred queue to dispatch from (if any) for a non-mq aware
2116  * IO scheduler.
2117  */
2118 static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q)
2119 {
2120         struct blk_mq_hw_ctx *hctx;
2121
2122         /*
2123          * If the IO scheduler does not respect hardware queues when
2124          * dispatching, we just don't bother with multiple HW queues and
2125          * dispatch from hctx for the current CPU since running multiple queues
2126          * just causes lock contention inside the scheduler and pointless cache
2127          * bouncing.
2128          */
2129         hctx = blk_mq_map_queue_type(q, HCTX_TYPE_DEFAULT,
2130                                      raw_smp_processor_id());
2131         if (!blk_mq_hctx_stopped(hctx))
2132                 return hctx;
2133         return NULL;
2134 }
2135
2136 /**
2137  * blk_mq_run_hw_queues - Run all hardware queues in a request queue.
2138  * @q: Pointer to the request queue to run.
2139  * @async: If we want to run the queue asynchronously.
2140  */
2141 void blk_mq_run_hw_queues(struct request_queue *q, bool async)
2142 {
2143         struct blk_mq_hw_ctx *hctx, *sq_hctx;
2144         int i;
2145
2146         sq_hctx = NULL;
2147         if (blk_mq_has_sqsched(q))
2148                 sq_hctx = blk_mq_get_sq_hctx(q);
2149         queue_for_each_hw_ctx(q, hctx, i) {
2150                 if (blk_mq_hctx_stopped(hctx))
2151                         continue;
2152                 /*
2153                  * Dispatch from this hctx either if there's no hctx preferred
2154                  * by IO scheduler or if it has requests that bypass the
2155                  * scheduler.
2156                  */
2157                 if (!sq_hctx || sq_hctx == hctx ||
2158                     !list_empty_careful(&hctx->dispatch))
2159                         blk_mq_run_hw_queue(hctx, async);
2160         }
2161 }
2162 EXPORT_SYMBOL(blk_mq_run_hw_queues);
2163
2164 /**
2165  * blk_mq_delay_run_hw_queues - Run all hardware queues asynchronously.
2166  * @q: Pointer to the request queue to run.
2167  * @msecs: Milliseconds of delay to wait before running the queues.
2168  */
2169 void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs)
2170 {
2171         struct blk_mq_hw_ctx *hctx, *sq_hctx;
2172         int i;
2173
2174         sq_hctx = NULL;
2175         if (blk_mq_has_sqsched(q))
2176                 sq_hctx = blk_mq_get_sq_hctx(q);
2177         queue_for_each_hw_ctx(q, hctx, i) {
2178                 if (blk_mq_hctx_stopped(hctx))
2179                         continue;
2180                 /*
2181                  * Dispatch from this hctx either if there's no hctx preferred
2182                  * by IO scheduler or if it has requests that bypass the
2183                  * scheduler.
2184                  */
2185                 if (!sq_hctx || sq_hctx == hctx ||
2186                     !list_empty_careful(&hctx->dispatch))
2187                         blk_mq_delay_run_hw_queue(hctx, msecs);
2188         }
2189 }
2190 EXPORT_SYMBOL(blk_mq_delay_run_hw_queues);
2191
2192 /**
2193  * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped
2194  * @q: request queue.
2195  *
2196  * The caller is responsible for serializing this function against
2197  * blk_mq_{start,stop}_hw_queue().
2198  */
2199 bool blk_mq_queue_stopped(struct request_queue *q)
2200 {
2201         struct blk_mq_hw_ctx *hctx;
2202         int i;
2203
2204         queue_for_each_hw_ctx(q, hctx, i)
2205                 if (blk_mq_hctx_stopped(hctx))
2206                         return true;
2207
2208         return false;
2209 }
2210 EXPORT_SYMBOL(blk_mq_queue_stopped);
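
/*
 * Hypothetical caller sketch: a driver finishing error recovery might
 * use this check before restarting its queues (see the start/stop
 * helpers below):
 *
 *        if (blk_mq_queue_stopped(q))
 *                blk_mq_start_stopped_hw_queues(q, true);
 */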
2211
2212 /*
2213  * This function is often used by a driver to pause .queue_rq() when
2214  * there aren't enough resources or some conditions aren't satisfied, in
2215  * which case BLK_STS_RESOURCE is usually returned.
2216  *
2217  * We do not guarantee that dispatch will be drained or blocked
2218  * once blk_mq_stop_hw_queue() returns. Please use
2219  * blk_mq_quiesce_queue() for that requirement.
2220  */
2221 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
2222 {
2223         cancel_delayed_work(&hctx->run_work);
2224
2225         set_bit(BLK_MQ_S_STOPPED, &hctx->state);
2226 }
2227 EXPORT_SYMBOL(blk_mq_stop_hw_queue);
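
/*
 * Hypothetical ->queue_rq() fragment (sketch; the mydrv_* name is
 * invented) pausing the queue as described above. Note that in-flight
 * dispatch is not guaranteed to be drained when this returns:
 *
 *        if (mydrv_ring_full(mq)) {
 *                blk_mq_stop_hw_queue(hctx);
 *                return BLK_STS_DEV_RESOURCE;
 *        }
 */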
2228
2229 /*
2230  * This function is often used by a driver to pause .queue_rq() when
2231  * there aren't enough resources or some conditions aren't satisfied, in
2232  * which case BLK_STS_RESOURCE is usually returned.
2233  *
2234  * We do not guarantee that dispatch will be drained or blocked
2235  * once blk_mq_stop_hw_queues() returns. Please use
2236  * blk_mq_quiesce_queue() for that requirement.
2237  */
2238 void blk_mq_stop_hw_queues(struct request_queue *q)
2239 {
2240         struct blk_mq_hw_ctx *hctx;
2241         int i;
2242
2243         queue_for_each_hw_ctx(q, hctx, i)
2244                 blk_mq_stop_hw_queue(hctx);
2245 }
2246 EXPORT_SYMBOL(blk_mq_stop_hw_queues);
2247
2248 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
2249 {
2250         clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
2251
2252         blk_mq_run_hw_queue(hctx, false);
2253 }
2254 EXPORT_SYMBOL(blk_mq_start_hw_queue);
2255
2256 void blk_mq_start_hw_queues(struct request_queue *q)
2257 {
2258         struct blk_mq_hw_ctx *hctx;
2259         int i;
2260
2261         queue_for_each_hw_ctx(q, hctx, i)
2262                 blk_mq_start_hw_queue(hctx);
2263 }
2264 EXPORT_SYMBOL(blk_mq_start_hw_queues);
2265
2266 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
2267 {
2268         if (!blk_mq_hctx_stopped(hctx))
2269                 return;
2270
2271         clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
2272         blk_mq_run_hw_queue(hctx, async);
2273 }
2274 EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);
2275
2276 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
2277 {
2278         struct blk_mq_hw_ctx *hctx;
2279         int i;
2280
2281         queue_for_each_hw_ctx(q, hctx, i)
2282                 blk_mq_start_stopped_hw_queue(hctx, async);
2283 }
2284 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
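
/*
 * Counterpart to the stop sketch above (hypothetical; the mydrv_* names
 * are invented): a driver that stopped its queues on ring exhaustion
 * restarts them from its completion path once space is available again:
 *
 *        if (mydrv_ring_has_space(mq))
 *                blk_mq_start_stopped_hw_queues(mq->request_queue, true);
 */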
2285
2286 static void blk_mq_run_work_fn(struct work_struct *work)
2287 {
2288         struct blk_mq_hw_ctx *hctx;
2289
2290         hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
2291
2292         /*
2293          * If we are stopped, don't run the queue.
2294          */
2295         if (blk_mq_hctx_stopped(hctx))
2296                 return;
2297
2298         __blk_mq_run_hw_queue(hctx);
2299 }
2300
2301 static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
2302                                             struct request *rq,
2303                                             bool at_head)
2304 {
2305         struct blk_mq_ctx *ctx = rq->mq_ctx;
2306         enum hctx_type type = hctx->type;
2307
2308         lockdep_assert_held(&ctx->lock);
2309
2310         trace_block_rq_insert(rq);
2311
2312         if (at_head)
2313                 list_add(&rq->queuelist, &ctx->rq_lists[type]);
2314         else
2315                 list_add_tail(&rq->queuelist, &ctx->rq_lists[type]);
2316 }
2317
2318 void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
2319                              bool at_head)
2320 {
2321         struct blk_mq_ctx *ctx = rq->mq_ctx;
2322
2323         lockdep_assert_held(&ctx->lock);
2324
2325         __blk_mq_insert_req_list(hctx, rq, at_head);
2326         blk_mq_hctx_mark_pending(hctx, ctx);
2327 }
2328
2329 /**
2330  * blk_mq_request_bypass_insert - Insert a request at dispatch list.
2331  * @rq: Pointer to request to be inserted.
2332  * @at_head: true if the request should be inserted at the head of the list.
2333  * @run_queue: If we should run the hardware queue after inserting the request.
2334  *
2335  * Should only be used carefully, when the caller knows we want to
2336  * bypass a potential IO scheduler on the target device.
2337  */
2338 void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
2339                                   bool run_queue)
2340 {
2341         struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
2342
2343         spin_lock(&hctx->lock);
2344         if (at_head)
2345                 list_add(&rq->queuelist, &hctx->dispatch);
2346         else
2347                 list_add_tail(&rq->queuelist, &hctx->dispatch);
2348         spin_unlock(&hctx->lock);
2349
2350         if (run_queue)
2351                 blk_mq_run_hw_queue(hctx, false);
2352 }
2353
2354 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
2355                             struct list_head *list)
2357 {
2358         struct request *rq;
2359         enum hctx_type type = hctx->type;
2360
2361         /*
2362          * Preemption doesn't flush the plug list, so it's possible that
2363          * ctx->cpu is offline now.
2364          */
2365         list_for_each_entry(rq, list, queuelist) {
2366                 BUG_ON(rq->mq_ctx != ctx);
2367                 trace_block_rq_insert(rq);
2368         }
2369
2370         spin_lock(&ctx->lock);
2371         list_splice_tail_init(list, &ctx->rq_lists[type]);
2372         blk_mq_hctx_mark_pending(hctx, ctx);
2373         spin_unlock(&ctx->lock);
2374 }
2375
2376 static void blk_mq_commit_rqs(struct blk_mq_hw_ctx *hctx, int *queued,
2377                               bool from_schedule)
2378 {
2379         if (hctx->queue->mq_ops->commit_rqs) {
2380                 trace_block_unplug(hctx->queue, *queued, !from_schedule);
2381                 hctx->queue->mq_ops->commit_rqs(hctx);
2382         }
2383         *queued = 0;
2384 }
2385
2386 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
2387                 unsigned int nr_segs)
2388 {
2389         int err;
2390
2391         if (bio->bi_opf & REQ_RAHEAD)
2392                 rq->cmd_flags |= REQ_FAILFAST_MASK;
2393
2394         rq->__sector = bio->bi_iter.bi_sector;
2395         rq->write_hint = bio->bi_write_hint;
2396         blk_rq_bio_prep(rq, bio, nr_segs);
2397
2398         /* This can't fail, since GFP_NOIO includes __GFP_DIRECT_RECLAIM. */
2399         err = blk_crypto_rq_bio_prep(rq, bio, GFP_NOIO);
2400         WARN_ON_ONCE(err);
2401
2402         blk_account_io_start(rq);
2403 }
2404
2405 static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
2406                                             struct request *rq, bool last)
2407 {
2408         struct request_queue *q = rq->q;
2409         struct blk_mq_queue_data bd = {
2410                 .rq = rq,
2411                 .last = last,
2412         };
2413         blk_status_t ret;
2414
2415         /*
2416          * If queueing succeeds, we are done. On error, the caller may kill
2417          * the request. For any other (busy) status, just add the request back
2418          * to our list as we previously would have done.
2419          */
2420         ret = q->mq_ops->queue_rq(hctx, &bd);
2421         switch (ret) {
2422         case BLK_STS_OK:
2423                 blk_mq_update_dispatch_busy(hctx, false);
2424                 break;
2425         case BLK_STS_RESOURCE:
2426         case BLK_STS_DEV_RESOURCE:
2427                 blk_mq_update_dispatch_busy(hctx, true);
2428                 __blk_mq_requeue_request(rq);
2429                 break;
2430         default:
2431                 blk_mq_update_dispatch_busy(hctx, false);
2432                 break;
2433         }
2434
2435         return ret;
2436 }
2437
2438 static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
2439                                                 struct request *rq,
2440                                                 bool bypass_insert, bool last)
2441 {
2442         struct request_queue *q = rq->q;
2443         bool run_queue = true;
2444         int budget_token;
2445
2446         /*
2447          * An RCU or SRCU read lock is needed before checking the quiesced flag.
2448          *
2449          * When the queue is stopped or quiesced, ignore 'bypass_insert' from
2450          * blk_mq_request_issue_directly(), return BLK_STS_OK to the caller,
2451          * and avoid having the driver try to dispatch again.
2452          */
2453         if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
2454                 run_queue = false;
2455                 bypass_insert = false;
2456                 goto insert;
2457         }
2458
2459         if ((rq->rq_flags & RQF_ELV) && !bypass_insert)
2460                 goto insert;
2461
2462         budget_token = blk_mq_get_dispatch_budget(q);
2463         if (budget_token < 0)
2464                 goto insert;
2465
2466         blk_mq_set_rq_budget_token(rq, budget_token);
2467
2468         if (!blk_mq_get_driver_tag(rq)) {
2469                 blk_mq_put_dispatch_budget(q, budget_token);
2470                 goto insert;
2471         }
2472
2473         return __blk_mq_issue_directly(hctx, rq, last);
2474 insert:
2475         if (bypass_insert)
2476                 return BLK_STS_RESOURCE;
2477
2478         blk_mq_sched_insert_request(rq, false, run_queue, false);
2479
2480         return BLK_STS_OK;
2481 }
2482
2483 /**
2484  * blk_mq_try_issue_directly - Try to send a request directly to device driver.
2485  * @hctx: Pointer to the associated hardware queue.
2486  * @rq: Pointer to the request to be sent.
2487  *
2488  * If the device has enough resources to accept a new request now, send the
2489  * request directly to the device driver. Else, insert it into the
2490  * hctx->dispatch list, so we can try to send it again in the future. Requests
2491  * inserted into this list have higher priority.
2492  */
2493 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
2494                 struct request *rq)
2495 {
2496         blk_status_t ret =
2497                 __blk_mq_try_issue_directly(hctx, rq, false, true);
2498
2499         if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
2500                 blk_mq_request_bypass_insert(rq, false, true);
2501         else if (ret != BLK_STS_OK)
2502                 blk_mq_end_request(rq, ret);
2503 }
2504
2505 static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
2506 {
2507         return __blk_mq_try_issue_directly(rq->mq_hctx, rq, true, last);
2508 }
2509
2510 static void blk_mq_plug_issue_direct(struct blk_plug *plug, bool from_schedule)
2511 {
2512         struct blk_mq_hw_ctx *hctx = NULL;
2513         struct request *rq;
2514         int queued = 0;
2515         int errors = 0;
2516
2517         while ((rq = rq_list_pop(&plug->mq_list))) {
2518                 bool last = rq_list_empty(plug->mq_list);
2519                 blk_status_t ret;
2520
2521                 if (hctx != rq->mq_hctx) {
2522                         if (hctx)
2523                                 blk_mq_commit_rqs(hctx, &queued, from_schedule);
2524                         hctx = rq->mq_hctx;
2525                 }
2526
2527                 ret = blk_mq_request_issue_directly(rq, last);
2528                 switch (ret) {
2529                 case BLK_STS_OK:
2530                         queued++;
2531                         break;
2532                 case BLK_STS_RESOURCE:
2533                 case BLK_STS_DEV_RESOURCE:
2534                         blk_mq_request_bypass_insert(rq, false, last);
2535                         blk_mq_commit_rqs(hctx, &queued, from_schedule);
2536                         return;
2537                 default:
2538                         blk_mq_end_request(rq, ret);
2539                         errors++;
2540                         break;
2541                 }
2542         }
2543
2544         /*
2545          * If we didn't flush the entire list, we could have told the driver
2546          * there was more coming, but that turned out to be a lie.
2547          */
2548         if (errors)
2549                 blk_mq_commit_rqs(hctx, &queued, from_schedule);
2550 }
2551
2552 static void __blk_mq_flush_plug_list(struct request_queue *q,
2553                                      struct blk_plug *plug)
2554 {
2555         if (blk_queue_quiesced(q))
2556                 return;
2557         q->mq_ops->queue_rqs(&plug->mq_list);
2558 }
2559
2560 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
2561 {
2562         struct blk_mq_hw_ctx *this_hctx;
2563         struct blk_mq_ctx *this_ctx;
2564         struct request *rq;
2565         unsigned int depth;
2566         LIST_HEAD(list);
2567
2568         if (rq_list_empty(plug->mq_list))
2569                 return;
2570         plug->rq_count = 0;
2571
2572         if (!plug->multiple_queues && !plug->has_elevator && !from_schedule) {
2573                 struct request_queue *q;
2574
2575                 rq = rq_list_peek(&plug->mq_list);
2576                 q = rq->q;
2577
2578                 /*
2579                  * Peek first request and see if we have a ->queue_rqs() hook.
2580                  * If we do, we can dispatch the whole plug list in one go. We
2581                  * already know at this point that all requests belong to the
2582                  * same queue; the caller must ensure that's the case.
2583                  *
2584                  * Since we pass off the full list to the driver at this point,
2585                  * we do not increment the active request count for the queue.
2586                  * Bypass shared tags for now because of that.
2587                  */
2588                 if (q->mq_ops->queue_rqs &&
2589                     !(rq->mq_hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
2590                         blk_mq_run_dispatch_ops(q,
2591                                 __blk_mq_flush_plug_list(q, plug));
2592                         if (rq_list_empty(plug->mq_list))
2593                                 return;
2594                 }
2595
2596                 blk_mq_run_dispatch_ops(q,
2597                                 blk_mq_plug_issue_direct(plug, false));
2598                 if (rq_list_empty(plug->mq_list))
2599                         return;
2600         }
2601
2602         this_hctx = NULL;
2603         this_ctx = NULL;
2604         depth = 0;
2605         do {
2606                 rq = rq_list_pop(&plug->mq_list);
2607
2608                 if (!this_hctx) {
2609                         this_hctx = rq->mq_hctx;
2610                         this_ctx = rq->mq_ctx;
2611                 } else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx) {
2612                         trace_block_unplug(this_hctx->queue, depth,
2613                                                 !from_schedule);
2614                         blk_mq_sched_insert_requests(this_hctx, this_ctx,
2615                                                 &list, from_schedule);
2616                         depth = 0;
2617                         this_hctx = rq->mq_hctx;
2618                         this_ctx = rq->mq_ctx;
2619
2620                 }
2621
2622                 list_add(&rq->queuelist, &list);
2623                 depth++;
2624         } while (!rq_list_empty(plug->mq_list));
2625
2626         if (!list_empty(&list)) {
2627                 trace_block_unplug(this_hctx->queue, depth, !from_schedule);
2628                 blk_mq_sched_insert_requests(this_hctx, this_ctx, &list,
2629                                                 from_schedule);
2630         }
2631 }
2632
2633 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
2634                 struct list_head *list)
2635 {
2636         int queued = 0;
2637         int errors = 0;
2638
2639         while (!list_empty(list)) {
2640                 blk_status_t ret;
2641                 struct request *rq = list_first_entry(list, struct request,
2642                                 queuelist);
2643
2644                 list_del_init(&rq->queuelist);
2645                 ret = blk_mq_request_issue_directly(rq, list_empty(list));
2646                 if (ret != BLK_STS_OK) {
2647                         if (ret == BLK_STS_RESOURCE ||
2648                                         ret == BLK_STS_DEV_RESOURCE) {
2649                                 blk_mq_request_bypass_insert(rq, false,
2650                                                         list_empty(list));
2651                                 break;
2652                         }
2653                         blk_mq_end_request(rq, ret);
2654                         errors++;
2655                 } else
2656                         queued++;
2657         }
2658
2659         /*
2660          * If we didn't flush the entire list, we could have told
2661          * the driver there was more coming, but that turned out to
2662          * be a lie.
2663          */
2664         if ((!list_empty(list) || errors) &&
2665              hctx->queue->mq_ops->commit_rqs && queued)
2666                 hctx->queue->mq_ops->commit_rqs(hctx);
2667 }
2668
2669 /*
2670  * Allow 2x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
2671  * queues. This is important for md arrays to benefit from merging
2672  * requests.
2673  */
2674 static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
2675 {
2676         if (plug->multiple_queues)
2677                 return BLK_MAX_REQUEST_COUNT * 2;
2678         return BLK_MAX_REQUEST_COUNT;
2679 }
2680
2681 static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
2682 {
2683         struct request *last = rq_list_peek(&plug->mq_list);
2684
2685         if (!plug->rq_count) {
2686                 trace_block_plug(rq->q);
2687         } else if (plug->rq_count >= blk_plug_max_rq_count(plug) ||
2688                    (!blk_queue_nomerges(rq->q) &&
2689                     blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
2690                 blk_mq_flush_plug_list(plug, false);
2691                 trace_block_plug(rq->q);
2692         }
2693
2694         if (!plug->multiple_queues && last && last->q != rq->q)
2695                 plug->multiple_queues = true;
2696         if (!plug->has_elevator && (rq->rq_flags & RQF_ELV))
2697                 plug->has_elevator = true;
2698         rq->rq_next = NULL;
2699         rq_list_add(&plug->mq_list, rq);
2700         plug->rq_count++;
2701 }
2702
2703 static bool blk_mq_attempt_bio_merge(struct request_queue *q,
2704                                      struct bio *bio, unsigned int nr_segs)
2705 {
2706         if (!blk_queue_nomerges(q) && bio_mergeable(bio)) {
2707                 if (blk_attempt_plug_merge(q, bio, nr_segs))
2708                         return true;
2709                 if (blk_mq_sched_bio_merge(q, bio, nr_segs))
2710                         return true;
2711         }
2712         return false;
2713 }
2714
2715 static struct request *blk_mq_get_new_requests(struct request_queue *q,
2716                                                struct blk_plug *plug,
2717                                                struct bio *bio)
2718 {
2719         struct blk_mq_alloc_data data = {
2720                 .q              = q,
2721                 .nr_tags        = 1,
2722                 .cmd_flags      = bio->bi_opf,
2723         };
2724         struct request *rq;
2725
2726         if (unlikely(bio_queue_enter(bio)))
2727                 return NULL;
2728
2729         if (plug) {
2730                 data.nr_tags = plug->nr_ios;
2731                 plug->nr_ios = 1;
2732                 data.cached_rq = &plug->cached_rq;
2733         }
2734
2735         rq = __blk_mq_alloc_requests(&data);
2736         if (rq)
2737                 return rq;
2738         rq_qos_cleanup(q, bio);
2739         if (bio->bi_opf & REQ_NOWAIT)
2740                 bio_wouldblock_error(bio);
2741         blk_queue_exit(q);
2742         return NULL;
2743 }
2744
2745 static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
2746                 struct blk_plug *plug, struct bio *bio)
2747 {
2748         struct request *rq;
2749
2750         if (!plug)
2751                 return NULL;
2752         rq = rq_list_peek(&plug->cached_rq);
2753         if (!rq || rq->q != q)
2754                 return NULL;
2755
2756         if (blk_mq_get_hctx_type(bio->bi_opf) != rq->mq_hctx->type)
2757                 return NULL;
2758         if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
2759                 return NULL;
2760
2761         rq->cmd_flags = bio->bi_opf;
2762         plug->cached_rq = rq_list_next(rq);
2763         INIT_LIST_HEAD(&rq->queuelist);
2764         return rq;
2765 }
2766
2767 /**
2768  * blk_mq_submit_bio - Create and send a request to a block device.
2769  * @bio: Bio pointer.
2770  *
2771  * Builds up a request structure from @bio and sends it to the device.
2772  * The request may not be queued directly to the hardware if:
2773  * * This request can be merged with another one
2774  * * We want to place the request on the plug queue for possible future merging
2775  * * There is an IO scheduler active on this queue
2776  *
2777  * It will not queue the request if there is an error with the bio or with the
2778  * request creation.
2779  */
2780 void blk_mq_submit_bio(struct bio *bio)
2781 {
2782         struct request_queue *q = bdev_get_queue(bio->bi_bdev);
2783         struct blk_plug *plug = blk_mq_plug(q, bio);
2784         const int is_sync = op_is_sync(bio->bi_opf);
2785         struct request *rq;
2786         unsigned int nr_segs = 1;
2787         blk_status_t ret;
2788
2789         if (unlikely(!blk_crypto_bio_prep(&bio)))
2790                 return;
2791
2792         blk_queue_bounce(q, &bio);
2793         if (blk_may_split(q, bio))
2794                 __blk_queue_split(q, &bio, &nr_segs);
2795
2796         if (!bio_integrity_prep(bio))
2797                 return;
2798
2799         if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
2800                 return;
2801
2802         rq_qos_throttle(q, bio);
2803
2804         rq = blk_mq_get_cached_request(q, plug, bio);
2805         if (!rq) {
2806                 rq = blk_mq_get_new_requests(q, plug, bio);
2807                 if (unlikely(!rq))
2808                         return;
2809         }
2810
2811         trace_block_getrq(bio);
2812
2813         rq_qos_track(q, rq, bio);
2814
2815         blk_mq_bio_to_request(rq, bio, nr_segs);
2816
2817         ret = blk_crypto_init_request(rq);
2818         if (ret != BLK_STS_OK) {
2819                 bio->bi_status = ret;
2820                 bio_endio(bio);
2821                 blk_mq_free_request(rq);
2822                 return;
2823         }
2824
2825         if (op_is_flush(bio->bi_opf)) {
2826                 blk_insert_flush(rq);
2827                 return;
2828         }
2829
2830         if (plug)
2831                 blk_add_rq_to_plug(plug, rq);
2832         else if ((rq->rq_flags & RQF_ELV) ||
2833                  (rq->mq_hctx->dispatch_busy &&
2834                   (q->nr_hw_queues == 1 || !is_sync)))
2835                 blk_mq_sched_insert_request(rq, false, true, true);
2836         else
2837                 blk_mq_run_dispatch_ops(rq->q,
2838                                 blk_mq_try_issue_directly(rq->mq_hctx, rq));
2839 }
2840
2841 /**
2842  * blk_cloned_rq_check_limits - Helper function to check a cloned request
2843  *                              for the new queue limits
2844  * @q:  the queue
2845  * @rq: the request being checked
2846  *
2847  * Description:
2848  *    @rq may have been made based on weaker limitations of upper-level queues
2849  *    in request stacking drivers, and it may violate the limitations of @q.
2850  *    Since the block layer and the underlying device driver trust @rq
2851  *    after it is inserted into @q, it should be checked against @q before
2852  *    the insertion using this generic function.
2853  *
2854  *    Request stacking drivers like request-based dm may change the queue
2855  *    limits when retrying requests on other queues. Those requests need
2856  *    to be checked against the new queue limits again during dispatch.
2857  */
2858 static blk_status_t blk_cloned_rq_check_limits(struct request_queue *q,
2859                                       struct request *rq)
2860 {
2861         unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq));
2862
2863         if (blk_rq_sectors(rq) > max_sectors) {
2864                 /*
2865                  * A SCSI device has no good way to report whether
2866                  * Write Same/Zero is actually supported. If a device rejects
2867                  * a non-read/write command (discard, write same, etc.) the
2868                  * low-level device driver will set the relevant queue limit to
2869                  * 0 to prevent blk-lib from issuing more of the offending
2870                  * operations. Commands queued prior to the queue limit being
2871                  * reset need to be completed with BLK_STS_NOTSUPP to avoid I/O
2872                  * errors being propagated to upper layers.
2873                  */
2874                 if (max_sectors == 0)
2875                         return BLK_STS_NOTSUPP;
2876
2877                 printk(KERN_ERR "%s: over max size limit. (%u > %u)\n",
2878                         __func__, blk_rq_sectors(rq), max_sectors);
2879                 return BLK_STS_IOERR;
2880         }
2881
2882         /*
2883          * The queue settings related to segment counting may differ from the
2884          * original queue.
2885          */
2886         rq->nr_phys_segments = blk_recalc_rq_segments(rq);
2887         if (rq->nr_phys_segments > queue_max_segments(q)) {
2888                 printk(KERN_ERR "%s: over max segments limit. (%hu > %hu)\n",
2889                         __func__, rq->nr_phys_segments, queue_max_segments(q));
2890                 return BLK_STS_IOERR;
2891         }
2892
2893         return BLK_STS_OK;
2894 }
2895
2896 /**
2897  * blk_insert_cloned_request - Helper for stacking drivers to submit a request
2898  * @q:  the queue to submit the request
2899  * @rq: the request being queued
2900  */
2901 blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
2902 {
2903         blk_status_t ret;
2904
2905         ret = blk_cloned_rq_check_limits(q, rq);
2906         if (ret != BLK_STS_OK)
2907                 return ret;
2908
2909         if (rq->q->disk &&
2910             should_fail_request(rq->q->disk->part0, blk_rq_bytes(rq)))
2911                 return BLK_STS_IOERR;
2912
2913         if (blk_crypto_insert_cloned_request(rq))
2914                 return BLK_STS_IOERR;
2915
2916         blk_account_io_start(rq);
2917
2918         /*
2919          * Since we have a scheduler attached to the top device,
2920          * bypass a potential scheduler on the bottom device for
2921          * insert.
2922          */
2923         blk_mq_run_dispatch_ops(rq->q,
2924                         ret = blk_mq_request_issue_directly(rq, true));
2925         if (ret)
2926                 blk_account_io_done(rq, ktime_get_ns());
2927         return ret;
2928 }
2929 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
2930
2931 /**
2932  * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
2933  * @rq: the clone request to be cleaned up
2934  *
2935  * Description:
2936  *     Free all bios in @rq for a cloned request.
2937  */
2938 void blk_rq_unprep_clone(struct request *rq)
2939 {
2940         struct bio *bio;
2941
2942         while ((bio = rq->bio) != NULL) {
2943                 rq->bio = bio->bi_next;
2944
2945                 bio_put(bio);
2946         }
2947 }
2948 EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
2949
2950 /**
2951  * blk_rq_prep_clone - Helper function to setup clone request
2952  * @rq: the request to be setup
2953  * @rq_src: original request to be cloned
2954  * @bs: bio_set that bios for clone are allocated from
2955  * @gfp_mask: memory allocation mask for bio
2956  * @bio_ctr: setup function to be called for each clone bio.
2957  *           Returns %0 for success, non %0 for failure.
2958  * @data: private data to be passed to @bio_ctr
2959  *
2960  * Description:
2961  *     Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
2962  *     Also, the pages which the original bios are pointing to are not copied,
2963  *     and the cloned bios just point to the same pages.
2964  *     So the cloned bios must be completed before the original bios, which means
2965  *     the caller must complete @rq before @rq_src.
2966  */
2967 int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
2968                       struct bio_set *bs, gfp_t gfp_mask,
2969                       int (*bio_ctr)(struct bio *, struct bio *, void *),
2970                       void *data)
2971 {
2972         struct bio *bio, *bio_src;
2973
2974         if (!bs)
2975                 bs = &fs_bio_set;
2976
2977         __rq_for_each_bio(bio_src, rq_src) {
2978                 bio = bio_clone_fast(bio_src, gfp_mask, bs);
2979                 if (!bio)
2980                         goto free_and_out;
2981                 bio->bi_bdev = rq->q->disk->part0;
2982
2983                 if (bio_ctr && bio_ctr(bio, bio_src, data))
2984                         goto free_and_out;
2985
2986                 if (rq->bio) {
2987                         rq->biotail->bi_next = bio;
2988                         rq->biotail = bio;
2989                 } else {
2990                         rq->bio = rq->biotail = bio;
2991                 }
2992                 bio = NULL;
2993         }
2994
2995         /* Copy attributes of the original request to the clone request. */
2996         rq->__sector = blk_rq_pos(rq_src);
2997         rq->__data_len = blk_rq_bytes(rq_src);
2998         if (rq_src->rq_flags & RQF_SPECIAL_PAYLOAD) {
2999                 rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
3000                 rq->special_vec = rq_src->special_vec;
3001         }
3002         rq->nr_phys_segments = rq_src->nr_phys_segments;
3003         rq->ioprio = rq_src->ioprio;
3004
3005         if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0)
3006                 goto free_and_out;
3007
3008         return 0;
3009
3010 free_and_out:
3011         if (bio)
3012                 bio_put(bio);
3013         blk_rq_unprep_clone(rq);
3014
3015         return -ENOMEM;
3016 }
3017 EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
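
/*
 * Typical stacking-driver lifecycle for the clone helpers above (hedged
 * sketch in the spirit of request-based dm; labels are illustrative and
 * the error paths that free the clone are trimmed):
 *
 *        clone = blk_mq_alloc_request(bottom_q,
 *                                     rq_src->cmd_flags | REQ_NOMERGE,
 *                                     BLK_MQ_REQ_NOWAIT);
 *        if (IS_ERR(clone))
 *                goto requeue;
 *        if (blk_rq_prep_clone(clone, rq_src, NULL, GFP_NOIO, NULL, NULL))
 *                goto fail;
 *        if (blk_insert_cloned_request(bottom_q, clone) != BLK_STS_OK) {
 *                blk_rq_unprep_clone(clone);
 *                goto requeue;
 *        }
 */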
3018
3019 /*
3020  * Steal bios from a request and add them to a bio list.
3021  * The request must not have been partially completed before.
3022  */
3023 void blk_steal_bios(struct bio_list *list, struct request *rq)
3024 {
3025         if (rq->bio) {
3026                 if (list->tail)
3027                         list->tail->bi_next = rq->bio;
3028                 else
3029                         list->head = rq->bio;
3030                 list->tail = rq->biotail;
3031
3032                 rq->bio = NULL;
3033                 rq->biotail = NULL;
3034         }
3035
3036         rq->__data_len = 0;
3037 }
3038 EXPORT_SYMBOL_GPL(blk_steal_bios);
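
/*
 * Illustrative use (sketch): a stacking driver aborting a request can
 * move its bios onto a local list for later completion or re-submission:
 *
 *        struct bio_list list;
 *
 *        bio_list_init(&list);
 *        blk_steal_bios(&list, rq);
 */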
3039
3040 static size_t order_to_size(unsigned int order)
3041 {
3042         return (size_t)PAGE_SIZE << order;
3043 }
3044
3045 /* Called before freeing the request pool in @tags */
3046 static void blk_mq_clear_rq_mapping(struct blk_mq_tags *drv_tags,
3047                                     struct blk_mq_tags *tags)
3048 {
3049         struct page *page;
3050         unsigned long flags;
3051
3052         /* There is no need to clear the driver tags' own mapping */
3053         if (drv_tags == tags)
3054                 return;
3055
3056         list_for_each_entry(page, &tags->page_list, lru) {
3057                 unsigned long start = (unsigned long)page_address(page);
3058                 unsigned long end = start + order_to_size(page->private);
3059                 int i;
3060
3061                 for (i = 0; i < drv_tags->nr_tags; i++) {
3062                         struct request *rq = drv_tags->rqs[i];
3063                         unsigned long rq_addr = (unsigned long)rq;
3064
3065                         if (rq_addr >= start && rq_addr < end) {
3066                                 WARN_ON_ONCE(req_ref_read(rq) != 0);
3067                                 cmpxchg(&drv_tags->rqs[i], rq, NULL);
3068                         }
3069                 }
3070         }
3071
3072         /*
3073          * Wait until all pending iterations are done.
3074          *
3075          * The request references have been cleared, and the clearing is
3076          * guaranteed to be observed after ->lock is released.
3077          */
3078         spin_lock_irqsave(&drv_tags->lock, flags);
3079         spin_unlock_irqrestore(&drv_tags->lock, flags);
3080 }
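
/*
 * Illustrative sketch (simplified from the tag iterators in blk-mq-tag.c):
 * the lock/unlock pair above synchronizes against a reader that
 * dereferences tags->rqs[] under the same lock, roughly:
 *
 *	spin_lock_irqsave(&tags->lock, flags);
 *	rq = tags->rqs[bitnr];
 *	// ...validate and use rq...
 *	spin_unlock_irqrestore(&tags->lock, flags);
 *
 * Once the clearing side has taken and dropped ->lock, no such reader can
 * still be looking at the stale pointers cleared above.
 */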
3081
3082 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
3083                      unsigned int hctx_idx)
3084 {
3085         struct blk_mq_tags *drv_tags;
3086         struct page *page;
3087
3088         if (blk_mq_is_shared_tags(set->flags))
3089                 drv_tags = set->shared_tags;
3090         else
3091                 drv_tags = set->tags[hctx_idx];
3092
3093         if (tags->static_rqs && set->ops->exit_request) {
3094                 int i;
3095
3096                 for (i = 0; i < tags->nr_tags; i++) {
3097                         struct request *rq = tags->static_rqs[i];
3098
3099                         if (!rq)
3100                                 continue;
3101                         set->ops->exit_request(set, rq, hctx_idx);
3102                         tags->static_rqs[i] = NULL;
3103                 }
3104         }
3105
3106         blk_mq_clear_rq_mapping(drv_tags, tags);
3107
3108         while (!list_empty(&tags->page_list)) {
3109                 page = list_first_entry(&tags->page_list, struct page, lru);
3110                 list_del_init(&page->lru);
3111                 /*
3112                  * Remove kmemleak object previously allocated in
3113                  * blk_mq_alloc_rqs().
3114                  */
3115                 kmemleak_free(page_address(page));
3116                 __free_pages(page, page->private);
3117         }
3118 }
3119
3120 void blk_mq_free_rq_map(struct blk_mq_tags *tags)
3121 {
3122         kfree(tags->rqs);
3123         tags->rqs = NULL;
3124         kfree(tags->static_rqs);
3125         tags->static_rqs = NULL;
3126
3127         blk_mq_free_tags(tags);
3128 }
3129
3130 static struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
3131                                                unsigned int hctx_idx,
3132                                                unsigned int nr_tags,
3133                                                unsigned int reserved_tags)
3134 {
3135         struct blk_mq_tags *tags;
3136         int node;
3137
3138         node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx);
3139         if (node == NUMA_NO_NODE)
3140                 node = set->numa_node;
3141
3142         tags = blk_mq_init_tags(nr_tags, reserved_tags, node,
3143                                 BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
3144         if (!tags)
3145                 return NULL;
3146
3147         tags->rqs = kcalloc_node(nr_tags, sizeof(struct request *),
3148                                  GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
3149                                  node);
3150         if (!tags->rqs) {
3151                 blk_mq_free_tags(tags);
3152                 return NULL;
3153         }
3154
3155         tags->static_rqs = kcalloc_node(nr_tags, sizeof(struct request *),
3156                                         GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
3157                                         node);
3158         if (!tags->static_rqs) {
3159                 kfree(tags->rqs);
3160                 blk_mq_free_tags(tags);
3161                 return NULL;
3162         }
3163
3164         return tags;
3165 }
3166
3167 static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
3168                                unsigned int hctx_idx, int node)
3169 {
3170         int ret;
3171
3172         if (set->ops->init_request) {
3173                 ret = set->ops->init_request(set, rq, hctx_idx, node);
3174                 if (ret)
3175                         return ret;
3176         }
3177
3178         WRITE_ONCE(rq->state, MQ_RQ_IDLE);
3179         return 0;
3180 }
3181
3182 static int blk_mq_alloc_rqs(struct blk_mq_tag_set *set,
3183                             struct blk_mq_tags *tags,
3184                             unsigned int hctx_idx, unsigned int depth)
3185 {
3186         unsigned int i, j, entries_per_page, max_order = 4;
3187         size_t rq_size, left;
3188         int node;
3189
3190         node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx);
3191         if (node == NUMA_NO_NODE)
3192                 node = set->numa_node;
3193
3194         INIT_LIST_HEAD(&tags->page_list);
3195
3196         /*
3197          * rq_size is the size of the request plus driver payload, rounded
3198          * to the cacheline size
3199          */
3200         rq_size = round_up(sizeof(struct request) + set->cmd_size,
3201                                 cache_line_size());
3202         left = rq_size * depth;
3203
3204         for (i = 0; i < depth; ) {
3205                 int this_order = max_order;
3206                 struct page *page;
3207                 int to_do;
3208                 void *p;
3209
3210                 while (this_order && left < order_to_size(this_order - 1))
3211                         this_order--;
3212
3213                 do {
3214                         page = alloc_pages_node(node,
3215                                 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
3216                                 this_order);
3217                         if (page)
3218                                 break;
3219                         if (!this_order--)
3220                                 break;
3221                         if (order_to_size(this_order) < rq_size)
3222                                 break;
3223                 } while (1);
3224
3225                 if (!page)
3226                         goto fail;
3227
3228                 page->private = this_order;
3229                 list_add_tail(&page->lru, &tags->page_list);
3230
3231                 p = page_address(page);
3232                 /*
3233                  * Allow kmemleak to scan these pages as they contain pointers
3234                  * to additional allocations, such as those made via ops->init_request().
3235                  */
3236                 kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
3237                 entries_per_page = order_to_size(this_order) / rq_size;
3238                 to_do = min(entries_per_page, depth - i);
3239                 left -= to_do * rq_size;
3240                 for (j = 0; j < to_do; j++) {
3241                         struct request *rq = p;
3242
3243                         tags->static_rqs[i] = rq;
3244                         if (blk_mq_init_request(set, rq, hctx_idx, node)) {
3245                                 tags->static_rqs[i] = NULL;
3246                                 goto fail;
3247                         }
3248
3249                         p += rq_size;
3250                         i++;
3251                 }
3252         }
3253         return 0;
3254
3255 fail:
3256         blk_mq_free_rqs(set, tags, hctx_idx);
3257         return -ENOMEM;
3258 }
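
/*
 * Worked example (illustrative numbers only): with 4K pages and rq_size
 * rounded up to, say, 384 bytes, an order-4 chunk is 16 * 4096 = 65536
 * bytes and holds 65536 / 384 = 170 requests, so a queue depth of 256
 * takes one order-4 chunk plus a smaller one for the remaining 86.
 */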
3259
3260 struct rq_iter_data {
3261         struct blk_mq_hw_ctx *hctx;
3262         bool has_rq;
3263 };
3264
3265 static bool blk_mq_has_request(struct request *rq, void *data, bool reserved)
3266 {
3267         struct rq_iter_data *iter_data = data;
3268
3269         if (rq->mq_hctx != iter_data->hctx)
3270                 return true;
3271         iter_data->has_rq = true;
3272         return false;
3273 }
3274
3275 static bool blk_mq_hctx_has_requests(struct blk_mq_hw_ctx *hctx)
3276 {
3277         struct blk_mq_tags *tags = hctx->sched_tags ?
3278                         hctx->sched_tags : hctx->tags;
3279         struct rq_iter_data data = {
3280                 .hctx   = hctx,
3281         };
3282
3283         blk_mq_all_tag_iter(tags, blk_mq_has_request, &data);
3284         return data.has_rq;
3285 }
3286
3287 static inline bool blk_mq_last_cpu_in_hctx(unsigned int cpu,
3288                 struct blk_mq_hw_ctx *hctx)
3289 {
3290         if (cpumask_first_and(hctx->cpumask, cpu_online_mask) != cpu)
3291                 return false;
3292         if (cpumask_next_and(cpu, hctx->cpumask, cpu_online_mask) < nr_cpu_ids)
3293                 return false;
3294         return true;
3295 }
3296
3297 static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node)
3298 {
3299         struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
3300                         struct blk_mq_hw_ctx, cpuhp_online);
3301
3302         if (!cpumask_test_cpu(cpu, hctx->cpumask) ||
3303             !blk_mq_last_cpu_in_hctx(cpu, hctx))
3304                 return 0;
3305
3306         /*
3307          * Prevent new requests from being allocated on the current hctx.
3308          *
3309          * The smp_mb__after_atomic() pairs with the implied barrier in
3310          * test_and_set_bit_lock() in sbitmap_get(), and ensures the inactive
3311          * flag is seen once we return from the tag allocator.
3312          */
3313         set_bit(BLK_MQ_S_INACTIVE, &hctx->state);
3314         smp_mb__after_atomic();
3315
3316         /*
3317          * Try to grab a reference to the queue and wait for any outstanding
3318          * requests.  If we could not grab a reference the queue has been
3319          * frozen and there are no requests.
3320          */
3321         if (percpu_ref_tryget(&hctx->queue->q_usage_counter)) {
3322                 while (blk_mq_hctx_has_requests(hctx))
3323                         msleep(5);
3324                 percpu_ref_put(&hctx->queue->q_usage_counter);
3325         }
3326
3327         return 0;
3328 }
3329
3330 static int blk_mq_hctx_notify_online(unsigned int cpu, struct hlist_node *node)
3331 {
3332         struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
3333                         struct blk_mq_hw_ctx, cpuhp_online);
3334
3335         if (cpumask_test_cpu(cpu, hctx->cpumask))
3336                 clear_bit(BLK_MQ_S_INACTIVE, &hctx->state);
3337         return 0;
3338 }
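
/*
 * Illustrative sketch (a simplified view of the registration done at init
 * time): the two notifiers above are hooked up as a multi-instance CPU
 * hotplug state, roughly:
 *
 *	cpuhp_setup_state_multi(CPUHP_AP_BLK_MQ_ONLINE, "block/mq:online",
 *				blk_mq_hctx_notify_online,
 *				blk_mq_hctx_notify_offline);
 *
 * and each hctx then adds/removes itself via the
 * cpuhp_state_add/remove_instance_nocalls() calls seen in this file.
 */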
3339
3340 /*
3341  * 'cpu' is going away. Splice any existing rq_list entries from this
3342  * software queue to the hw queue dispatch list, and ensure that it
3343  * gets run.
3344  */
3345 static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
3346 {
3347         struct blk_mq_hw_ctx *hctx;
3348         struct blk_mq_ctx *ctx;
3349         LIST_HEAD(tmp);
3350         enum hctx_type type;
3351
3352         hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
3353         if (!cpumask_test_cpu(cpu, hctx->cpumask))
3354                 return 0;
3355
3356         ctx = __blk_mq_get_ctx(hctx->queue, cpu);
3357         type = hctx->type;
3358
3359         spin_lock(&ctx->lock);
3360         if (!list_empty(&ctx->rq_lists[type])) {
3361                 list_splice_init(&ctx->rq_lists[type], &tmp);
3362                 blk_mq_hctx_clear_pending(hctx, ctx);
3363         }
3364         spin_unlock(&ctx->lock);
3365
3366         if (list_empty(&tmp))
3367                 return 0;
3368
3369         spin_lock(&hctx->lock);
3370         list_splice_tail_init(&tmp, &hctx->dispatch);
3371         spin_unlock(&hctx->lock);
3372
3373         blk_mq_run_hw_queue(hctx, true);
3374         return 0;
3375 }
3376
3377 static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
3378 {
3379         if (!(hctx->flags & BLK_MQ_F_STACKING))
3380                 cpuhp_state_remove_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
3381                                                     &hctx->cpuhp_online);
3382         cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
3383                                             &hctx->cpuhp_dead);
3384 }
3385
3386 /*
3387  * Before freeing the hw queue, clear the flush request reference in
3388  * tags->rqs[] to avoid a potential use-after-free.
3389  */
3390 static void blk_mq_clear_flush_rq_mapping(struct blk_mq_tags *tags,
3391                 unsigned int queue_depth, struct request *flush_rq)
3392 {
3393         int i;
3394         unsigned long flags;
3395
3396         /* The hw queue may not be mapped yet */
3397         if (!tags)
3398                 return;
3399
3400         WARN_ON_ONCE(req_ref_read(flush_rq) != 0);
3401
3402         for (i = 0; i < queue_depth; i++)
3403                 cmpxchg(&tags->rqs[i], flush_rq, NULL);
3404
3405         /*
3406          * Wait until all pending iterations are done.
3407          *
3408          * The flush request reference has been cleared, and the clearing is
3409          * guaranteed to be observed after ->lock is released.
3410          */
3411         spin_lock_irqsave(&tags->lock, flags);
3412         spin_unlock_irqrestore(&tags->lock, flags);
3413 }
3414
3415 /* hctx->ctxs will be freed in queue's release handler */
3416 static void blk_mq_exit_hctx(struct request_queue *q,
3417                 struct blk_mq_tag_set *set,
3418                 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
3419 {
3420         struct request *flush_rq = hctx->fq->flush_rq;
3421
3422         if (blk_mq_hw_queue_mapped(hctx))
3423                 blk_mq_tag_idle(hctx);
3424
3425         blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx],
3426                         set->queue_depth, flush_rq);
3427         if (set->ops->exit_request)
3428                 set->ops->exit_request(set, flush_rq, hctx_idx);
3429
3430         if (set->ops->exit_hctx)
3431                 set->ops->exit_hctx(hctx, hctx_idx);
3432
3433         blk_mq_remove_cpuhp(hctx);
3434
3435         spin_lock(&q->unused_hctx_lock);
3436         list_add(&hctx->hctx_list, &q->unused_hctx_list);
3437         spin_unlock(&q->unused_hctx_lock);
3438 }
3439
3440 static void blk_mq_exit_hw_queues(struct request_queue *q,
3441                 struct blk_mq_tag_set *set, int nr_queue)
3442 {
3443         struct blk_mq_hw_ctx *hctx;
3444         unsigned int i;
3445
3446         queue_for_each_hw_ctx(q, hctx, i) {
3447                 if (i == nr_queue)
3448                         break;
3449                 blk_mq_debugfs_unregister_hctx(hctx);
3450                 blk_mq_exit_hctx(q, set, hctx, i);
3451         }
3452 }
3453
3454 static int blk_mq_init_hctx(struct request_queue *q,
3455                 struct blk_mq_tag_set *set,
3456                 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
3457 {
3458         hctx->queue_num = hctx_idx;
3459
3460         if (!(hctx->flags & BLK_MQ_F_STACKING))
3461                 cpuhp_state_add_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
3462                                 &hctx->cpuhp_online);
3463         cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
3464
3465         hctx->tags = set->tags[hctx_idx];
3466
3467         if (set->ops->init_hctx &&
3468             set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
3469                 goto unregister_cpu_notifier;
3470
3471         if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx,
3472                                 hctx->numa_node))
3473                 goto exit_hctx;
3474         return 0;
3475
3476  exit_hctx:
3477         if (set->ops->exit_hctx)
3478                 set->ops->exit_hctx(hctx, hctx_idx);
3479  unregister_cpu_notifier:
3480         blk_mq_remove_cpuhp(hctx);
3481         return -1;
3482 }
3483
3484 static struct blk_mq_hw_ctx *
3485 blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
3486                 int node)
3487 {
3488         struct blk_mq_hw_ctx *hctx;
3489         gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY;
3490
3491         hctx = kzalloc_node(sizeof(struct blk_mq_hw_ctx), gfp, node);
3492         if (!hctx)
3493                 goto fail_alloc_hctx;
3494
3495         if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node))
3496                 goto free_hctx;
3497
3498         atomic_set(&hctx->nr_active, 0);
3499         if (node == NUMA_NO_NODE)
3500                 node = set->numa_node;
3501         hctx->numa_node = node;
3502
3503         INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
3504         spin_lock_init(&hctx->lock);
3505         INIT_LIST_HEAD(&hctx->dispatch);
3506         hctx->queue = q;
3507         hctx->flags = set->flags & ~BLK_MQ_F_TAG_QUEUE_SHARED;
3508
3509         INIT_LIST_HEAD(&hctx->hctx_list);
3510
3511         /*
3512          * Allocate space for all possible cpus to avoid allocation at
3513          * runtime
3514          */
3515         hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *),
3516                         gfp, node);
3517         if (!hctx->ctxs)
3518                 goto free_cpumask;
3519
3520         if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8),
3521                                 gfp, node, false, false))
3522                 goto free_ctxs;
3523         hctx->nr_ctx = 0;
3524
3525         spin_lock_init(&hctx->dispatch_wait_lock);
3526         init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
3527         INIT_LIST_HEAD(&hctx->dispatch_wait.entry);
3528
3529         hctx->fq = blk_alloc_flush_queue(hctx->numa_node, set->cmd_size, gfp);
3530         if (!hctx->fq)
3531                 goto free_bitmap;
3532
3533         blk_mq_hctx_kobj_init(hctx);
3534
3535         return hctx;
3536
3537  free_bitmap:
3538         sbitmap_free(&hctx->ctx_map);
3539  free_ctxs:
3540         kfree(hctx->ctxs);
3541  free_cpumask:
3542         free_cpumask_var(hctx->cpumask);
3543  free_hctx:
3544         kfree(hctx);
3545  fail_alloc_hctx:
3546         return NULL;
3547 }
3548
3549 static void blk_mq_init_cpu_queues(struct request_queue *q,
3550                                    unsigned int nr_hw_queues)
3551 {
3552         struct blk_mq_tag_set *set = q->tag_set;
3553         unsigned int i, j;
3554
3555         for_each_possible_cpu(i) {
3556                 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
3557                 struct blk_mq_hw_ctx *hctx;
3558                 int k;
3559
3560                 __ctx->cpu = i;
3561                 spin_lock_init(&__ctx->lock);
3562                 for (k = HCTX_TYPE_DEFAULT; k < HCTX_MAX_TYPES; k++)
3563                         INIT_LIST_HEAD(&__ctx->rq_lists[k]);
3564
3565                 __ctx->queue = q;
3566
3567                 /*
3568                  * Set the local node, but only if we have more than one hw
3569                  * queue. If not, we remain on the home node of the device.
3570                  */
3571                 for (j = 0; j < set->nr_maps; j++) {
3572                         hctx = blk_mq_map_queue_type(q, j, i);
3573                         if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
3574                                 hctx->numa_node = cpu_to_node(i);
3575                 }
3576         }
3577 }
3578
3579 struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
3580                                              unsigned int hctx_idx,
3581                                              unsigned int depth)
3582 {
3583         struct blk_mq_tags *tags;
3584         int ret;
3585
3586         tags = blk_mq_alloc_rq_map(set, hctx_idx, depth, set->reserved_tags);
3587         if (!tags)
3588                 return NULL;
3589
3590         ret = blk_mq_alloc_rqs(set, tags, hctx_idx, depth);
3591         if (ret) {
3592                 blk_mq_free_rq_map(tags);
3593                 return NULL;
3594         }
3595
3596         return tags;
3597 }
3598
3599 static bool __blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
3600                                        int hctx_idx)
3601 {
3602         if (blk_mq_is_shared_tags(set->flags)) {
3603                 set->tags[hctx_idx] = set->shared_tags;
3604
3605                 return true;
3606         }
3607
3608         set->tags[hctx_idx] = blk_mq_alloc_map_and_rqs(set, hctx_idx,
3609                                                        set->queue_depth);
3610
3611         return set->tags[hctx_idx];
3612 }
3613
3614 void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
3615                              struct blk_mq_tags *tags,
3616                              unsigned int hctx_idx)
3617 {
3618         if (tags) {
3619                 blk_mq_free_rqs(set, tags, hctx_idx);
3620                 blk_mq_free_rq_map(tags);
3621         }
3622 }
3623
3624 static void __blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
3625                                       unsigned int hctx_idx)
3626 {
3627         if (!blk_mq_is_shared_tags(set->flags))
3628                 blk_mq_free_map_and_rqs(set, set->tags[hctx_idx], hctx_idx);
3629
3630         set->tags[hctx_idx] = NULL;
3631 }
3632
3633 static void blk_mq_map_swqueue(struct request_queue *q)
3634 {
3635         unsigned int i, j, hctx_idx;
3636         struct blk_mq_hw_ctx *hctx;
3637         struct blk_mq_ctx *ctx;
3638         struct blk_mq_tag_set *set = q->tag_set;
3639
3640         queue_for_each_hw_ctx(q, hctx, i) {
3641                 cpumask_clear(hctx->cpumask);
3642                 hctx->nr_ctx = 0;
3643                 hctx->dispatch_from = NULL;
3644         }
3645
3646         /*
3647          * Map software to hardware queues.
3648          *
3649          * If the cpu isn't present, the cpu is mapped to the first hctx.
3650          */
3651         for_each_possible_cpu(i) {
3652
3653                 ctx = per_cpu_ptr(q->queue_ctx, i);
3654                 for (j = 0; j < set->nr_maps; j++) {
3655                         if (!set->map[j].nr_queues) {
3656                                 ctx->hctxs[j] = blk_mq_map_queue_type(q,
3657                                                 HCTX_TYPE_DEFAULT, i);
3658                                 continue;
3659                         }
3660                         hctx_idx = set->map[j].mq_map[i];
3661                         /* an unmapped hw queue can be remapped after the CPU topology changes */
3662                         if (!set->tags[hctx_idx] &&
3663                             !__blk_mq_alloc_map_and_rqs(set, hctx_idx)) {
3664                                 /*
3665                                  * If tag initialization fails for some hctx,
3666                                  * that hctx won't be brought online.  In this
3667                                  * case, remap the current ctx to hctx[0], which
3668                                  * is guaranteed to always have tags allocated.
3669                                  */
3670                                 set->map[j].mq_map[i] = 0;
3671                         }
3672
3673                         hctx = blk_mq_map_queue_type(q, j, i);
3674                         ctx->hctxs[j] = hctx;
3675                         /*
3676                          * If the CPU is already set in the mask, then we've
3677                          * mapped this one already. This can happen if
3678                          * devices share queues across queue maps.
3679                          */
3680                         if (cpumask_test_cpu(i, hctx->cpumask))
3681                                 continue;
3682
3683                         cpumask_set_cpu(i, hctx->cpumask);
3684                         hctx->type = j;
3685                         ctx->index_hw[hctx->type] = hctx->nr_ctx;
3686                         hctx->ctxs[hctx->nr_ctx++] = ctx;
3687
3688                         /*
3689                          * If the nr_ctx type overflows, we have exceeded the
3690                          * amount of sw queues we can support.
3691                          */
3692                         BUG_ON(!hctx->nr_ctx);
3693                 }
3694
3695                 for (; j < HCTX_MAX_TYPES; j++)
3696                         ctx->hctxs[j] = blk_mq_map_queue_type(q,
3697                                         HCTX_TYPE_DEFAULT, i);
3698         }
3699
3700         queue_for_each_hw_ctx(q, hctx, i) {
3701                 /*
3702                  * If no software queues are mapped to this hardware queue,
3703                  * disable it and free the request entries.
3704                  */
3705                 if (!hctx->nr_ctx) {
3706                         /* Never unmap queue 0.  We need it as a
3707                          * fallback in case allocation fails during
3708                          * a new remap.
3709                          */
3710                         if (i)
3711                                 __blk_mq_free_map_and_rqs(set, i);
3712
3713                         hctx->tags = NULL;
3714                         continue;
3715                 }
3716
3717                 hctx->tags = set->tags[i];
3718                 WARN_ON(!hctx->tags);
3719
3720                 /*
3721                  * Set the map size to the number of mapped software queues.
3722                  * This is more accurate and more efficient than looping
3723                  * over all possibly mapped software queues.
3724                  */
3725                 sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);
3726
3727                 /*
3728                  * Initialize batch roundrobin counts
3729                  */
3730                 hctx->next_cpu = blk_mq_first_mapped_cpu(hctx);
3731                 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
3732         }
3733 }
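
/*
 * Worked example (hypothetical topology): with 4 possible CPUs, 2 mapped
 * hw queues and mq_map = {0, 0, 1, 1}, CPUs 0-1 become the two software
 * queues of hctx0 (index_hw 0 and 1) and CPUs 2-3 those of hctx1; each
 * hctx's ctx_map is then resized to nr_ctx = 2 bits.
 */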
3734
3735 /*
3736  * Caller needs to ensure that we're either frozen/quiesced, or that
3737  * the queue isn't live yet.
3738  */
3739 static void queue_set_hctx_shared(struct request_queue *q, bool shared)
3740 {
3741         struct blk_mq_hw_ctx *hctx;
3742         int i;
3743
3744         queue_for_each_hw_ctx(q, hctx, i) {
3745                 if (shared) {
3746                         hctx->flags |= BLK_MQ_F_TAG_QUEUE_SHARED;
3747                 } else {
3748                         blk_mq_tag_idle(hctx);
3749                         hctx->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED;
3750                 }
3751         }
3752 }
3753
3754 static void blk_mq_update_tag_set_shared(struct blk_mq_tag_set *set,
3755                                          bool shared)
3756 {
3757         struct request_queue *q;
3758
3759         lockdep_assert_held(&set->tag_list_lock);
3760
3761         list_for_each_entry(q, &set->tag_list, tag_set_list) {
3762                 blk_mq_freeze_queue(q);
3763                 queue_set_hctx_shared(q, shared);
3764                 blk_mq_unfreeze_queue(q);
3765         }
3766 }
3767
3768 static void blk_mq_del_queue_tag_set(struct request_queue *q)
3769 {
3770         struct blk_mq_tag_set *set = q->tag_set;
3771
3772         mutex_lock(&set->tag_list_lock);
3773         list_del(&q->tag_set_list);
3774         if (list_is_singular(&set->tag_list)) {
3775                 /* just transitioned to unshared */
3776                 set->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED;
3777                 /* update existing queue */
3778                 blk_mq_update_tag_set_shared(set, false);
3779         }
3780         mutex_unlock(&set->tag_list_lock);
3781         INIT_LIST_HEAD(&q->tag_set_list);
3782 }
3783
3784 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
3785                                      struct request_queue *q)
3786 {
3787         mutex_lock(&set->tag_list_lock);
3788
3789         /*
3790          * Check to see if we're transitioning to shared (from 1 to 2 queues).
3791          */
3792         if (!list_empty(&set->tag_list) &&
3793             !(set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
3794                 set->flags |= BLK_MQ_F_TAG_QUEUE_SHARED;
3795                 /* update existing queue */
3796                 blk_mq_update_tag_set_shared(set, true);
3797         }
3798         if (set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
3799                 queue_set_hctx_shared(q, true);
3800         list_add_tail(&q->tag_set_list, &set->tag_list);
3801
3802         mutex_unlock(&set->tag_list_lock);
3803 }
3804
3805 /* All allocations will be freed in the release handler of q->mq_kobj */
3806 static int blk_mq_alloc_ctxs(struct request_queue *q)
3807 {
3808         struct blk_mq_ctxs *ctxs;
3809         int cpu;
3810
3811         ctxs = kzalloc(sizeof(*ctxs), GFP_KERNEL);
3812         if (!ctxs)
3813                 return -ENOMEM;
3814
3815         ctxs->queue_ctx = alloc_percpu(struct blk_mq_ctx);
3816         if (!ctxs->queue_ctx)
3817                 goto fail;
3818
3819         for_each_possible_cpu(cpu) {
3820                 struct blk_mq_ctx *ctx = per_cpu_ptr(ctxs->queue_ctx, cpu);
3821                 ctx->ctxs = ctxs;
3822         }
3823
3824         q->mq_kobj = &ctxs->kobj;
3825         q->queue_ctx = ctxs->queue_ctx;
3826
3827         return 0;
3828  fail:
3829         kfree(ctxs);
3830         return -ENOMEM;
3831 }
3832
3833 /*
3834  * This is the actual release handler for mq, but we do it from the
3835  * request queue's release handler to avoid use-after-free headaches:
3836  * q->mq_kobj shouldn't have been introduced in the first place, but we
3837  * can't group the ctx/hctx kobjects without it.
3838  */
3839 void blk_mq_release(struct request_queue *q)
3840 {
3841         struct blk_mq_hw_ctx *hctx, *next;
3842         int i;
3843
3844         queue_for_each_hw_ctx(q, hctx, i)
3845                 WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list));
3846
3847         /* all hctx are in .unused_hctx_list now */
3848         list_for_each_entry_safe(hctx, next, &q->unused_hctx_list, hctx_list) {
3849                 list_del_init(&hctx->hctx_list);
3850                 kobject_put(&hctx->kobj);
3851         }
3852
3853         kfree(q->queue_hw_ctx);
3854
3855         /*
3856          * release .mq_kobj and the sw queues' kobjects now because
3857          * both share their lifetime with the request queue.
3858          */
3859         blk_mq_sysfs_deinit(q);
3860 }
3861
3862 static struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
3863                 void *queuedata)
3864 {
3865         struct request_queue *q;
3866         int ret;
3867
3868         q = blk_alloc_queue(set->numa_node, set->flags & BLK_MQ_F_BLOCKING);
3869         if (!q)
3870                 return ERR_PTR(-ENOMEM);
3871         q->queuedata = queuedata;
3872         ret = blk_mq_init_allocated_queue(set, q);
3873         if (ret) {
3874                 blk_cleanup_queue(q);
3875                 return ERR_PTR(ret);
3876         }
3877         return q;
3878 }
3879
3880 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
3881 {
3882         return blk_mq_init_queue_data(set, NULL);
3883 }
3884 EXPORT_SYMBOL(blk_mq_init_queue);
3885
3886 struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
3887                 struct lock_class_key *lkclass)
3888 {
3889         struct request_queue *q;
3890         struct gendisk *disk;
3891
3892         q = blk_mq_init_queue_data(set, queuedata);
3893         if (IS_ERR(q))
3894                 return ERR_CAST(q);
3895
3896         disk = __alloc_disk_node(q, set->numa_node, lkclass);
3897         if (!disk) {
3898                 blk_cleanup_queue(q);
3899                 return ERR_PTR(-ENOMEM);
3900         }
3901         return disk;
3902 }
3903 EXPORT_SYMBOL(__blk_mq_alloc_disk);
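
/*
 * Illustrative sketch (hypothetical driver, using the blk_mq_alloc_disk()
 * wrapper from blk-mq.h): a typical probe path allocates the tag set and
 * then the disk:
 *
 *	err = blk_mq_alloc_tag_set(&dev->tag_set);
 *	if (err)
 *		return err;
 *	disk = blk_mq_alloc_disk(&dev->tag_set, dev);
 *	if (IS_ERR(disk))
 *		return PTR_ERR(disk);	// tag set cleanup elided here
 *	disk->fops = &mydrv_fops;	// hypothetical fops
 */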
3904
3905 static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
3906                 struct blk_mq_tag_set *set, struct request_queue *q,
3907                 int hctx_idx, int node)
3908 {
3909         struct blk_mq_hw_ctx *hctx = NULL, *tmp;
3910
3911         /* reuse dead hctx first */
3912         spin_lock(&q->unused_hctx_lock);
3913         list_for_each_entry(tmp, &q->unused_hctx_list, hctx_list) {
3914                 if (tmp->numa_node == node) {
3915                         hctx = tmp;
3916                         break;
3917                 }
3918         }
3919         if (hctx)
3920                 list_del_init(&hctx->hctx_list);
3921         spin_unlock(&q->unused_hctx_lock);
3922
3923         if (!hctx)
3924                 hctx = blk_mq_alloc_hctx(q, set, node);
3925         if (!hctx)
3926                 goto fail;
3927
3928         if (blk_mq_init_hctx(q, set, hctx, hctx_idx))
3929                 goto free_hctx;
3930
3931         return hctx;
3932
3933  free_hctx:
3934         kobject_put(&hctx->kobj);
3935  fail:
3936         return NULL;
3937 }
3938
3939 static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
3940                                                 struct request_queue *q)
3941 {
3942         int i, j, end;
3943         struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
3944
3945         if (q->nr_hw_queues < set->nr_hw_queues) {
3946                 struct blk_mq_hw_ctx **new_hctxs;
3947
3948                 new_hctxs = kcalloc_node(set->nr_hw_queues,
3949                                        sizeof(*new_hctxs), GFP_KERNEL,
3950                                        set->numa_node);
3951                 if (!new_hctxs)
3952                         return;
3953                 if (hctxs)
3954                         memcpy(new_hctxs, hctxs, q->nr_hw_queues *
3955                                sizeof(*hctxs));
3956                 q->queue_hw_ctx = new_hctxs;
3957                 kfree(hctxs);
3958                 hctxs = new_hctxs;
3959         }
3960
3961         /* protect against switching the io scheduler */
3962         mutex_lock(&q->sysfs_lock);
3963         for (i = 0; i < set->nr_hw_queues; i++) {
3964                 int node;
3965                 struct blk_mq_hw_ctx *hctx;
3966
3967                 node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], i);
3968                 /*
3969                  * If the hw queue has been mapped to another numa node,
3970                  * we need to realloc the hctx. If allocation fails, fall
3971                  * back to using the previous one.
3972                  */
3973                 if (hctxs[i] && (hctxs[i]->numa_node == node))
3974                         continue;
3975
3976                 hctx = blk_mq_alloc_and_init_hctx(set, q, i, node);
3977                 if (hctx) {
3978                         if (hctxs[i])
3979                                 blk_mq_exit_hctx(q, set, hctxs[i], i);
3980                         hctxs[i] = hctx;
3981                 } else {
3982                         if (hctxs[i])
3983                                 pr_warn("Allocate new hctx on node %d fails,\
3984                                                 fallback to previous one on node %d\n",
3985                                                 node, hctxs[i]->numa_node);
3986                         else
3987                                 break;
3988                 }
3989         }
3990         /*
3991          * Increasing nr_hw_queues failed. Free the newly allocated
3992          * hctxs and keep the previous q->nr_hw_queues.
3993          */
3994         if (i != set->nr_hw_queues) {
3995                 j = q->nr_hw_queues;
3996                 end = i;
3997         } else {
3998                 j = i;
3999                 end = q->nr_hw_queues;
4000                 q->nr_hw_queues = set->nr_hw_queues;
4001         }
4002
4003         for (; j < end; j++) {
4004                 struct blk_mq_hw_ctx *hctx = hctxs[j];
4005
4006                 if (hctx) {
4007                         blk_mq_exit_hctx(q, set, hctx, j);
4008                         hctxs[j] = NULL;
4009                 }
4010         }
4011         mutex_unlock(&q->sysfs_lock);
4012 }
4013
4014 int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
4015                 struct request_queue *q)
4016 {
4017         WARN_ON_ONCE(blk_queue_has_srcu(q) !=
4018                         !!(set->flags & BLK_MQ_F_BLOCKING));
4019
4020         /* mark the queue as mq asap */
4021         q->mq_ops = set->ops;
4022
4023         q->poll_cb = blk_stat_alloc_callback(blk_mq_poll_stats_fn,
4024                                              blk_mq_poll_stats_bkt,
4025                                              BLK_MQ_POLL_STATS_BKTS, q);
4026         if (!q->poll_cb)
4027                 goto err_exit;
4028
4029         if (blk_mq_alloc_ctxs(q))
4030                 goto err_poll;
4031
4032         /* init q->mq_kobj and sw queues' kobjects */
4033         blk_mq_sysfs_init(q);
4034
4035         INIT_LIST_HEAD(&q->unused_hctx_list);
4036         spin_lock_init(&q->unused_hctx_lock);
4037
4038         blk_mq_realloc_hw_ctxs(set, q);
4039         if (!q->nr_hw_queues)
4040                 goto err_hctxs;
4041
4042         INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
4043         blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
4044
4045         q->tag_set = set;
4046
4047         q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
4048         if (set->nr_maps > HCTX_TYPE_POLL &&
4049             set->map[HCTX_TYPE_POLL].nr_queues)
4050                 blk_queue_flag_set(QUEUE_FLAG_POLL, q);
4051
4052         INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
4053         INIT_LIST_HEAD(&q->requeue_list);
4054         spin_lock_init(&q->requeue_lock);
4055
4056         q->nr_requests = set->queue_depth;
4057
4058         /*
4059          * Default to classic polling
4060          */
4061         q->poll_nsec = BLK_MQ_POLL_CLASSIC;
4062
4063         blk_mq_init_cpu_queues(q, set->nr_hw_queues);
4064         blk_mq_add_queue_tag_set(set, q);
4065         blk_mq_map_swqueue(q);
4066         return 0;
4067
4068 err_hctxs:
4069         kfree(q->queue_hw_ctx);
4070         q->nr_hw_queues = 0;
4071         blk_mq_sysfs_deinit(q);
4072 err_poll:
4073         blk_stat_free_callback(q->poll_cb);
4074         q->poll_cb = NULL;
4075 err_exit:
4076         q->mq_ops = NULL;
4077         return -ENOMEM;
4078 }
4079 EXPORT_SYMBOL(blk_mq_init_allocated_queue);
4080
4081 /* tags can _not_ be used after returning from blk_mq_exit_queue */
4082 void blk_mq_exit_queue(struct request_queue *q)
4083 {
4084         struct blk_mq_tag_set *set = q->tag_set;
4085
4086         /* Checks hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED. */
4087         blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
4088         /* May clear BLK_MQ_F_TAG_QUEUE_SHARED in hctx->flags. */
4089         blk_mq_del_queue_tag_set(q);
4090 }
4091
4092 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
4093 {
4094         int i;
4095
4096         if (blk_mq_is_shared_tags(set->flags)) {
4097                 set->shared_tags = blk_mq_alloc_map_and_rqs(set,
4098                                                 BLK_MQ_NO_HCTX_IDX,
4099                                                 set->queue_depth);
4100                 if (!set->shared_tags)
4101                         return -ENOMEM;
4102         }
4103
4104         for (i = 0; i < set->nr_hw_queues; i++) {
4105                 if (!__blk_mq_alloc_map_and_rqs(set, i))
4106                         goto out_unwind;
4107                 cond_resched();
4108         }
4109
4110         return 0;
4111
4112 out_unwind:
4113         while (--i >= 0)
4114                 __blk_mq_free_map_and_rqs(set, i);
4115
4116         if (blk_mq_is_shared_tags(set->flags)) {
4117                 blk_mq_free_map_and_rqs(set, set->shared_tags,
4118                                         BLK_MQ_NO_HCTX_IDX);
4119         }
4120
4121         return -ENOMEM;
4122 }
4123
4124 /*
4125  * Allocate the request maps associated with this tag_set. Note that this
4126  * may reduce the depth asked for, if memory is tight. set->queue_depth
4127  * will be updated to reflect the allocated depth.
4128  */
4129 static int blk_mq_alloc_set_map_and_rqs(struct blk_mq_tag_set *set)
4130 {
4131         unsigned int depth;
4132         int err;
4133
4134         depth = set->queue_depth;
4135         do {
4136                 err = __blk_mq_alloc_rq_maps(set);
4137                 if (!err)
4138                         break;
4139
4140                 set->queue_depth >>= 1;
4141                 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
4142                         err = -ENOMEM;
4143                         break;
4144                 }
4145         } while (set->queue_depth);
4146
4147         if (!set->queue_depth || err) {
4148                 pr_err("blk-mq: failed to allocate request map\n");
4149                 return -ENOMEM;
4150         }
4151
4152         if (depth != set->queue_depth)
4153                 pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
4154                                                 depth, set->queue_depth);
4155
4156         return 0;
4157 }
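
/*
 * Worked example (illustrative numbers): if a driver asks for
 * queue_depth = 1024 and allocation keeps failing, the loop above retries
 * with 512, 256, 128, ... and only gives up once the depth would drop
 * below reserved_tags + BLK_MQ_TAG_MIN; on success the reduced depth is
 * reported via the "reduced tag depth" message.
 */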
4158
4159 static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
4160 {
4161         /*
4162          * blk_mq_map_queues() and multiple .map_queues() implementations
4163          * expect that set->map[HCTX_TYPE_DEFAULT].nr_queues is set to the
4164          * number of hardware queues.
4165          */
4166         if (set->nr_maps == 1)
4167                 set->map[HCTX_TYPE_DEFAULT].nr_queues = set->nr_hw_queues;
4168
4169         if (set->ops->map_queues && !is_kdump_kernel()) {
4170                 int i;
4171
4172                 /*
4173                  * A transport's .map_queues is usually done in the following
4174                  * way:
4175                  *
4176                  * for (queue = 0; queue < set->nr_hw_queues; queue++) {
4177                  *      mask = get_cpu_mask(queue)
4178                  *      for_each_cpu(cpu, mask)
4179                  *              set->map[x].mq_map[cpu] = queue;
4180                  * }
4181                  *
4182                  * When we need to remap, the table has to be cleared to
4183                  * kill stale mappings, since a CPU may end up not mapped
4184                  * to any hw queue.
4185                  */
4186                 for (i = 0; i < set->nr_maps; i++)
4187                         blk_mq_clear_mq_map(&set->map[i]);
4188
4189                 return set->ops->map_queues(set);
4190         } else {
4191                 BUG_ON(set->nr_maps > 1);
4192                 return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
4193         }
4194 }
4195
4196 static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set,
4197                                   int cur_nr_hw_queues, int new_nr_hw_queues)
4198 {
4199         struct blk_mq_tags **new_tags;
4200
4201         if (cur_nr_hw_queues >= new_nr_hw_queues)
4202                 return 0;
4203
4204         new_tags = kcalloc_node(new_nr_hw_queues, sizeof(struct blk_mq_tags *),
4205                                 GFP_KERNEL, set->numa_node);
4206         if (!new_tags)
4207                 return -ENOMEM;
4208
4209         if (set->tags)
4210                 memcpy(new_tags, set->tags, cur_nr_hw_queues *
4211                        sizeof(*set->tags));
4212         kfree(set->tags);
4213         set->tags = new_tags;
4214         set->nr_hw_queues = new_nr_hw_queues;
4215
4216         return 0;
4217 }
4218
4219 static int blk_mq_alloc_tag_set_tags(struct blk_mq_tag_set *set,
4220                                 int new_nr_hw_queues)
4221 {
4222         return blk_mq_realloc_tag_set_tags(set, 0, new_nr_hw_queues);
4223 }
4224
4225 /*
4226  * Alloc a tag set to be associated with one or more request queues.
4227  * May fail with EINVAL for various error conditions. May adjust the
4228  * requested depth down, if it's too large. In that case, the adjusted
4229  * value will be stored in set->queue_depth.
4230  */
4231 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
4232 {
4233         int i, ret;
4234
4235         BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
4236
4237         if (!set->nr_hw_queues)
4238                 return -EINVAL;
4239         if (!set->queue_depth)
4240                 return -EINVAL;
4241         if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
4242                 return -EINVAL;
4243
4244         if (!set->ops->queue_rq)
4245                 return -EINVAL;
4246
4247         if (!set->ops->get_budget ^ !set->ops->put_budget)
4248                 return -EINVAL;
4249
4250         if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
4251                 pr_info("blk-mq: reduced tag depth to %u\n",
4252                         BLK_MQ_MAX_DEPTH);
4253                 set->queue_depth = BLK_MQ_MAX_DEPTH;
4254         }
4255
4256         if (!set->nr_maps)
4257                 set->nr_maps = 1;
4258         else if (set->nr_maps > HCTX_MAX_TYPES)
4259                 return -EINVAL;
4260
4261         /*
4262          * If a crashdump is active, then we are potentially in a very
4263          * memory-constrained environment. Limit us to 1 queue and
4264          * 64 tags to prevent using too much memory.
4265          */
4266         if (is_kdump_kernel()) {
4267                 set->nr_hw_queues = 1;
4268                 set->nr_maps = 1;
4269                 set->queue_depth = min(64U, set->queue_depth);
4270         }
4271         /*
4272          * There is no use for more h/w queues than cpus if we just have
4273          * a single map
4274          */
4275         if (set->nr_maps == 1 && set->nr_hw_queues > nr_cpu_ids)
4276                 set->nr_hw_queues = nr_cpu_ids;
4277
4278         if (blk_mq_alloc_tag_set_tags(set, set->nr_hw_queues) < 0)
4279                 return -ENOMEM;
4280
4281         ret = -ENOMEM;
4282         for (i = 0; i < set->nr_maps; i++) {
4283                 set->map[i].mq_map = kcalloc_node(nr_cpu_ids,
4284                                                   sizeof(set->map[i].mq_map[0]),
4285                                                   GFP_KERNEL, set->numa_node);
4286                 if (!set->map[i].mq_map)
4287                         goto out_free_mq_map;
4288                 set->map[i].nr_queues = is_kdump_kernel() ? 1 : set->nr_hw_queues;
4289         }
4290
4291         ret = blk_mq_update_queue_map(set);
4292         if (ret)
4293                 goto out_free_mq_map;
4294
4295         ret = blk_mq_alloc_set_map_and_rqs(set);
4296         if (ret)
4297                 goto out_free_mq_map;
4298
4299         mutex_init(&set->tag_list_lock);
4300         INIT_LIST_HEAD(&set->tag_list);
4301
4302         return 0;
4303
4304 out_free_mq_map:
4305         for (i = 0; i < set->nr_maps; i++) {
4306                 kfree(set->map[i].mq_map);
4307                 set->map[i].mq_map = NULL;
4308         }
4309         kfree(set->tags);
4310         set->tags = NULL;
4311         return ret;
4312 }
4313 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
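
/*
 * Illustrative sketch (hypothetical driver "mydrv"): the minimum a driver
 * fills in before calling blk_mq_alloc_tag_set() is roughly:
 *
 *	static const struct blk_mq_ops mydrv_mq_ops = {
 *		.queue_rq	= mydrv_queue_rq,	// hypothetical handler
 *	};
 *
 *	memset(&set, 0, sizeof(set));
 *	set.ops		 = &mydrv_mq_ops;
 *	set.nr_hw_queues = 4;
 *	set.queue_depth	 = 128;
 *	set.numa_node	 = NUMA_NO_NODE;
 *	set.cmd_size	 = sizeof(struct mydrv_cmd);	// per-request payload
 *	err = blk_mq_alloc_tag_set(&set);
 *
 * For the common single-queue case, blk_mq_alloc_sq_tag_set() below wraps
 * exactly this pattern.
 */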
4314
4315 /* allocate and initialize a tagset for a simple single-queue device */
4316 int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
4317                 const struct blk_mq_ops *ops, unsigned int queue_depth,
4318                 unsigned int set_flags)
4319 {
4320         memset(set, 0, sizeof(*set));
4321         set->ops = ops;
4322         set->nr_hw_queues = 1;
4323         set->nr_maps = 1;
4324         set->queue_depth = queue_depth;
4325         set->numa_node = NUMA_NO_NODE;
4326         set->flags = set_flags;
4327         return blk_mq_alloc_tag_set(set);
4328 }
4329 EXPORT_SYMBOL_GPL(blk_mq_alloc_sq_tag_set);
4330
4331 void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
4332 {
4333         int i, j;
4334
4335         for (i = 0; i < set->nr_hw_queues; i++)
4336                 __blk_mq_free_map_and_rqs(set, i);
4337
4338         if (blk_mq_is_shared_tags(set->flags)) {
4339                 blk_mq_free_map_and_rqs(set, set->shared_tags,
4340                                         BLK_MQ_NO_HCTX_IDX);
4341         }
4342
4343         for (j = 0; j < set->nr_maps; j++) {
4344                 kfree(set->map[j].mq_map);
4345                 set->map[j].mq_map = NULL;
4346         }
4347
4348         kfree(set->tags);
4349         set->tags = NULL;
4350 }
4351 EXPORT_SYMBOL(blk_mq_free_tag_set);
4352
4353 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
4354 {
4355         struct blk_mq_tag_set *set = q->tag_set;
4356         struct blk_mq_hw_ctx *hctx;
4357         int i, ret;
4358
4359         if (!set)
4360                 return -EINVAL;
4361
4362         if (q->nr_requests == nr)
4363                 return 0;
4364
4365         blk_mq_freeze_queue(q);
4366         blk_mq_quiesce_queue(q);
4367
4368         ret = 0;
4369         queue_for_each_hw_ctx(q, hctx, i) {
4370                 if (!hctx->tags)
4371                         continue;
4372                 /*
4373                  * If we're using an MQ scheduler, just update the scheduler
4374                  * queue depth. This is similar to what the old code would do.
4375                  */
4376                 if (hctx->sched_tags) {
4377                         ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
4378                                                       nr, true);
4379                 } else {
4380                         ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr,
4381                                                       false);
4382                 }
4383                 if (ret)
4384                         break;
4385                 if (q->elevator && q->elevator->type->ops.depth_updated)
4386                         q->elevator->type->ops.depth_updated(hctx);
4387         }
4388         if (!ret) {
4389                 q->nr_requests = nr;
4390                 if (blk_mq_is_shared_tags(set->flags)) {
4391                         if (q->elevator)
4392                                 blk_mq_tag_update_sched_shared_tags(q);
4393                         else
4394                                 blk_mq_tag_resize_shared_tags(set, nr);
4395                 }
4396         }
4397
4398         blk_mq_unquiesce_queue(q);
4399         blk_mq_unfreeze_queue(q);
4400
4401         return ret;
4402 }
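
/*
 * Usage note (sketch): this is what a write to the queue's sysfs
 * "nr_requests" attribute ends up calling, e.g.
 *
 *	echo 64 > /sys/block/<dev>/queue/nr_requests
 *
 * so the freeze/quiesce above has to cope with a live, busy queue.
 */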
4403
4404 /*
4405  * request_queue and elevator_type pair.
4406  * It is just used by __blk_mq_update_nr_hw_queues to cache
4407  * the elevator_type associated with a request_queue.
4408  */
4409 struct blk_mq_qe_pair {
4410         struct list_head node;
4411         struct request_queue *q;
4412         struct elevator_type *type;
4413 };
4414
4415 /*
4416  * Cache the elevator_type in the qe pair list and switch the
4417  * io scheduler to 'none'
4418  */
4419 static bool blk_mq_elv_switch_none(struct list_head *head,
4420                 struct request_queue *q)
4421 {
4422         struct blk_mq_qe_pair *qe;
4423
4424         if (!q->elevator)
4425                 return true;
4426
4427         qe = kmalloc(sizeof(*qe), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
4428         if (!qe)
4429                 return false;
4430
4431         INIT_LIST_HEAD(&qe->node);
4432         qe->q = q;
4433         qe->type = q->elevator->type;
4434         list_add(&qe->node, head);
4435
4436         mutex_lock(&q->sysfs_lock);
4437         /*
4438          * After elevator_switch_mq, the previous elevator_queue will be
4439          * released by elevator_release. The reference to the io scheduler
4440          * module taken by elevator_get will also be put. So we need to take
4441          * a reference to the io scheduler module here to prevent it from
4442          * being removed.
4443          */
4444         __module_get(qe->type->elevator_owner);
4445         elevator_switch_mq(q, NULL);
4446         mutex_unlock(&q->sysfs_lock);
4447
4448         return true;
4449 }
4450
4451 static void blk_mq_elv_switch_back(struct list_head *head,
4452                 struct request_queue *q)
4453 {
4454         struct blk_mq_qe_pair *qe;
4455         struct elevator_type *t = NULL;
4456
4457         list_for_each_entry(qe, head, node)
4458                 if (qe->q == q) {
4459                         t = qe->type;
4460                         break;
4461                 }
4462
4463         if (!t)
4464                 return;
4465
4466         list_del(&qe->node);
4467         kfree(qe);
4468
4469         mutex_lock(&q->sysfs_lock);
4470         elevator_switch_mq(q, t);
4471         mutex_unlock(&q->sysfs_lock);
4472 }
4473
4474 static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
4475                                                         int nr_hw_queues)
4476 {
4477         struct request_queue *q;
4478         LIST_HEAD(head);
4479         int prev_nr_hw_queues;
4480
4481         lockdep_assert_held(&set->tag_list_lock);
4482
4483         if (set->nr_maps == 1 && nr_hw_queues > nr_cpu_ids)
4484                 nr_hw_queues = nr_cpu_ids;
4485         if (nr_hw_queues < 1)
4486                 return;
4487         if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues)
4488                 return;
4489
4490         list_for_each_entry(q, &set->tag_list, tag_set_list)
4491                 blk_mq_freeze_queue(q);
4492         /*
4493          * Switch IO scheduler to 'none', cleaning up the data associated
4494          * with the previous scheduler. We will switch back once we are done
4495          * updating the new sw to hw queue mappings.
4496          */
4497         list_for_each_entry(q, &set->tag_list, tag_set_list)
4498                 if (!blk_mq_elv_switch_none(&head, q))
4499                         goto switch_back;
4500
4501         list_for_each_entry(q, &set->tag_list, tag_set_list) {
4502                 blk_mq_debugfs_unregister_hctxs(q);
4503                 blk_mq_sysfs_unregister(q);
4504         }
4505
4506         prev_nr_hw_queues = set->nr_hw_queues;
4507         if (blk_mq_realloc_tag_set_tags(set, set->nr_hw_queues, nr_hw_queues) <
4508             0)
4509                 goto reregister;
4510
4511         set->nr_hw_queues = nr_hw_queues;
4512 fallback:
4513         blk_mq_update_queue_map(set);
4514         list_for_each_entry(q, &set->tag_list, tag_set_list) {
4515                 blk_mq_realloc_hw_ctxs(set, q);
4516                 if (q->nr_hw_queues != set->nr_hw_queues) {
4517                         int i = prev_nr_hw_queues;
4518
4519                         pr_warn("Increasing nr_hw_queues to %d fails, fallback to %d\n",
4520                                         nr_hw_queues, prev_nr_hw_queues);
4521                         for (; i < set->nr_hw_queues; i++)
4522                                 __blk_mq_free_map_and_rqs(set, i);
4523
4524                         set->nr_hw_queues = prev_nr_hw_queues;
4525                         blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
4526                         goto fallback;
4527                 }
4528                 blk_mq_map_swqueue(q);
4529         }
4530
4531 reregister:
4532         list_for_each_entry(q, &set->tag_list, tag_set_list) {
4533                 blk_mq_sysfs_register(q);
4534                 blk_mq_debugfs_register_hctxs(q);
4535         }
4536
4537 switch_back:
4538         list_for_each_entry(q, &set->tag_list, tag_set_list)
4539                 blk_mq_elv_switch_back(&head, q);
4540
4541         list_for_each_entry(q, &set->tag_list, tag_set_list)
4542                 blk_mq_unfreeze_queue(q);
4543 }
4544
4545 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
4546 {
4547         mutex_lock(&set->tag_list_lock);
4548         __blk_mq_update_nr_hw_queues(set, nr_hw_queues);
4549         mutex_unlock(&set->tag_list_lock);
4550 }
4551 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
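
/*
 * Illustrative sketch (hypothetical PCI driver reset path): a driver that
 * re-negotiates its interrupt vectors on reset would typically do:
 *
 *	nr = pci_alloc_irq_vectors(pdev, 1, max_qs, PCI_IRQ_ALL_TYPES);
 *	if (nr > 0)
 *		blk_mq_update_nr_hw_queues(&dev->tag_set, nr);
 *
 * letting the code above freeze the queues, reallocate hctxs and remap
 * software to hardware queues.
 */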
4552
4553 /* Enable polling stats and return whether they were already enabled. */
4554 static bool blk_poll_stats_enable(struct request_queue *q)
4555 {
4556         if (q->poll_stat)
4557                 return true;
4558
4559         return blk_stats_alloc_enable(q);
4560 }
4561
4562 static void blk_mq_poll_stats_start(struct request_queue *q)
4563 {
4564         /*
4565          * We don't arm the callback if polling stats are not enabled or the
4566          * callback is already active.
4567          */
4568         if (!q->poll_stat || blk_stat_is_active(q->poll_cb))
4569                 return;
4570
4571         blk_stat_activate_msecs(q->poll_cb, 100);
4572 }
4573
4574 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb)
4575 {
4576         struct request_queue *q = cb->data;
4577         int bucket;
4578
4579         for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS; bucket++) {
4580                 if (cb->stat[bucket].nr_samples)
4581                         q->poll_stat[bucket] = cb->stat[bucket];
4582         }
4583 }
4584
4585 static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
4586                                        struct request *rq)
4587 {
4588         unsigned long ret = 0;
4589         int bucket;
4590
4591         /*
4592          * If stats collection isn't enabled, don't sleep; just turn it on
4593          * for future callers.
4594          */
4595         if (!blk_poll_stats_enable(q))
4596                 return 0;
4597
4598         /*
4599          * As an optimistic guess, use half of the mean service time
4600          * for this type of request. We can (and should) make this smarter.
4601          * For instance, if the completion latencies are tight, we can
4602          * get closer than just half the mean. This is especially
4603          * important on devices where the completion latencies are longer
4604          * than ~10 usec. We do use the stats for the relevant IO size
4605          * if available, which does lead to better estimates.
4606          */
4607         bucket = blk_mq_poll_stats_bkt(rq);
4608         if (bucket < 0)
4609                 return ret;
4610
4611         if (q->poll_stat[bucket].nr_samples)
4612                 ret = (q->poll_stat[bucket].mean + 1) / 2;
4613
4614         return ret;
4615 }
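
/*
 * Worked example (illustrative numbers): for a 4KiB read, sectors == 8, so
 * blk_mq_poll_stats_bkt() yields bucket 0 + 2 * ilog2(8) = 6. If that
 * bucket's observed mean completion time is 20000ns, the suggested
 * pre-sleep target is (20000 + 1) / 2 = 10000ns: sleep for roughly half
 * the expected latency, then busy-poll the remainder.
 */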
4616
4617 static bool blk_mq_poll_hybrid(struct request_queue *q, blk_qc_t qc)
4618 {
4619         struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, qc);
4620         struct request *rq = blk_qc_to_rq(hctx, qc);
4621         struct hrtimer_sleeper hs;
4622         enum hrtimer_mode mode;
4623         unsigned int nsecs;
4624         ktime_t kt;
4625
4626         /*
4627          * If a request has completed on a queue that uses an I/O scheduler, we
4628          * won't get back a request from blk_qc_to_rq.
4629          */
4630         if (!rq || (rq->rq_flags & RQF_MQ_POLL_SLEPT))
4631                 return false;
4632
4633         /*
4634          * If we get here, hybrid polling is enabled. Hence poll_nsec can be:
4635          *
4636          *  0:  use half of prev avg
4637          * >0:  use this specific value
4638          */
4639         if (q->poll_nsec > 0)
4640                 nsecs = q->poll_nsec;
4641         else
4642                 nsecs = blk_mq_poll_nsecs(q, rq);
4643
4644         if (!nsecs)
4645                 return false;
4646
4647         rq->rq_flags |= RQF_MQ_POLL_SLEPT;
4648
4649         /*
4650          * Use the value computed above (either the fixed q->poll_nsec or
4651          * half the stats-derived mean completion time) as the sleep target.
4652          */
4653         kt = nsecs;
4654
4655         mode = HRTIMER_MODE_REL;
4656         hrtimer_init_sleeper_on_stack(&hs, CLOCK_MONOTONIC, mode);
4657         hrtimer_set_expires(&hs.timer, kt);
4658
4659         do {
4660                 if (blk_mq_rq_state(rq) == MQ_RQ_COMPLETE)
4661                         break;
4662                 set_current_state(TASK_UNINTERRUPTIBLE);
4663                 hrtimer_sleeper_start_expires(&hs, mode);
4664                 if (hs.task)
4665                         io_schedule();
4666                 hrtimer_cancel(&hs.timer);
4667                 mode = HRTIMER_MODE_ABS;
4668         } while (hs.task && !signal_pending(current));
4669
4670         __set_current_state(TASK_RUNNING);
4671         destroy_hrtimer_on_stack(&hs.timer);
4672
4673         /*
4674          * If we sleep, have the caller restart the poll loop to reset the
4675          * state.  Like for the other success return cases, the caller is
4676          * responsible for checking if the IO completed.  If the IO isn't
4677          * complete, we'll get called again and will go straight to the busy
4678          * poll loop.
4679          */
4680         return true;
4681 }
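
/*
 * Net effect, with illustrative numbers: on a device whose mean 4KiB read
 * completion takes 80us, the task hrtimer-sleeps for ~40us here instead of
 * spinning, then (because RQF_MQ_POLL_SLEPT is now set) falls through to
 * the classic busy-poll loop for the remaining ~40us. Polling CPU time is
 * roughly halved, at the risk of oversleeping past the completion.
 */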
4682
4683 static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie,
4684                                struct io_comp_batch *iob, unsigned int flags)
4685 {
4686         struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, cookie);
4687         long state = get_current_state();
4688         int ret;
4689
4690         do {
4691                 ret = q->mq_ops->poll(hctx, iob);
4692                 if (ret > 0) {
4693                         __set_current_state(TASK_RUNNING);
4694                         return ret;
4695                 }
4696
4697                 if (signal_pending_state(state, current))
4698                         __set_current_state(TASK_RUNNING);
4699                 if (task_is_running(current))
4700                         return 1;
4701
4702                 if (ret < 0 || (flags & BLK_POLL_ONESHOT))
4703                         break;
4704                 cpu_relax();
4705         } while (!need_resched());
4706
4707         __set_current_state(TASK_RUNNING);
4708         return 0;
4709 }
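
/*
 * Illustrative consumer sketch (an assumption, not lifted from this file):
 * a polling caller above the block layer typically loops like
 *
 *	DEFINE_IO_COMP_BATCH(iob);
 *
 *	while (!READ_ONCE(done))
 *		bio_poll(bio, &iob, 0);
 *	if (!rq_list_empty(iob.req_list))
 *		iob.complete(&iob);
 *
 * where 'done' stands in for the caller's completion flag. bio_poll()
 * resolves the bio's cookie and ends up in blk_mq_poll() below; the batch
 * lets the driver's ->poll() defer per-request completion work so it can
 * be flushed once per reap.
 */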
4710
4711 int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
4712                 unsigned int flags)
4713 {
4714         if (!(flags & BLK_POLL_NOSLEEP) &&
4715             q->poll_nsec != BLK_MQ_POLL_CLASSIC) {
4716                 if (blk_mq_poll_hybrid(q, cookie))
4717                         return 1;
4718         }
4719         return blk_mq_poll_classic(q, cookie, iob, flags);
4720 }
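
/*
 * Configuration note: the hybrid-vs-classic choice is driven by the
 * queue's io_poll_delay sysfs attribute (semantics here assumed from the
 * block queue-sysfs documentation): -1 selects classic polling
 * (BLK_MQ_POLL_CLASSIC), 0 selects adaptive hybrid polling (half the
 * mean, as computed above), and a positive value requests a fixed hybrid
 * sleep time, e.g.:
 *
 *	echo -1 > /sys/block/nvme0n1/queue/io_poll_delay	# classic
 *	echo  0 > /sys/block/nvme0n1/queue/io_poll_delay	# adaptive hybrid
 */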
4721
4722 unsigned int blk_mq_rq_cpu(struct request *rq)
4723 {
4724         return rq->mq_ctx->cpu;
4725 }
4726 EXPORT_SYMBOL(blk_mq_rq_cpu);
4727
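/*
 * Cancel any pending requeue or hctx run work and wait for in-flight
 * instances to finish, so no deferred work touches the queue once
 * teardown has started.
 */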
4728 void blk_mq_cancel_work_sync(struct request_queue *q)
4729 {
4730         if (queue_is_mq(q)) {
4731                 struct blk_mq_hw_ctx *hctx;
4732                 int i;
4733
4734                 cancel_delayed_work_sync(&q->requeue_work);
4735
4736                 queue_for_each_hw_ctx(q, hctx, i)
4737                         cancel_delayed_work_sync(&hctx->run_work);
4738         }
4739 }
4740
4741 static int __init blk_mq_init(void)
4742 {
4743         int i;
4744
4745         for_each_possible_cpu(i)
4746                 init_llist_head(&per_cpu(blk_cpu_done, i));
4747         open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
4748
4749         cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD,
4750                                   "block/softirq:dead", NULL,
4751                                   blk_softirq_cpu_dead);
4752         cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
4753                                 blk_mq_hctx_notify_dead);
4754         cpuhp_setup_state_multi(CPUHP_AP_BLK_MQ_ONLINE, "block/mq:online",
4755                                 blk_mq_hctx_notify_online,
4756                                 blk_mq_hctx_notify_offline);
4757         return 0;
4758 }
4759 subsys_initcall(blk_mq_init);