blk-mq: make blk_abort_request() trigger timeout path
1 /*
2  * Block multiqueue core code
3  *
4  * Copyright (C) 2013-2014 Jens Axboe
5  * Copyright (C) 2013-2014 Christoph Hellwig
6  */
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/backing-dev.h>
10 #include <linux/bio.h>
11 #include <linux/blkdev.h>
12 #include <linux/kmemleak.h>
13 #include <linux/mm.h>
14 #include <linux/init.h>
15 #include <linux/slab.h>
16 #include <linux/workqueue.h>
17 #include <linux/smp.h>
18 #include <linux/llist.h>
19 #include <linux/list_sort.h>
20 #include <linux/cpu.h>
21 #include <linux/cache.h>
22 #include <linux/sched/sysctl.h>
23 #include <linux/sched/topology.h>
24 #include <linux/sched/signal.h>
25 #include <linux/delay.h>
26 #include <linux/crash_dump.h>
27 #include <linux/prefetch.h>
28
29 #include <trace/events/block.h>
30
31 #include <linux/blk-mq.h>
32 #include "blk.h"
33 #include "blk-mq.h"
34 #include "blk-mq-debugfs.h"
35 #include "blk-mq-tag.h"
36 #include "blk-stat.h"
37 #include "blk-wbt.h"
38 #include "blk-mq-sched.h"
39
40 static bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie);
41 static void blk_mq_poll_stats_start(struct request_queue *q);
42 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
43
44 static int blk_mq_poll_stats_bkt(const struct request *rq)
45 {
46         int ddir, bytes, bucket;
47
48         ddir = rq_data_dir(rq);
49         bytes = blk_rq_bytes(rq);
50
51         bucket = ddir + 2*(ilog2(bytes) - 9);
52
53         if (bucket < 0)
54                 return -1;
55         else if (bucket >= BLK_MQ_POLL_STATS_BKTS)
56                 return ddir + BLK_MQ_POLL_STATS_BKTS - 2;
57
58         return bucket;
59 }
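/*
 * Worked example of the bucket arithmetic above (illustrative only):
 * a 4KiB read has ddir == 0 and ilog2(4096) == 12, so
 * bucket = 0 + 2 * (12 - 9) = 6; a 512-byte write has ddir == 1 and
 * ilog2(512) == 9, so bucket = 1 + 2 * 0 = 1.  Requests below 512 bytes
 * yield a negative bucket (returned as -1, i.e. not accounted), and very
 * large requests are clamped into the last read/write bucket pair.
 */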
60
61 /*
62  * Check if any of the ctx's have pending work in this hardware queue
63  */
64 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
65 {
66         return !list_empty_careful(&hctx->dispatch) ||
67                 sbitmap_any_bit_set(&hctx->ctx_map) ||
68                         blk_mq_sched_has_work(hctx);
69 }
70
71 /*
72  * Mark this ctx as having pending work in this hardware queue
73  */
74 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
75                                      struct blk_mq_ctx *ctx)
76 {
77         if (!sbitmap_test_bit(&hctx->ctx_map, ctx->index_hw))
78                 sbitmap_set_bit(&hctx->ctx_map, ctx->index_hw);
79 }
80
81 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
82                                       struct blk_mq_ctx *ctx)
83 {
84         sbitmap_clear_bit(&hctx->ctx_map, ctx->index_hw);
85 }
86
87 struct mq_inflight {
88         struct hd_struct *part;
89         unsigned int *inflight;
90 };
91
92 static void blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
93                                   struct request *rq, void *priv,
94                                   bool reserved)
95 {
96         struct mq_inflight *mi = priv;
97
98         if (blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT) {
99                 /*
100                  * index[0] counts the specific partition that was asked
101                  * for. index[1] counts the ones that are active on the
102                  * whole device, so increment that if mi->part is indeed
103                  * a partition, and not a whole device.
104                  */
105                 if (rq->part == mi->part)
106                         mi->inflight[0]++;
107                 if (mi->part->partno)
108                         mi->inflight[1]++;
109         }
110 }
111
112 void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
113                       unsigned int inflight[2])
114 {
115         struct mq_inflight mi = { .part = part, .inflight = inflight, };
116
117         inflight[0] = inflight[1] = 0;
118         blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
119 }
120
121 void blk_freeze_queue_start(struct request_queue *q)
122 {
123         int freeze_depth;
124
125         freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
126         if (freeze_depth == 1) {
127                 percpu_ref_kill(&q->q_usage_counter);
128                 if (q->mq_ops)
129                         blk_mq_run_hw_queues(q, false);
130         }
131 }
132 EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
133
134 void blk_mq_freeze_queue_wait(struct request_queue *q)
135 {
136         wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
137 }
138 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);
139
140 int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
141                                      unsigned long timeout)
142 {
143         return wait_event_timeout(q->mq_freeze_wq,
144                                         percpu_ref_is_zero(&q->q_usage_counter),
145                                         timeout);
146 }
147 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);
148
149 /*
150  * Guarantee no request is in use, so we can change any data structure of
151  * the queue afterward.
152  */
153 void blk_freeze_queue(struct request_queue *q)
154 {
155         /*
156          * In the !blk_mq case we are only calling this to kill the
157          * q_usage_counter, otherwise this increases the freeze depth
158          * and waits for q_usage_counter to drop to zero.  For this reason there is
159          * no blk_unfreeze_queue(), and blk_freeze_queue() is not
160          * exported to drivers as the only user for unfreeze is blk_mq.
161          */
162         blk_freeze_queue_start(q);
163         blk_mq_freeze_queue_wait(q);
164 }
165
166 void blk_mq_freeze_queue(struct request_queue *q)
167 {
168         /*
169          * ...just an alias to keep freeze and unfreeze actions balanced
170          * in the blk_mq_* namespace
171          */
172         blk_freeze_queue(q);
173 }
174 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
175
176 void blk_mq_unfreeze_queue(struct request_queue *q)
177 {
178         int freeze_depth;
179
180         freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
181         WARN_ON_ONCE(freeze_depth < 0);
182         if (!freeze_depth) {
183                 percpu_ref_reinit(&q->q_usage_counter);
184                 wake_up_all(&q->mq_freeze_wq);
185         }
186 }
187 EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
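/*
 * Illustrative sketch, not taken from a real caller: code that must not
 * see any request in flight while it rewrites queue data structures
 * typically brackets the update as
 *
 *	blk_mq_freeze_queue(q);
 *	... update tag/hctx/queue state ...
 *	blk_mq_unfreeze_queue(q);
 *
 * blk_mq_update_nr_requests() is one in-tree example of this pattern.
 */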
188
189 /*
190  * FIXME: replace the scsi_internal_device_*block_nowait() calls in the
191  * mpt3sas driver such that this function can be removed.
192  */
193 void blk_mq_quiesce_queue_nowait(struct request_queue *q)
194 {
195         unsigned long flags;
196
197         spin_lock_irqsave(q->queue_lock, flags);
198         queue_flag_set(QUEUE_FLAG_QUIESCED, q);
199         spin_unlock_irqrestore(q->queue_lock, flags);
200 }
201 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);
202
203 /**
204  * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
205  * @q: request queue.
206  *
207  * Note: this function does not prevent the struct request end_io()
208  * callback from being invoked. Once this function returns, no new
209  * dispatch can happen until the queue is unquiesced via
210  * blk_mq_unquiesce_queue().
211  */
212 void blk_mq_quiesce_queue(struct request_queue *q)
213 {
214         struct blk_mq_hw_ctx *hctx;
215         unsigned int i;
216         bool rcu = false;
217
218         blk_mq_quiesce_queue_nowait(q);
219
220         queue_for_each_hw_ctx(q, hctx, i) {
221                 if (hctx->flags & BLK_MQ_F_BLOCKING)
222                         synchronize_srcu(hctx->queue_rq_srcu);
223                 else
224                         rcu = true;
225         }
226         if (rcu)
227                 synchronize_rcu();
228 }
229 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
230
231 /*
232  * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue()
233  * @q: request queue.
234  *
235  * This function restores the queue to the state it was in before
236  * blk_mq_quiesce_queue() was called.
237  */
238 void blk_mq_unquiesce_queue(struct request_queue *q)
239 {
240         unsigned long flags;
241
242         spin_lock_irqsave(q->queue_lock, flags);
243         queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
244         spin_unlock_irqrestore(q->queue_lock, flags);
245
246         /* dispatch requests which are inserted during quiescing */
247         blk_mq_run_hw_queues(q, true);
248 }
249 EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);
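/*
 * Illustrative sketch (hypothetical caller): to guarantee that no
 * .queue_rq() invocation is running while dispatch-related state is
 * changed, bracket the change with
 *
 *	blk_mq_quiesce_queue(q);
 *	... no dispatch is running or can start here ...
 *	blk_mq_unquiesce_queue(q);
 *
 * Unlike freezing, quiescing does not wait for already-issued requests
 * to complete; it only stops further dispatching.
 */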
250
251 void blk_mq_wake_waiters(struct request_queue *q)
252 {
253         struct blk_mq_hw_ctx *hctx;
254         unsigned int i;
255
256         queue_for_each_hw_ctx(q, hctx, i)
257                 if (blk_mq_hw_queue_mapped(hctx))
258                         blk_mq_tag_wakeup_all(hctx->tags, true);
259 }
260
261 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
262 {
263         return blk_mq_has_free_tags(hctx->tags);
264 }
265 EXPORT_SYMBOL(blk_mq_can_queue);
266
267 static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
268                 unsigned int tag, unsigned int op)
269 {
270         struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
271         struct request *rq = tags->static_rqs[tag];
272
273         rq->rq_flags = 0;
274
275         if (data->flags & BLK_MQ_REQ_INTERNAL) {
276                 rq->tag = -1;
277                 rq->internal_tag = tag;
278         } else {
279                 if (blk_mq_tag_busy(data->hctx)) {
280                         rq->rq_flags = RQF_MQ_INFLIGHT;
281                         atomic_inc(&data->hctx->nr_active);
282                 }
283                 rq->tag = tag;
284                 rq->internal_tag = -1;
285                 data->hctx->tags->rqs[rq->tag] = rq;
286         }
287
288         INIT_LIST_HEAD(&rq->queuelist);
289         /* csd/requeue_work/fifo_time is initialized before use */
290         rq->q = data->q;
291         rq->mq_ctx = data->ctx;
292         rq->cmd_flags = op;
293         if (data->flags & BLK_MQ_REQ_PREEMPT)
294                 rq->rq_flags |= RQF_PREEMPT;
295         if (blk_queue_io_stat(data->q))
296                 rq->rq_flags |= RQF_IO_STAT;
297         /* do not touch atomic flags, it needs atomic ops against the timer */
298         rq->cpu = -1;
299         INIT_HLIST_NODE(&rq->hash);
300         RB_CLEAR_NODE(&rq->rb_node);
301         rq->rq_disk = NULL;
302         rq->part = NULL;
303         rq->start_time = jiffies;
304 #ifdef CONFIG_BLK_CGROUP
305         rq->rl = NULL;
306         set_start_time_ns(rq);
307         rq->io_start_time_ns = 0;
308 #endif
309         rq->nr_phys_segments = 0;
310 #if defined(CONFIG_BLK_DEV_INTEGRITY)
311         rq->nr_integrity_segments = 0;
312 #endif
313         rq->special = NULL;
314         /* tag was already set */
315         rq->extra_len = 0;
316
317         INIT_LIST_HEAD(&rq->timeout_list);
318         rq->timeout = 0;
319
320         rq->end_io = NULL;
321         rq->end_io_data = NULL;
322         rq->next_rq = NULL;
323
324         data->ctx->rq_dispatched[op_is_sync(op)]++;
325         return rq;
326 }
327
328 static struct request *blk_mq_get_request(struct request_queue *q,
329                 struct bio *bio, unsigned int op,
330                 struct blk_mq_alloc_data *data)
331 {
332         struct elevator_queue *e = q->elevator;
333         struct request *rq;
334         unsigned int tag;
335         bool put_ctx_on_error = false;
336
337         blk_queue_enter_live(q);
338         data->q = q;
339         if (likely(!data->ctx)) {
340                 data->ctx = blk_mq_get_ctx(q);
341                 put_ctx_on_error = true;
342         }
343         if (likely(!data->hctx))
344                 data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
345         if (op & REQ_NOWAIT)
346                 data->flags |= BLK_MQ_REQ_NOWAIT;
347
348         if (e) {
349                 data->flags |= BLK_MQ_REQ_INTERNAL;
350
351                 /*
352                  * Flush requests are special and go directly to the
353                  * dispatch list.
354                  */
355                 if (!op_is_flush(op) && e->type->ops.mq.limit_depth)
356                         e->type->ops.mq.limit_depth(op, data);
357         }
358
359         tag = blk_mq_get_tag(data);
360         if (tag == BLK_MQ_TAG_FAIL) {
361                 if (put_ctx_on_error) {
362                         blk_mq_put_ctx(data->ctx);
363                         data->ctx = NULL;
364                 }
365                 blk_queue_exit(q);
366                 return NULL;
367         }
368
369         rq = blk_mq_rq_ctx_init(data, tag, op);
370         if (!op_is_flush(op)) {
371                 rq->elv.icq = NULL;
372                 if (e && e->type->ops.mq.prepare_request) {
373                         if (e->type->icq_cache && rq_ioc(bio))
374                                 blk_mq_sched_assign_ioc(rq, bio);
375
376                         e->type->ops.mq.prepare_request(rq, bio);
377                         rq->rq_flags |= RQF_ELVPRIV;
378                 }
379         }
380         data->hctx->queued++;
381         return rq;
382 }
383
384 struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
385                 blk_mq_req_flags_t flags)
386 {
387         struct blk_mq_alloc_data alloc_data = { .flags = flags };
388         struct request *rq;
389         int ret;
390
391         ret = blk_queue_enter(q, flags);
392         if (ret)
393                 return ERR_PTR(ret);
394
395         rq = blk_mq_get_request(q, NULL, op, &alloc_data);
396         blk_queue_exit(q);
397
398         if (!rq)
399                 return ERR_PTR(-EWOULDBLOCK);
400
401         blk_mq_put_ctx(alloc_data.ctx);
402
403         rq->__data_len = 0;
404         rq->__sector = (sector_t) -1;
405         rq->bio = rq->biotail = NULL;
406         return rq;
407 }
408 EXPORT_SYMBOL(blk_mq_alloc_request);
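/*
 * Illustrative sketch (hypothetical caller, error handling trimmed): a
 * passthrough user allocates, issues and frees a request roughly as
 *
 *	struct request *rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
 *
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	blk_execute_rq(q, NULL, rq, 0);
 *	blk_mq_free_request(rq);
 *
 * Passing BLK_MQ_REQ_NOWAIT in @flags makes the allocation fail instead
 * of sleeping when no tag is available.
 */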
409
410 struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
411         unsigned int op, blk_mq_req_flags_t flags, unsigned int hctx_idx)
412 {
413         struct blk_mq_alloc_data alloc_data = { .flags = flags };
414         struct request *rq;
415         unsigned int cpu;
416         int ret;
417
418         /*
419          * If the tag allocator sleeps we could get an allocation for a
420          * different hardware context.  No need to complicate the low level
421          * allocator for this for the rare use case of a command tied to
422          * a specific queue.
423          */
424         if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)))
425                 return ERR_PTR(-EINVAL);
426
427         if (hctx_idx >= q->nr_hw_queues)
428                 return ERR_PTR(-EIO);
429
430         ret = blk_queue_enter(q, flags);
431         if (ret)
432                 return ERR_PTR(ret);
433
434         /*
435          * Check if the hardware context is actually mapped to anything.
436          * If not tell the caller that it should skip this queue.
437          */
438         alloc_data.hctx = q->queue_hw_ctx[hctx_idx];
439         if (!blk_mq_hw_queue_mapped(alloc_data.hctx)) {
440                 blk_queue_exit(q);
441                 return ERR_PTR(-EXDEV);
442         }
443         cpu = cpumask_first(alloc_data.hctx->cpumask);
444         alloc_data.ctx = __blk_mq_get_ctx(q, cpu);
445
446         rq = blk_mq_get_request(q, NULL, op, &alloc_data);
447         blk_queue_exit(q);
448
449         if (!rq)
450                 return ERR_PTR(-EWOULDBLOCK);
451
452         return rq;
453 }
454 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
455
456 void blk_mq_free_request(struct request *rq)
457 {
458         struct request_queue *q = rq->q;
459         struct elevator_queue *e = q->elevator;
460         struct blk_mq_ctx *ctx = rq->mq_ctx;
461         struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
462         const int sched_tag = rq->internal_tag;
463
464         if (rq->rq_flags & RQF_ELVPRIV) {
465                 if (e && e->type->ops.mq.finish_request)
466                         e->type->ops.mq.finish_request(rq);
467                 if (rq->elv.icq) {
468                         put_io_context(rq->elv.icq->ioc);
469                         rq->elv.icq = NULL;
470                 }
471         }
472
473         ctx->rq_completed[rq_is_sync(rq)]++;
474         if (rq->rq_flags & RQF_MQ_INFLIGHT)
475                 atomic_dec(&hctx->nr_active);
476
477         if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
478                 laptop_io_completion(q->backing_dev_info);
479
480         wbt_done(q->rq_wb, &rq->issue_stat);
481
482         if (blk_rq_rl(rq))
483                 blk_put_rl(blk_rq_rl(rq));
484
485         blk_mq_rq_update_state(rq, MQ_RQ_IDLE);
486         clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
487         clear_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
488         if (rq->tag != -1)
489                 blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
490         if (sched_tag != -1)
491                 blk_mq_put_tag(hctx, hctx->sched_tags, ctx, sched_tag);
492         blk_mq_sched_restart(hctx);
493         blk_queue_exit(q);
494 }
495 EXPORT_SYMBOL_GPL(blk_mq_free_request);
496
497 inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
498 {
499         blk_account_io_done(rq);
500
501         if (rq->end_io) {
502                 wbt_done(rq->q->rq_wb, &rq->issue_stat);
503                 rq->end_io(rq, error);
504         } else {
505                 if (unlikely(blk_bidi_rq(rq)))
506                         blk_mq_free_request(rq->next_rq);
507                 blk_mq_free_request(rq);
508         }
509 }
510 EXPORT_SYMBOL(__blk_mq_end_request);
511
512 void blk_mq_end_request(struct request *rq, blk_status_t error)
513 {
514         if (blk_update_request(rq, error, blk_rq_bytes(rq)))
515                 BUG();
516         __blk_mq_end_request(rq, error);
517 }
518 EXPORT_SYMBOL(blk_mq_end_request);
519
520 static void __blk_mq_complete_request_remote(void *data)
521 {
522         struct request *rq = data;
523
524         rq->q->softirq_done_fn(rq);
525 }
526
527 static void __blk_mq_complete_request(struct request *rq)
528 {
529         struct blk_mq_ctx *ctx = rq->mq_ctx;
530         bool shared = false;
531         int cpu;
532
533         WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT);
534
535         if (rq->internal_tag != -1)
536                 blk_mq_sched_completed_request(rq);
537         if (rq->rq_flags & RQF_STATS) {
538                 blk_mq_poll_stats_start(rq->q);
539                 blk_stat_add(rq);
540         }
541
542         if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
543                 rq->q->softirq_done_fn(rq);
544                 return;
545         }
546
547         cpu = get_cpu();
548         if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
549                 shared = cpus_share_cache(cpu, ctx->cpu);
550
551         if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
552                 rq->csd.func = __blk_mq_complete_request_remote;
553                 rq->csd.info = rq;
554                 rq->csd.flags = 0;
555                 smp_call_function_single_async(ctx->cpu, &rq->csd);
556         } else {
557                 rq->q->softirq_done_fn(rq);
558         }
559         put_cpu();
560 }
561
562 static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx)
563 {
564         if (!(hctx->flags & BLK_MQ_F_BLOCKING))
565                 rcu_read_unlock();
566         else
567                 srcu_read_unlock(hctx->queue_rq_srcu, srcu_idx);
568 }
569
570 static void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx)
571 {
572         if (!(hctx->flags & BLK_MQ_F_BLOCKING))
573                 rcu_read_lock();
574         else
575                 *srcu_idx = srcu_read_lock(hctx->queue_rq_srcu);
576 }
577
578 static void blk_mq_rq_update_aborted_gstate(struct request *rq, u64 gstate)
579 {
580         unsigned long flags;
581
582         /*
583          * blk_mq_rq_aborted_gstate() is used from the completion path and
584          * can thus be called from irq context.  u64_stats_fetch in the
585          * middle of update on the same CPU leads to lockup.  Disable irq
586          * while updating.
587          */
588         local_irq_save(flags);
589         u64_stats_update_begin(&rq->aborted_gstate_sync);
590         rq->aborted_gstate = gstate;
591         u64_stats_update_end(&rq->aborted_gstate_sync);
592         local_irq_restore(flags);
593 }
594
595 static u64 blk_mq_rq_aborted_gstate(struct request *rq)
596 {
597         unsigned int start;
598         u64 aborted_gstate;
599
600         do {
601                 start = u64_stats_fetch_begin(&rq->aborted_gstate_sync);
602                 aborted_gstate = rq->aborted_gstate;
603         } while (u64_stats_fetch_retry(&rq->aborted_gstate_sync, start));
604
605         return aborted_gstate;
606 }
607
608 /**
609  * blk_mq_complete_request - end I/O on a request
610  * @rq:         the request being processed
611  *
612  * Description:
613  *      Ends all I/O on a request. It does not handle partial completions.
614  *      The actual completion happens out-of-order, through an IPI handler.
615  **/
616 void blk_mq_complete_request(struct request *rq)
617 {
618         struct request_queue *q = rq->q;
619         struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
620         int srcu_idx;
621
622         if (unlikely(blk_should_fake_timeout(q)))
623                 return;
624
625         /*
626          * If @rq->aborted_gstate equals the current instance, timeout is
627          * claiming @rq and we lost.  This is synchronized through
628          * hctx_lock().  See blk_mq_timeout_work() for details.
629          *
630          * Completion path never blocks and we can directly use RCU here
631          * instead of hctx_lock() which can be either RCU or SRCU.
632          * However, that would complicate paths which want to synchronize
633  * against us.  Let's stay in sync with the issue path so that
634          * hctx_lock() covers both issue and completion paths.
635          */
636         hctx_lock(hctx, &srcu_idx);
637         if (blk_mq_rq_aborted_gstate(rq) != rq->gstate &&
638             !blk_mark_rq_complete(rq))
639                 __blk_mq_complete_request(rq);
640         hctx_unlock(hctx, srcu_idx);
641 }
642 EXPORT_SYMBOL(blk_mq_complete_request);
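/*
 * Illustrative sketch (hypothetical driver; cqe_tag is a made-up name):
 * drivers usually complete requests from their interrupt handler by
 * mapping the hardware completion back to the request and handing it to
 * the block layer:
 *
 *	struct request *rq = blk_mq_tag_to_rq(hctx->tags, cqe_tag);
 *
 *	if (rq)
 *		blk_mq_complete_request(rq);
 *
 * The real end_io work then runs via q->softirq_done_fn, possibly on the
 * submitting CPU (see the QUEUE_FLAG_SAME_COMP handling above).
 */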
643
644 int blk_mq_request_started(struct request *rq)
645 {
646         return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
647 }
648 EXPORT_SYMBOL_GPL(blk_mq_request_started);
649
650 void blk_mq_start_request(struct request *rq)
651 {
652         struct request_queue *q = rq->q;
653
654         blk_mq_sched_started_request(rq);
655
656         trace_block_rq_issue(q, rq);
657
658         if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
659                 blk_stat_set_issue(&rq->issue_stat, blk_rq_sectors(rq));
660                 rq->rq_flags |= RQF_STATS;
661                 wbt_issue(q->rq_wb, &rq->issue_stat);
662         }
663
664         WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE);
665         WARN_ON_ONCE(test_bit(REQ_ATOM_STARTED, &rq->atomic_flags));
666
667         /*
668          * Mark @rq in-flight which also advances the generation number,
669          * and register for timeout.  Protect with a seqcount to allow the
670          * timeout path to read both @rq->gstate and @rq->deadline
671          * coherently.
672          *
673          * This is the only place where a request is marked in-flight.  If
674          * the timeout path reads an in-flight @rq->gstate, the
675          * @rq->deadline it reads together under @rq->gstate_seq is
676          * guaranteed to be the matching one.
677          */
678         preempt_disable();
679         write_seqcount_begin(&rq->gstate_seq);
680
681         blk_mq_rq_update_state(rq, MQ_RQ_IN_FLIGHT);
682         blk_add_timer(rq);
683
684         write_seqcount_end(&rq->gstate_seq);
685         preempt_enable();
686
687         set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
688         if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
689                 clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
690
691         if (q->dma_drain_size && blk_rq_bytes(rq)) {
692                 /*
693                  * Make sure space for the drain appears.  We know we can do
694                  * this because max_hw_segments has been adjusted to be one
695                  * fewer than the device can handle.
696                  */
697                 rq->nr_phys_segments++;
698         }
699 }
700 EXPORT_SYMBOL(blk_mq_start_request);
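/*
 * Illustrative sketch of a ->queue_rq() implementation (the foo_* names
 * are made up, error paths trimmed): blk_mq_start_request() is called
 * once the driver commits to issuing the request, before it is handed to
 * the hardware:
 *
 *	static blk_status_t foo_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					 const struct blk_mq_queue_data *bd)
 *	{
 *		struct request *rq = bd->rq;
 *
 *		if (!foo_hw_has_room(hctx->driver_data))
 *			return BLK_STS_RESOURCE;
 *		blk_mq_start_request(rq);
 *		foo_hw_submit(hctx->driver_data, rq);
 *		return BLK_STS_OK;
 *	}
 */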
701
702 /*
703  * When we reach here because the queue is busy, the REQ_ATOM_COMPLETE
704  * flag isn't set yet, so there may be a race with the timeout handler.
705  * But since rq->deadline has only just been set in .queue_rq() in this
706  * situation, the race is not possible in practice because rq->timeout
707  * should be large enough to cover the window between
708  * blk_mq_start_request() being called from .queue_rq() and
709  * REQ_ATOM_STARTED being cleared here.
710  */
711 static void __blk_mq_requeue_request(struct request *rq)
712 {
713         struct request_queue *q = rq->q;
714
715         blk_mq_put_driver_tag(rq);
716
717         trace_block_rq_requeue(q, rq);
718         wbt_requeue(q->rq_wb, &rq->issue_stat);
719         blk_mq_sched_requeue_request(rq);
720
721         if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
722                 blk_mq_rq_update_state(rq, MQ_RQ_IDLE);
723                 if (q->dma_drain_size && blk_rq_bytes(rq))
724                         rq->nr_phys_segments--;
725         }
726 }
727
728 void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
729 {
730         __blk_mq_requeue_request(rq);
731
732         BUG_ON(blk_queued_rq(rq));
733         blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
734 }
735 EXPORT_SYMBOL(blk_mq_requeue_request);
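/*
 * Illustrative sketch (hypothetical error path): a driver that cannot
 * finish an already-started request, e.g. after a controller reset, can
 * hand it back for a later dispatch with
 *
 *	blk_mq_requeue_request(rq, true);
 *
 * Passing false instead defers the re-dispatch until someone calls
 * blk_mq_kick_requeue_list() or blk_mq_delay_kick_requeue_list().
 */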
736
737 static void blk_mq_requeue_work(struct work_struct *work)
738 {
739         struct request_queue *q =
740                 container_of(work, struct request_queue, requeue_work.work);
741         LIST_HEAD(rq_list);
742         struct request *rq, *next;
743
744         spin_lock_irq(&q->requeue_lock);
745         list_splice_init(&q->requeue_list, &rq_list);
746         spin_unlock_irq(&q->requeue_lock);
747
748         list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
749                 if (!(rq->rq_flags & RQF_SOFTBARRIER))
750                         continue;
751
752                 rq->rq_flags &= ~RQF_SOFTBARRIER;
753                 list_del_init(&rq->queuelist);
754                 blk_mq_sched_insert_request(rq, true, false, false, true);
755         }
756
757         while (!list_empty(&rq_list)) {
758                 rq = list_entry(rq_list.next, struct request, queuelist);
759                 list_del_init(&rq->queuelist);
760                 blk_mq_sched_insert_request(rq, false, false, false, true);
761         }
762
763         blk_mq_run_hw_queues(q, false);
764 }
765
766 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
767                                 bool kick_requeue_list)
768 {
769         struct request_queue *q = rq->q;
770         unsigned long flags;
771
772         /*
773          * We abuse this flag that is otherwise used by the I/O scheduler to
774          * request head insertion from the workqueue.
775          */
776         BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);
777
778         spin_lock_irqsave(&q->requeue_lock, flags);
779         if (at_head) {
780                 rq->rq_flags |= RQF_SOFTBARRIER;
781                 list_add(&rq->queuelist, &q->requeue_list);
782         } else {
783                 list_add_tail(&rq->queuelist, &q->requeue_list);
784         }
785         spin_unlock_irqrestore(&q->requeue_lock, flags);
786
787         if (kick_requeue_list)
788                 blk_mq_kick_requeue_list(q);
789 }
790 EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
791
792 void blk_mq_kick_requeue_list(struct request_queue *q)
793 {
794         kblockd_schedule_delayed_work(&q->requeue_work, 0);
795 }
796 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
797
798 void blk_mq_delay_kick_requeue_list(struct request_queue *q,
799                                     unsigned long msecs)
800 {
801         kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
802                                     msecs_to_jiffies(msecs));
803 }
804 EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
805
806 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
807 {
808         if (tag < tags->nr_tags) {
809                 prefetch(tags->rqs[tag]);
810                 return tags->rqs[tag];
811         }
812
813         return NULL;
814 }
815 EXPORT_SYMBOL(blk_mq_tag_to_rq);
816
817 struct blk_mq_timeout_data {
818         unsigned long next;
819         unsigned int next_set;
820         unsigned int nr_expired;
821 };
822
823 static void blk_mq_rq_timed_out(struct request *req, bool reserved)
824 {
825         const struct blk_mq_ops *ops = req->q->mq_ops;
826         enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;
827
828         /*
829          * We know that complete is set at this point. If STARTED isn't set
830          * anymore, then the request isn't active and the "timeout" should
831          * just be ignored. This can happen due to the bitflag ordering.
832          * Timeout first checks if STARTED is set, and if it is, assumes
833          * the request is active. But if we race with completion, then
834          * both flags will get cleared. So check here again, and ignore
835          * a timeout event with a request that isn't active.
836          */
837         if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags))
838                 return;
839
840         if (ops->timeout)
841                 ret = ops->timeout(req, reserved);
842
843         switch (ret) {
844         case BLK_EH_HANDLED:
845                 __blk_mq_complete_request(req);
846                 break;
847         case BLK_EH_RESET_TIMER:
848                 /*
849                  * As nothing prevents from completion happening while
850                  * ->aborted_gstate is set, this may lead to ignored
851                  * completions and further spurious timeouts.
852                  */
853                 blk_mq_rq_update_aborted_gstate(req, 0);
854                 blk_add_timer(req);
855                 blk_clear_rq_complete(req);
856                 break;
857         case BLK_EH_NOT_HANDLED:
858                 break;
859         default:
860                 printk(KERN_ERR "block: bad eh return: %d\n", ret);
861                 break;
862         }
863 }
864
865 static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
866                 struct request *rq, void *priv, bool reserved)
867 {
868         struct blk_mq_timeout_data *data = priv;
869         unsigned long gstate, deadline;
870         int start;
871
872         might_sleep();
873
874         if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
875                 return;
876
877         /* read coherent snapshots of @rq->gstate and @rq->deadline */
878         while (true) {
879                 start = read_seqcount_begin(&rq->gstate_seq);
880                 gstate = READ_ONCE(rq->gstate);
881                 deadline = rq->deadline;
882                 if (!read_seqcount_retry(&rq->gstate_seq, start))
883                         break;
884                 cond_resched();
885         }
886
887         /* if in-flight && overdue, mark for abortion */
888         if ((gstate & MQ_RQ_STATE_MASK) == MQ_RQ_IN_FLIGHT &&
889             time_after_eq(jiffies, deadline)) {
890                 blk_mq_rq_update_aborted_gstate(rq, gstate);
891                 data->nr_expired++;
892                 hctx->nr_expired++;
893         } else if (!data->next_set || time_after(data->next, deadline)) {
894                 data->next = deadline;
895                 data->next_set = 1;
896         }
897 }
898
899 static void blk_mq_terminate_expired(struct blk_mq_hw_ctx *hctx,
900                 struct request *rq, void *priv, bool reserved)
901 {
902         /*
903          * We marked @rq->aborted_gstate and waited for RCU.  If there were
904          * completions that we lost to, they would have finished and
905          * updated @rq->gstate by now; otherwise, the completion path is
906          * now guaranteed to see @rq->aborted_gstate and yield.  If
907          * @rq->aborted_gstate still matches @rq->gstate, @rq is ours.
908          */
909         if (READ_ONCE(rq->gstate) == rq->aborted_gstate &&
910             !blk_mark_rq_complete(rq))
911                 blk_mq_rq_timed_out(rq, reserved);
912 }
913
914 static void blk_mq_timeout_work(struct work_struct *work)
915 {
916         struct request_queue *q =
917                 container_of(work, struct request_queue, timeout_work);
918         struct blk_mq_timeout_data data = {
919                 .next           = 0,
920                 .next_set       = 0,
921                 .nr_expired     = 0,
922         };
923         struct blk_mq_hw_ctx *hctx;
924         int i;
925
926         /* A deadlock might occur if a request is stuck requiring a
927          * timeout at the same time a queue freeze is waiting
928          * completion, since the timeout code would not be able to
929          * acquire the queue reference here.
930          *
931          * That's why we don't use blk_queue_enter here; instead, we use
932          * percpu_ref_tryget directly, because we need to be able to
933          * obtain a reference even in the short window between the queue
934          * starting to freeze, by dropping the first reference in
935          * blk_freeze_queue_start, and the moment the last request is
936          * consumed, marked by the instant q_usage_counter reaches
937          * zero.
938          */
939         if (!percpu_ref_tryget(&q->q_usage_counter))
940                 return;
941
942         /* scan for the expired ones and set their ->aborted_gstate */
943         blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data);
944
945         if (data.nr_expired) {
946                 bool has_rcu = false;
947
948                 /*
949                  * Wait till everyone sees ->aborted_gstate.  The
950                  * sequential waits for SRCUs aren't ideal.  If this ever
951                  * becomes a problem, we can add per-hw_ctx rcu_head and
952                  * wait in parallel.
953                  */
954                 queue_for_each_hw_ctx(q, hctx, i) {
955                         if (!hctx->nr_expired)
956                                 continue;
957
958                         if (!(hctx->flags & BLK_MQ_F_BLOCKING))
959                                 has_rcu = true;
960                         else
961                                 synchronize_srcu(hctx->queue_rq_srcu);
962
963                         hctx->nr_expired = 0;
964                 }
965                 if (has_rcu)
966                         synchronize_rcu();
967
968                 /* terminate the ones we won */
969                 blk_mq_queue_tag_busy_iter(q, blk_mq_terminate_expired, NULL);
970         }
971
972         if (data.next_set) {
973                 data.next = blk_rq_timeout(round_jiffies_up(data.next));
974                 mod_timer(&q->timeout, data.next);
975         } else {
976                 queue_for_each_hw_ctx(q, hctx, i) {
977                         /* the hctx may be unmapped, so check it here */
978                         if (blk_mq_hw_queue_mapped(hctx))
979                                 blk_mq_tag_idle(hctx);
980                 }
981         }
982         blk_queue_exit(q);
983 }
984
985 struct flush_busy_ctx_data {
986         struct blk_mq_hw_ctx *hctx;
987         struct list_head *list;
988 };
989
990 static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
991 {
992         struct flush_busy_ctx_data *flush_data = data;
993         struct blk_mq_hw_ctx *hctx = flush_data->hctx;
994         struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
995
996         sbitmap_clear_bit(sb, bitnr);
997         spin_lock(&ctx->lock);
998         list_splice_tail_init(&ctx->rq_list, flush_data->list);
999         spin_unlock(&ctx->lock);
1000         return true;
1001 }
1002
1003 /*
1004  * Process software queues that have been marked busy, splicing them
1005  * to the for-dispatch list.
1006  */
1007 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
1008 {
1009         struct flush_busy_ctx_data data = {
1010                 .hctx = hctx,
1011                 .list = list,
1012         };
1013
1014         sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
1015 }
1016 EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);
1017
1018 struct dispatch_rq_data {
1019         struct blk_mq_hw_ctx *hctx;
1020         struct request *rq;
1021 };
1022
1023 static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr,
1024                 void *data)
1025 {
1026         struct dispatch_rq_data *dispatch_data = data;
1027         struct blk_mq_hw_ctx *hctx = dispatch_data->hctx;
1028         struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
1029
1030         spin_lock(&ctx->lock);
1031         if (unlikely(!list_empty(&ctx->rq_list))) {
1032                 dispatch_data->rq = list_entry_rq(ctx->rq_list.next);
1033                 list_del_init(&dispatch_data->rq->queuelist);
1034                 if (list_empty(&ctx->rq_list))
1035                         sbitmap_clear_bit(sb, bitnr);
1036         }
1037         spin_unlock(&ctx->lock);
1038
1039         return !dispatch_data->rq;
1040 }
1041
1042 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
1043                                         struct blk_mq_ctx *start)
1044 {
1045         unsigned off = start ? start->index_hw : 0;
1046         struct dispatch_rq_data data = {
1047                 .hctx = hctx,
1048                 .rq   = NULL,
1049         };
1050
1051         __sbitmap_for_each_set(&hctx->ctx_map, off,
1052                                dispatch_rq_from_ctx, &data);
1053
1054         return data.rq;
1055 }
1056
1057 static inline unsigned int queued_to_index(unsigned int queued)
1058 {
1059         if (!queued)
1060                 return 0;
1061
1062         return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
1063 }
1064
1065 bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
1066                            bool wait)
1067 {
1068         struct blk_mq_alloc_data data = {
1069                 .q = rq->q,
1070                 .hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu),
1071                 .flags = wait ? 0 : BLK_MQ_REQ_NOWAIT,
1072         };
1073
1074         might_sleep_if(wait);
1075
1076         if (rq->tag != -1)
1077                 goto done;
1078
1079         if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag))
1080                 data.flags |= BLK_MQ_REQ_RESERVED;
1081
1082         rq->tag = blk_mq_get_tag(&data);
1083         if (rq->tag >= 0) {
1084                 if (blk_mq_tag_busy(data.hctx)) {
1085                         rq->rq_flags |= RQF_MQ_INFLIGHT;
1086                         atomic_inc(&data.hctx->nr_active);
1087                 }
1088                 data.hctx->tags->rqs[rq->tag] = rq;
1089         }
1090
1091 done:
1092         if (hctx)
1093                 *hctx = data.hctx;
1094         return rq->tag != -1;
1095 }
1096
1097 static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
1098                                 int flags, void *key)
1099 {
1100         struct blk_mq_hw_ctx *hctx;
1101
1102         hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
1103
1104         list_del_init(&wait->entry);
1105         blk_mq_run_hw_queue(hctx, true);
1106         return 1;
1107 }
1108
1109 /*
1110  * Mark us waiting for a tag. For shared tags, this involves hooking us into
1111  * the tag wakeups. For non-shared tags, we can simply mark us needing a
1112  * restart. For both cases, take care to check the condition again after
1113  * marking us as waiting.
1114  */
1115 static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx **hctx,
1116                                  struct request *rq)
1117 {
1118         struct blk_mq_hw_ctx *this_hctx = *hctx;
1119         bool shared_tags = (this_hctx->flags & BLK_MQ_F_TAG_SHARED) != 0;
1120         struct sbq_wait_state *ws;
1121         wait_queue_entry_t *wait;
1122         bool ret;
1123
1124         if (!shared_tags) {
1125                 if (!test_bit(BLK_MQ_S_SCHED_RESTART, &this_hctx->state))
1126                         set_bit(BLK_MQ_S_SCHED_RESTART, &this_hctx->state);
1127         } else {
1128                 wait = &this_hctx->dispatch_wait;
1129                 if (!list_empty_careful(&wait->entry))
1130                         return false;
1131
1132                 spin_lock(&this_hctx->lock);
1133                 if (!list_empty(&wait->entry)) {
1134                         spin_unlock(&this_hctx->lock);
1135                         return false;
1136                 }
1137
1138                 ws = bt_wait_ptr(&this_hctx->tags->bitmap_tags, this_hctx);
1139                 add_wait_queue(&ws->wait, wait);
1140         }
1141
1142         /*
1143          * It's possible that a tag was freed in the window between the
1144          * allocation failure and adding the hardware queue to the wait
1145          * queue.
1146          */
1147         ret = blk_mq_get_driver_tag(rq, hctx, false);
1148
1149         if (!shared_tags) {
1150                 /*
1151                  * Don't clear RESTART here, someone else could have set it.
1152                  * At most this will cost an extra queue run.
1153                  */
1154                 return ret;
1155         } else {
1156                 if (!ret) {
1157                         spin_unlock(&this_hctx->lock);
1158                         return false;
1159                 }
1160
1161                 /*
1162                  * We got a tag, remove ourselves from the wait queue to ensure
1163                  * someone else gets the wakeup.
1164                  */
1165                 spin_lock_irq(&ws->wait.lock);
1166                 list_del_init(&wait->entry);
1167                 spin_unlock_irq(&ws->wait.lock);
1168                 spin_unlock(&this_hctx->lock);
1169                 return true;
1170         }
1171 }
1172
1173 bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
1174                              bool got_budget)
1175 {
1176         struct blk_mq_hw_ctx *hctx;
1177         struct request *rq, *nxt;
1178         bool no_tag = false;
1179         int errors, queued;
1180
1181         if (list_empty(list))
1182                 return false;
1183
1184         WARN_ON(!list_is_singular(list) && got_budget);
1185
1186         /*
1187          * Now process all the entries, sending them to the driver.
1188          */
1189         errors = queued = 0;
1190         do {
1191                 struct blk_mq_queue_data bd;
1192                 blk_status_t ret;
1193
1194                 rq = list_first_entry(list, struct request, queuelist);
1195                 if (!blk_mq_get_driver_tag(rq, &hctx, false)) {
1196                         /*
1197                          * The initial allocation attempt failed, so we need to
1198                          * rerun the hardware queue when a tag is freed. The
1199                          * waitqueue takes care of that. If the queue is run
1200                          * before we add this entry back on the dispatch list,
1201                          * we'll re-run it below.
1202                          */
1203                         if (!blk_mq_mark_tag_wait(&hctx, rq)) {
1204                                 if (got_budget)
1205                                         blk_mq_put_dispatch_budget(hctx);
1206                                 /*
1207                                  * For non-shared tags, the RESTART check
1208                                  * will suffice.
1209                                  */
1210                                 if (hctx->flags & BLK_MQ_F_TAG_SHARED)
1211                                         no_tag = true;
1212                                 break;
1213                         }
1214                 }
1215
1216                 if (!got_budget && !blk_mq_get_dispatch_budget(hctx)) {
1217                         blk_mq_put_driver_tag(rq);
1218                         break;
1219                 }
1220
1221                 list_del_init(&rq->queuelist);
1222
1223                 bd.rq = rq;
1224
1225                 /*
1226                  * Flag last if we have no more requests, or if we have more
1227                  * but can't assign a driver tag to it.
1228                  */
1229                 if (list_empty(list))
1230                         bd.last = true;
1231                 else {
1232                         nxt = list_first_entry(list, struct request, queuelist);
1233                         bd.last = !blk_mq_get_driver_tag(nxt, NULL, false);
1234                 }
1235
1236                 ret = q->mq_ops->queue_rq(hctx, &bd);
1237                 if (ret == BLK_STS_RESOURCE) {
1238                         /*
1239                          * If an I/O scheduler has been configured and we got a
1240                          * driver tag for the next request already, free it
1241                          * again.
1242                          */
1243                         if (!list_empty(list)) {
1244                                 nxt = list_first_entry(list, struct request, queuelist);
1245                                 blk_mq_put_driver_tag(nxt);
1246                         }
1247                         list_add(&rq->queuelist, list);
1248                         __blk_mq_requeue_request(rq);
1249                         break;
1250                 }
1251
1252                 if (unlikely(ret != BLK_STS_OK)) {
1253                         errors++;
1254                         blk_mq_end_request(rq, BLK_STS_IOERR);
1255                         continue;
1256                 }
1257
1258                 queued++;
1259         } while (!list_empty(list));
1260
1261         hctx->dispatched[queued_to_index(queued)]++;
1262
1263         /*
1264          * Any items that need requeuing? Stuff them into hctx->dispatch,
1265          * that is where we will continue on next queue run.
1266          */
1267         if (!list_empty(list)) {
1268                 spin_lock(&hctx->lock);
1269                 list_splice_init(list, &hctx->dispatch);
1270                 spin_unlock(&hctx->lock);
1271
1272                 /*
1273                  * If SCHED_RESTART was set by the caller of this function and
1274                  * it is no longer set that means that it was cleared by another
1275                  * thread and hence that a queue rerun is needed.
1276                  *
1277                  * If 'no_tag' is set, that means that we failed getting
1278                  * a driver tag with an I/O scheduler attached. If our dispatch
1279                  * waitqueue is no longer active, ensure that we run the queue
1280                  * AFTER adding our entries back to the list.
1281                  *
1282                  * If no I/O scheduler has been configured it is possible that
1283                  * the hardware queue got stopped and restarted before requests
1284                  * were pushed back onto the dispatch list. Rerun the queue to
1285                  * avoid starvation. Notes:
1286                  * - blk_mq_run_hw_queue() checks whether or not a queue has
1287                  *   been stopped before rerunning a queue.
1288                  * - Some but not all block drivers stop a queue before
1289                  *   returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
1290                  *   and dm-rq.
1291                  */
1292                 if (!blk_mq_sched_needs_restart(hctx) ||
1293                     (no_tag && list_empty_careful(&hctx->dispatch_wait.entry)))
1294                         blk_mq_run_hw_queue(hctx, true);
1295         }
1296
1297         return (queued + errors) != 0;
1298 }
1299
1300 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
1301 {
1302         int srcu_idx;
1303
1304         /*
1305          * We should be running this queue from one of the CPUs that
1306          * are mapped to it.
1307          */
1308         WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
1309                 cpu_online(hctx->next_cpu));
1310
1311         /*
1312          * We can't run the queue inline with ints disabled. Ensure that
1313          * we catch bad users of this early.
1314          */
1315         WARN_ON_ONCE(in_interrupt());
1316
1317         might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
1318
1319         hctx_lock(hctx, &srcu_idx);
1320         blk_mq_sched_dispatch_requests(hctx);
1321         hctx_unlock(hctx, srcu_idx);
1322 }
1323
1324 /*
1325  * It'd be great if the workqueue API had a way to pass
1326  * in a mask and had some smarts for more clever placement.
1327  * For now we just round-robin here, switching for every
1328  * BLK_MQ_CPU_WORK_BATCH queued items.
1329  */
1330 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
1331 {
1332         if (hctx->queue->nr_hw_queues == 1)
1333                 return WORK_CPU_UNBOUND;
1334
1335         if (--hctx->next_cpu_batch <= 0) {
1336                 int next_cpu;
1337
1338                 next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
1339                 if (next_cpu >= nr_cpu_ids)
1340                         next_cpu = cpumask_first(hctx->cpumask);
1341
1342                 hctx->next_cpu = next_cpu;
1343                 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
1344         }
1345
1346         return hctx->next_cpu;
1347 }
1348
1349 static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
1350                                         unsigned long msecs)
1351 {
1352         if (WARN_ON_ONCE(!blk_mq_hw_queue_mapped(hctx)))
1353                 return;
1354
1355         if (unlikely(blk_mq_hctx_stopped(hctx)))
1356                 return;
1357
1358         if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
1359                 int cpu = get_cpu();
1360                 if (cpumask_test_cpu(cpu, hctx->cpumask)) {
1361                         __blk_mq_run_hw_queue(hctx);
1362                         put_cpu();
1363                         return;
1364                 }
1365
1366                 put_cpu();
1367         }
1368
1369         kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
1370                                          &hctx->run_work,
1371                                          msecs_to_jiffies(msecs));
1372 }
1373
1374 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
1375 {
1376         __blk_mq_delay_run_hw_queue(hctx, true, msecs);
1377 }
1378 EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
1379
1380 bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
1381 {
1382         int srcu_idx;
1383         bool need_run;
1384
1385         /*
1386          * When the queue is quiesced, we may be switching the io scheduler,
1387          * updating nr_hw_queues, or doing other things, and we can't run the
1388          * queue any more; even __blk_mq_hctx_has_pending() can't be called safely.
1389          *
1390          * The queue will be rerun in blk_mq_unquiesce_queue() if it is
1391          * quiesced.
1392          */
1393         hctx_lock(hctx, &srcu_idx);
1394         need_run = !blk_queue_quiesced(hctx->queue) &&
1395                 blk_mq_hctx_has_pending(hctx);
1396         hctx_unlock(hctx, srcu_idx);
1397
1398         if (need_run) {
1399                 __blk_mq_delay_run_hw_queue(hctx, async, 0);
1400                 return true;
1401         }
1402
1403         return false;
1404 }
1405 EXPORT_SYMBOL(blk_mq_run_hw_queue);
1406
1407 void blk_mq_run_hw_queues(struct request_queue *q, bool async)
1408 {
1409         struct blk_mq_hw_ctx *hctx;
1410         int i;
1411
1412         queue_for_each_hw_ctx(q, hctx, i) {
1413                 if (blk_mq_hctx_stopped(hctx))
1414                         continue;
1415
1416                 blk_mq_run_hw_queue(hctx, async);
1417         }
1418 }
1419 EXPORT_SYMBOL(blk_mq_run_hw_queues);
1420
1421 /**
1422  * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped
1423  * @q: request queue.
1424  *
1425  * The caller is responsible for serializing this function against
1426  * blk_mq_{start,stop}_hw_queue().
1427  */
1428 bool blk_mq_queue_stopped(struct request_queue *q)
1429 {
1430         struct blk_mq_hw_ctx *hctx;
1431         int i;
1432
1433         queue_for_each_hw_ctx(q, hctx, i)
1434                 if (blk_mq_hctx_stopped(hctx))
1435                         return true;
1436
1437         return false;
1438 }
1439 EXPORT_SYMBOL(blk_mq_queue_stopped);
1440
1441 /*
1442  * This function is often used by a driver to pause .queue_rq() when
1443  * there aren't enough resources or some condition isn't satisfied, in
1444  * which case BLK_STS_RESOURCE is usually returned.
1445  *
1446  * We do not guarantee that dispatch can be drained or blocked
1447  * after blk_mq_stop_hw_queue() returns. Please use
1448  * blk_mq_quiesce_queue() for that requirement.
1449  */
1450 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
1451 {
1452         cancel_delayed_work(&hctx->run_work);
1453
1454         set_bit(BLK_MQ_S_STOPPED, &hctx->state);
1455 }
1456 EXPORT_SYMBOL(blk_mq_stop_hw_queue);
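/*
 * Illustrative sketch (hypothetical driver, foo_hw_has_room() is made
 * up): a ->queue_rq() that runs out of device resources can stop the
 * queue and restart it from its completion path once resources free up:
 *
 *	in ->queue_rq():
 *		if (!foo_hw_has_room(hctx->driver_data)) {
 *			blk_mq_stop_hw_queue(hctx);
 *			return BLK_STS_RESOURCE;
 *		}
 *
 *	in the completion/IRQ path:
 *		blk_mq_start_stopped_hw_queues(q, true);
 */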
1457
1458 /*
1459  * This function is often used by a driver to pause .queue_rq() when
1460  * there aren't enough resources or some condition isn't satisfied, in
1461  * which case BLK_STS_RESOURCE is usually returned.
1462  *
1463  * We do not guarantee that dispatch can be drained or blocked
1464  * after blk_mq_stop_hw_queues() returns. Please use
1465  * blk_mq_quiesce_queue() for that requirement.
1466  */
1467 void blk_mq_stop_hw_queues(struct request_queue *q)
1468 {
1469         struct blk_mq_hw_ctx *hctx;
1470         int i;
1471
1472         queue_for_each_hw_ctx(q, hctx, i)
1473                 blk_mq_stop_hw_queue(hctx);
1474 }
1475 EXPORT_SYMBOL(blk_mq_stop_hw_queues);
1476
1477 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
1478 {
1479         clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1480
1481         blk_mq_run_hw_queue(hctx, false);
1482 }
1483 EXPORT_SYMBOL(blk_mq_start_hw_queue);
1484
1485 void blk_mq_start_hw_queues(struct request_queue *q)
1486 {
1487         struct blk_mq_hw_ctx *hctx;
1488         int i;
1489
1490         queue_for_each_hw_ctx(q, hctx, i)
1491                 blk_mq_start_hw_queue(hctx);
1492 }
1493 EXPORT_SYMBOL(blk_mq_start_hw_queues);
1494
1495 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
1496 {
1497         if (!blk_mq_hctx_stopped(hctx))
1498                 return;
1499
1500         clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1501         blk_mq_run_hw_queue(hctx, async);
1502 }
1503 EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);
1504
1505 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
1506 {
1507         struct blk_mq_hw_ctx *hctx;
1508         int i;
1509
1510         queue_for_each_hw_ctx(q, hctx, i)
1511                 blk_mq_start_stopped_hw_queue(hctx, async);
1512 }
1513 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
1514
1515 static void blk_mq_run_work_fn(struct work_struct *work)
1516 {
1517         struct blk_mq_hw_ctx *hctx;
1518
1519         hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
1520
1521         /*
1522          * If we are stopped, don't run the queue. The exception is if
1523          * BLK_MQ_S_START_ON_RUN is set. For that case, we auto-clear
1524          * the STOPPED bit and run it.
1525          */
1526         if (test_bit(BLK_MQ_S_STOPPED, &hctx->state)) {
1527                 if (!test_bit(BLK_MQ_S_START_ON_RUN, &hctx->state))
1528                         return;
1529
1530                 clear_bit(BLK_MQ_S_START_ON_RUN, &hctx->state);
1531                 clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1532         }
1533
1534         __blk_mq_run_hw_queue(hctx);
1535 }
1536
1537
1538 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
1539 {
1540         if (WARN_ON_ONCE(!blk_mq_hw_queue_mapped(hctx)))
1541                 return;
1542
1543         /*
1544          * Stop the hw queue, then modify the currently delayed work.
1545          * This should prevent us from running the queue prematurely.
1546          * Mark the queue as auto-clearing STOPPED when it runs.
1547          */
1548         blk_mq_stop_hw_queue(hctx);
1549         set_bit(BLK_MQ_S_START_ON_RUN, &hctx->state);
1550         kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
1551                                         &hctx->run_work,
1552                                         msecs_to_jiffies(msecs));
1553 }
1554 EXPORT_SYMBOL(blk_mq_delay_queue);
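
/*
 * Illustrative sketch, not part of this file: a ->queue_rq() handler that
 * hits a transient shortage can ask blk-mq to retry after a delay instead
 * of restarting the queue itself later (MY_RETRY_DELAY_MS is a
 * hypothetical constant, in milliseconds):
 *
 *	if (!my_dev_has_free_slot(dev)) {
 *		blk_mq_delay_queue(hctx, MY_RETRY_DELAY_MS);
 *		return BLK_STS_RESOURCE;
 *	}
 *
 * The hw queue is stopped and marked BLK_MQ_S_START_ON_RUN, so the delayed
 * run work clears the STOPPED bit and dispatches again after the delay.
 */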
1555
1556 static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
1557                                             struct request *rq,
1558                                             bool at_head)
1559 {
1560         struct blk_mq_ctx *ctx = rq->mq_ctx;
1561
1562         lockdep_assert_held(&ctx->lock);
1563
1564         trace_block_rq_insert(hctx->queue, rq);
1565
1566         if (at_head)
1567                 list_add(&rq->queuelist, &ctx->rq_list);
1568         else
1569                 list_add_tail(&rq->queuelist, &ctx->rq_list);
1570 }
1571
1572 void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
1573                              bool at_head)
1574 {
1575         struct blk_mq_ctx *ctx = rq->mq_ctx;
1576
1577         lockdep_assert_held(&ctx->lock);
1578
1579         __blk_mq_insert_req_list(hctx, rq, at_head);
1580         blk_mq_hctx_mark_pending(hctx, ctx);
1581 }
1582
1583 /*
1584  * Should only be used carefully, when the caller knows we want to
1585  * bypass a potential IO scheduler on the target device.
1586  */
1587 void blk_mq_request_bypass_insert(struct request *rq, bool run_queue)
1588 {
1589         struct blk_mq_ctx *ctx = rq->mq_ctx;
1590         struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, ctx->cpu);
1591
1592         spin_lock(&hctx->lock);
1593         list_add_tail(&rq->queuelist, &hctx->dispatch);
1594         spin_unlock(&hctx->lock);
1595
1596         if (run_queue)
1597                 blk_mq_run_hw_queue(hctx, false);
1598 }
1599
1600 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
1601                             struct list_head *list)
1602
1603 {
1604         /*
1605          * Preemption doesn't flush the plug list, so it's possible that
1606          * ctx->cpu is offline now.
1607          */
1608         spin_lock(&ctx->lock);
1609         while (!list_empty(list)) {
1610                 struct request *rq;
1611
1612                 rq = list_first_entry(list, struct request, queuelist);
1613                 BUG_ON(rq->mq_ctx != ctx);
1614                 list_del_init(&rq->queuelist);
1615                 __blk_mq_insert_req_list(hctx, rq, false);
1616         }
1617         blk_mq_hctx_mark_pending(hctx, ctx);
1618         spin_unlock(&ctx->lock);
1619 }
1620
1621 static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
1622 {
1623         struct request *rqa = container_of(a, struct request, queuelist);
1624         struct request *rqb = container_of(b, struct request, queuelist);
1625
1626         return !(rqa->mq_ctx < rqb->mq_ctx ||
1627                  (rqa->mq_ctx == rqb->mq_ctx &&
1628                   blk_rq_pos(rqa) < blk_rq_pos(rqb)));
1629 }
1630
1631 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1632 {
1633         struct blk_mq_ctx *this_ctx;
1634         struct request_queue *this_q;
1635         struct request *rq;
1636         LIST_HEAD(list);
1637         LIST_HEAD(ctx_list);
1638         unsigned int depth;
1639
1640         list_splice_init(&plug->mq_list, &list);
1641
1642         list_sort(NULL, &list, plug_ctx_cmp);
1643
1644         this_q = NULL;
1645         this_ctx = NULL;
1646         depth = 0;
1647
1648         while (!list_empty(&list)) {
1649                 rq = list_entry_rq(list.next);
1650                 list_del_init(&rq->queuelist);
1651                 BUG_ON(!rq->q);
1652                 if (rq->mq_ctx != this_ctx) {
1653                         if (this_ctx) {
1654                                 trace_block_unplug(this_q, depth, from_schedule);
1655                                 blk_mq_sched_insert_requests(this_q, this_ctx,
1656                                                                 &ctx_list,
1657                                                                 from_schedule);
1658                         }
1659
1660                         this_ctx = rq->mq_ctx;
1661                         this_q = rq->q;
1662                         depth = 0;
1663                 }
1664
1665                 depth++;
1666                 list_add_tail(&rq->queuelist, &ctx_list);
1667         }
1668
1669         /*
1670          * If 'this_ctx' is set, we know we have entries to complete
1671          * on 'ctx_list'. Do those.
1672          */
1673         if (this_ctx) {
1674                 trace_block_unplug(this_q, depth, from_schedule);
1675                 blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list,
1676                                                 from_schedule);
1677         }
1678 }
1679
1680 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
1681 {
1682         blk_init_request_from_bio(rq, bio);
1683
1684         blk_rq_set_rl(rq, blk_get_rl(rq->q, bio));
1685
1686         blk_account_io_start(rq, true);
1687 }
1688
1689 static inline void blk_mq_queue_io(struct blk_mq_hw_ctx *hctx,
1690                                    struct blk_mq_ctx *ctx,
1691                                    struct request *rq)
1692 {
1693         spin_lock(&ctx->lock);
1694         __blk_mq_insert_request(hctx, rq, false);
1695         spin_unlock(&ctx->lock);
1696 }
1697
1698 static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
1699 {
1700         if (rq->tag != -1)
1701                 return blk_tag_to_qc_t(rq->tag, hctx->queue_num, false);
1702
1703         return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
1704 }
1705
1706 static void __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
1707                                         struct request *rq,
1708                                         blk_qc_t *cookie)
1709 {
1710         struct request_queue *q = rq->q;
1711         struct blk_mq_queue_data bd = {
1712                 .rq = rq,
1713                 .last = true,
1714         };
1715         blk_qc_t new_cookie;
1716         blk_status_t ret;
1717         bool run_queue = true;
1718
1719         /* RCU or SRCU read lock is needed before checking quiesced flag */
1720         if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
1721                 run_queue = false;
1722                 goto insert;
1723         }
1724
1725         if (q->elevator)
1726                 goto insert;
1727
1728         if (!blk_mq_get_driver_tag(rq, NULL, false))
1729                 goto insert;
1730
1731         if (!blk_mq_get_dispatch_budget(hctx)) {
1732                 blk_mq_put_driver_tag(rq);
1733                 goto insert;
1734         }
1735
1736         new_cookie = request_to_qc_t(hctx, rq);
1737
1738         /*
1739          * If the driver accepts the request, we are done. If it is
1740          * busy (BLK_STS_RESOURCE), requeue and insert it as we previously
1741          * would have done. Any other error ends the request here.
1742          */
1743         ret = q->mq_ops->queue_rq(hctx, &bd);
1744         switch (ret) {
1745         case BLK_STS_OK:
1746                 *cookie = new_cookie;
1747                 return;
1748         case BLK_STS_RESOURCE:
1749                 __blk_mq_requeue_request(rq);
1750                 goto insert;
1751         default:
1752                 *cookie = BLK_QC_T_NONE;
1753                 blk_mq_end_request(rq, ret);
1754                 return;
1755         }
1756
1757 insert:
1758         blk_mq_sched_insert_request(rq, false, run_queue, false,
1759                                         hctx->flags & BLK_MQ_F_BLOCKING);
1760 }
1761
1762 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
1763                 struct request *rq, blk_qc_t *cookie)
1764 {
1765         int srcu_idx;
1766
1767         might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
1768
1769         hctx_lock(hctx, &srcu_idx);
1770         __blk_mq_try_issue_directly(hctx, rq, cookie);
1771         hctx_unlock(hctx, srcu_idx);
1772 }
1773
1774 static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
1775 {
1776         const int is_sync = op_is_sync(bio->bi_opf);
1777         const int is_flush_fua = op_is_flush(bio->bi_opf);
1778         struct blk_mq_alloc_data data = { .flags = 0 };
1779         struct request *rq;
1780         unsigned int request_count = 0;
1781         struct blk_plug *plug;
1782         struct request *same_queue_rq = NULL;
1783         blk_qc_t cookie;
1784         unsigned int wb_acct;
1785
1786         blk_queue_bounce(q, &bio);
1787
1788         blk_queue_split(q, &bio);
1789
1790         if (!bio_integrity_prep(bio))
1791                 return BLK_QC_T_NONE;
1792
1793         if (!is_flush_fua && !blk_queue_nomerges(q) &&
1794             blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
1795                 return BLK_QC_T_NONE;
1796
1797         if (blk_mq_sched_bio_merge(q, bio))
1798                 return BLK_QC_T_NONE;
1799
1800         wb_acct = wbt_wait(q->rq_wb, bio, NULL);
1801
1802         trace_block_getrq(q, bio, bio->bi_opf);
1803
1804         rq = blk_mq_get_request(q, bio, bio->bi_opf, &data);
1805         if (unlikely(!rq)) {
1806                 __wbt_done(q->rq_wb, wb_acct);
1807                 if (bio->bi_opf & REQ_NOWAIT)
1808                         bio_wouldblock_error(bio);
1809                 return BLK_QC_T_NONE;
1810         }
1811
1812         wbt_track(&rq->issue_stat, wb_acct);
1813
1814         cookie = request_to_qc_t(data.hctx, rq);
1815
1816         plug = current->plug;
1817         if (unlikely(is_flush_fua)) {
1818                 blk_mq_put_ctx(data.ctx);
1819                 blk_mq_bio_to_request(rq, bio);
1820
1821                 /* bypass scheduler for flush rq */
1822                 blk_insert_flush(rq);
1823                 blk_mq_run_hw_queue(data.hctx, true);
1824         } else if (plug && q->nr_hw_queues == 1) {
1825                 struct request *last = NULL;
1826
1827                 blk_mq_put_ctx(data.ctx);
1828                 blk_mq_bio_to_request(rq, bio);
1829
1830                 /*
1831                  * @request_count may become stale because we may have been
1832                  * scheduled out, so check the list again.
1833                  */
1834                 if (list_empty(&plug->mq_list))
1835                         request_count = 0;
1836                 else if (blk_queue_nomerges(q))
1837                         request_count = blk_plug_queued_count(q);
1838
1839                 if (!request_count)
1840                         trace_block_plug(q);
1841                 else
1842                         last = list_entry_rq(plug->mq_list.prev);
1843
1844                 if (request_count >= BLK_MAX_REQUEST_COUNT || (last &&
1845                     blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
1846                         blk_flush_plug_list(plug, false);
1847                         trace_block_plug(q);
1848                 }
1849
1850                 list_add_tail(&rq->queuelist, &plug->mq_list);
1851         } else if (plug && !blk_queue_nomerges(q)) {
1852                 blk_mq_bio_to_request(rq, bio);
1853
1854                 /*
1855                  * We do limited plugging. If the bio can be merged, do that.
1856                  * Otherwise the existing request in the plug list will be
1857                  * issued, so the plug list will have at most one request.
1858                  * The plug list might get flushed before this. If that happens,
1859                  * the plug list is empty, and same_queue_rq is invalid.
1860                  */
1861                 if (list_empty(&plug->mq_list))
1862                         same_queue_rq = NULL;
1863                 if (same_queue_rq)
1864                         list_del_init(&same_queue_rq->queuelist);
1865                 list_add_tail(&rq->queuelist, &plug->mq_list);
1866
1867                 blk_mq_put_ctx(data.ctx);
1868
1869                 if (same_queue_rq) {
1870                         data.hctx = blk_mq_map_queue(q,
1871                                         same_queue_rq->mq_ctx->cpu);
1872                         blk_mq_try_issue_directly(data.hctx, same_queue_rq,
1873                                         &cookie);
1874                 }
1875         } else if (q->nr_hw_queues > 1 && is_sync) {
1876                 blk_mq_put_ctx(data.ctx);
1877                 blk_mq_bio_to_request(rq, bio);
1878                 blk_mq_try_issue_directly(data.hctx, rq, &cookie);
1879         } else if (q->elevator) {
1880                 blk_mq_put_ctx(data.ctx);
1881                 blk_mq_bio_to_request(rq, bio);
1882                 blk_mq_sched_insert_request(rq, false, true, true, true);
1883         } else {
1884                 blk_mq_put_ctx(data.ctx);
1885                 blk_mq_bio_to_request(rq, bio);
1886                 blk_mq_queue_io(data.hctx, data.ctx, rq);
1887                 blk_mq_run_hw_queue(data.hctx, true);
1888         }
1889
1890         return cookie;
1891 }
1892
1893 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
1894                      unsigned int hctx_idx)
1895 {
1896         struct page *page;
1897
1898         if (tags->rqs && set->ops->exit_request) {
1899                 int i;
1900
1901                 for (i = 0; i < tags->nr_tags; i++) {
1902                         struct request *rq = tags->static_rqs[i];
1903
1904                         if (!rq)
1905                                 continue;
1906                         set->ops->exit_request(set, rq, hctx_idx);
1907                         tags->static_rqs[i] = NULL;
1908                 }
1909         }
1910
1911         while (!list_empty(&tags->page_list)) {
1912                 page = list_first_entry(&tags->page_list, struct page, lru);
1913                 list_del_init(&page->lru);
1914                 /*
1915                  * Remove the kmemleak object previously allocated in
1916                  * blk_mq_alloc_rqs().
1917                  */
1918                 kmemleak_free(page_address(page));
1919                 __free_pages(page, page->private);
1920         }
1921 }
1922
1923 void blk_mq_free_rq_map(struct blk_mq_tags *tags)
1924 {
1925         kfree(tags->rqs);
1926         tags->rqs = NULL;
1927         kfree(tags->static_rqs);
1928         tags->static_rqs = NULL;
1929
1930         blk_mq_free_tags(tags);
1931 }
1932
1933 struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
1934                                         unsigned int hctx_idx,
1935                                         unsigned int nr_tags,
1936                                         unsigned int reserved_tags)
1937 {
1938         struct blk_mq_tags *tags;
1939         int node;
1940
1941         node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
1942         if (node == NUMA_NO_NODE)
1943                 node = set->numa_node;
1944
1945         tags = blk_mq_init_tags(nr_tags, reserved_tags, node,
1946                                 BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
1947         if (!tags)
1948                 return NULL;
1949
1950         tags->rqs = kzalloc_node(nr_tags * sizeof(struct request *),
1951                                  GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
1952                                  node);
1953         if (!tags->rqs) {
1954                 blk_mq_free_tags(tags);
1955                 return NULL;
1956         }
1957
1958         tags->static_rqs = kzalloc_node(nr_tags * sizeof(struct request *),
1959                                  GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
1960                                  node);
1961         if (!tags->static_rqs) {
1962                 kfree(tags->rqs);
1963                 blk_mq_free_tags(tags);
1964                 return NULL;
1965         }
1966
1967         return tags;
1968 }
1969
1970 static size_t order_to_size(unsigned int order)
1971 {
1972         return (size_t)PAGE_SIZE << order;
1973 }
1974
1975 static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
1976                                unsigned int hctx_idx, int node)
1977 {
1978         int ret;
1979
1980         if (set->ops->init_request) {
1981                 ret = set->ops->init_request(set, rq, hctx_idx, node);
1982                 if (ret)
1983                         return ret;
1984         }
1985
1986         seqcount_init(&rq->gstate_seq);
1987         u64_stats_init(&rq->aborted_gstate_sync);
1988         return 0;
1989 }
1990
1991 int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
1992                      unsigned int hctx_idx, unsigned int depth)
1993 {
1994         unsigned int i, j, entries_per_page, max_order = 4;
1995         size_t rq_size, left;
1996         int node;
1997
1998         node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
1999         if (node == NUMA_NO_NODE)
2000                 node = set->numa_node;
2001
2002         INIT_LIST_HEAD(&tags->page_list);
2003
2004         /*
2005          * rq_size is the size of the request plus driver payload, rounded
2006          * to the cacheline size
2007          */
2008         rq_size = round_up(sizeof(struct request) + set->cmd_size,
2009                                 cache_line_size());
2010         left = rq_size * depth;
2011
2012         for (i = 0; i < depth; ) {
2013                 int this_order = max_order;
2014                 struct page *page;
2015                 int to_do;
2016                 void *p;
2017
2018                 while (this_order && left < order_to_size(this_order - 1))
2019                         this_order--;
2020
2021                 do {
2022                         page = alloc_pages_node(node,
2023                                 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
2024                                 this_order);
2025                         if (page)
2026                                 break;
2027                         if (!this_order--)
2028                                 break;
2029                         if (order_to_size(this_order) < rq_size)
2030                                 break;
2031                 } while (1);
2032
2033                 if (!page)
2034                         goto fail;
2035
2036                 page->private = this_order;
2037                 list_add_tail(&page->lru, &tags->page_list);
2038
2039                 p = page_address(page);
2040                 /*
2041                  * Allow kmemleak to scan these pages as they contain pointers
2042                  * to additional allocations made via ops->init_request().
2043                  */
2044                 kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
2045                 entries_per_page = order_to_size(this_order) / rq_size;
2046                 to_do = min(entries_per_page, depth - i);
2047                 left -= to_do * rq_size;
2048                 for (j = 0; j < to_do; j++) {
2049                         struct request *rq = p;
2050
2051                         tags->static_rqs[i] = rq;
2052                         if (blk_mq_init_request(set, rq, hctx_idx, node)) {
2053                                 tags->static_rqs[i] = NULL;
2054                                 goto fail;
2055                         }
2056
2057                         p += rq_size;
2058                         i++;
2059                 }
2060         }
2061         return 0;
2062
2063 fail:
2064         blk_mq_free_rqs(set, tags, hctx_idx);
2065         return -ENOMEM;
2066 }
2067
2068 /*
2069  * 'cpu' is going away. Splice any existing rq_list entries from this
2070  * software queue to the hw queue dispatch list, and ensure that it
2071  * gets run.
2072  */
2073 static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
2074 {
2075         struct blk_mq_hw_ctx *hctx;
2076         struct blk_mq_ctx *ctx;
2077         LIST_HEAD(tmp);
2078
2079         hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
2080         ctx = __blk_mq_get_ctx(hctx->queue, cpu);
2081
2082         spin_lock(&ctx->lock);
2083         if (!list_empty(&ctx->rq_list)) {
2084                 list_splice_init(&ctx->rq_list, &tmp);
2085                 blk_mq_hctx_clear_pending(hctx, ctx);
2086         }
2087         spin_unlock(&ctx->lock);
2088
2089         if (list_empty(&tmp))
2090                 return 0;
2091
2092         spin_lock(&hctx->lock);
2093         list_splice_tail_init(&tmp, &hctx->dispatch);
2094         spin_unlock(&hctx->lock);
2095
2096         blk_mq_run_hw_queue(hctx, true);
2097         return 0;
2098 }
2099
2100 static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
2101 {
2102         cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
2103                                             &hctx->cpuhp_dead);
2104 }
2105
2106 /* hctx->ctxs will be freed in queue's release handler */
2107 static void blk_mq_exit_hctx(struct request_queue *q,
2108                 struct blk_mq_tag_set *set,
2109                 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
2110 {
2111         blk_mq_debugfs_unregister_hctx(hctx);
2112
2113         if (blk_mq_hw_queue_mapped(hctx))
2114                 blk_mq_tag_idle(hctx);
2115
2116         if (set->ops->exit_request)
2117                 set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
2118
2119         blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
2120
2121         if (set->ops->exit_hctx)
2122                 set->ops->exit_hctx(hctx, hctx_idx);
2123
2124         if (hctx->flags & BLK_MQ_F_BLOCKING)
2125                 cleanup_srcu_struct(hctx->queue_rq_srcu);
2126
2127         blk_mq_remove_cpuhp(hctx);
2128         blk_free_flush_queue(hctx->fq);
2129         sbitmap_free(&hctx->ctx_map);
2130 }
2131
2132 static void blk_mq_exit_hw_queues(struct request_queue *q,
2133                 struct blk_mq_tag_set *set, int nr_queue)
2134 {
2135         struct blk_mq_hw_ctx *hctx;
2136         unsigned int i;
2137
2138         queue_for_each_hw_ctx(q, hctx, i) {
2139                 if (i == nr_queue)
2140                         break;
2141                 blk_mq_exit_hctx(q, set, hctx, i);
2142         }
2143 }
2144
2145 static int blk_mq_init_hctx(struct request_queue *q,
2146                 struct blk_mq_tag_set *set,
2147                 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
2148 {
2149         int node;
2150
2151         node = hctx->numa_node;
2152         if (node == NUMA_NO_NODE)
2153                 node = hctx->numa_node = set->numa_node;
2154
2155         INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
2156         spin_lock_init(&hctx->lock);
2157         INIT_LIST_HEAD(&hctx->dispatch);
2158         hctx->queue = q;
2159         hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
2160
2161         cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
2162
2163         hctx->tags = set->tags[hctx_idx];
2164
2165         /*
2166          * Allocate space for all possible cpus to avoid allocation at
2167          * runtime
2168          */
2169         hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *),
2170                                         GFP_KERNEL, node);
2171         if (!hctx->ctxs)
2172                 goto unregister_cpu_notifier;
2173
2174         if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), GFP_KERNEL,
2175                               node))
2176                 goto free_ctxs;
2177
2178         hctx->nr_ctx = 0;
2179
2180         init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
2181         INIT_LIST_HEAD(&hctx->dispatch_wait.entry);
2182
2183         if (set->ops->init_hctx &&
2184             set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
2185                 goto free_bitmap;
2186
2187         if (blk_mq_sched_init_hctx(q, hctx, hctx_idx))
2188                 goto exit_hctx;
2189
2190         hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
2191         if (!hctx->fq)
2192                 goto sched_exit_hctx;
2193
2194         if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx, node))
2195                 goto free_fq;
2196
2197         if (hctx->flags & BLK_MQ_F_BLOCKING)
2198                 init_srcu_struct(hctx->queue_rq_srcu);
2199
2200         blk_mq_debugfs_register_hctx(q, hctx);
2201
2202         return 0;
2203
2204  free_fq:
2205         kfree(hctx->fq);
2206  sched_exit_hctx:
2207         blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
2208  exit_hctx:
2209         if (set->ops->exit_hctx)
2210                 set->ops->exit_hctx(hctx, hctx_idx);
2211  free_bitmap:
2212         sbitmap_free(&hctx->ctx_map);
2213  free_ctxs:
2214         kfree(hctx->ctxs);
2215  unregister_cpu_notifier:
2216         blk_mq_remove_cpuhp(hctx);
2217         return -1;
2218 }
2219
2220 static void blk_mq_init_cpu_queues(struct request_queue *q,
2221                                    unsigned int nr_hw_queues)
2222 {
2223         unsigned int i;
2224
2225         for_each_possible_cpu(i) {
2226                 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
2227                 struct blk_mq_hw_ctx *hctx;
2228
2229                 __ctx->cpu = i;
2230                 spin_lock_init(&__ctx->lock);
2231                 INIT_LIST_HEAD(&__ctx->rq_list);
2232                 __ctx->queue = q;
2233
2234                 /* If the cpu isn't present, the cpu is mapped to first hctx */
2235                 if (!cpu_present(i))
2236                         continue;
2237
2238                 hctx = blk_mq_map_queue(q, i);
2239
2240                 /*
2241                  * Set local node, IFF we have more than one hw queue. If
2242                  * not, we remain on the home node of the device
2243                  */
2244                 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
2245                         hctx->numa_node = local_memory_node(cpu_to_node(i));
2246         }
2247 }
2248
2249 static bool __blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, int hctx_idx)
2250 {
2251         int ret = 0;
2252
2253         set->tags[hctx_idx] = blk_mq_alloc_rq_map(set, hctx_idx,
2254                                         set->queue_depth, set->reserved_tags);
2255         if (!set->tags[hctx_idx])
2256                 return false;
2257
2258         ret = blk_mq_alloc_rqs(set, set->tags[hctx_idx], hctx_idx,
2259                                 set->queue_depth);
2260         if (!ret)
2261                 return true;
2262
2263         blk_mq_free_rq_map(set->tags[hctx_idx]);
2264         set->tags[hctx_idx] = NULL;
2265         return false;
2266 }
2267
2268 static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
2269                                          unsigned int hctx_idx)
2270 {
2271         if (set->tags[hctx_idx]) {
2272                 blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx);
2273                 blk_mq_free_rq_map(set->tags[hctx_idx]);
2274                 set->tags[hctx_idx] = NULL;
2275         }
2276 }
2277
2278 static void blk_mq_map_swqueue(struct request_queue *q)
2279 {
2280         unsigned int i, hctx_idx;
2281         struct blk_mq_hw_ctx *hctx;
2282         struct blk_mq_ctx *ctx;
2283         struct blk_mq_tag_set *set = q->tag_set;
2284
2285         /*
2286          * Avoid others reading an incomplete hctx->cpumask through sysfs.
2287          */
2288         mutex_lock(&q->sysfs_lock);
2289
2290         queue_for_each_hw_ctx(q, hctx, i) {
2291                 cpumask_clear(hctx->cpumask);
2292                 hctx->nr_ctx = 0;
2293         }
2294
2295         /*
2296          * Map software to hardware queues.
2297          *
2298          * If the cpu isn't present, the cpu is mapped to first hctx.
2299          */
2300         for_each_present_cpu(i) {
2301                 hctx_idx = q->mq_map[i];
2302                 /* unmapped hw queue can be remapped after CPU topo changed */
2303                 if (!set->tags[hctx_idx] &&
2304                     !__blk_mq_alloc_rq_map(set, hctx_idx)) {
2305                         /*
2306                          * If tags initialization fails for some hctx,
2307                          * that hctx won't be brought online.  In this
2308                          * case, remap the current ctx to hctx[0], which
2309                          * is guaranteed to always have tags allocated.
2310                          */
2311                         q->mq_map[i] = 0;
2312                 }
2313
2314                 ctx = per_cpu_ptr(q->queue_ctx, i);
2315                 hctx = blk_mq_map_queue(q, i);
2316
2317                 cpumask_set_cpu(i, hctx->cpumask);
2318                 ctx->index_hw = hctx->nr_ctx;
2319                 hctx->ctxs[hctx->nr_ctx++] = ctx;
2320         }
2321
2322         mutex_unlock(&q->sysfs_lock);
2323
2324         queue_for_each_hw_ctx(q, hctx, i) {
2325                 /*
2326                  * If no software queues are mapped to this hardware queue,
2327                  * disable it and free the request entries.
2328                  */
2329                 if (!hctx->nr_ctx) {
2330                         /* Never unmap queue 0.  We need it as a
2331                          * fallback in case a new remap fails to
2332                          * allocate.
2333                          */
2334                         if (i && set->tags[i])
2335                                 blk_mq_free_map_and_requests(set, i);
2336
2337                         hctx->tags = NULL;
2338                         continue;
2339                 }
2340
2341                 hctx->tags = set->tags[i];
2342                 WARN_ON(!hctx->tags);
2343
2344                 /*
2345                  * Set the map size to the number of mapped software queues.
2346                  * This is more accurate and more efficient than looping
2347                  * over all possibly mapped software queues.
2348                  */
2349                 sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);
2350
2351                 /*
2352                  * Initialize batch roundrobin counts
2353                  */
2354                 hctx->next_cpu = cpumask_first(hctx->cpumask);
2355                 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
2356         }
2357 }
2358
2359 /*
2360  * Caller needs to ensure that we're either frozen/quiesced, or that
2361  * the queue isn't live yet.
2362  */
2363 static void queue_set_hctx_shared(struct request_queue *q, bool shared)
2364 {
2365         struct blk_mq_hw_ctx *hctx;
2366         int i;
2367
2368         queue_for_each_hw_ctx(q, hctx, i) {
2369                 if (shared) {
2370                         if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
2371                                 atomic_inc(&q->shared_hctx_restart);
2372                         hctx->flags |= BLK_MQ_F_TAG_SHARED;
2373                 } else {
2374                         if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
2375                                 atomic_dec(&q->shared_hctx_restart);
2376                         hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
2377                 }
2378         }
2379 }
2380
2381 static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set,
2382                                         bool shared)
2383 {
2384         struct request_queue *q;
2385
2386         lockdep_assert_held(&set->tag_list_lock);
2387
2388         list_for_each_entry(q, &set->tag_list, tag_set_list) {
2389                 blk_mq_freeze_queue(q);
2390                 queue_set_hctx_shared(q, shared);
2391                 blk_mq_unfreeze_queue(q);
2392         }
2393 }
2394
2395 static void blk_mq_del_queue_tag_set(struct request_queue *q)
2396 {
2397         struct blk_mq_tag_set *set = q->tag_set;
2398
2399         mutex_lock(&set->tag_list_lock);
2400         list_del_rcu(&q->tag_set_list);
2401         INIT_LIST_HEAD(&q->tag_set_list);
2402         if (list_is_singular(&set->tag_list)) {
2403                 /* just transitioned to unshared */
2404                 set->flags &= ~BLK_MQ_F_TAG_SHARED;
2405                 /* update existing queue */
2406                 blk_mq_update_tag_set_depth(set, false);
2407         }
2408         mutex_unlock(&set->tag_list_lock);
2409
2410         synchronize_rcu();
2411 }
2412
2413 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
2414                                      struct request_queue *q)
2415 {
2416         q->tag_set = set;
2417
2418         mutex_lock(&set->tag_list_lock);
2419
2420         /*
2421          * Check to see if we're transitioning to shared (from 1 to 2 queues).
2422          */
2423         if (!list_empty(&set->tag_list) &&
2424             !(set->flags & BLK_MQ_F_TAG_SHARED)) {
2425                 set->flags |= BLK_MQ_F_TAG_SHARED;
2426                 /* update existing queue */
2427                 blk_mq_update_tag_set_depth(set, true);
2428         }
2429         if (set->flags & BLK_MQ_F_TAG_SHARED)
2430                 queue_set_hctx_shared(q, true);
2431         list_add_tail_rcu(&q->tag_set_list, &set->tag_list);
2432
2433         mutex_unlock(&set->tag_list_lock);
2434 }
2435
2436 /*
2437  * This is the actual release handler for mq, but we do it from the
2438  * request queue's release handler to avoid use-after-free and other
2439  * headaches: q->mq_kobj shouldn't have been introduced, but we can't
2440  * group the ctx/kctx kobjects without it.
2441  */
2442 void blk_mq_release(struct request_queue *q)
2443 {
2444         struct blk_mq_hw_ctx *hctx;
2445         unsigned int i;
2446
2447         /* hctx kobj stays in hctx */
2448         queue_for_each_hw_ctx(q, hctx, i) {
2449                 if (!hctx)
2450                         continue;
2451                 kobject_put(&hctx->kobj);
2452         }
2453
2454         q->mq_map = NULL;
2455
2456         kfree(q->queue_hw_ctx);
2457
2458         /*
2459          * release .mq_kobj and sw queue's kobject now because
2460          * both share lifetime with request queue.
2461          */
2462         blk_mq_sysfs_deinit(q);
2463
2464         free_percpu(q->queue_ctx);
2465 }
2466
2467 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
2468 {
2469         struct request_queue *uninit_q, *q;
2470
2471         uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
2472         if (!uninit_q)
2473                 return ERR_PTR(-ENOMEM);
2474
2475         q = blk_mq_init_allocated_queue(set, uninit_q);
2476         if (IS_ERR(q))
2477                 blk_cleanup_queue(uninit_q);
2478
2479         return q;
2480 }
2481 EXPORT_SYMBOL(blk_mq_init_queue);
2482
2483 static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
2484 {
2485         int hw_ctx_size = sizeof(struct blk_mq_hw_ctx);
2486
2487         BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, queue_rq_srcu),
2488                            __alignof__(struct blk_mq_hw_ctx)) !=
2489                      sizeof(struct blk_mq_hw_ctx));
2490
2491         if (tag_set->flags & BLK_MQ_F_BLOCKING)
2492                 hw_ctx_size += sizeof(struct srcu_struct);
2493
2494         return hw_ctx_size;
2495 }
2496
2497 static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
2498                                                 struct request_queue *q)
2499 {
2500         int i, j;
2501         struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
2502
2503         blk_mq_sysfs_unregister(q);
2504
2505         /* protect against switching io scheduler  */
2506         mutex_lock(&q->sysfs_lock);
2507         for (i = 0; i < set->nr_hw_queues; i++) {
2508                 int node;
2509
2510                 if (hctxs[i])
2511                         continue;
2512
2513                 node = blk_mq_hw_queue_to_node(q->mq_map, i);
2514                 hctxs[i] = kzalloc_node(blk_mq_hw_ctx_size(set),
2515                                         GFP_KERNEL, node);
2516                 if (!hctxs[i])
2517                         break;
2518
2519                 if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
2520                                                 node)) {
2521                         kfree(hctxs[i]);
2522                         hctxs[i] = NULL;
2523                         break;
2524                 }
2525
2526                 atomic_set(&hctxs[i]->nr_active, 0);
2527                 hctxs[i]->numa_node = node;
2528                 hctxs[i]->queue_num = i;
2529
2530                 if (blk_mq_init_hctx(q, set, hctxs[i], i)) {
2531                         free_cpumask_var(hctxs[i]->cpumask);
2532                         kfree(hctxs[i]);
2533                         hctxs[i] = NULL;
2534                         break;
2535                 }
2536                 blk_mq_hctx_kobj_init(hctxs[i]);
2537         }
2538         for (j = i; j < q->nr_hw_queues; j++) {
2539                 struct blk_mq_hw_ctx *hctx = hctxs[j];
2540
2541                 if (hctx) {
2542                         if (hctx->tags)
2543                                 blk_mq_free_map_and_requests(set, j);
2544                         blk_mq_exit_hctx(q, set, hctx, j);
2545                         kobject_put(&hctx->kobj);
2546                         hctxs[j] = NULL;
2547
2548                 }
2549         }
2550         q->nr_hw_queues = i;
2551         mutex_unlock(&q->sysfs_lock);
2552         blk_mq_sysfs_register(q);
2553 }
2554
2555 struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
2556                                                   struct request_queue *q)
2557 {
2558         /* mark the queue as mq asap */
2559         q->mq_ops = set->ops;
2560
2561         q->poll_cb = blk_stat_alloc_callback(blk_mq_poll_stats_fn,
2562                                              blk_mq_poll_stats_bkt,
2563                                              BLK_MQ_POLL_STATS_BKTS, q);
2564         if (!q->poll_cb)
2565                 goto err_exit;
2566
2567         q->queue_ctx = alloc_percpu(struct blk_mq_ctx);
2568         if (!q->queue_ctx)
2569                 goto err_exit;
2570
2571         /* init q->mq_kobj and sw queues' kobjects */
2572         blk_mq_sysfs_init(q);
2573
2574         q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)),
2575                                                 GFP_KERNEL, set->numa_node);
2576         if (!q->queue_hw_ctx)
2577                 goto err_percpu;
2578
2579         q->mq_map = set->mq_map;
2580
2581         blk_mq_realloc_hw_ctxs(set, q);
2582         if (!q->nr_hw_queues)
2583                 goto err_hctxs;
2584
2585         INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
2586         blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
2587
2588         q->nr_queues = nr_cpu_ids;
2589
2590         q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
2591
2592         if (!(set->flags & BLK_MQ_F_SG_MERGE))
2593                 q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE;
2594
2595         q->sg_reserved_size = INT_MAX;
2596
2597         INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
2598         INIT_LIST_HEAD(&q->requeue_list);
2599         spin_lock_init(&q->requeue_lock);
2600
2601         blk_queue_make_request(q, blk_mq_make_request);
2602         if (q->mq_ops->poll)
2603                 q->poll_fn = blk_mq_poll;
2604
2605         /*
2606          * Do this after blk_queue_make_request() overrides it...
2607          */
2608         q->nr_requests = set->queue_depth;
2609
2610         /*
2611          * Default to classic polling
2612          */
2613         q->poll_nsec = -1;
2614
2615         if (set->ops->complete)
2616                 blk_queue_softirq_done(q, set->ops->complete);
2617
2618         blk_mq_init_cpu_queues(q, set->nr_hw_queues);
2619         blk_mq_add_queue_tag_set(set, q);
2620         blk_mq_map_swqueue(q);
2621
2622         if (!(set->flags & BLK_MQ_F_NO_SCHED)) {
2623                 int ret;
2624
2625                 ret = blk_mq_sched_init(q);
2626                 if (ret)
2627                         return ERR_PTR(ret);
2628         }
2629
2630         return q;
2631
2632 err_hctxs:
2633         kfree(q->queue_hw_ctx);
2634 err_percpu:
2635         free_percpu(q->queue_ctx);
2636 err_exit:
2637         q->mq_ops = NULL;
2638         return ERR_PTR(-ENOMEM);
2639 }
2640 EXPORT_SYMBOL(blk_mq_init_allocated_queue);
2641
2642 void blk_mq_free_queue(struct request_queue *q)
2643 {
2644         struct blk_mq_tag_set   *set = q->tag_set;
2645
2646         blk_mq_del_queue_tag_set(q);
2647         blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
2648 }
2649
2650 /* Basically redo blk_mq_init_queue with queue frozen */
2651 static void blk_mq_queue_reinit(struct request_queue *q)
2652 {
2653         WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
2654
2655         blk_mq_debugfs_unregister_hctxs(q);
2656         blk_mq_sysfs_unregister(q);
2657
2658         /*
2659          * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
2660          * we should change hctx numa_node according to the new topology (this
2661          * involves freeing and re-allocating memory, worth doing?)
2662          */
2663         blk_mq_map_swqueue(q);
2664
2665         blk_mq_sysfs_register(q);
2666         blk_mq_debugfs_register_hctxs(q);
2667 }
2668
2669 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2670 {
2671         int i;
2672
2673         for (i = 0; i < set->nr_hw_queues; i++)
2674                 if (!__blk_mq_alloc_rq_map(set, i))
2675                         goto out_unwind;
2676
2677         return 0;
2678
2679 out_unwind:
2680         while (--i >= 0)
2681                 blk_mq_free_rq_map(set->tags[i]);
2682
2683         return -ENOMEM;
2684 }
2685
2686 /*
2687  * Allocate the request maps associated with this tag_set. Note that this
2688  * may reduce the depth asked for, if memory is tight. set->queue_depth
2689  * will be updated to reflect the allocated depth.
2690  */
2691 static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2692 {
2693         unsigned int depth;
2694         int err;
2695
2696         depth = set->queue_depth;
2697         do {
2698                 err = __blk_mq_alloc_rq_maps(set);
2699                 if (!err)
2700                         break;
2701
2702                 set->queue_depth >>= 1;
2703                 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
2704                         err = -ENOMEM;
2705                         break;
2706                 }
2707         } while (set->queue_depth);
2708
2709         if (!set->queue_depth || err) {
2710                 pr_err("blk-mq: failed to allocate request map\n");
2711                 return -ENOMEM;
2712         }
2713
2714         if (depth != set->queue_depth)
2715                 pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
2716                                                 depth, set->queue_depth);
2717
2718         return 0;
2719 }
2720
2721 static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
2722 {
2723         if (set->ops->map_queues) {
2724                 int cpu;
2725                 /*
2726                  * transport .map_queues is usually done in the following
2727                  * way:
2728                  *
2729                  * for (queue = 0; queue < set->nr_hw_queues; queue++) {
2730                  *      mask = get_cpu_mask(queue)
2731                  *      for_each_cpu(cpu, mask)
2732                  *              set->mq_map[cpu] = queue;
2733                  * }
2734                  *
2735                  * When we need to remap, the table has to be cleared first
2736                  * to kill stale mappings, since a CPU may end up not being
2737                  * mapped to any hw queue.
2738                  */
2739                 for_each_possible_cpu(cpu)
2740                         set->mq_map[cpu] = 0;
2741
2742                 return set->ops->map_queues(set);
2743         } else
2744                 return blk_mq_map_queues(set);
2745 }
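
/*
 * Illustrative sketch, not part of this file: a transport ->map_queues()
 * implementation following the pattern described in the comment above,
 * spreading the possible CPUs across the hardware queues (my_map_queues
 * is a hypothetical name; PCI drivers with one MSI-X vector per queue
 * would typically use blk_mq_pci_map_queues() instead):
 *
 *	static int my_map_queues(struct blk_mq_tag_set *set)
 *	{
 *		unsigned int cpu;
 *
 *		for_each_possible_cpu(cpu)
 *			set->mq_map[cpu] = cpu % set->nr_hw_queues;
 *
 *		return 0;
 *	}
 */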
2746
2747 /*
2748  * Alloc a tag set to be associated with one or more request queues.
2749  * May fail with EINVAL for various error conditions. May adjust the
2750  * requested depth down, if it is too large. In that case, the adjusted
2751  * value will be stored in set->queue_depth.
2752  */
2753 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
2754 {
2755         int ret;
2756
2757         BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
2758
2759         if (!set->nr_hw_queues)
2760                 return -EINVAL;
2761         if (!set->queue_depth)
2762                 return -EINVAL;
2763         if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
2764                 return -EINVAL;
2765
2766         if (!set->ops->queue_rq)
2767                 return -EINVAL;
2768
2769         if (!set->ops->get_budget ^ !set->ops->put_budget)
2770                 return -EINVAL;
2771
2772         if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
2773                 pr_info("blk-mq: reduced tag depth to %u\n",
2774                         BLK_MQ_MAX_DEPTH);
2775                 set->queue_depth = BLK_MQ_MAX_DEPTH;
2776         }
2777
2778         /*
2779          * If a crashdump is active, then we are potentially in a very
2780          * memory constrained environment. Limit us to 1 queue and
2781          * 64 tags to prevent using too much memory.
2782          */
2783         if (is_kdump_kernel()) {
2784                 set->nr_hw_queues = 1;
2785                 set->queue_depth = min(64U, set->queue_depth);
2786         }
2787         /*
2788          * There is no use for more h/w queues than cpus.
2789          */
2790         if (set->nr_hw_queues > nr_cpu_ids)
2791                 set->nr_hw_queues = nr_cpu_ids;
2792
2793         set->tags = kzalloc_node(nr_cpu_ids * sizeof(struct blk_mq_tags *),
2794                                  GFP_KERNEL, set->numa_node);
2795         if (!set->tags)
2796                 return -ENOMEM;
2797
2798         ret = -ENOMEM;
2799         set->mq_map = kzalloc_node(sizeof(*set->mq_map) * nr_cpu_ids,
2800                         GFP_KERNEL, set->numa_node);
2801         if (!set->mq_map)
2802                 goto out_free_tags;
2803
2804         ret = blk_mq_update_queue_map(set);
2805         if (ret)
2806                 goto out_free_mq_map;
2807
2808         ret = blk_mq_alloc_rq_maps(set);
2809         if (ret)
2810                 goto out_free_mq_map;
2811
2812         mutex_init(&set->tag_list_lock);
2813         INIT_LIST_HEAD(&set->tag_list);
2814
2815         return 0;
2816
2817 out_free_mq_map:
2818         kfree(set->mq_map);
2819         set->mq_map = NULL;
2820 out_free_tags:
2821         kfree(set->tags);
2822         set->tags = NULL;
2823         return ret;
2824 }
2825 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
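
/*
 * Illustrative sketch, not part of this file: typical driver-side setup of
 * a tag set followed by queue creation.  The my_dev / my_cmd / my_mq_ops
 * names are hypothetical; struct my_dev is assumed to embed a
 * struct blk_mq_tag_set and hold the request_queue pointer.
 *
 *	dev->tag_set.ops		= &my_mq_ops;
 *	dev->tag_set.nr_hw_queues	= 1;
 *	dev->tag_set.queue_depth	= 128;
 *	dev->tag_set.numa_node		= NUMA_NO_NODE;
 *	dev->tag_set.cmd_size		= sizeof(struct my_cmd);
 *	dev->tag_set.flags		= BLK_MQ_F_SHOULD_MERGE;
 *	dev->tag_set.driver_data	= dev;
 *
 *	err = blk_mq_alloc_tag_set(&dev->tag_set);
 *	if (err)
 *		return err;
 *
 *	dev->queue = blk_mq_init_queue(&dev->tag_set);
 *	if (IS_ERR(dev->queue)) {
 *		blk_mq_free_tag_set(&dev->tag_set);
 *		return PTR_ERR(dev->queue);
 *	}
 */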
2826
2827 void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
2828 {
2829         int i;
2830
2831         for (i = 0; i < nr_cpu_ids; i++)
2832                 blk_mq_free_map_and_requests(set, i);
2833
2834         kfree(set->mq_map);
2835         set->mq_map = NULL;
2836
2837         kfree(set->tags);
2838         set->tags = NULL;
2839 }
2840 EXPORT_SYMBOL(blk_mq_free_tag_set);
2841
2842 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
2843 {
2844         struct blk_mq_tag_set *set = q->tag_set;
2845         struct blk_mq_hw_ctx *hctx;
2846         int i, ret;
2847
2848         if (!set)
2849                 return -EINVAL;
2850
2851         blk_mq_freeze_queue(q);
2852         blk_mq_quiesce_queue(q);
2853
2854         ret = 0;
2855         queue_for_each_hw_ctx(q, hctx, i) {
2856                 if (!hctx->tags)
2857                         continue;
2858                 /*
2859                  * Without an MQ scheduler, update the hw queue depth directly;
2860                  * otherwise just update the scheduler queue depth, like the old code did.
2861                  */
2862                 if (!hctx->sched_tags) {
2863                         ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr,
2864                                                         false);
2865                 } else {
2866                         ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
2867                                                         nr, true);
2868                 }
2869                 if (ret)
2870                         break;
2871         }
2872
2873         if (!ret)
2874                 q->nr_requests = nr;
2875
2876         blk_mq_unquiesce_queue(q);
2877         blk_mq_unfreeze_queue(q);
2878
2879         return ret;
2880 }
2881
2882 static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
2883                                                         int nr_hw_queues)
2884 {
2885         struct request_queue *q;
2886
2887         lockdep_assert_held(&set->tag_list_lock);
2888
2889         if (nr_hw_queues > nr_cpu_ids)
2890                 nr_hw_queues = nr_cpu_ids;
2891         if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues)
2892                 return;
2893
2894         list_for_each_entry(q, &set->tag_list, tag_set_list)
2895                 blk_mq_freeze_queue(q);
2896
2897         set->nr_hw_queues = nr_hw_queues;
2898         blk_mq_update_queue_map(set);
2899         list_for_each_entry(q, &set->tag_list, tag_set_list) {
2900                 blk_mq_realloc_hw_ctxs(set, q);
2901                 blk_mq_queue_reinit(q);
2902         }
2903
2904         list_for_each_entry(q, &set->tag_list, tag_set_list)
2905                 blk_mq_unfreeze_queue(q);
2906 }
2907
2908 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
2909 {
2910         mutex_lock(&set->tag_list_lock);
2911         __blk_mq_update_nr_hw_queues(set, nr_hw_queues);
2912         mutex_unlock(&set->tag_list_lock);
2913 }
2914 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
2915
2916 /* Enable polling stats and return whether they were already enabled. */
2917 static bool blk_poll_stats_enable(struct request_queue *q)
2918 {
2919         if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
2920             test_and_set_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
2921                 return true;
2922         blk_stat_add_callback(q, q->poll_cb);
2923         return false;
2924 }
2925
2926 static void blk_mq_poll_stats_start(struct request_queue *q)
2927 {
2928         /*
2929          * We don't arm the callback if polling stats are not enabled or the
2930          * callback is already active.
2931          */
2932         if (!test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
2933             blk_stat_is_active(q->poll_cb))
2934                 return;
2935
2936         blk_stat_activate_msecs(q->poll_cb, 100);
2937 }
2938
2939 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb)
2940 {
2941         struct request_queue *q = cb->data;
2942         int bucket;
2943
2944         for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS; bucket++) {
2945                 if (cb->stat[bucket].nr_samples)
2946                         q->poll_stat[bucket] = cb->stat[bucket];
2947         }
2948 }
2949
2950 static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
2951                                        struct blk_mq_hw_ctx *hctx,
2952                                        struct request *rq)
2953 {
2954         unsigned long ret = 0;
2955         int bucket;
2956
2957         /*
2958          * If stats collection isn't on, don't sleep but turn it on for
2959          * future users
2960          */
2961         if (!blk_poll_stats_enable(q))
2962                 return 0;
2963
2964         /*
2965          * As an optimistic guess, use half of the mean service time
2966          * for this type of request. We can (and should) make this smarter.
2967          * For instance, if the completion latencies are tight, we can
2968          * get closer than just half the mean. This is especially
2969          * important on devices where the completion latencies are longer
2970          * than ~10 usec. We do use the stats for the relevant IO size
2971          * if available which does lead to better estimates.
2972          */
2973         bucket = blk_mq_poll_stats_bkt(rq);
2974         if (bucket < 0)
2975                 return ret;
2976
2977         if (q->poll_stat[bucket].nr_samples)
2978                 ret = (q->poll_stat[bucket].mean + 1) / 2;
2979
2980         return ret;
2981 }
2982
2983 static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
2984                                      struct blk_mq_hw_ctx *hctx,
2985                                      struct request *rq)
2986 {
2987         struct hrtimer_sleeper hs;
2988         enum hrtimer_mode mode;
2989         unsigned int nsecs;
2990         ktime_t kt;
2991
2992         if (test_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags))
2993                 return false;
2994
2995         /*
2996          * poll_nsec can be:
2997          *
2998          * -1:  don't ever hybrid sleep
2999          *  0:  use half of prev avg
3000          * >0:  use this specific value
3001          */
3002         if (q->poll_nsec == -1)
3003                 return false;
3004         else if (q->poll_nsec > 0)
3005                 nsecs = q->poll_nsec;
3006         else
3007                 nsecs = blk_mq_poll_nsecs(q, hctx, rq);
3008
3009         if (!nsecs)
3010                 return false;
3011
3012         set_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
3013
3014         /*
3015          * This will be replaced with the stats tracking code, using
3016          * 'avg_completion_time / 2' as the pre-sleep target.
3017          */
3018         kt = nsecs;
3019
3020         mode = HRTIMER_MODE_REL;
3021         hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode);
3022         hrtimer_set_expires(&hs.timer, kt);
3023
3024         hrtimer_init_sleeper(&hs, current);
3025         do {
3026                 if (test_bit(REQ_ATOM_STARTED, &rq->atomic_flags) &&
3027                     blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT)
3028                         break;
3029                 set_current_state(TASK_UNINTERRUPTIBLE);
3030                 hrtimer_start_expires(&hs.timer, mode);
3031                 if (hs.task)
3032                         io_schedule();
3033                 hrtimer_cancel(&hs.timer);
3034                 mode = HRTIMER_MODE_ABS;
3035         } while (hs.task && !signal_pending(current));
3036
3037         __set_current_state(TASK_RUNNING);
3038         destroy_hrtimer_on_stack(&hs.timer);
3039         return true;
3040 }
3041
3042 static bool __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
3043 {
3044         struct request_queue *q = hctx->queue;
3045         long state;
3046
3047         /*
3048          * If we sleep, have the caller restart the poll loop to reset
3049          * the state. Like for the other success return cases, the
3050          * caller is responsible for checking if the IO completed. If
3051          * the IO isn't complete, we'll get called again and will go
3052          * straight to the busy poll loop.
3053          */
3054         if (blk_mq_poll_hybrid_sleep(q, hctx, rq))
3055                 return true;
3056
3057         hctx->poll_considered++;
3058
3059         state = current->state;
3060         while (!need_resched()) {
3061                 int ret;
3062
3063                 hctx->poll_invoked++;
3064
3065                 ret = q->mq_ops->poll(hctx, rq->tag);
3066                 if (ret > 0) {
3067                         hctx->poll_success++;
3068                         set_current_state(TASK_RUNNING);
3069                         return true;
3070                 }
3071
3072                 if (signal_pending_state(state, current))
3073                         set_current_state(TASK_RUNNING);
3074
3075                 if (current->state == TASK_RUNNING)
3076                         return true;
3077                 if (ret < 0)
3078                         break;
3079                 cpu_relax();
3080         }
3081
3082         return false;
3083 }
3084
3085 static bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
3086 {
3087         struct blk_mq_hw_ctx *hctx;
3088         struct request *rq;
3089
3090         if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
3091                 return false;
3092
3093         hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
3094         if (!blk_qc_t_is_internal(cookie))
3095                 rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
3096         else {
3097                 rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));
3098                 /*
3099                  * With scheduling, if the request has completed, we'll
3100                  * get a NULL return here, as we clear the sched tag when
3101                  * that happens. The request still remains valid, like always,
3102                  * so we should be safe with just the NULL check.
3103                  */
3104                 if (!rq)
3105                         return false;
3106         }
3107
3108         return __blk_mq_poll(hctx, rq);
3109 }
3110
3111 static int __init blk_mq_init(void)
3112 {
3113         cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
3114                                 blk_mq_hctx_notify_dead);
3115         return 0;
3116 }
3117 subsys_initcall(blk_mq_init);