blk-mq: do not use blk_mq_alloc_request_pinned in blk_mq_map_request
block/blk-mq.c
1 #include <linux/kernel.h>
2 #include <linux/module.h>
3 #include <linux/backing-dev.h>
4 #include <linux/bio.h>
5 #include <linux/blkdev.h>
6 #include <linux/mm.h>
7 #include <linux/init.h>
8 #include <linux/slab.h>
9 #include <linux/workqueue.h>
10 #include <linux/smp.h>
11 #include <linux/llist.h>
12 #include <linux/list_sort.h>
13 #include <linux/cpu.h>
14 #include <linux/cache.h>
15 #include <linux/sched/sysctl.h>
16 #include <linux/delay.h>
17
18 #include <trace/events/block.h>
19
20 #include <linux/blk-mq.h>
21 #include "blk.h"
22 #include "blk-mq.h"
23 #include "blk-mq-tag.h"
24
25 static DEFINE_MUTEX(all_q_mutex);
26 static LIST_HEAD(all_q_list);
27
28 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
29
30 static struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
31                                            unsigned int cpu)
32 {
33         return per_cpu_ptr(q->queue_ctx, cpu);
34 }
35
36 /*
37  * This assumes per-cpu software queueing queues. They could be per-node
38  * as well, for instance. For now this is hardcoded as-is. Note that we don't
39  * care about preemption, since we know the ctx's are persistent. This does
40  * mean that we can't rely on ctx always matching the currently running CPU.
41  */
42 static struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
43 {
44         return __blk_mq_get_ctx(q, get_cpu());
45 }
46
47 static void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
48 {
49         put_cpu();
50 }
51
52 /*
53  * Check if any of the ctx's have pending work in this hardware queue
54  */
55 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
56 {
57         unsigned int i;
58
59         for (i = 0; i < hctx->ctx_map.map_size; i++)
60                 if (hctx->ctx_map.map[i].word)
61                         return true;
62
63         return false;
64 }
65
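/*
 * The pending-work bitmap for a hardware queue is split into ctx_map.map[]
 * words of ctx_map.bits_per_word bits each. A ctx's index_hw selects the
 * word (index_hw / bits_per_word) and the bit within it
 * (index_hw & (bits_per_word - 1)). With the default bits_per_word of 8,
 * index_hw == 11 lands in map[1], bit 3.
 */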
66 static inline struct blk_align_bitmap *get_bm(struct blk_mq_hw_ctx *hctx,
67                                               struct blk_mq_ctx *ctx)
68 {
69         return &hctx->ctx_map.map[ctx->index_hw / hctx->ctx_map.bits_per_word];
70 }
71
72 #define CTX_TO_BIT(hctx, ctx)   \
73         ((ctx)->index_hw & ((hctx)->ctx_map.bits_per_word - 1))
74
75 /*
76  * Mark this ctx as having pending work in this hardware queue
77  */
78 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
79                                      struct blk_mq_ctx *ctx)
80 {
81         struct blk_align_bitmap *bm = get_bm(hctx, ctx);
82
83         if (!test_bit(CTX_TO_BIT(hctx, ctx), &bm->word))
84                 set_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
85 }
86
87 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
88                                       struct blk_mq_ctx *ctx)
89 {
90         struct blk_align_bitmap *bm = get_bm(hctx, ctx);
91
92         clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
93 }
94
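/*
 * Take a usage reference on the queue before submitting a request. If the
 * queue is in bypass (being frozen) after init has completed, drop the
 * reference again and wait until bypass is cleared or the queue dies.
 * Returns 0 on success, -ENODEV if the queue is dying, or a negative error
 * if the wait is interrupted.
 */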
95 static int blk_mq_queue_enter(struct request_queue *q)
96 {
97         int ret;
98
99         __percpu_counter_add(&q->mq_usage_counter, 1, 1000000);
100         smp_wmb();
101         /* we cannot freeze the queue while it is still initializing */
102         if (!blk_queue_bypass(q) || !blk_queue_init_done(q))
103                 return 0;
104
105         __percpu_counter_add(&q->mq_usage_counter, -1, 1000000);
106
107         spin_lock_irq(q->queue_lock);
108         ret = wait_event_interruptible_lock_irq(q->mq_freeze_wq,
109                 !blk_queue_bypass(q) || blk_queue_dying(q),
110                 *q->queue_lock);
111         /* inc usage with the lock held so a concurrent freeze_queue cannot run */
112         if (!ret && !blk_queue_dying(q))
113                 __percpu_counter_add(&q->mq_usage_counter, 1, 1000000);
114         else if (blk_queue_dying(q))
115                 ret = -ENODEV;
116         spin_unlock_irq(q->queue_lock);
117
118         return ret;
119 }
120
121 static void blk_mq_queue_exit(struct request_queue *q)
122 {
123         __percpu_counter_add(&q->mq_usage_counter, -1, 1000000);
124 }
125
126 static void __blk_mq_drain_queue(struct request_queue *q)
127 {
128         while (true) {
129                 s64 count;
130
131                 spin_lock_irq(q->queue_lock);
132                 count = percpu_counter_sum(&q->mq_usage_counter);
133                 spin_unlock_irq(q->queue_lock);
134
135                 if (count == 0)
136                         break;
137                 blk_mq_run_queues(q, false);
138                 msleep(10);
139         }
140 }
141
142 /*
143  * Guarantee no request is in use, so we can change any data structure of
144  * the queue afterward.
145  */
146 static void blk_mq_freeze_queue(struct request_queue *q)
147 {
148         bool drain;
149
150         spin_lock_irq(q->queue_lock);
151         drain = !q->bypass_depth++;
152         queue_flag_set(QUEUE_FLAG_BYPASS, q);
153         spin_unlock_irq(q->queue_lock);
154
155         if (drain)
156                 __blk_mq_drain_queue(q);
157 }
158
159 void blk_mq_drain_queue(struct request_queue *q)
160 {
161         __blk_mq_drain_queue(q);
162 }
163
164 static void blk_mq_unfreeze_queue(struct request_queue *q)
165 {
166         bool wake = false;
167
168         spin_lock_irq(q->queue_lock);
169         if (!--q->bypass_depth) {
170                 queue_flag_clear(QUEUE_FLAG_BYPASS, q);
171                 wake = true;
172         }
173         WARN_ON_ONCE(q->bypass_depth < 0);
174         spin_unlock_irq(q->queue_lock);
175         if (wake)
176                 wake_up_all(&q->mq_freeze_wq);
177 }
178
179 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
180 {
181         return blk_mq_has_free_tags(hctx->tags);
182 }
183 EXPORT_SYMBOL(blk_mq_can_queue);
184
185 static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
186                                struct request *rq, unsigned int rw_flags)
187 {
188         if (blk_queue_io_stat(q))
189                 rw_flags |= REQ_IO_STAT;
190
191         INIT_LIST_HEAD(&rq->queuelist);
192         /* csd/requeue_work/fifo_time is initialized before use */
193         rq->q = q;
194         rq->mq_ctx = ctx;
195         rq->cmd_flags |= rw_flags;
196         rq->cmd_type = 0;
197         /* do not touch atomic flags, it needs atomic ops against the timer */
198         rq->cpu = -1;
199         rq->__data_len = 0;
200         rq->__sector = (sector_t) -1;
201         rq->bio = NULL;
202         rq->biotail = NULL;
203         INIT_HLIST_NODE(&rq->hash);
204         RB_CLEAR_NODE(&rq->rb_node);
205         memset(&rq->flush, 0, max(sizeof(rq->flush), sizeof(rq->elv)));
206         rq->rq_disk = NULL;
207         rq->part = NULL;
208         rq->start_time = jiffies;
209 #ifdef CONFIG_BLK_CGROUP
210         rq->rl = NULL;
211         set_start_time_ns(rq);
212         rq->io_start_time_ns = 0;
213 #endif
214         rq->nr_phys_segments = 0;
215 #if defined(CONFIG_BLK_DEV_INTEGRITY)
216         rq->nr_integrity_segments = 0;
217 #endif
218         rq->ioprio = 0;
219         rq->special = NULL;
220         /* tag was already set */
221         rq->errors = 0;
222         memset(rq->__cmd, 0, sizeof(rq->__cmd));
223         rq->cmd = rq->__cmd;
224         rq->cmd_len = BLK_MAX_CDB;
225
226         rq->extra_len = 0;
227         rq->sense_len = 0;
228         rq->resid_len = 0;
229         rq->sense = NULL;
230
231         rq->deadline = 0;
232         INIT_LIST_HEAD(&rq->timeout_list);
233         rq->timeout = 0;
234         rq->retries = 0;
235         rq->end_io = NULL;
236         rq->end_io_data = NULL;
237         rq->next_rq = NULL;
238
239         ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
240 }
241
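/*
 * Grab a tag from the hardware queue's tag map and return the statically
 * allocated request associated with it, initialized for this ctx and the
 * given rw flags. Returns NULL if no tag could be obtained.
 */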
242 static struct request *
243 __blk_mq_alloc_request(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
244                 struct blk_mq_ctx *ctx, int rw, gfp_t gfp, bool reserved)
245 {
246         struct request *rq;
247         unsigned int tag;
248
249         tag = blk_mq_get_tag(hctx, &ctx->last_tag, gfp, reserved);
250         if (tag != BLK_MQ_TAG_FAIL) {
251                 rq = hctx->tags->rqs[tag];
252
253                 rq->cmd_flags = 0;
254                 if (blk_mq_tag_busy(hctx)) {
255                         rq->cmd_flags = REQ_MQ_INFLIGHT;
256                         atomic_inc(&hctx->nr_active);
257                 }
258
259                 rq->tag = tag;
260                 blk_mq_rq_ctx_init(q, ctx, rq, rw);
261                 return rq;
262         }
263
264         return NULL;
265 }
266
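/*
 * Allocate a request with the current CPU's ctx pinned by blk_mq_get_ctx().
 * The first attempt never waits; if it fails and the caller allows
 * __GFP_WAIT, run the hardware queue to make progress and retry with the
 * full gfp mask. On success the ctx is left pinned and the caller must
 * release it with blk_mq_put_ctx().
 */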
267 static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
268                                                    int rw, gfp_t gfp,
269                                                    bool reserved)
270 {
271         gfp_t gfp_mask = gfp & ~__GFP_WAIT;
272         struct request *rq;
273
274         do {
275                 struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
276                 struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu);
277
278                 rq = __blk_mq_alloc_request(q, hctx, ctx, rw, gfp_mask,
279                                                 reserved);
280                 if (rq)
281                         break;
282
283                 if (!(gfp & __GFP_WAIT)) {
284                         blk_mq_put_ctx(ctx);
285                         break;
286                 }
287
288                 __blk_mq_run_hw_queue(hctx);
289                 blk_mq_put_ctx(ctx);
290                 gfp_mask = gfp;
291         } while (1);
292
293         return rq;
294 }
295
296 struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
297                 bool reserved)
298 {
299         struct request *rq;
300
301         if (blk_mq_queue_enter(q))
302                 return NULL;
303
304         rq = blk_mq_alloc_request_pinned(q, rw, gfp, reserved);
305         if (rq)
306                 blk_mq_put_ctx(rq->mq_ctx);
307         return rq;
308 }
309 EXPORT_SYMBOL(blk_mq_alloc_request);
310
311 static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
312                                   struct blk_mq_ctx *ctx, struct request *rq)
313 {
314         const int tag = rq->tag;
315         struct request_queue *q = rq->q;
316
317         if (rq->cmd_flags & REQ_MQ_INFLIGHT)
318                 atomic_dec(&hctx->nr_active);
319
320         clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
321         blk_mq_put_tag(hctx, tag, &ctx->last_tag);
322         blk_mq_queue_exit(q);
323 }
324
325 void blk_mq_free_request(struct request *rq)
326 {
327         struct blk_mq_ctx *ctx = rq->mq_ctx;
328         struct blk_mq_hw_ctx *hctx;
329         struct request_queue *q = rq->q;
330
331         ctx->rq_completed[rq_is_sync(rq)]++;
332
333         hctx = q->mq_ops->map_queue(q, ctx->cpu);
334         __blk_mq_free_request(hctx, ctx, rq);
335 }
336
337 /*
338  * Clone all relevant state from a request that has been put on hold in
339  * the flush state machine into the preallocated flush request that hangs
340  * off the request queue.
341  *
342  * For a driver the flush request should be invisible; that's why we are
343  * impersonating the original request here.
344  */
345 void blk_mq_clone_flush_request(struct request *flush_rq,
346                 struct request *orig_rq)
347 {
348         struct blk_mq_hw_ctx *hctx =
349                 orig_rq->q->mq_ops->map_queue(orig_rq->q, orig_rq->mq_ctx->cpu);
350
351         flush_rq->mq_ctx = orig_rq->mq_ctx;
352         flush_rq->tag = orig_rq->tag;
353         memcpy(blk_mq_rq_to_pdu(flush_rq), blk_mq_rq_to_pdu(orig_rq),
354                 hctx->cmd_size);
355 }
356
357 inline void __blk_mq_end_io(struct request *rq, int error)
358 {
359         blk_account_io_done(rq);
360
361         if (rq->end_io) {
362                 rq->end_io(rq, error);
363         } else {
364                 if (unlikely(blk_bidi_rq(rq)))
365                         blk_mq_free_request(rq->next_rq);
366                 blk_mq_free_request(rq);
367         }
368 }
369 EXPORT_SYMBOL(__blk_mq_end_io);
370
371 void blk_mq_end_io(struct request *rq, int error)
372 {
373         if (blk_update_request(rq, error, blk_rq_bytes(rq)))
374                 BUG();
375         __blk_mq_end_io(rq, error);
376 }
377 EXPORT_SYMBOL(blk_mq_end_io);
378
379 static void __blk_mq_complete_request_remote(void *data)
380 {
381         struct request *rq = data;
382
383         rq->q->softirq_done_fn(rq);
384 }
385
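/*
 * If QUEUE_FLAG_SAME_COMP is set and we are completing on a different CPU
 * than the one that submitted the request (and either QUEUE_FLAG_SAME_FORCE
 * is set or the two CPUs do not share a cache), punt the completion back to
 * the submitting CPU with an async IPI while it is online; otherwise call
 * softirq_done_fn directly.
 */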
386 void __blk_mq_complete_request(struct request *rq)
387 {
388         struct blk_mq_ctx *ctx = rq->mq_ctx;
389         bool shared = false;
390         int cpu;
391
392         if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
393                 rq->q->softirq_done_fn(rq);
394                 return;
395         }
396
397         cpu = get_cpu();
398         if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
399                 shared = cpus_share_cache(cpu, ctx->cpu);
400
401         if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
402                 rq->csd.func = __blk_mq_complete_request_remote;
403                 rq->csd.info = rq;
404                 rq->csd.flags = 0;
405                 smp_call_function_single_async(ctx->cpu, &rq->csd);
406         } else {
407                 rq->q->softirq_done_fn(rq);
408         }
409         put_cpu();
410 }
411
412 /**
413  * blk_mq_complete_request - end I/O on a request
414  * @rq:         the request being processed
415  *
416  * Description:
417  *      Ends all I/O on a request. It does not handle partial completions.
418  *      The actual completion happens out-of-order, through an IPI handler.
419  **/
420 void blk_mq_complete_request(struct request *rq)
421 {
422         struct request_queue *q = rq->q;
423
424         if (unlikely(blk_should_fake_timeout(q)))
425                 return;
426         if (!blk_mark_rq_complete(rq)) {
427                 if (q->softirq_done_fn)
428                         __blk_mq_complete_request(rq);
429                 else
430                         blk_mq_end_io(rq, rq->errors);
431         }
432 }
433 EXPORT_SYMBOL(blk_mq_complete_request);
434
435 static void blk_mq_start_request(struct request *rq, bool last)
436 {
437         struct request_queue *q = rq->q;
438
439         trace_block_rq_issue(q, rq);
440
441         rq->resid_len = blk_rq_bytes(rq);
442         if (unlikely(blk_bidi_rq(rq)))
443                 rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);
444
445         /*
446          * Just mark start time and set the started bit. Due to memory
447          * ordering, we know we'll see the correct deadline as long as
448          * REQ_ATOM_STARTED is seen. Use the default queue timeout,
449          * unless one has been set in the request.
450          */
451         if (!rq->timeout)
452                 rq->deadline = jiffies + q->rq_timeout;
453         else
454                 rq->deadline = jiffies + rq->timeout;
455
456         /*
457          * Mark us as started and clear complete. Complete might have been
458          * set if requeue raced with timeout, which then marked it as
459          * complete. So be sure to clear complete again when we start
460          * the request, otherwise we'll ignore the completion event.
461          */
462         set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
463         clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
464
465         if (q->dma_drain_size && blk_rq_bytes(rq)) {
466                 /*
467                  * Make sure space for the drain appears.  We know we can do
468                  * this because max_hw_segments has been adjusted to be one
469                  * fewer than the device can handle.
470                  */
471                 rq->nr_phys_segments++;
472         }
473
474         /*
475          * Flag the last request in the series so that drivers know when IO
476          * should be kicked off, if they don't do it on a per-request basis.
477          *
478          * Note: the flag isn't the only condition on which drivers should kick
479          * off IO. If the drive is busy, the last request might not have the bit set.
480          */
481         if (last)
482                 rq->cmd_flags |= REQ_END;
483 }
484
485 static void __blk_mq_requeue_request(struct request *rq)
486 {
487         struct request_queue *q = rq->q;
488
489         trace_block_rq_requeue(q, rq);
490         clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
491
492         rq->cmd_flags &= ~REQ_END;
493
494         if (q->dma_drain_size && blk_rq_bytes(rq))
495                 rq->nr_phys_segments--;
496 }
497
498 void blk_mq_requeue_request(struct request *rq)
499 {
500         __blk_mq_requeue_request(rq);
501         blk_clear_rq_complete(rq);
502
503         BUG_ON(blk_queued_rq(rq));
504         blk_mq_add_to_requeue_list(rq, true);
505 }
506 EXPORT_SYMBOL(blk_mq_requeue_request);
507
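/*
 * Work handler draining q->requeue_list: requests flagged REQ_SOFTBARRIER
 * are re-inserted at the head first, the remaining ones at the tail, and
 * the hardware queues are then run.
 */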
508 static void blk_mq_requeue_work(struct work_struct *work)
509 {
510         struct request_queue *q =
511                 container_of(work, struct request_queue, requeue_work);
512         LIST_HEAD(rq_list);
513         struct request *rq, *next;
514         unsigned long flags;
515
516         spin_lock_irqsave(&q->requeue_lock, flags);
517         list_splice_init(&q->requeue_list, &rq_list);
518         spin_unlock_irqrestore(&q->requeue_lock, flags);
519
520         list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
521                 if (!(rq->cmd_flags & REQ_SOFTBARRIER))
522                         continue;
523
524                 rq->cmd_flags &= ~REQ_SOFTBARRIER;
525                 list_del_init(&rq->queuelist);
526                 blk_mq_insert_request(rq, true, false, false);
527         }
528
529         while (!list_empty(&rq_list)) {
530                 rq = list_entry(rq_list.next, struct request, queuelist);
531                 list_del_init(&rq->queuelist);
532                 blk_mq_insert_request(rq, false, false, false);
533         }
534
535         blk_mq_run_queues(q, false);
536 }
537
538 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
539 {
540         struct request_queue *q = rq->q;
541         unsigned long flags;
542
543         /*
544          * We abuse this flag that is otherwise used by the I/O scheduler to
545  * request head insertion from the workqueue.
546          */
547         BUG_ON(rq->cmd_flags & REQ_SOFTBARRIER);
548
549         spin_lock_irqsave(&q->requeue_lock, flags);
550         if (at_head) {
551                 rq->cmd_flags |= REQ_SOFTBARRIER;
552                 list_add(&rq->queuelist, &q->requeue_list);
553         } else {
554                 list_add_tail(&rq->queuelist, &q->requeue_list);
555         }
556         spin_unlock_irqrestore(&q->requeue_lock, flags);
557 }
558 EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
559
560 void blk_mq_kick_requeue_list(struct request_queue *q)
561 {
562         kblockd_schedule_work(&q->requeue_work);
563 }
564 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
565
566 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
567 {
568         return tags->rqs[tag];
569 }
570 EXPORT_SYMBOL(blk_mq_tag_to_rq);
571
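/*
 * Cursor passed to blk_mq_timeout_check() while walking one hardware
 * queue's busy tags: tracks the earliest deadline seen (next) and whether
 * any deadline was recorded at all (next_set).
 */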
572 struct blk_mq_timeout_data {
573         struct blk_mq_hw_ctx *hctx;
574         unsigned long *next;
575         unsigned int *next_set;
576 };
577
578 static void blk_mq_timeout_check(void *__data, unsigned long *free_tags)
579 {
580         struct blk_mq_timeout_data *data = __data;
581         struct blk_mq_hw_ctx *hctx = data->hctx;
582         unsigned int tag;
583
584         /* It may not be in flight yet (this is where
585          * the REQ_ATOM_STARTED flag comes in). The requests are
586          * statically allocated, so we know it's always safe to access the
587          * memory associated with a bit offset into ->rqs[].
588          */
589         tag = 0;
590         do {
591                 struct request *rq;
592
593                 tag = find_next_zero_bit(free_tags, hctx->tags->nr_tags, tag);
594                 if (tag >= hctx->tags->nr_tags)
595                         break;
596
597                 rq = blk_mq_tag_to_rq(hctx->tags, tag++);
598                 if (rq->q != hctx->queue)
599                         continue;
600                 if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
601                         continue;
602
603                 blk_rq_check_expired(rq, data->next, data->next_set);
604         } while (1);
605 }
606
607 static void blk_mq_hw_ctx_check_timeout(struct blk_mq_hw_ctx *hctx,
608                                         unsigned long *next,
609                                         unsigned int *next_set)
610 {
611         struct blk_mq_timeout_data data = {
612                 .hctx           = hctx,
613                 .next           = next,
614                 .next_set       = next_set,
615         };
616
617         /*
618          * Ask the tagging code to iterate busy requests, so we can
619          * check them for timeout.
620          */
621         blk_mq_tag_busy_iter(hctx->tags, blk_mq_timeout_check, &data);
622 }
623
624 static enum blk_eh_timer_return blk_mq_rq_timed_out(struct request *rq)
625 {
626         struct request_queue *q = rq->q;
627
628         /*
629          * We know that complete is set at this point. If STARTED isn't set
630          * anymore, then the request isn't active and the "timeout" should
631          * just be ignored. This can happen due to the bitflag ordering.
632          * Timeout first checks if STARTED is set, and if it is, assumes
633          * the request is active. But if we race with completion, then
634          * both flags will get cleared. So check here again, and ignore
635          * a timeout event with a request that isn't active.
636          */
637         if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
638                 return BLK_EH_NOT_HANDLED;
639
640         if (!q->mq_ops->timeout)
641                 return BLK_EH_RESET_TIMER;
642
643         return q->mq_ops->timeout(rq);
644 }
645
646 static void blk_mq_rq_timer(unsigned long data)
647 {
648         struct request_queue *q = (struct request_queue *) data;
649         struct blk_mq_hw_ctx *hctx;
650         unsigned long next = 0;
651         int i, next_set = 0;
652
653         queue_for_each_hw_ctx(q, hctx, i) {
654                 /*
655                  * If no software queues are currently mapped to this
656                  * hardware queue, there's nothing to check
657                  */
658                 if (!hctx->nr_ctx || !hctx->tags)
659                         continue;
660
661                 blk_mq_hw_ctx_check_timeout(hctx, &next, &next_set);
662         }
663
664         if (next_set) {
665                 next = blk_rq_timeout(round_jiffies_up(next));
666                 mod_timer(&q->timeout, next);
667         } else {
668                 queue_for_each_hw_ctx(q, hctx, i)
669                         blk_mq_tag_idle(hctx);
670         }
671 }
672
673 /*
674  * Reverse check our software queue for entries that we could potentially
675  * merge with. Currently includes a hand-wavy stop count of 8, to not spend
676  * too much time checking for merges.
677  */
678 static bool blk_mq_attempt_merge(struct request_queue *q,
679                                  struct blk_mq_ctx *ctx, struct bio *bio)
680 {
681         struct request *rq;
682         int checked = 8;
683
684         list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
685                 int el_ret;
686
687                 if (!checked--)
688                         break;
689
690                 if (!blk_rq_merge_ok(rq, bio))
691                         continue;
692
693                 el_ret = blk_try_merge(rq, bio);
694                 if (el_ret == ELEVATOR_BACK_MERGE) {
695                         if (bio_attempt_back_merge(q, rq, bio)) {
696                                 ctx->rq_merged++;
697                                 return true;
698                         }
699                         break;
700                 } else if (el_ret == ELEVATOR_FRONT_MERGE) {
701                         if (bio_attempt_front_merge(q, rq, bio)) {
702                                 ctx->rq_merged++;
703                                 return true;
704                         }
705                         break;
706                 }
707         }
708
709         return false;
710 }
711
712 /*
713  * Process software queues that have been marked busy, splicing them
714  * to the for-dispatch list.
715  */
716 static void flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
717 {
718         struct blk_mq_ctx *ctx;
719         int i;
720
721         for (i = 0; i < hctx->ctx_map.map_size; i++) {
722                 struct blk_align_bitmap *bm = &hctx->ctx_map.map[i];
723                 unsigned int off, bit;
724
725                 if (!bm->word)
726                         continue;
727
728                 bit = 0;
729                 off = i * hctx->ctx_map.bits_per_word;
730                 do {
731                         bit = find_next_bit(&bm->word, bm->depth, bit);
732                         if (bit >= bm->depth)
733                                 break;
734
735                         ctx = hctx->ctxs[bit + off];
736                         clear_bit(bit, &bm->word);
737                         spin_lock(&ctx->lock);
738                         list_splice_tail_init(&ctx->rq_list, list);
739                         spin_unlock(&ctx->lock);
740
741                         bit++;
742                 } while (1);
743         }
744 }
745
746 /*
747  * Run this hardware queue, pulling any software queues mapped to it in.
748  * Note that this function currently has various problems around ordering
749  * of IO. In particular, we'd like FIFO behaviour on handling existing
750  * items on the hctx->dispatch list. Ignore that for now.
751  */
752 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
753 {
754         struct request_queue *q = hctx->queue;
755         struct request *rq;
756         LIST_HEAD(rq_list);
757         int queued;
758
759         WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask));
760
761         if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
762                 return;
763
764         hctx->run++;
765
766         /*
767          * Touch any software queue that has pending entries.
768          */
769         flush_busy_ctxs(hctx, &rq_list);
770
771         /*
772          * If we have previous entries on our dispatch list, grab them
773          * and stuff them at the front for more fair dispatch.
774          */
775         if (!list_empty_careful(&hctx->dispatch)) {
776                 spin_lock(&hctx->lock);
777                 if (!list_empty(&hctx->dispatch))
778                         list_splice_init(&hctx->dispatch, &rq_list);
779                 spin_unlock(&hctx->lock);
780         }
781
782         /*
783          * Now process all the entries, sending them to the driver.
784          */
785         queued = 0;
786         while (!list_empty(&rq_list)) {
787                 int ret;
788
789                 rq = list_first_entry(&rq_list, struct request, queuelist);
790                 list_del_init(&rq->queuelist);
791
792                 blk_mq_start_request(rq, list_empty(&rq_list));
793
794                 ret = q->mq_ops->queue_rq(hctx, rq);
795                 switch (ret) {
796                 case BLK_MQ_RQ_QUEUE_OK:
797                         queued++;
798                         continue;
799                 case BLK_MQ_RQ_QUEUE_BUSY:
800                         list_add(&rq->queuelist, &rq_list);
801                         __blk_mq_requeue_request(rq);
802                         break;
803                 default:
804                         pr_err("blk-mq: bad return on queue: %d\n", ret);
805                 case BLK_MQ_RQ_QUEUE_ERROR:
806                         rq->errors = -EIO;
807                         blk_mq_end_io(rq, rq->errors);
808                         break;
809                 }
810
811                 if (ret == BLK_MQ_RQ_QUEUE_BUSY)
812                         break;
813         }
814
815         if (!queued)
816                 hctx->dispatched[0]++;
817         else if (queued < (1 << (BLK_MQ_MAX_DISPATCH_ORDER - 1)))
818                 hctx->dispatched[ilog2(queued) + 1]++;
819
820         /*
821          * Any items that need requeuing? Stuff them into hctx->dispatch,
822          * that is where we will continue on next queue run.
823          */
824         if (!list_empty(&rq_list)) {
825                 spin_lock(&hctx->lock);
826                 list_splice(&rq_list, &hctx->dispatch);
827                 spin_unlock(&hctx->lock);
828         }
829 }
830
831 /*
832  * It'd be great if the workqueue API had a way to pass
833  * in a mask and had some smarts for more clever placement.
834  * For now we just round-robin here, switching for every
835  * BLK_MQ_CPU_WORK_BATCH queued items.
836  */
837 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
838 {
839         int cpu = hctx->next_cpu;
840
841         if (--hctx->next_cpu_batch <= 0) {
842                 int next_cpu;
843
844                 next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
845                 if (next_cpu >= nr_cpu_ids)
846                         next_cpu = cpumask_first(hctx->cpumask);
847
848                 hctx->next_cpu = next_cpu;
849                 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
850         }
851
852         return cpu;
853 }
854
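/*
 * Run a hardware queue. If the call may be synchronous and we are on a CPU
 * mapped to this hctx, run it inline; otherwise punt to kblockd, picking
 * the next CPU in the hctx mask when there is more than one hardware queue.
 */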
855 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
856 {
857         if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
858                 return;
859
860         if (!async && cpumask_test_cpu(smp_processor_id(), hctx->cpumask))
861                 __blk_mq_run_hw_queue(hctx);
862         else if (hctx->queue->nr_hw_queues == 1)
863                 kblockd_schedule_delayed_work(&hctx->run_work, 0);
864         else {
865                 unsigned int cpu;
866
867                 cpu = blk_mq_hctx_next_cpu(hctx);
868                 kblockd_schedule_delayed_work_on(cpu, &hctx->run_work, 0);
869         }
870 }
871
872 void blk_mq_run_queues(struct request_queue *q, bool async)
873 {
874         struct blk_mq_hw_ctx *hctx;
875         int i;
876
877         queue_for_each_hw_ctx(q, hctx, i) {
878                 if ((!blk_mq_hctx_has_pending(hctx) &&
879                     list_empty_careful(&hctx->dispatch)) ||
880                     test_bit(BLK_MQ_S_STOPPED, &hctx->state))
881                         continue;
882
883                 preempt_disable();
884                 blk_mq_run_hw_queue(hctx, async);
885                 preempt_enable();
886         }
887 }
888 EXPORT_SYMBOL(blk_mq_run_queues);
889
890 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
891 {
892         cancel_delayed_work(&hctx->run_work);
893         cancel_delayed_work(&hctx->delay_work);
894         set_bit(BLK_MQ_S_STOPPED, &hctx->state);
895 }
896 EXPORT_SYMBOL(blk_mq_stop_hw_queue);
897
898 void blk_mq_stop_hw_queues(struct request_queue *q)
899 {
900         struct blk_mq_hw_ctx *hctx;
901         int i;
902
903         queue_for_each_hw_ctx(q, hctx, i)
904                 blk_mq_stop_hw_queue(hctx);
905 }
906 EXPORT_SYMBOL(blk_mq_stop_hw_queues);
907
908 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
909 {
910         clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
911
912         preempt_disable();
913         __blk_mq_run_hw_queue(hctx);
914         preempt_enable();
915 }
916 EXPORT_SYMBOL(blk_mq_start_hw_queue);
917
918 void blk_mq_start_hw_queues(struct request_queue *q)
919 {
920         struct blk_mq_hw_ctx *hctx;
921         int i;
922
923         queue_for_each_hw_ctx(q, hctx, i)
924                 blk_mq_start_hw_queue(hctx);
925 }
926 EXPORT_SYMBOL(blk_mq_start_hw_queues);
927
928
929 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
930 {
931         struct blk_mq_hw_ctx *hctx;
932         int i;
933
934         queue_for_each_hw_ctx(q, hctx, i) {
935                 if (!test_bit(BLK_MQ_S_STOPPED, &hctx->state))
936                         continue;
937
938                 clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
939                 preempt_disable();
940                 blk_mq_run_hw_queue(hctx, async);
941                 preempt_enable();
942         }
943 }
944 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
945
946 static void blk_mq_run_work_fn(struct work_struct *work)
947 {
948         struct blk_mq_hw_ctx *hctx;
949
950         hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
951
952         __blk_mq_run_hw_queue(hctx);
953 }
954
955 static void blk_mq_delay_work_fn(struct work_struct *work)
956 {
957         struct blk_mq_hw_ctx *hctx;
958
959         hctx = container_of(work, struct blk_mq_hw_ctx, delay_work.work);
960
961         if (test_and_clear_bit(BLK_MQ_S_STOPPED, &hctx->state))
962                 __blk_mq_run_hw_queue(hctx);
963 }
964
965 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
966 {
967         unsigned long tmo = msecs_to_jiffies(msecs);
968
969         if (hctx->queue->nr_hw_queues == 1)
970                 kblockd_schedule_delayed_work(&hctx->delay_work, tmo);
971         else {
972                 unsigned int cpu;
973
974                 cpu = blk_mq_hctx_next_cpu(hctx);
975                 kblockd_schedule_delayed_work_on(cpu, &hctx->delay_work, tmo);
976         }
977 }
978 EXPORT_SYMBOL(blk_mq_delay_queue);
979
980 static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
981                                     struct request *rq, bool at_head)
982 {
983         struct blk_mq_ctx *ctx = rq->mq_ctx;
984
985         trace_block_rq_insert(hctx->queue, rq);
986
987         if (at_head)
988                 list_add(&rq->queuelist, &ctx->rq_list);
989         else
990                 list_add_tail(&rq->queuelist, &ctx->rq_list);
991
992         blk_mq_hctx_mark_pending(hctx, ctx);
993
994         /*
995          * We do this early, to ensure we are on the right CPU.
996          */
997         blk_add_timer(rq);
998 }
999
1000 void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
1001                 bool async)
1002 {
1003         struct request_queue *q = rq->q;
1004         struct blk_mq_hw_ctx *hctx;
1005         struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx;
1006
1007         current_ctx = blk_mq_get_ctx(q);
1008         if (!cpu_online(ctx->cpu))
1009                 rq->mq_ctx = ctx = current_ctx;
1010
1011         hctx = q->mq_ops->map_queue(q, ctx->cpu);
1012
1013         if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA) &&
1014             !(rq->cmd_flags & (REQ_FLUSH_SEQ))) {
1015                 blk_insert_flush(rq);
1016         } else {
1017                 spin_lock(&ctx->lock);
1018                 __blk_mq_insert_request(hctx, rq, at_head);
1019                 spin_unlock(&ctx->lock);
1020         }
1021
1022         if (run_queue)
1023                 blk_mq_run_hw_queue(hctx, async);
1024
1025         blk_mq_put_ctx(current_ctx);
1026 }
1027
1028 static void blk_mq_insert_requests(struct request_queue *q,
1029                                      struct blk_mq_ctx *ctx,
1030                                      struct list_head *list,
1031                                      int depth,
1032                                      bool from_schedule)
1033
1034 {
1035         struct blk_mq_hw_ctx *hctx;
1036         struct blk_mq_ctx *current_ctx;
1037
1038         trace_block_unplug(q, depth, !from_schedule);
1039
1040         current_ctx = blk_mq_get_ctx(q);
1041
1042         if (!cpu_online(ctx->cpu))
1043                 ctx = current_ctx;
1044         hctx = q->mq_ops->map_queue(q, ctx->cpu);
1045
1046         /*
1047          * preemption doesn't flush plug list, so it's possible ctx->cpu is
1048          * offline now
1049          */
1050         spin_lock(&ctx->lock);
1051         while (!list_empty(list)) {
1052                 struct request *rq;
1053
1054                 rq = list_first_entry(list, struct request, queuelist);
1055                 list_del_init(&rq->queuelist);
1056                 rq->mq_ctx = ctx;
1057                 __blk_mq_insert_request(hctx, rq, false);
1058         }
1059         spin_unlock(&ctx->lock);
1060
1061         blk_mq_run_hw_queue(hctx, from_schedule);
1062         blk_mq_put_ctx(current_ctx);
1063 }
1064
1065 static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
1066 {
1067         struct request *rqa = container_of(a, struct request, queuelist);
1068         struct request *rqb = container_of(b, struct request, queuelist);
1069
1070         return !(rqa->mq_ctx < rqb->mq_ctx ||
1071                  (rqa->mq_ctx == rqb->mq_ctx &&
1072                   blk_rq_pos(rqa) < blk_rq_pos(rqb)));
1073 }
1074
1075 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1076 {
1077         struct blk_mq_ctx *this_ctx;
1078         struct request_queue *this_q;
1079         struct request *rq;
1080         LIST_HEAD(list);
1081         LIST_HEAD(ctx_list);
1082         unsigned int depth;
1083
1084         list_splice_init(&plug->mq_list, &list);
1085
1086         list_sort(NULL, &list, plug_ctx_cmp);
1087
1088         this_q = NULL;
1089         this_ctx = NULL;
1090         depth = 0;
1091
1092         while (!list_empty(&list)) {
1093                 rq = list_entry_rq(list.next);
1094                 list_del_init(&rq->queuelist);
1095                 BUG_ON(!rq->q);
1096                 if (rq->mq_ctx != this_ctx) {
1097                         if (this_ctx) {
1098                                 blk_mq_insert_requests(this_q, this_ctx,
1099                                                         &ctx_list, depth,
1100                                                         from_schedule);
1101                         }
1102
1103                         this_ctx = rq->mq_ctx;
1104                         this_q = rq->q;
1105                         depth = 0;
1106                 }
1107
1108                 depth++;
1109                 list_add_tail(&rq->queuelist, &ctx_list);
1110         }
1111
1112         /*
1113          * If 'this_ctx' is set, we know we have entries to complete
1114          * on 'ctx_list'. Do those.
1115          */
1116         if (this_ctx) {
1117                 blk_mq_insert_requests(this_q, this_ctx, &ctx_list, depth,
1118                                        from_schedule);
1119         }
1120 }
1121
1122 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
1123 {
1124         init_request_from_bio(rq, bio);
1125         blk_account_io_start(rq, 1);
1126 }
1127
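/*
 * Either merge the bio into an existing request on the software queue or
 * turn it into the freshly allocated request and insert that. Returns true
 * if the bio was merged (rq is freed again), false if rq was inserted and
 * the caller still needs to run the queue.
 */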
1128 static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
1129                                          struct blk_mq_ctx *ctx,
1130                                          struct request *rq, struct bio *bio)
1131 {
1132         struct request_queue *q = hctx->queue;
1133
1134         if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE)) {
1135                 blk_mq_bio_to_request(rq, bio);
1136                 spin_lock(&ctx->lock);
1137 insert_rq:
1138                 __blk_mq_insert_request(hctx, rq, false);
1139                 spin_unlock(&ctx->lock);
1140                 return false;
1141         } else {
1142                 spin_lock(&ctx->lock);
1143                 if (!blk_mq_attempt_merge(q, ctx, bio)) {
1144                         blk_mq_bio_to_request(rq, bio);
1145                         goto insert_rq;
1146                 }
1147
1148                 spin_unlock(&ctx->lock);
1149                 __blk_mq_free_request(hctx, ctx, rq);
1150                 return true;
1151         }
1152 }
1153
1154 struct blk_map_ctx {
1155         struct blk_mq_hw_ctx *hctx;
1156         struct blk_mq_ctx *ctx;
1157 };
1158
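/*
 * Prepare a request for a bio: take a queue usage reference, pick the
 * current CPU's ctx and its hardware queue, and allocate a tag/request.
 * If the atomic allocation fails, run the hardware queue and retry with
 * __GFP_WAIT. Returns NULL (with the bio already ended) only if entering
 * the queue fails; otherwise the chosen ctx/hctx are returned in *data
 * with the ctx still pinned.
 */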
1159 static struct request *blk_mq_map_request(struct request_queue *q,
1160                                           struct bio *bio,
1161                                           struct blk_map_ctx *data)
1162 {
1163         struct blk_mq_hw_ctx *hctx;
1164         struct blk_mq_ctx *ctx;
1165         struct request *rq;
1166         int rw = bio_data_dir(bio);
1167
1168         if (unlikely(blk_mq_queue_enter(q))) {
1169                 bio_endio(bio, -EIO);
1170                 return NULL;
1171         }
1172
1173         ctx = blk_mq_get_ctx(q);
1174         hctx = q->mq_ops->map_queue(q, ctx->cpu);
1175
1176         if (rw_is_sync(bio->bi_rw))
1177                 rw |= REQ_SYNC;
1178
1179         trace_block_getrq(q, bio, rw);
1180         rq = __blk_mq_alloc_request(q, hctx, ctx, rw, GFP_ATOMIC, false);
1181         if (unlikely(!rq)) {
1182                 __blk_mq_run_hw_queue(hctx);
1183                 blk_mq_put_ctx(ctx);
1184                 trace_block_sleeprq(q, bio, rw);
1185
1186                 ctx = blk_mq_get_ctx(q);
1187                 hctx = q->mq_ops->map_queue(q, ctx->cpu);
1188                 rq = __blk_mq_alloc_request(q, hctx, ctx, rw,
1189                                             __GFP_WAIT|GFP_ATOMIC, false);
1190         }
1191
1192         hctx->queued++;
1193         data->hctx = hctx;
1194         data->ctx = ctx;
1195         return rq;
1196 }
1197
1198 /*
1199  * Multiple hardware queue variant. This will not use per-process plugs,
1200  * but will attempt to bypass the hctx queueing if we can go straight to
1201  * hardware for SYNC IO.
1202  */
1203 static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
1204 {
1205         const int is_sync = rw_is_sync(bio->bi_rw);
1206         const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
1207         struct blk_map_ctx data;
1208         struct request *rq;
1209
1210         blk_queue_bounce(q, &bio);
1211
1212         if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1213                 bio_endio(bio, -EIO);
1214                 return;
1215         }
1216
1217         rq = blk_mq_map_request(q, bio, &data);
1218         if (unlikely(!rq))
1219                 return;
1220
1221         if (unlikely(is_flush_fua)) {
1222                 blk_mq_bio_to_request(rq, bio);
1223                 blk_insert_flush(rq);
1224                 goto run_queue;
1225         }
1226
1227         if (is_sync) {
1228                 int ret;
1229
1230                 blk_mq_bio_to_request(rq, bio);
1231                 blk_mq_start_request(rq, true);
1232
1233                 /*
1234                  * For OK queue, we are done. For error, kill it. Any other
1235                  * error (busy), just add it to our list as we previously
1236                  * would have done
1237                  */
1238                 ret = q->mq_ops->queue_rq(data.hctx, rq);
1239                 if (ret == BLK_MQ_RQ_QUEUE_OK)
1240                         goto done;
1241                 else {
1242                         __blk_mq_requeue_request(rq);
1243
1244                         if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
1245                                 rq->errors = -EIO;
1246                                 blk_mq_end_io(rq, rq->errors);
1247                                 goto done;
1248                         }
1249                 }
1250         }
1251
1252         if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1253                 /*
1254                  * For a SYNC request, send it to the hardware immediately. For
1255                  * an ASYNC request, just ensure that we run it later on. The
1256                  * latter allows for merging opportunities and more efficient
1257                  * dispatching.
1258                  */
1259 run_queue:
1260                 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1261         }
1262 done:
1263         blk_mq_put_ctx(data.ctx);
1264 }
1265
1266 /*
1267  * Single hardware queue variant. This will attempt to use any per-process
1268  * plug for merging and IO deferral.
1269  */
1270 static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
1271 {
1272         const int is_sync = rw_is_sync(bio->bi_rw);
1273         const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
1274         unsigned int use_plug, request_count = 0;
1275         struct blk_map_ctx data;
1276         struct request *rq;
1277
1278         /*
1279          * Only use a plug for async, non-flush IO; flush/FUA and sync IO
1280          * are sent to the hardware queue right away.
1281          */
1282         use_plug = !is_flush_fua && !is_sync;
1283
1284         blk_queue_bounce(q, &bio);
1285
1286         if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1287                 bio_endio(bio, -EIO);
1288                 return;
1289         }
1290
1291         if (use_plug && !blk_queue_nomerges(q) &&
1292             blk_attempt_plug_merge(q, bio, &request_count))
1293                 return;
1294
1295         rq = blk_mq_map_request(q, bio, &data);
             if (unlikely(!rq))
                     return;
1296
1297         if (unlikely(is_flush_fua)) {
1298                 blk_mq_bio_to_request(rq, bio);
1299                 blk_insert_flush(rq);
1300                 goto run_queue;
1301         }
1302
1303         /*
1304          * A task plug currently exists. Since this is completely lockless,
1305          * utilize that to temporarily store requests until the task is
1306          * either done or scheduled away.
1307          */
1308         if (use_plug) {
1309                 struct blk_plug *plug = current->plug;
1310
1311                 if (plug) {
1312                         blk_mq_bio_to_request(rq, bio);
1313                         if (list_empty(&plug->mq_list))
1314                                 trace_block_plug(q);
1315                         else if (request_count >= BLK_MAX_REQUEST_COUNT) {
1316                                 blk_flush_plug_list(plug, false);
1317                                 trace_block_plug(q);
1318                         }
1319                         list_add_tail(&rq->queuelist, &plug->mq_list);
1320                         blk_mq_put_ctx(data.ctx);
1321                         return;
1322                 }
1323         }
1324
1325         if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1326                 /*
1327                  * For a SYNC request, send it to the hardware immediately. For
1328                  * an ASYNC request, just ensure that we run it later on. The
1329                  * latter allows for merging opportunities and more efficient
1330                  * dispatching.
1331                  */
1332 run_queue:
1333                 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1334         }
1335
1336         blk_mq_put_ctx(data.ctx);
1337 }
1338
1339 /*
1340  * Default CPU to hardware queue mapping; we use one software queue per CPU.
1341  */
1342 struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu)
1343 {
1344         return q->queue_hw_ctx[q->mq_map[cpu]];
1345 }
1346 EXPORT_SYMBOL(blk_mq_map_queue);
1347
1348 struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *set,
1349                                                    unsigned int hctx_index,
1350                                                    int node)
1351 {
1352         return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, node);
1353 }
1354 EXPORT_SYMBOL(blk_mq_alloc_single_hw_queue);
1355
1356 void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *hctx,
1357                                  unsigned int hctx_index)
1358 {
1359         kfree(hctx);
1360 }
1361 EXPORT_SYMBOL(blk_mq_free_single_hw_queue);
1362
1363 static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
1364                 struct blk_mq_tags *tags, unsigned int hctx_idx)
1365 {
1366         struct page *page;
1367
1368         if (tags->rqs && set->ops->exit_request) {
1369                 int i;
1370
1371                 for (i = 0; i < tags->nr_tags; i++) {
1372                         if (!tags->rqs[i])
1373                                 continue;
1374                         set->ops->exit_request(set->driver_data, tags->rqs[i],
1375                                                 hctx_idx, i);
1376                 }
1377         }
1378
1379         while (!list_empty(&tags->page_list)) {
1380                 page = list_first_entry(&tags->page_list, struct page, lru);
1381                 list_del_init(&page->lru);
1382                 __free_pages(page, page->private);
1383         }
1384
1385         kfree(tags->rqs);
1386
1387         blk_mq_free_tags(tags);
1388 }
1389
1390 static size_t order_to_size(unsigned int order)
1391 {
1392         return (size_t)PAGE_SIZE << order;
1393 }
1394
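/*
 * Allocate the tag map for one hardware queue along with the memory backing
 * its requests. Requests are carved out of page allocations of up to order
 * 4 (falling back to smaller orders), and each request is passed to the
 * driver's init_request() callback, if one is provided.
 */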
1395 static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
1396                 unsigned int hctx_idx)
1397 {
1398         struct blk_mq_tags *tags;
1399         unsigned int i, j, entries_per_page, max_order = 4;
1400         size_t rq_size, left;
1401
1402         tags = blk_mq_init_tags(set->queue_depth, set->reserved_tags,
1403                                 set->numa_node);
1404         if (!tags)
1405                 return NULL;
1406
1407         INIT_LIST_HEAD(&tags->page_list);
1408
1409         tags->rqs = kmalloc_node(set->queue_depth * sizeof(struct request *),
1410                                         GFP_KERNEL, set->numa_node);
1411         if (!tags->rqs) {
1412                 blk_mq_free_tags(tags);
1413                 return NULL;
1414         }
1415
1416         /*
1417          * rq_size is the size of the request plus driver payload, rounded
1418          * to the cacheline size
1419          */
1420         rq_size = round_up(sizeof(struct request) + set->cmd_size,
1421                                 cache_line_size());
1422         left = rq_size * set->queue_depth;
1423
1424         for (i = 0; i < set->queue_depth; ) {
1425                 int this_order = max_order;
1426                 struct page *page;
1427                 int to_do;
1428                 void *p;
1429
1430                 while (left < order_to_size(this_order - 1) && this_order)
1431                         this_order--;
1432
1433                 do {
1434                         page = alloc_pages_node(set->numa_node, GFP_KERNEL,
1435                                                 this_order);
1436                         if (page)
1437                                 break;
1438                         if (!this_order--)
1439                                 break;
1440                         if (order_to_size(this_order) < rq_size)
1441                                 break;
1442                 } while (1);
1443
1444                 if (!page)
1445                         goto fail;
1446
1447                 page->private = this_order;
1448                 list_add_tail(&page->lru, &tags->page_list);
1449
1450                 p = page_address(page);
1451                 entries_per_page = order_to_size(this_order) / rq_size;
1452                 to_do = min(entries_per_page, set->queue_depth - i);
1453                 left -= to_do * rq_size;
1454                 for (j = 0; j < to_do; j++) {
1455                         tags->rqs[i] = p;
1456                         if (set->ops->init_request) {
1457                                 if (set->ops->init_request(set->driver_data,
1458                                                 tags->rqs[i], hctx_idx, i,
1459                                                 set->numa_node))
1460                                         goto fail;
1461                         }
1462
1463                         p += rq_size;
1464                         i++;
1465                 }
1466         }
1467
1468         return tags;
1469
1470 fail:
1471         pr_warn("%s: failed to allocate requests\n", __func__);
1472         blk_mq_free_rq_map(set, tags, hctx_idx);
1473         return NULL;
1474 }
1475
1476 static void blk_mq_free_bitmap(struct blk_mq_ctxmap *bitmap)
1477 {
1478         kfree(bitmap->map);
1479 }
1480
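/*
 * Allocate the pending-work bitmap for a hardware queue: one
 * blk_align_bitmap word per group of bits_per_word (8) CPUs, covering all
 * possible CPUs, with each word recording how many of its bits are in use
 * (depth).
 */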
1481 static int blk_mq_alloc_bitmap(struct blk_mq_ctxmap *bitmap, int node)
1482 {
1483         unsigned int bpw = 8, total, num_maps, i;
1484
1485         bitmap->bits_per_word = bpw;
1486
1487         num_maps = ALIGN(nr_cpu_ids, bpw) / bpw;
1488         bitmap->map = kzalloc_node(num_maps * sizeof(struct blk_align_bitmap),
1489                                         GFP_KERNEL, node);
1490         if (!bitmap->map)
1491                 return -ENOMEM;
1492
1493         bitmap->map_size = num_maps;
1494
1495         total = nr_cpu_ids;
1496         for (i = 0; i < num_maps; i++) {
1497                 bitmap->map[i].depth = min(total, bitmap->bits_per_word);
1498                 total -= bitmap->map[i].depth;
1499         }
1500
1501         return 0;
1502 }
1503
1504 static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
1505 {
1506         struct request_queue *q = hctx->queue;
1507         struct blk_mq_ctx *ctx;
1508         LIST_HEAD(tmp);
1509
1510         /*
1511          * Move ctx entries to new CPU, if this one is going away.
1512          */
1513         ctx = __blk_mq_get_ctx(q, cpu);
1514
1515         spin_lock(&ctx->lock);
1516         if (!list_empty(&ctx->rq_list)) {
1517                 list_splice_init(&ctx->rq_list, &tmp);
1518                 blk_mq_hctx_clear_pending(hctx, ctx);
1519         }
1520         spin_unlock(&ctx->lock);
1521
1522         if (list_empty(&tmp))
1523                 return NOTIFY_OK;
1524
1525         ctx = blk_mq_get_ctx(q);
1526         spin_lock(&ctx->lock);
1527
1528         while (!list_empty(&tmp)) {
1529                 struct request *rq;
1530
1531                 rq = list_first_entry(&tmp, struct request, queuelist);
1532                 rq->mq_ctx = ctx;
1533                 list_move_tail(&rq->queuelist, &ctx->rq_list);
1534         }
1535
1536         hctx = q->mq_ops->map_queue(q, ctx->cpu);
1537         blk_mq_hctx_mark_pending(hctx, ctx);
1538
1539         spin_unlock(&ctx->lock);
1540
1541         blk_mq_run_hw_queue(hctx, true);
1542         blk_mq_put_ctx(ctx);
1543         return NOTIFY_OK;
1544 }
1545
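/*
 * A CPU coming online may belong to a hardware queue that has no tag map
 * yet; allocate it lazily here. NOTIFY_STOP signals that the allocation
 * failed.
 */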
1546 static int blk_mq_hctx_cpu_online(struct blk_mq_hw_ctx *hctx, int cpu)
1547 {
1548         struct request_queue *q = hctx->queue;
1549         struct blk_mq_tag_set *set = q->tag_set;
1550
1551         if (set->tags[hctx->queue_num])
1552                 return NOTIFY_OK;
1553
1554         set->tags[hctx->queue_num] = blk_mq_init_rq_map(set, hctx->queue_num);
1555         if (!set->tags[hctx->queue_num])
1556                 return NOTIFY_STOP;
1557
1558         hctx->tags = set->tags[hctx->queue_num];
1559         return NOTIFY_OK;
1560 }
1561
1562 static int blk_mq_hctx_notify(void *data, unsigned long action,
1563                               unsigned int cpu)
1564 {
1565         struct blk_mq_hw_ctx *hctx = data;
1566
1567         if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
1568                 return blk_mq_hctx_cpu_offline(hctx, cpu);
1569         else if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
1570                 return blk_mq_hctx_cpu_online(hctx, cpu);
1571
1572         return NOTIFY_OK;
1573 }
1574
1575 static void blk_mq_exit_hw_queues(struct request_queue *q,
1576                 struct blk_mq_tag_set *set, int nr_queue)
1577 {
1578         struct blk_mq_hw_ctx *hctx;
1579         unsigned int i;
1580
1581         queue_for_each_hw_ctx(q, hctx, i) {
1582                 if (i == nr_queue)
1583                         break;
1584
1585                 if (set->ops->exit_hctx)
1586                         set->ops->exit_hctx(hctx, i);
1587
1588                 blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
1589                 kfree(hctx->ctxs);
1590                 blk_mq_free_bitmap(&hctx->ctx_map);
1591         }
1592
1593 }
1594
1595 static void blk_mq_free_hw_queues(struct request_queue *q,
1596                 struct blk_mq_tag_set *set)
1597 {
1598         struct blk_mq_hw_ctx *hctx;
1599         unsigned int i;
1600
1601         queue_for_each_hw_ctx(q, hctx, i) {
1602                 free_cpumask_var(hctx->cpumask);
1603                 set->ops->free_hctx(hctx, i);
1604         }
1605 }
1606
1607 static int blk_mq_init_hw_queues(struct request_queue *q,
1608                 struct blk_mq_tag_set *set)
1609 {
1610         struct blk_mq_hw_ctx *hctx;
1611         unsigned int i;
1612
1613         /*
1614          * Initialize hardware queues
1615          */
1616         queue_for_each_hw_ctx(q, hctx, i) {
1617                 int node;
1618
1619                 node = hctx->numa_node;
1620                 if (node == NUMA_NO_NODE)
1621                         node = hctx->numa_node = set->numa_node;
1622
1623                 INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
1624                 INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
1625                 spin_lock_init(&hctx->lock);
1626                 INIT_LIST_HEAD(&hctx->dispatch);
1627                 hctx->queue = q;
1628                 hctx->queue_num = i;
1629                 hctx->flags = set->flags;
1630                 hctx->cmd_size = set->cmd_size;
1631
1632                 blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
1633                                                 blk_mq_hctx_notify, hctx);
1634                 blk_mq_register_cpu_notifier(&hctx->cpu_notifier);
1635
1636                 hctx->tags = set->tags[i];
1637
1638                 /*
1639                  * Allocate space for all possible CPUs to avoid having to
1640                  * allocate at runtime.
1641                  */
1642                 hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
1643                                                 GFP_KERNEL, node);
1644                 if (!hctx->ctxs)
1645                         break;
1646
1647                 if (blk_mq_alloc_bitmap(&hctx->ctx_map, node))
1648                         break;
1649
1650                 hctx->nr_ctx = 0;
1651
1652                 if (set->ops->init_hctx &&
1653                     set->ops->init_hctx(hctx, set->driver_data, i))
1654                         break;
1655         }
1656
1657         if (i == q->nr_hw_queues)
1658                 return 0;
1659
1660         /*
1661          * Init failed
1662          */
1663         blk_mq_exit_hw_queues(q, set, i);
1664
1665         return 1;
1666 }
1667
1668 static void blk_mq_init_cpu_queues(struct request_queue *q,
1669                                    unsigned int nr_hw_queues)
1670 {
1671         unsigned int i;
1672
1673         for_each_possible_cpu(i) {
1674                 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
1675                 struct blk_mq_hw_ctx *hctx;
1676
1677                 memset(__ctx, 0, sizeof(*__ctx));
1678                 __ctx->cpu = i;
1679                 spin_lock_init(&__ctx->lock);
1680                 INIT_LIST_HEAD(&__ctx->rq_list);
1681                 __ctx->queue = q;
1682
1683                 /* If the CPU isn't online, it is mapped to the first hctx */
1684                 if (!cpu_online(i))
1685                         continue;
1686
1687                 hctx = q->mq_ops->map_queue(q, i);
1688                 cpumask_set_cpu(i, hctx->cpumask);
1689                 hctx->nr_ctx++;
1690
1691                 /*
1692                  * Set local node, IFF we have more than one hw queue. If
1693                  * not, we remain on the home node of the device
1694                  */
1695                 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
1696                         hctx->numa_node = cpu_to_node(i);
1697         }
1698 }
1699
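/*
 * Rebuild the software -> hardware queue mapping: reset every hctx's cpumask
 * and ctx count, map each online CPU's ctx through ->map_queue(), and free
 * the tag map of any hardware queue that ends up with no software queues.
 * Finally reset the round-robin state used when running a queue from a
 * different CPU.
 */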
1700 static void blk_mq_map_swqueue(struct request_queue *q)
1701 {
1702         unsigned int i;
1703         struct blk_mq_hw_ctx *hctx;
1704         struct blk_mq_ctx *ctx;
1705
1706         queue_for_each_hw_ctx(q, hctx, i) {
1707                 cpumask_clear(hctx->cpumask);
1708                 hctx->nr_ctx = 0;
1709         }
1710
1711         /*
1712          * Map software to hardware queues
1713          */
1714         queue_for_each_ctx(q, ctx, i) {
1715                 /* If the CPU isn't online, it is mapped to the first hctx */
1716                 if (!cpu_online(i))
1717                         continue;
1718
1719                 hctx = q->mq_ops->map_queue(q, i);
1720                 cpumask_set_cpu(i, hctx->cpumask);
1721                 ctx->index_hw = hctx->nr_ctx;
1722                 hctx->ctxs[hctx->nr_ctx++] = ctx;
1723         }
1724
1725         queue_for_each_hw_ctx(q, hctx, i) {
1726                 /*
1727                  * If no software queues are mapped to this hardware queue,
1728                  * disable it and free the request entries
1729                  */
1730                 if (!hctx->nr_ctx) {
1731                         struct blk_mq_tag_set *set = q->tag_set;
1732
1733                         if (set->tags[i]) {
1734                                 blk_mq_free_rq_map(set, set->tags[i], i);
1735                                 set->tags[i] = NULL;
1736                                 hctx->tags = NULL;
1737                         }
1738                         continue;
1739                 }
1740
1741                 /*
1742                  * Initialize batch round-robin counts
1743                  */
1744                 hctx->next_cpu = cpumask_first(hctx->cpumask);
1745                 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
1746         }
1747 }
1748
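/*
 * A tag set is "shared" once more than one request queue is on its tag_list.
 * Propagate the resulting BLK_MQ_F_TAG_SHARED state to every hctx of every
 * queue using the set, freezing each queue while its flags change.
 */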
1749 static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set)
1750 {
1751         struct blk_mq_hw_ctx *hctx;
1752         struct request_queue *q;
1753         bool shared;
1754         int i;
1755
1756         if (set->tag_list.next == set->tag_list.prev)
1757                 shared = false;
1758         else
1759                 shared = true;
1760
1761         list_for_each_entry(q, &set->tag_list, tag_set_list) {
1762                 blk_mq_freeze_queue(q);
1763
1764                 queue_for_each_hw_ctx(q, hctx, i) {
1765                         if (shared)
1766                                 hctx->flags |= BLK_MQ_F_TAG_SHARED;
1767                         else
1768                                 hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
1769                 }
1770                 blk_mq_unfreeze_queue(q);
1771         }
1772 }
1773
1774 static void blk_mq_del_queue_tag_set(struct request_queue *q)
1775 {
1776         struct blk_mq_tag_set *set = q->tag_set;
1777
1778         blk_mq_freeze_queue(q);
1779
1780         mutex_lock(&set->tag_list_lock);
1781         list_del_init(&q->tag_set_list);
1782         blk_mq_update_tag_set_depth(set);
1783         mutex_unlock(&set->tag_list_lock);
1784
1785         blk_mq_unfreeze_queue(q);
1786 }
1787
1788 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
1789                                      struct request_queue *q)
1790 {
1791         q->tag_set = set;
1792
1793         mutex_lock(&set->tag_list_lock);
1794         list_add_tail(&q->tag_set_list, &set->tag_list);
1795         blk_mq_update_tag_set_depth(set);
1796         mutex_unlock(&set->tag_list_lock);
1797 }
1798
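/*
 * Allocate and initialize a request queue for @set: the per-cpu software
 * contexts, the hardware contexts from ->alloc_hctx(), the CPU to hardware
 * queue map, the flush request, and the make_request function (per-hw-queue
 * or single-queue variant). On success the queue is added to all_q_list so
 * CPU hotplug can remap it; on failure an ERR_PTR() is returned.
 */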
1799 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
1800 {
1801         struct blk_mq_hw_ctx **hctxs;
1802         struct blk_mq_ctx *ctx;
1803         struct request_queue *q;
1804         unsigned int *map;
1805         int i;
1806
1807         ctx = alloc_percpu(struct blk_mq_ctx);
1808         if (!ctx)
1809                 return ERR_PTR(-ENOMEM);
1810
1811         hctxs = kmalloc_node(set->nr_hw_queues * sizeof(*hctxs), GFP_KERNEL,
1812                         set->numa_node);
1813
1814         if (!hctxs)
1815                 goto err_percpu;
1816
1817         map = blk_mq_make_queue_map(set);
1818         if (!map)
1819                 goto err_map;
1820
1821         for (i = 0; i < set->nr_hw_queues; i++) {
1822                 int node = blk_mq_hw_queue_to_node(map, i);
1823
1824                 hctxs[i] = set->ops->alloc_hctx(set, i, node);
1825                 if (!hctxs[i])
1826                         goto err_hctxs;
1827
1828                 if (!zalloc_cpumask_var(&hctxs[i]->cpumask, GFP_KERNEL))
1829                         goto err_hctxs;
1830
1831                 atomic_set(&hctxs[i]->nr_active, 0);
1832                 hctxs[i]->numa_node = node;
1833                 hctxs[i]->queue_num = i;
1834         }
1835
1836         q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
1837         if (!q)
1838                 goto err_hctxs;
1839
1840         if (percpu_counter_init(&q->mq_usage_counter, 0))
1841                 goto err_map;
1842
1843         setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
1844         blk_queue_rq_timeout(q, 30000);
1845
1846         q->nr_queues = nr_cpu_ids;
1847         q->nr_hw_queues = set->nr_hw_queues;
1848         q->mq_map = map;
1849
1850         q->queue_ctx = ctx;
1851         q->queue_hw_ctx = hctxs;
1852
1853         q->mq_ops = set->ops;
1854         q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
1855
1856         q->sg_reserved_size = INT_MAX;
1857
1858         INIT_WORK(&q->requeue_work, blk_mq_requeue_work);
1859         INIT_LIST_HEAD(&q->requeue_list);
1860         spin_lock_init(&q->requeue_lock);
1861
1862         if (q->nr_hw_queues > 1)
1863                 blk_queue_make_request(q, blk_mq_make_request);
1864         else
1865                 blk_queue_make_request(q, blk_sq_make_request);
1866
1867         blk_queue_rq_timed_out(q, blk_mq_rq_timed_out);
1868         if (set->timeout)
1869                 blk_queue_rq_timeout(q, set->timeout);
1870
1871         /*
1872          * Do this after blk_queue_make_request() overrides it...
1873          */
1874         q->nr_requests = set->queue_depth;
1875
1876         if (set->ops->complete)
1877                 blk_queue_softirq_done(q, set->ops->complete);
1878
1879         blk_mq_init_flush(q);
1880         blk_mq_init_cpu_queues(q, set->nr_hw_queues);
1881
1882         q->flush_rq = kzalloc(round_up(sizeof(struct request) +
1883                                 set->cmd_size, cache_line_size()),
1884                                 GFP_KERNEL);
1885         if (!q->flush_rq)
1886                 goto err_hw;
1887
1888         if (blk_mq_init_hw_queues(q, set))
1889                 goto err_flush_rq;
1890
1891         mutex_lock(&all_q_mutex);
1892         list_add_tail(&q->all_q_node, &all_q_list);
1893         mutex_unlock(&all_q_mutex);
1894
1895         blk_mq_add_queue_tag_set(set, q);
1896
1897         blk_mq_map_swqueue(q);
1898
1899         return q;
1900
1901 err_flush_rq:
1902         kfree(q->flush_rq);
1903 err_hw:
1904         blk_cleanup_queue(q);
1905 err_hctxs:
1906         kfree(map);
1907         for (i = 0; i < set->nr_hw_queues; i++) {
1908                 if (!hctxs[i])
1909                         break;
1910                 free_cpumask_var(hctxs[i]->cpumask);
1911                 set->ops->free_hctx(hctxs[i], i);
1912         }
1913 err_map:
1914         kfree(hctxs);
1915 err_percpu:
1916         free_percpu(ctx);
1917         return ERR_PTR(-ENOMEM);
1918 }
1919 EXPORT_SYMBOL(blk_mq_init_queue);
1920
1921 void blk_mq_free_queue(struct request_queue *q)
1922 {
1923         struct blk_mq_tag_set   *set = q->tag_set;
1924
1925         blk_mq_del_queue_tag_set(q);
1926
1927         blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
1928         blk_mq_free_hw_queues(q, set);
1929
1930         percpu_counter_destroy(&q->mq_usage_counter);
1931
1932         free_percpu(q->queue_ctx);
1933         kfree(q->queue_hw_ctx);
1934         kfree(q->mq_map);
1935
1936         q->queue_ctx = NULL;
1937         q->queue_hw_ctx = NULL;
1938         q->mq_map = NULL;
1939
1940         mutex_lock(&all_q_mutex);
1941         list_del_init(&q->all_q_node);
1942         mutex_unlock(&all_q_mutex);
1943 }
1944
1945 /* Basically redo blk_mq_init_queue with queue frozen */
1946 static void blk_mq_queue_reinit(struct request_queue *q)
1947 {
1948         blk_mq_freeze_queue(q);
1949
1950         blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues);
1951
1952         /*
1953          * Redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
1954          * we should change the hctx numa_node according to the new topology
1955          * (this involves freeing and re-allocating memory; is it worth doing?)
1956          */
1957
1958         blk_mq_map_swqueue(q);
1959
1960         blk_mq_unfreeze_queue(q);
1961 }
1962
1963 static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
1964                                       unsigned long action, void *hcpu)
1965 {
1966         struct request_queue *q;
1967
1968         /*
1969          * Before the new mappings are established, a hot-added CPU might
1970          * already have started handling requests. This doesn't break anything,
1971          * as we map offline CPUs to the first hardware queue. We will re-init
1972          * the queue below to get optimal settings.
1973          */
1974         if (action != CPU_DEAD && action != CPU_DEAD_FROZEN &&
1975             action != CPU_ONLINE && action != CPU_ONLINE_FROZEN)
1976                 return NOTIFY_OK;
1977
1978         mutex_lock(&all_q_mutex);
1979         list_for_each_entry(q, &all_q_list, all_q_node)
1980                 blk_mq_queue_reinit(q);
1981         mutex_unlock(&all_q_mutex);
1982         return NOTIFY_OK;
1983 }
1984
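/*
 * Validate @set and pre-allocate one request/tag map per hardware queue.
 * This must be called, and must succeed, before blk_mq_init_queue().
 *
 * A minimal caller could look roughly like the sketch below (hypothetical
 * driver code: my_mq_ops is an illustrative blk_mq_ops providing queue_rq,
 * map_queue, alloc_hctx and free_hctx, and struct my_cmd is an illustrative
 * per-request payload; neither is defined in this file):
 *
 *	struct blk_mq_tag_set set = {
 *		.ops		= &my_mq_ops,
 *		.nr_hw_queues	= 1,
 *		.queue_depth	= 64,
 *		.cmd_size	= sizeof(struct my_cmd),
 *		.numa_node	= NUMA_NO_NODE,
 *	};
 *	struct request_queue *q;
 *	int ret;
 *
 *	ret = blk_mq_alloc_tag_set(&set);
 *	if (ret)
 *		return ret;
 *
 *	q = blk_mq_init_queue(&set);
 *	if (IS_ERR(q)) {
 *		blk_mq_free_tag_set(&set);
 *		return PTR_ERR(q);
 *	}
 */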
1985 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
1986 {
1987         int i;
1988
1989         if (!set->nr_hw_queues)
1990                 return -EINVAL;
1991         if (!set->queue_depth || set->queue_depth > BLK_MQ_MAX_DEPTH)
1992                 return -EINVAL;
1993         if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
1994                 return -EINVAL;
1995
1996         if (!set->nr_hw_queues ||
1997             !set->ops->queue_rq || !set->ops->map_queue ||
1998             !set->ops->alloc_hctx || !set->ops->free_hctx)
1999                 return -EINVAL;
2000
2001
2002         set->tags = kmalloc_node(set->nr_hw_queues *
2003                                  sizeof(struct blk_mq_tags *),
2004                                  GFP_KERNEL, set->numa_node);
2005         if (!set->tags)
2006                 goto out;
2007
2008         for (i = 0; i < set->nr_hw_queues; i++) {
2009                 set->tags[i] = blk_mq_init_rq_map(set, i);
2010                 if (!set->tags[i])
2011                         goto out_unwind;
2012         }
2013
2014         mutex_init(&set->tag_list_lock);
2015         INIT_LIST_HEAD(&set->tag_list);
2016
2017         return 0;
2018
2019 out_unwind:
2020         while (--i >= 0)
2021                 blk_mq_free_rq_map(set, set->tags[i], i);
2022 out:
2023         return -ENOMEM;
2024 }
2025 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
2026
2027 void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
2028 {
2029         int i;
2030
2031         for (i = 0; i < set->nr_hw_queues; i++) {
2032                 if (set->tags[i])
2033                         blk_mq_free_rq_map(set, set->tags[i], i);
2034         }
2035
2036         kfree(set->tags);
2037 }
2038 EXPORT_SYMBOL(blk_mq_free_tag_set);
2039
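/*
 * Resize the tag depth of every hardware queue of @q, e.g. via the
 * nr_requests sysfs attribute. The new depth may not exceed the queue_depth
 * the tag set was originally allocated with.
 */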
2040 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
2041 {
2042         struct blk_mq_tag_set *set = q->tag_set;
2043         struct blk_mq_hw_ctx *hctx;
2044         int i, ret;
2045
2046         if (!set || nr > set->queue_depth)
2047                 return -EINVAL;
2048
2049         ret = 0;
2050         queue_for_each_hw_ctx(q, hctx, i) {
2051                 ret = blk_mq_tag_update_depth(hctx->tags, nr);
2052                 if (ret)
2053                         break;
2054         }
2055
2056         if (!ret)
2057                 q->nr_requests = nr;
2058
2059         return ret;
2060 }
2061
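/*
 * blk_mq_disable_hotplug()/blk_mq_enable_hotplug() simply take and release
 * all_q_mutex, so that queues cannot be added to, removed from, or remapped
 * on all_q_list while the caller does its work.
 */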
2062 void blk_mq_disable_hotplug(void)
2063 {
2064         mutex_lock(&all_q_mutex);
2065 }
2066
2067 void blk_mq_enable_hotplug(void)
2068 {
2069         mutex_unlock(&all_q_mutex);
2070 }
2071
2072 static int __init blk_mq_init(void)
2073 {
2074         blk_mq_cpu_init();
2075
2076         /* Must be called after percpu_counter_hotcpu_callback() */
2077         hotcpu_notifier(blk_mq_queue_reinit_notify, -10);
2078
2079         return 0;
2080 }
2081 subsys_initcall(blk_mq_init);