drivers/md/dm-rq.c
/*
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"

#include <linux/elevator.h> /* for rq_end_sector() */
#include <linux/blk-mq.h>

#define DM_MSG_PREFIX "core-rq"

#define DM_MQ_NR_HW_QUEUES 1
#define DM_MQ_QUEUE_DEPTH 2048
static unsigned dm_mq_nr_hw_queues = DM_MQ_NR_HW_QUEUES;
static unsigned dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;

/*
 * Request-based DM's mempools' reserved IOs set by the user.
 */
#define RESERVED_REQUEST_BASED_IOS      256
static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;

static bool use_blk_mq = IS_ENABLED(CONFIG_DM_MQ_DEFAULT);

bool dm_use_blk_mq_default(void)
{
        return use_blk_mq;
}

bool dm_use_blk_mq(struct mapped_device *md)
{
        return md->use_blk_mq;
}
EXPORT_SYMBOL_GPL(dm_use_blk_mq);

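/*
 * Number of IOs to reserve for request-based DM's mempools: the module
 * parameter, falling back to the default when unset and capped at
 * DM_RESERVED_MAX_IOS.
 */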
unsigned dm_get_reserved_rq_based_ios(void)
{
        return __dm_get_module_param(&reserved_rq_based_ios,
                                     RESERVED_REQUEST_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);

static unsigned dm_get_blk_mq_nr_hw_queues(void)
{
        return __dm_get_module_param(&dm_mq_nr_hw_queues, 1, 32);
}

static unsigned dm_get_blk_mq_queue_depth(void)
{
        return __dm_get_module_param(&dm_mq_queue_depth,
                                     DM_MQ_QUEUE_DEPTH, BLK_MQ_MAX_DEPTH);
}

int dm_request_based(struct mapped_device *md)
{
        return queue_is_rq_based(md->queue);
}

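/*
 * Queue start/stop helpers: the dm_old_* variants drive the legacy
 * .request_fn queue under its queue lock, the dm_mq_* variants use the
 * blk-mq quiesce/unquiesce interfaces.
 */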
static void dm_old_start_queue(struct request_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        if (blk_queue_stopped(q))
                blk_start_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

static void dm_mq_start_queue(struct request_queue *q)
{
        blk_mq_unquiesce_queue(q);
        blk_mq_kick_requeue_list(q);
}

void dm_start_queue(struct request_queue *q)
{
        if (!q->mq_ops)
                dm_old_start_queue(q);
        else
                dm_mq_start_queue(q);
}

static void dm_old_stop_queue(struct request_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        if (!blk_queue_stopped(q))
                blk_stop_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

static void dm_mq_stop_queue(struct request_queue *q)
{
        if (blk_mq_queue_stopped(q))
                return;

        blk_mq_quiesce_queue(q);
}

void dm_stop_queue(struct request_queue *q)
{
        if (!q->mq_ops)
                dm_old_stop_queue(q);
        else
                dm_mq_stop_queue(q);
}

/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone)
{
        struct dm_rq_clone_bio_info *info =
                container_of(clone, struct dm_rq_clone_bio_info, clone);
        struct dm_rq_target_io *tio = info->tio;
        unsigned int nr_bytes = info->orig->bi_iter.bi_size;
        blk_status_t error = clone->bi_status;
        bool is_last = !clone->bi_next;

        bio_put(clone);

        if (tio->error)
                /*
                 * An error has already been detected on the request.
                 * Once an error has occurred, just let clone->end_io()
                 * handle the remainder.
                 */
                return;
        else if (error) {
                /*
                 * Don't report the error to the upper layer yet.
                 * The error handling decision is made by the target driver
                 * when the request is completed.
                 */
                tio->error = error;
                goto exit;
        }

        /*
         * I/O for the bio successfully completed.
         * Report the data completion to the upper layer.
         */
        tio->completed += nr_bytes;

        /*
         * Update the original request.
         * Do not use blk_end_request() here, because it may complete
         * the original request before the clone, and break the ordering.
         */
        if (is_last)
 exit:
                blk_update_request(tio->orig, BLK_STS_OK, tio->completed);
}

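/* The tio is embedded in the request as its driver-private data (pdu) */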
static struct dm_rq_target_io *tio_from_request(struct request *rq)
{
        return blk_mq_rq_to_pdu(rq);
}

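/* Account the just-completed request in dm-stats, if statistics are enabled */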
static void rq_end_stats(struct mapped_device *md, struct request *orig)
{
        if (unlikely(dm_stats_used(&md->stats))) {
                struct dm_rq_target_io *tio = tio_from_request(orig);
                tio->duration_jiffies = jiffies - tio->duration_jiffies;
                dm_stats_account_io(&md->stats, rq_data_dir(orig),
                                    blk_rq_pos(orig), tio->n_sectors, true,
                                    tio->duration_jiffies, &tio->stats_aux);
        }
}

/*
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Or do dm_get() before calling this function and dm_put() later.
 */
static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
{
        struct request_queue *q = md->queue;
        unsigned long flags;

        atomic_dec(&md->pending[rw]);

        /* nudge anyone waiting on suspend queue */
        if (!md_in_flight(md))
                wake_up(&md->wait);

        /*
         * Run this off this callpath, as drivers could invoke end_io while
         * inside their request_fn (and holding the queue lock). Calling
         * back into ->request_fn() could deadlock attempting to grab the
         * queue lock again.
         */
        if (!q->mq_ops && run_queue) {
                spin_lock_irqsave(q->queue_lock, flags);
                blk_run_queue_async(q);
                spin_unlock_irqrestore(q->queue_lock, flags);
        }

        /*
         * dm_put() must be at the end of this function. See the comment above
         */
        dm_put(md);
}

/*
 * Complete the clone and the original request.
 * Must be called without clone's queue lock held,
 * see end_clone_request() for more details.
 */
static void dm_end_request(struct request *clone, blk_status_t error)
{
        int rw = rq_data_dir(clone);
        struct dm_rq_target_io *tio = clone->end_io_data;
        struct mapped_device *md = tio->md;
        struct request *rq = tio->orig;

        blk_rq_unprep_clone(clone);
        tio->ti->type->release_clone_rq(clone);

        rq_end_stats(md, rq);
        if (!rq->q->mq_ops)
                blk_end_request_all(rq, error);
        else
                blk_mq_end_request(rq, error);
        rq_completed(md, rw, true);
}

/*
 * Requeue the original request of a clone.
 */
static void dm_old_requeue_request(struct request *rq, unsigned long delay_ms)
{
        struct request_queue *q = rq->q;
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        blk_requeue_request(q, rq);
        blk_delay_queue(q, delay_ms);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
{
        blk_mq_delay_kick_requeue_list(q, msecs);
}

void dm_mq_kick_requeue_list(struct mapped_device *md)
{
        __dm_mq_kick_requeue_list(dm_get_md_queue(md), 0);
}
EXPORT_SYMBOL(dm_mq_kick_requeue_list);

static void dm_mq_delay_requeue_request(struct request *rq, unsigned long msecs)
{
        blk_mq_requeue_request(rq, false);
        __dm_mq_kick_requeue_list(rq->q, msecs);
}

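/*
 * Release any mapped clone and put the original request back on its queue,
 * optionally after a short delay.
 */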
static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_requeue)
{
        struct mapped_device *md = tio->md;
        struct request *rq = tio->orig;
        int rw = rq_data_dir(rq);
        unsigned long delay_ms = delay_requeue ? 100 : 0;

        rq_end_stats(md, rq);
        if (tio->clone) {
                blk_rq_unprep_clone(tio->clone);
                tio->ti->type->release_clone_rq(tio->clone);
        }

        if (!rq->q->mq_ops)
                dm_old_requeue_request(rq, delay_ms);
        else
                dm_mq_delay_requeue_request(rq, delay_ms);

        rq_completed(md, rw, false);
}

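/*
 * Consult the target's rq_end_io hook (if any) and then complete, retry or
 * requeue the original request based on its verdict.
 */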
static void dm_done(struct request *clone, blk_status_t error, bool mapped)
{
        int r = DM_ENDIO_DONE;
        struct dm_rq_target_io *tio = clone->end_io_data;
        dm_request_endio_fn rq_end_io = NULL;

        if (tio->ti) {
                rq_end_io = tio->ti->type->rq_end_io;

                if (mapped && rq_end_io)
                        r = rq_end_io(tio->ti, clone, error, &tio->info);
        }

        if (unlikely(error == BLK_STS_TARGET)) {
                if (req_op(clone) == REQ_OP_WRITE_SAME &&
                    !clone->q->limits.max_write_same_sectors)
                        disable_write_same(tio->md);
                if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
                    !clone->q->limits.max_write_zeroes_sectors)
                        disable_write_zeroes(tio->md);
        }

        switch (r) {
        case DM_ENDIO_DONE:
                /* The target wants to complete the I/O */
                dm_end_request(clone, error);
                break;
        case DM_ENDIO_INCOMPLETE:
                /* The target will handle the I/O */
                return;
        case DM_ENDIO_REQUEUE:
                /* The target wants to requeue the I/O */
                dm_requeue_original_request(tio, false);
                break;
        case DM_ENDIO_DELAY_REQUEUE:
                /* The target wants to requeue the I/O after a delay */
                dm_requeue_original_request(tio, true);
                break;
        default:
                DMWARN("unimplemented target endio return value: %d", r);
                BUG();
        }
}

/*
 * Request completion handler for request-based dm
 */
static void dm_softirq_done(struct request *rq)
{
        bool mapped = true;
        struct dm_rq_target_io *tio = tio_from_request(rq);
        struct request *clone = tio->clone;
        int rw;

        if (!clone) {
                struct mapped_device *md = tio->md;

                rq_end_stats(md, rq);
                rw = rq_data_dir(rq);
                if (!rq->q->mq_ops)
                        blk_end_request_all(rq, tio->error);
                else
                        blk_mq_end_request(rq, tio->error);
                rq_completed(md, rw, false);
                return;
        }

        if (rq->rq_flags & RQF_FAILED)
                mapped = false;

        dm_done(clone, tio->error, mapped);
}

/*
 * Complete the clone and the original request with the error status
 * through softirq context.
 */
static void dm_complete_request(struct request *rq, blk_status_t error)
{
        struct dm_rq_target_io *tio = tio_from_request(rq);

        tio->error = error;
        if (!rq->q->mq_ops)
                blk_complete_request(rq);
        else
                blk_mq_complete_request(rq);
}

/*
 * Complete the not-mapped clone and the original request with the error status
 * through softirq context.
 * Target's rq_end_io() function isn't called.
 * This may be used when the target's map_rq() or clone_and_map_rq() functions fail.
 */
static void dm_kill_unmapped_request(struct request *rq, blk_status_t error)
{
        rq->rq_flags |= RQF_FAILED;
        dm_complete_request(rq, error);
}

/*
 * Called with the clone's queue lock held (in the case of .request_fn)
 */
static void end_clone_request(struct request *clone, blk_status_t error)
{
        struct dm_rq_target_io *tio = clone->end_io_data;

        /*
         * Actual request completion is done in a softirq context which doesn't
         * hold the clone's queue lock.  Otherwise, deadlock could occur because:
         *     - another request may be submitted by the upper level driver
         *       of the stacking during the completion
         *     - the submission which requires queue lock may be done
         *       against this clone's queue
         */
        dm_complete_request(tio->orig, error);
}

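/* Insert the prepared clone directly into the underlying device's queue */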
static blk_status_t dm_dispatch_clone_request(struct request *clone, struct request *rq)
{
        blk_status_t r;

        if (blk_queue_io_stat(clone->q))
                clone->rq_flags |= RQF_IO_STAT;

        clone->start_time = jiffies;
        r = blk_insert_cloned_request(clone->q, clone);
        if (r != BLK_STS_OK && r != BLK_STS_RESOURCE && r != BLK_STS_DEV_RESOURCE)
                /* must complete clone in terms of original request */
                dm_complete_request(rq, r);
        return r;
}

static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
                                 void *data)
{
        struct dm_rq_target_io *tio = data;
        struct dm_rq_clone_bio_info *info =
                container_of(bio, struct dm_rq_clone_bio_info, clone);

        info->orig = bio_orig;
        info->tio = tio;
        bio->bi_end_io = end_clone_bio;

        return 0;
}

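/*
 * Clone the original request's bios into the clone from the md's bio_set
 * and wire up the clone's completion handling.
 */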
static int setup_clone(struct request *clone, struct request *rq,
                       struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
        int r;

        r = blk_rq_prep_clone(clone, rq, tio->md->bs, gfp_mask,
                              dm_rq_bio_constructor, tio);
        if (r)
                return r;

        clone->end_io = end_clone_request;
        clone->end_io_data = tio;

        tio->clone = clone;

        return 0;
}

static void map_tio_request(struct kthread_work *work);

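/* (Re)initialize the per-request tio before mapping the original request */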
static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
                     struct mapped_device *md)
{
        tio->md = md;
        tio->ti = NULL;
        tio->clone = NULL;
        tio->orig = rq;
        tio->error = 0;
        tio->completed = 0;
        /*
         * Avoid initializing info for blk-mq; it passes
         * target-specific data through info.ptr
         * (see: dm_mq_init_request)
         */
        if (!md->init_tio_pdu)
                memset(&tio->info, 0, sizeof(tio->info));
        if (md->kworker_task)
                kthread_init_work(&tio->work, map_tio_request);
}

/*
 * Returns:
 * DM_MAPIO_*       : the request has been processed as indicated
 * DM_MAPIO_REQUEUE : the original request needs to be immediately requeued
 * < 0              : the request was completed due to failure
 */
static int map_request(struct dm_rq_target_io *tio)
{
        int r;
        struct dm_target *ti = tio->ti;
        struct mapped_device *md = tio->md;
        struct request *rq = tio->orig;
        struct request *clone = NULL;
        blk_status_t ret;

        r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
check_again:
        switch (r) {
        case DM_MAPIO_SUBMITTED:
                /* The target has taken the I/O to submit by itself later */
                break;
        case DM_MAPIO_REMAPPED:
                if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
                        /* -ENOMEM */
                        ti->type->release_clone_rq(clone);
                        return DM_MAPIO_REQUEUE;
                }

                /* The target has remapped the I/O so dispatch it */
                trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
                                     blk_rq_pos(rq));
                ret = dm_dispatch_clone_request(clone, rq);
                if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
                        blk_rq_unprep_clone(clone);
                        tio->ti->type->release_clone_rq(clone);
                        tio->clone = NULL;
                        if (!rq->q->mq_ops)
                                r = DM_MAPIO_DELAY_REQUEUE;
                        else
                                r = DM_MAPIO_REQUEUE;
                        goto check_again;
                }
                break;
        case DM_MAPIO_REQUEUE:
                /* The target wants to requeue the I/O */
                break;
        case DM_MAPIO_DELAY_REQUEUE:
                /* The target wants to requeue the I/O after a delay */
                dm_requeue_original_request(tio, true);
                break;
        case DM_MAPIO_KILL:
                /* The target wants to complete the I/O */
                dm_kill_unmapped_request(rq, BLK_STS_IOERR);
                break;
        default:
                DMWARN("unimplemented target map return value: %d", r);
                BUG();
        }

        return r;
}

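/*
 * Mark the original request started, account it as in flight and take an
 * md reference that rq_completed() later drops.
 */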
static void dm_start_request(struct mapped_device *md, struct request *orig)
{
        if (!orig->q->mq_ops)
                blk_start_request(orig);
        else
                blk_mq_start_request(orig);
        atomic_inc(&md->pending[rq_data_dir(orig)]);

        if (md->seq_rq_merge_deadline_usecs) {
                md->last_rq_pos = rq_end_sector(orig);
                md->last_rq_rw = rq_data_dir(orig);
                md->last_rq_start_time = ktime_get();
        }

        if (unlikely(dm_stats_used(&md->stats))) {
                struct dm_rq_target_io *tio = tio_from_request(orig);
                tio->duration_jiffies = jiffies;
                tio->n_sectors = blk_rq_sectors(orig);
                dm_stats_account_io(&md->stats, rq_data_dir(orig),
                                    blk_rq_pos(orig), tio->n_sectors, false, 0,
                                    &tio->stats_aux);
        }

        /*
         * Hold the md reference here for the in-flight I/O.
         * We can't rely on the reference count by device opener,
         * because the device may be closed during the request completion
         * when all bios are completed.
         * See the comment in rq_completed() too.
         */
        dm_get(md);
}

static int __dm_rq_init_rq(struct mapped_device *md, struct request *rq)
{
        struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);

        /*
         * Must initialize md member of tio, otherwise it won't
         * be available in dm_mq_queue_rq.
         */
        tio->md = md;

        if (md->init_tio_pdu) {
                /* target-specific per-io data is immediately after the tio */
                tio->info.ptr = tio + 1;
        }

        return 0;
}

static int dm_rq_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp)
{
        return __dm_rq_init_rq(q->rq_alloc_data, rq);
}

static void map_tio_request(struct kthread_work *work)
{
        struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);

        if (map_request(tio) == DM_MAPIO_REQUEUE)
                dm_requeue_original_request(tio, false);
}

ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
{
        return sprintf(buf, "%u\n", md->seq_rq_merge_deadline_usecs);
}

#define MAX_SEQ_RQ_MERGE_DEADLINE_USECS 100000

ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
                                                     const char *buf, size_t count)
{
        unsigned deadline;

        if (dm_get_md_type(md) != DM_TYPE_REQUEST_BASED)
                return count;

        if (kstrtouint(buf, 10, &deadline))
                return -EINVAL;

        if (deadline > MAX_SEQ_RQ_MERGE_DEADLINE_USECS)
                deadline = MAX_SEQ_RQ_MERGE_DEADLINE_USECS;

        md->seq_rq_merge_deadline_usecs = deadline;

        return count;
}

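/*
 * Return true while the sequential-I/O merge deadline has not yet expired
 * since the last request was dispatched.
 */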
static bool dm_old_request_peeked_before_merge_deadline(struct mapped_device *md)
{
        ktime_t kt_deadline;

        if (!md->seq_rq_merge_deadline_usecs)
                return false;

        kt_deadline = ns_to_ktime((u64)md->seq_rq_merge_deadline_usecs * NSEC_PER_USEC);
        kt_deadline = ktime_add_safe(md->last_rq_start_time, kt_deadline);

        return !ktime_after(ktime_get(), kt_deadline);
}

/*
 * q->request_fn for old request-based dm.
 * Called with the queue lock held.
 */
static void dm_old_request_fn(struct request_queue *q)
{
        struct mapped_device *md = q->queuedata;
        struct dm_target *ti = md->immutable_target;
        struct request *rq;
        struct dm_rq_target_io *tio;
        sector_t pos = 0;

        if (unlikely(!ti)) {
                int srcu_idx;
                struct dm_table *map = dm_get_live_table(md, &srcu_idx);

                if (unlikely(!map)) {
                        dm_put_live_table(md, srcu_idx);
                        return;
                }
                ti = dm_table_find_target(map, pos);
                dm_put_live_table(md, srcu_idx);
        }

        /*
         * For suspend, check blk_queue_stopped() and increment
         * ->pending within a single queue_lock not to increment the
         * number of in-flight I/Os after the queue is stopped in
         * dm_suspend().
         */
        while (!blk_queue_stopped(q)) {
                rq = blk_peek_request(q);
                if (!rq)
                        return;

                /* always use block 0 to find the target for flushes for now */
                pos = 0;
                if (req_op(rq) != REQ_OP_FLUSH)
                        pos = blk_rq_pos(rq);

                if ((dm_old_request_peeked_before_merge_deadline(md) &&
                     md_in_flight(md) && rq->bio && !bio_multiple_segments(rq->bio) &&
                     md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq)) ||
                    (ti->type->busy && ti->type->busy(ti))) {
                        blk_delay_queue(q, 10);
                        return;
                }

                dm_start_request(md, rq);

                tio = tio_from_request(rq);
                init_tio(tio, rq, md);
                /* Establish tio->ti before queuing work (map_tio_request) */
                tio->ti = ti;
                kthread_queue_work(&md->kworker, &tio->work);
                BUG_ON(!irqs_disabled());
        }
}

/*
 * Fully initialize a .request_fn request-based queue.
 */
int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t)
{
        struct dm_target *immutable_tgt;

        /* Fully initialize the queue */
        md->queue->cmd_size = sizeof(struct dm_rq_target_io);
        md->queue->rq_alloc_data = md;
        md->queue->request_fn = dm_old_request_fn;
        md->queue->init_rq_fn = dm_rq_init_rq;

        immutable_tgt = dm_table_get_immutable_target(t);
        if (immutable_tgt && immutable_tgt->per_io_data_size) {
                /* any target-specific per-io data is immediately after the tio */
                md->queue->cmd_size += immutable_tgt->per_io_data_size;
                md->init_tio_pdu = true;
        }
        if (blk_init_allocated_queue(md->queue) < 0)
                return -EINVAL;

        /* disable dm_old_request_fn's merge heuristic by default */
        md->seq_rq_merge_deadline_usecs = 0;

        blk_queue_softirq_done(md->queue, dm_softirq_done);

        /* Initialize the request-based DM worker thread */
        kthread_init_worker(&md->kworker);
        md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
                                       "kdmwork-%s", dm_device_name(md));
        if (IS_ERR(md->kworker_task)) {
                int error = PTR_ERR(md->kworker_task);
                md->kworker_task = NULL;
                return error;
        }

        return 0;
}

static int dm_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
                unsigned int hctx_idx, unsigned int numa_node)
{
        return __dm_rq_init_rq(set->driver_data, rq);
}

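/* blk-mq .queue_rq handler: map the request and dispatch the resulting clone */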
static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
                          const struct blk_mq_queue_data *bd)
{
        struct request *rq = bd->rq;
        struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
        struct mapped_device *md = tio->md;
        struct dm_target *ti = md->immutable_target;

        if (unlikely(!ti)) {
                int srcu_idx;
                struct dm_table *map = dm_get_live_table(md, &srcu_idx);

                ti = dm_table_find_target(map, 0);
                dm_put_live_table(md, srcu_idx);
        }

        if (ti->type->busy && ti->type->busy(ti))
                return BLK_STS_RESOURCE;

        dm_start_request(md, rq);

        /* Init tio using md established in .init_request */
        init_tio(tio, rq, md);

        /*
         * Establish tio->ti before calling map_request().
         */
        tio->ti = ti;

        /* Direct call is fine since .queue_rq allows allocations */
        if (map_request(tio) == DM_MAPIO_REQUEUE) {
                /* Undo dm_start_request() before requeuing */
                rq_end_stats(md, rq);
                rq_completed(md, rq_data_dir(rq), false);
                return BLK_STS_RESOURCE;
        }

        return BLK_STS_OK;
}

static const struct blk_mq_ops dm_mq_ops = {
        .queue_rq = dm_mq_queue_rq,
        .complete = dm_softirq_done,
        .init_request = dm_mq_init_request,
};

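/*
 * Fully initialize a blk-mq request-based queue: allocate and configure the
 * tag set, then bind it to the pre-allocated md->queue.
 */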
int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
{
        struct request_queue *q;
        struct dm_target *immutable_tgt;
        int err;

        if (!dm_table_all_blk_mq_devices(t)) {
                DMERR("request-based dm-mq may only be stacked on blk-mq device(s)");
                return -EINVAL;
        }

        md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id);
        if (!md->tag_set)
                return -ENOMEM;

        md->tag_set->ops = &dm_mq_ops;
        md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
        md->tag_set->numa_node = md->numa_node_id;
        md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
        md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
        md->tag_set->driver_data = md;

        md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
        immutable_tgt = dm_table_get_immutable_target(t);
        if (immutable_tgt && immutable_tgt->per_io_data_size) {
                /* any target-specific per-io data is immediately after the tio */
                md->tag_set->cmd_size += immutable_tgt->per_io_data_size;
                md->init_tio_pdu = true;
        }

        err = blk_mq_alloc_tag_set(md->tag_set);
        if (err)
                goto out_kfree_tag_set;

        q = blk_mq_init_allocated_queue(md->tag_set, md->queue);
        if (IS_ERR(q)) {
                err = PTR_ERR(q);
                goto out_tag_set;
        }

        return 0;

out_tag_set:
        blk_mq_free_tag_set(md->tag_set);
out_kfree_tag_set:
        kfree(md->tag_set);

        return err;
}

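/* Release the tag set allocated by dm_mq_init_request_queue() */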
void dm_mq_cleanup_mapped_device(struct mapped_device *md)
{
        if (md->tag_set) {
                blk_mq_free_tag_set(md->tag_set);
                kfree(md->tag_set);
        }
}

module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");

module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");

module_param(dm_mq_nr_hw_queues, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_nr_hw_queues, "Number of hardware queues for request-based dm-mq devices");

module_param(dm_mq_queue_depth, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_queue_depth, "Queue depth for request-based dm-mq devices");