drivers/md/dm-rq.c
/*
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"

#include <linux/blk-mq.h>

#define DM_MSG_PREFIX "core-rq"

/*
 * One of these is allocated per request.
 */
struct dm_rq_target_io {
	struct mapped_device *md;
	struct dm_target *ti;
	struct request *orig, *clone;
	struct kthread_work work;
	blk_status_t error;
	union map_info info;
	struct dm_stats_aux stats_aux;
	unsigned long duration_jiffies;
	unsigned n_sectors;
	unsigned completed;
};

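/*
 * Defaults for the blk-mq tag set created in dm_mq_init_request_queue().
 * Both values can be overridden through the module parameters declared at
 * the bottom of this file.
 */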
#define DM_MQ_NR_HW_QUEUES 1
#define DM_MQ_QUEUE_DEPTH 2048
static unsigned dm_mq_nr_hw_queues = DM_MQ_NR_HW_QUEUES;
static unsigned dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;

/*
 * Request-based DM's mempools' reserved IOs set by the user.
 */
#define RESERVED_REQUEST_BASED_IOS	256
static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;

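/*
 * __dm_get_module_param() (defined in dm.c) sanitizes the user-supplied
 * value: zero falls back to the given default, and anything above the
 * given maximum is clamped to that maximum.
 */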
unsigned dm_get_reserved_rq_based_ios(void)
{
	return __dm_get_module_param(&reserved_rq_based_ios,
				     RESERVED_REQUEST_BASED_IOS, DM_RESERVED_MAX_IOS);
}

static unsigned dm_get_blk_mq_nr_hw_queues(void)
{
	return __dm_get_module_param(&dm_mq_nr_hw_queues, 1, 32);
}

static unsigned dm_get_blk_mq_queue_depth(void)
{
	return __dm_get_module_param(&dm_mq_queue_depth,
				     DM_MQ_QUEUE_DEPTH, BLK_MQ_MAX_DEPTH);
}

int dm_request_based(struct mapped_device *md)
{
	return queue_is_mq(md->queue);
}

void dm_start_queue(struct request_queue *q)
{
	blk_mq_unquiesce_queue(q);
	blk_mq_kick_requeue_list(q);
}

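/*
 * blk_mq_quiesce_queue() does not just block new dispatches; it also waits
 * for any dm_mq_queue_rq() calls already in flight to return, so the queue
 * is idle once dm_stop_queue() returns.
 */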
void dm_stop_queue(struct request_queue *q)
{
	blk_mq_quiesce_queue(q);
}

/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone)
{
	struct dm_rq_clone_bio_info *info =
		container_of(clone, struct dm_rq_clone_bio_info, clone);
	struct dm_rq_target_io *tio = info->tio;
	unsigned int nr_bytes = info->orig->bi_iter.bi_size;
	blk_status_t error = clone->bi_status;
	bool is_last = !clone->bi_next;

	bio_put(clone);

	if (tio->error)
		/*
		 * An error has already been detected on the request.
		 * Once an error has occurred, just let clone->end_io()
		 * handle the remainder.
		 */
		return;
	else if (error) {
		/*
		 * Don't report the error to the upper layer yet.
		 * The error handling decision is made by the target driver
		 * when the request is completed.
		 */
		tio->error = error;
		goto exit;
	}

	/*
	 * I/O for the bio successfully completed.
	 * Report the data completion to the upper layer.
	 */
	tio->completed += nr_bytes;

	/*
	 * Update the original request.
	 * Do not use blk_mq_end_request() here, because it may complete
	 * the original request before the clone, and break the ordering.
	 */
	if (is_last)
 exit:
		blk_update_request(tio->orig, BLK_STS_OK, tio->completed);
}

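/*
 * The dm_rq_target_io lives in the blk-mq per-request payload: the tag
 * set's cmd_size is sizeof(struct dm_rq_target_io) (plus any per-io data
 * the immutable target asked for), so blk_mq_rq_to_pdu() hands it back
 * directly. See dm_mq_init_request_queue().
 */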
static struct dm_rq_target_io *tio_from_request(struct request *rq)
{
	return blk_mq_rq_to_pdu(rq);
}

static void rq_end_stats(struct mapped_device *md, struct request *orig)
{
	if (unlikely(dm_stats_used(&md->stats))) {
		struct dm_rq_target_io *tio = tio_from_request(orig);
		tio->duration_jiffies = jiffies - tio->duration_jiffies;
		dm_stats_account_io(&md->stats, rq_data_dir(orig),
				    blk_rq_pos(orig), tio->n_sectors, true,
				    tio->duration_jiffies, &tio->stats_aux);
	}
}

/*
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Alternatively, do dm_get() before calling this function and dm_put()
 * later.
 */
static void rq_completed(struct mapped_device *md)
{
	/*
	 * dm_put() must be at the end of this function. See the comment above.
	 */
	dm_put(md);
}

/*
 * Complete the clone and the original request.
 * Must be called without clone's queue lock held,
 * see end_clone_request() for more details.
 */
static void dm_end_request(struct request *clone, blk_status_t error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;

	blk_rq_unprep_clone(clone);
	tio->ti->type->release_clone_rq(clone, NULL);

	rq_end_stats(md, rq);
	blk_mq_end_request(rq, error);
	rq_completed(md);
}

static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
{
	blk_mq_delay_kick_requeue_list(q, msecs);
}

void dm_mq_kick_requeue_list(struct mapped_device *md)
{
	__dm_mq_kick_requeue_list(md->queue, 0);
}
EXPORT_SYMBOL(dm_mq_kick_requeue_list);

static void dm_mq_delay_requeue_request(struct request *rq, unsigned long msecs)
{
	blk_mq_requeue_request(rq, false);
	__dm_mq_kick_requeue_list(rq->q, msecs);
}

static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_requeue)
{
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	unsigned long delay_ms = delay_requeue ? 100 : 0;

	rq_end_stats(md, rq);
	if (tio->clone) {
		blk_rq_unprep_clone(tio->clone);
		tio->ti->type->release_clone_rq(tio->clone, NULL);
	}

	dm_mq_delay_requeue_request(rq, delay_ms);
	rq_completed(md);
}

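/*
 * A target's optional rq_end_io hook gets the first say on how a completed
 * clone is handled; its DM_ENDIO_* return value drives the switch in
 * dm_done() below. As an illustrative (hypothetical) sketch, a
 * multipath-style target might implement it roughly as:
 *
 *	static int my_rq_end_io(struct dm_target *ti, struct request *clone,
 *				blk_status_t error, union map_info *info)
 *	{
 *		if (error && blk_path_error(error))
 *			return DM_ENDIO_REQUEUE;  // retryable: try another path
 *		return DM_ENDIO_DONE;             // pass the result upward
 *	}
 *
 * The real dm-mpath hook additionally updates path state; see
 * drivers/md/dm-mpath.c.
 */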
static void dm_done(struct request *clone, blk_status_t error, bool mapped)
{
	int r = DM_ENDIO_DONE;
	struct dm_rq_target_io *tio = clone->end_io_data;
	dm_request_endio_fn rq_end_io = NULL;

	if (tio->ti) {
		rq_end_io = tio->ti->type->rq_end_io;

		if (mapped && rq_end_io)
			r = rq_end_io(tio->ti, clone, error, &tio->info);
	}

	if (unlikely(error == BLK_STS_TARGET)) {
		if (req_op(clone) == REQ_OP_DISCARD &&
		    !clone->q->limits.max_discard_sectors)
			disable_discard(tio->md);
		else if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
			 !clone->q->limits.max_write_zeroes_sectors)
			disable_write_zeroes(tio->md);
	}

	switch (r) {
	case DM_ENDIO_DONE:
		/* The target wants to complete the I/O */
		dm_end_request(clone, error);
		break;
	case DM_ENDIO_INCOMPLETE:
		/* The target will handle the I/O */
		return;
	case DM_ENDIO_REQUEUE:
		/* The target wants to requeue the I/O */
		dm_requeue_original_request(tio, false);
		break;
	case DM_ENDIO_DELAY_REQUEUE:
		/* The target wants to requeue the I/O after a delay */
		dm_requeue_original_request(tio, true);
		break;
	default:
		DMWARN("unimplemented target endio return value: %d", r);
		BUG();
	}
}

/*
 * Request completion handler for request-based dm
 */
static void dm_softirq_done(struct request *rq)
{
	bool mapped = true;
	struct dm_rq_target_io *tio = tio_from_request(rq);
	struct request *clone = tio->clone;

	if (!clone) {
		struct mapped_device *md = tio->md;

		rq_end_stats(md, rq);
		blk_mq_end_request(rq, tio->error);
		rq_completed(md);
		return;
	}

	if (rq->rq_flags & RQF_FAILED)
		mapped = false;

	dm_done(clone, tio->error, mapped);
}

/*
 * Complete the clone and the original request with the error status
 * through softirq context.
 */
static void dm_complete_request(struct request *rq, blk_status_t error)
{
	struct dm_rq_target_io *tio = tio_from_request(rq);

	tio->error = error;
	if (likely(!blk_should_fake_timeout(rq->q)))
		blk_mq_complete_request(rq);
}

/*
 * Complete the original request, which was never mapped to a clone, with
 * the error status through softirq context.
 * The target's rq_end_io() function isn't called.
 * This may be used when the target's clone_and_map_rq() function fails.
 */
static void dm_kill_unmapped_request(struct request *rq, blk_status_t error)
{
	rq->rq_flags |= RQF_FAILED;
	dm_complete_request(rq, error);
}

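/*
 * .end_io handler of the clone request: route completion back to the
 * original request. The actual status handling happens in
 * dm_softirq_done() once blk_mq_complete_request() has bounced the
 * original request to the completion context.
 */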
static void end_clone_request(struct request *clone, blk_status_t error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;

	dm_complete_request(tio->orig, error);
}

static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
				 void *data)
{
	struct dm_rq_target_io *tio = data;
	struct dm_rq_clone_bio_info *info =
		container_of(bio, struct dm_rq_clone_bio_info, clone);

	info->orig = bio_orig;
	info->tio = tio;
	bio->bi_end_io = end_clone_bio;

	return 0;
}

static int setup_clone(struct request *clone, struct request *rq,
		       struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
	int r;

	r = blk_rq_prep_clone(clone, rq, &tio->md->mempools->bs, gfp_mask,
			      dm_rq_bio_constructor, tio);
	if (r)
		return r;

	clone->end_io = end_clone_request;
	clone->end_io_data = tio;

	tio->clone = clone;

	return 0;
}

static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
		     struct mapped_device *md)
{
	tio->md = md;
	tio->ti = NULL;
	tio->clone = NULL;
	tio->orig = rq;
	tio->error = 0;
	tio->completed = 0;
	/*
	 * Avoid initializing info for blk-mq; it passes
	 * target-specific data through info.ptr
	 * (see: dm_mq_init_request)
	 */
	if (!md->init_tio_pdu)
		memset(&tio->info, 0, sizeof(tio->info));
}

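/*
 * The target's clone_and_map_rq() hook is expected either to allocate a
 * request on the underlying device's queue (for example with
 * blk_mq_alloc_request()), store it in *clone and return
 * DM_MAPIO_REMAPPED, or to return one of the other DM_MAPIO_* codes
 * without producing a clone. The clone it hands back is not yet prepared;
 * setup_clone() above copies the bios over and wires up the completion
 * callbacks before the clone is dispatched.
 */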
/*
 * Returns:
 * DM_MAPIO_*       : the request has been processed as indicated
 * DM_MAPIO_REQUEUE : the original request needs to be immediately requeued
 * < 0              : the request was completed due to failure
 */
static int map_request(struct dm_rq_target_io *tio)
{
	int r;
	struct dm_target *ti = tio->ti;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	struct request *clone = NULL;
	blk_status_t ret;

	r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
	switch (r) {
	case DM_MAPIO_SUBMITTED:
		/* The target has taken the I/O to submit by itself later */
		break;
	case DM_MAPIO_REMAPPED:
		if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
			/* -ENOMEM */
			ti->type->release_clone_rq(clone, &tio->info);
			return DM_MAPIO_REQUEUE;
		}

		/* The target has remapped the I/O so dispatch it */
		trace_block_rq_remap(clone, disk_devt(dm_disk(md)),
				     blk_rq_pos(rq));
		ret = blk_insert_cloned_request(clone);
		switch (ret) {
		case BLK_STS_OK:
			break;
		case BLK_STS_RESOURCE:
		case BLK_STS_DEV_RESOURCE:
			blk_rq_unprep_clone(clone);
			blk_mq_cleanup_rq(clone);
			tio->ti->type->release_clone_rq(clone, &tio->info);
			tio->clone = NULL;
			return DM_MAPIO_REQUEUE;
		default:
			/* must complete clone in terms of original request */
			dm_complete_request(rq, ret);
		}
		break;
	case DM_MAPIO_REQUEUE:
		/* The target wants to requeue the I/O */
		break;
	case DM_MAPIO_DELAY_REQUEUE:
		/* The target wants to requeue the I/O after a delay */
		dm_requeue_original_request(tio, true);
		break;
	case DM_MAPIO_KILL:
		/* The target wants to complete the I/O */
		dm_kill_unmapped_request(rq, BLK_STS_IOERR);
		break;
	default:
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}

	return r;
}

/* DEPRECATED: previously used for request-based merge heuristic in dm_request_fn() */
ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
{
	return sprintf(buf, "%u\n", 0);
}

ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
						     const char *buf, size_t count)
{
	return count;
}

static void dm_start_request(struct mapped_device *md, struct request *orig)
{
	blk_mq_start_request(orig);

	if (unlikely(dm_stats_used(&md->stats))) {
		struct dm_rq_target_io *tio = tio_from_request(orig);
		tio->duration_jiffies = jiffies;
		tio->n_sectors = blk_rq_sectors(orig);
		dm_stats_account_io(&md->stats, rq_data_dir(orig),
				    blk_rq_pos(orig), tio->n_sectors, false, 0,
				    &tio->stats_aux);
	}

	/*
	 * Hold an md reference here for the in-flight I/O.
	 * We can't rely on the reference count taken by the device opener,
	 * because the device may be closed during the request completion
	 * when all bios are completed.
	 * See the comment in rq_completed() too.
	 */
	dm_get(md);
}

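/*
 * Layout of the blk-mq payload when the immutable target requested per-io
 * data (see dm_mq_init_request_queue()):
 *
 *	struct request | struct dm_rq_target_io | target per-io data
 *
 * tio->info.ptr is pointed at the per-io area once per request here, which
 * is why init_tio() must not wipe tio->info when md->init_tio_pdu is set.
 */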
static int dm_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
			      unsigned int hctx_idx, unsigned int numa_node)
{
	struct mapped_device *md = set->driver_data;
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);

	/*
	 * Must initialize md member of tio, otherwise it won't
	 * be available in dm_mq_queue_rq.
	 */
	tio->md = md;

	if (md->init_tio_pdu) {
		/* target-specific per-io data is immediately after the tio */
		tio->info.ptr = tio + 1;
	}

	return 0;
}

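/*
 * Request-based tables have a single target; when it is an immutable
 * target (e.g. multipath) it is cached in md->immutable_target, so the
 * dm_get_live_table() lookup below is the rarely taken slow path.
 */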
static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
			  const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
	struct mapped_device *md = tio->md;
	struct dm_target *ti = md->immutable_target;

	/*
	 * blk-mq's unquiesce may be triggered by outside events such as an
	 * elevator switch or an update of nr_requests, so a request may
	 * arrive during suspend; simply ask blk-mq to requeue it.
	 */
	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)))
		return BLK_STS_RESOURCE;

	if (unlikely(!ti)) {
		int srcu_idx;
		struct dm_table *map;

		map = dm_get_live_table(md, &srcu_idx);
		if (unlikely(!map)) {
			dm_put_live_table(md, srcu_idx);
			return BLK_STS_RESOURCE;
		}
		ti = dm_table_find_target(map, 0);
		dm_put_live_table(md, srcu_idx);
	}

	if (ti->type->busy && ti->type->busy(ti))
		return BLK_STS_RESOURCE;

	dm_start_request(md, rq);

	/* Init tio using md established in .init_request */
	init_tio(tio, rq, md);

	/*
	 * Establish tio->ti before calling map_request().
	 */
	tio->ti = ti;

	/* Direct call is fine since .queue_rq allows allocations */
	if (map_request(tio) == DM_MAPIO_REQUEUE) {
		/* Undo dm_start_request() before requeuing */
		rq_end_stats(md, rq);
		rq_completed(md);
		return BLK_STS_RESOURCE;
	}

	return BLK_STS_OK;
}

static const struct blk_mq_ops dm_mq_ops = {
	.queue_rq = dm_mq_queue_rq,
	.complete = dm_softirq_done,
	.init_request = dm_mq_init_request,
};

int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
{
	struct dm_target *immutable_tgt;
	int err;

	md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id);
	if (!md->tag_set)
		return -ENOMEM;

	md->tag_set->ops = &dm_mq_ops;
	md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
	md->tag_set->numa_node = md->numa_node_id;
	md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING;
	md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
	md->tag_set->driver_data = md;

	md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
	immutable_tgt = dm_table_get_immutable_target(t);
	if (immutable_tgt && immutable_tgt->per_io_data_size) {
		/* any target-specific per-io data is immediately after the tio */
		md->tag_set->cmd_size += immutable_tgt->per_io_data_size;
		md->init_tio_pdu = true;
	}

	err = blk_mq_alloc_tag_set(md->tag_set);
	if (err)
		goto out_kfree_tag_set;

	err = blk_mq_init_allocated_queue(md->tag_set, md->queue);
	if (err)
		goto out_tag_set;
	return 0;

out_tag_set:
	blk_mq_free_tag_set(md->tag_set);
out_kfree_tag_set:
	kfree(md->tag_set);
	md->tag_set = NULL;

	return err;
}

void dm_mq_cleanup_mapped_device(struct mapped_device *md)
{
	if (md->tag_set) {
		blk_mq_free_tag_set(md->tag_set);
		kfree(md->tag_set);
		md->tag_set = NULL;
	}
}

module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");

/* Unused, but preserved for userspace compatibility */
static bool use_blk_mq = true;
module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");

module_param(dm_mq_nr_hw_queues, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_nr_hw_queues, "Number of hardware queues for request-based dm-mq devices");

module_param(dm_mq_queue_depth, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_queue_depth, "Queue depth for request-based dm-mq devices");
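
/*
 * These parameters live under /sys/module/dm_mod/parameters/ (dm-rq.c is
 * built into dm_mod) and, thanks to S_IWUSR, can be changed at runtime.
 * Hypothetical example values, if dm_mod is built as a module:
 *
 *	modprobe dm_mod dm_mq_queue_depth=4096 dm_mq_nr_hw_queues=4
 */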