1 /*
2  * Copyright (C) 2001 Sistina Software (UK) Limited.
3  * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
4  *
5  * This file is released under the GPL.
6  */
7
8 #include "dm-core.h"
9
10 #include <linux/module.h>
11 #include <linux/vmalloc.h>
12 #include <linux/blkdev.h>
13 #include <linux/namei.h>
14 #include <linux/ctype.h>
15 #include <linux/string.h>
16 #include <linux/slab.h>
17 #include <linux/interrupt.h>
18 #include <linux/mutex.h>
19 #include <linux/delay.h>
20 #include <linux/atomic.h>
21 #include <linux/blk-mq.h>
22 #include <linux/mount.h>
23 #include <linux/dax.h>
24
25 #define DM_MSG_PREFIX "table"
26
27 #define MAX_DEPTH 16
28 #define NODE_SIZE L1_CACHE_BYTES
29 #define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
30 #define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)
31
32 struct dm_table {
33         struct mapped_device *md;
34         enum dm_queue_mode type;
35
36         /* btree table */
37         unsigned int depth;
38         unsigned int counts[MAX_DEPTH]; /* in nodes */
39         sector_t *index[MAX_DEPTH];
40
41         unsigned int num_targets;
42         unsigned int num_allocated;
43         sector_t *highs;
44         struct dm_target *targets;
45
46         struct target_type *immutable_target_type;
47
48         bool integrity_supported:1;
49         bool singleton:1;
50         bool integrity_added:1;
51
52         /*
53          * Indicates the rw permissions for the new logical
54          * device.  This should be a combination of FMODE_READ
55          * and FMODE_WRITE.
56          */
57         fmode_t mode;
58
59         /* a list of devices used by this table */
60         struct list_head devices;
61
62         /* events get handed up using this callback */
63         void (*event_fn)(void *);
64         void *event_context;
65
66         struct dm_md_mempools *mempools;
67 };
68
69 /*
70  * Similar to ceiling(log_base(n))
71  */
72 static unsigned int int_log(unsigned int n, unsigned int base)
73 {
74         int result = 0;
75
76         while (n > 1) {
77                 n = dm_div_up(n, base);
78                 result++;
79         }
80
81         return result;
82 }
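/*
 * For example, int_log(125, 9) == 3: rounding-up division gives
 * 125 -> 14 -> 2 -> 1 in three steps, matching ceil(log_9(125)).
 */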
83
84 /*
85  * Calculate the index of the child node of the n'th node's k'th key.
86  */
87 static inline unsigned int get_child(unsigned int n, unsigned int k)
88 {
89         return (n * CHILDREN_PER_NODE) + k;
90 }
91
92 /*
93  * Return the n'th node of level l from table t.
94  */
95 static inline sector_t *get_node(struct dm_table *t,
96                                  unsigned int l, unsigned int n)
97 {
98         return t->index[l] + (n * KEYS_PER_NODE);
99 }
100
101 /*
102  * Return the highest key that you could look up from the n'th
103  * node on level l of the btree.
104  */
105 static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
106 {
107         for (; l < t->depth - 1; l++)
108                 n = get_child(n, CHILDREN_PER_NODE - 1);
109
110         if (n >= t->counts[l])
111                 return (sector_t) -1;
112
113         return get_node(t, l, n)[KEYS_PER_NODE - 1];
114 }
115
116 /*
117  * Fills in a level of the btree based on the highs of the level
118  * below it.
119  */
120 static int setup_btree_index(unsigned int l, struct dm_table *t)
121 {
122         unsigned int n, k;
123         sector_t *node;
124
125         for (n = 0U; n < t->counts[l]; n++) {
126                 node = get_node(t, l, n);
127
128                 for (k = 0U; k < KEYS_PER_NODE; k++)
129                         node[k] = high(t, l + 1, get_child(n, k));
130         }
131
132         return 0;
133 }
134
135 void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size)
136 {
137         unsigned long size;
138         void *addr;
139
140         /*
141          * Check that we're not going to overflow.
142          */
143         if (nmemb > (ULONG_MAX / elem_size))
144                 return NULL;
145
146         size = nmemb * elem_size;
147         addr = vzalloc(size);
148
149         return addr;
150 }
151 EXPORT_SYMBOL(dm_vcalloc);
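/*
 * The overflow guard above rejects products that would wrap: with
 * 8-byte elements, any nmemb greater than ULONG_MAX / 8 would make
 * nmemb * elem_size wrap around, so NULL is returned instead of a
 * too-short allocation.
 */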
152
153 /*
154  * highs and targets are managed as dynamic arrays during a
155  * table load.
156  */
157 static int alloc_targets(struct dm_table *t, unsigned int num)
158 {
159         sector_t *n_highs;
160         struct dm_target *n_targets;
161
162         /*
163          * Allocate both the target array and offset array at once.
164          */
165         n_highs = (sector_t *) dm_vcalloc(num, sizeof(struct dm_target) +
166                                           sizeof(sector_t));
167         if (!n_highs)
168                 return -ENOMEM;
169
170         n_targets = (struct dm_target *) (n_highs + num);
171
172         memset(n_highs, -1, sizeof(*n_highs) * num);
173         vfree(t->highs);
174
175         t->num_allocated = num;
176         t->highs = n_highs;
177         t->targets = n_targets;
178
179         return 0;
180 }
181
182 int dm_table_create(struct dm_table **result, fmode_t mode,
183                     unsigned num_targets, struct mapped_device *md)
184 {
185         struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);
186
187         if (!t)
188                 return -ENOMEM;
189
190         INIT_LIST_HEAD(&t->devices);
191
192         if (!num_targets)
193                 num_targets = KEYS_PER_NODE;
194
195         num_targets = dm_round_up(num_targets, KEYS_PER_NODE);
196
197         if (!num_targets) {
198                 kfree(t);
199                 return -ENOMEM;
200         }
201
202         if (alloc_targets(t, num_targets)) {
203                 kfree(t);
204                 return -ENOMEM;
205         }
206
207         t->type = DM_TYPE_NONE;
208         t->mode = mode;
209         t->md = md;
210         *result = t;
211         return 0;
212 }
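/*
 * Typical load-time sequence (a sketch with error handling elided;
 * the table-load ioctl path is the real caller):
 *
 *	struct dm_table *t;
 *
 *	dm_table_create(&t, FMODE_READ | FMODE_WRITE, 0, md);
 *	dm_table_add_target(t, "linear", 0, 409600, "/dev/sdb 0");
 *	dm_table_complete(t);
 *	...
 *	dm_table_destroy(t);
 */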
213
214 static void free_devices(struct list_head *devices, struct mapped_device *md)
215 {
216         struct list_head *tmp, *next;
217
218         list_for_each_safe(tmp, next, devices) {
219                 struct dm_dev_internal *dd =
220                     list_entry(tmp, struct dm_dev_internal, list);
221                 DMWARN("%s: dm_table_destroy: dm_put_device call missing for %s",
222                        dm_device_name(md), dd->dm_dev->name);
223                 dm_put_table_device(md, dd->dm_dev);
224                 kfree(dd);
225         }
226 }
227
228 void dm_table_destroy(struct dm_table *t)
229 {
230         unsigned int i;
231
232         if (!t)
233                 return;
234
235         /* free the indexes */
236         if (t->depth >= 2)
237                 vfree(t->index[t->depth - 2]);
238
239         /* free the targets */
240         for (i = 0; i < t->num_targets; i++) {
241                 struct dm_target *tgt = t->targets + i;
242
243                 if (tgt->type->dtr)
244                         tgt->type->dtr(tgt);
245
246                 dm_put_target_type(tgt->type);
247         }
248
249         vfree(t->highs);
250
251         /* free the device list */
252         free_devices(&t->devices, t->md);
253
254         dm_free_md_mempools(t->mempools);
255
256         kfree(t);
257 }
258
259 /*
260  * See if we've already got a device in the list.
261  */
262 static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
263 {
264         struct dm_dev_internal *dd;
265
266         list_for_each_entry (dd, l, list)
267                 if (dd->dm_dev->bdev->bd_dev == dev)
268                         return dd;
269
270         return NULL;
271 }
272
273 /*
274  * If possible, this checks that an area of the destination device is invalid.
275  */
276 static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
277                                   sector_t start, sector_t len, void *data)
278 {
279         struct queue_limits *limits = data;
280         struct block_device *bdev = dev->bdev;
281         sector_t dev_size =
282                 i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
283         unsigned short logical_block_size_sectors =
284                 limits->logical_block_size >> SECTOR_SHIFT;
285         char b[BDEVNAME_SIZE];
286
287         if (!dev_size)
288                 return 0;
289
290         if ((start >= dev_size) || (start + len > dev_size)) {
291                 DMWARN("%s: %s too small for target: "
292                        "start=%llu, len=%llu, dev_size=%llu",
293                        dm_device_name(ti->table->md), bdevname(bdev, b),
294                        (unsigned long long)start,
295                        (unsigned long long)len,
296                        (unsigned long long)dev_size);
297                 return 1;
298         }
299
300         /*
301          * If the target is mapped to zoned block device(s), check
302          * that the zones are not partially mapped.
303          */
304         if (bdev_zoned_model(bdev) != BLK_ZONED_NONE) {
305                 unsigned int zone_sectors = bdev_zone_sectors(bdev);
306
307                 if (start & (zone_sectors - 1)) {
308                         DMWARN("%s: start=%llu not aligned to h/w zone size %u of %s",
309                                dm_device_name(ti->table->md),
310                                (unsigned long long)start,
311                                zone_sectors, bdevname(bdev, b));
312                         return 1;
313                 }
314
315                 /*
316                  * Note: The last zone of a zoned block device may be smaller
317                  * than other zones. So for a target mapping the end of a
318                  * zoned block device with such a zone, len would not be zone
319                  * aligned. We do not allow such last smaller zone to be part
320                  * of the mapping here to ensure that mappings with multiple
321                  * devices do not end up with a smaller zone in the middle of
322                  * the sector range.
323                  */
324                 if (len & (zone_sectors - 1)) {
325                         DMWARN("%s: len=%llu not aligned to h/w zone size %u of %s",
326                                dm_device_name(ti->table->md),
327                                (unsigned long long)len,
328                                zone_sectors, bdevname(bdev, b));
329                         return 1;
330                 }
331         }
332
333         if (logical_block_size_sectors <= 1)
334                 return 0;
335
336         if (start & (logical_block_size_sectors - 1)) {
337                 DMWARN("%s: start=%llu not aligned to h/w "
338                        "logical block size %u of %s",
339                        dm_device_name(ti->table->md),
340                        (unsigned long long)start,
341                        limits->logical_block_size, bdevname(bdev, b));
342                 return 1;
343         }
344
345         if (len & (logical_block_size_sectors - 1)) {
346                 DMWARN("%s: len=%llu not aligned to h/w "
347                        "logical block size %u of %s",
348                        dm_device_name(ti->table->md),
349                        (unsigned long long)len,
350                        limits->logical_block_size, bdevname(bdev, b));
351                 return 1;
352         }
353
354         return 0;
355 }
356
357 /*
358  * This upgrades the mode on an already open dm_dev, being
359  * careful to leave things as they were if we fail to reopen the
360  * device and not to touch the existing bdev field in case
361  * it is accessed concurrently.
362  */
363 static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
364                         struct mapped_device *md)
365 {
366         int r;
367         struct dm_dev *old_dev, *new_dev;
368
369         old_dev = dd->dm_dev;
370
371         r = dm_get_table_device(md, dd->dm_dev->bdev->bd_dev,
372                                 dd->dm_dev->mode | new_mode, &new_dev);
373         if (r)
374                 return r;
375
376         dd->dm_dev = new_dev;
377         dm_put_table_device(md, old_dev);
378
379         return 0;
380 }
381
382 /*
383  * Convert the path to a device
384  */
385 dev_t dm_get_dev_t(const char *path)
386 {
387         dev_t dev;
388         struct block_device *bdev;
389
390         bdev = lookup_bdev(path);
391         if (IS_ERR(bdev))
392                 dev = name_to_dev_t(path);
393         else {
394                 dev = bdev->bd_dev;
395                 bdput(bdev);
396         }
397
398         return dev;
399 }
400 EXPORT_SYMBOL_GPL(dm_get_dev_t);
401
402 /*
403  * Add a device to the list, or just increment the usage count if
404  * it's already present.
405  */
406 int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
407                   struct dm_dev **result)
408 {
409         int r;
410         dev_t dev;
411         struct dm_dev_internal *dd;
412         struct dm_table *t = ti->table;
413
414         BUG_ON(!t);
415
416         dev = dm_get_dev_t(path);
417         if (!dev)
418                 return -ENODEV;
419
420         dd = find_device(&t->devices, dev);
421         if (!dd) {
422                 dd = kmalloc(sizeof(*dd), GFP_KERNEL);
423                 if (!dd)
424                         return -ENOMEM;
425
426                 if ((r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev))) {
427                         kfree(dd);
428                         return r;
429                 }
430
431                 refcount_set(&dd->count, 1);
432                 list_add(&dd->list, &t->devices);
433                 goto out;
434
435         } else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) {
436                 r = upgrade_mode(dd, mode, t->md);
437                 if (r)
438                         return r;
439         }
440         refcount_inc(&dd->count);
441 out:
442         *result = dd->dm_dev;
443         return 0;
444 }
445 EXPORT_SYMBOL(dm_get_device);
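/*
 * Sketch of the usual caller, a target constructor (example_ctr and
 * its field layout are illustrative, not from this file):
 *
 *	static int example_ctr(struct dm_target *ti, unsigned argc, char **argv)
 *	{
 *		struct dm_dev *dev;
 *
 *		if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dev)) {
 *			ti->error = "Device lookup failed";
 *			return -EINVAL;
 *		}
 *		ti->private = dev;
 *		return 0;
 *	}
 *
 * The device is released with dm_put_device() from the target's .dtr.
 */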
446
447 static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
448                                 sector_t start, sector_t len, void *data)
449 {
450         struct queue_limits *limits = data;
451         struct block_device *bdev = dev->bdev;
452         struct request_queue *q = bdev_get_queue(bdev);
453         char b[BDEVNAME_SIZE];
454
455         if (unlikely(!q)) {
456                 DMWARN("%s: Cannot set limits for nonexistent device %s",
457                        dm_device_name(ti->table->md), bdevname(bdev, b));
458                 return 0;
459         }
460
461         if (bdev_stack_limits(limits, bdev, start) < 0)
462                 DMWARN("%s: adding target device %s caused an alignment inconsistency: "
463                        "physical_block_size=%u, logical_block_size=%u, "
464                        "alignment_offset=%u, start=%llu",
465                        dm_device_name(ti->table->md), bdevname(bdev, b),
466                        q->limits.physical_block_size,
467                        q->limits.logical_block_size,
468                        q->limits.alignment_offset,
469                        (unsigned long long) start << SECTOR_SHIFT);
470
471         limits->zoned = blk_queue_zoned_model(q);
472
473         return 0;
474 }
475
476 /*
477  * Decrement a device's use count and remove it if necessary.
478  */
479 void dm_put_device(struct dm_target *ti, struct dm_dev *d)
480 {
481         int found = 0;
482         struct list_head *devices = &ti->table->devices;
483         struct dm_dev_internal *dd;
484
485         list_for_each_entry(dd, devices, list) {
486                 if (dd->dm_dev == d) {
487                         found = 1;
488                         break;
489                 }
490         }
491         if (!found) {
492                 DMWARN("%s: device %s not in table devices list",
493                        dm_device_name(ti->table->md), d->name);
494                 return;
495         }
496         if (refcount_dec_and_test(&dd->count)) {
497                 dm_put_table_device(ti->table->md, d);
498                 list_del(&dd->list);
499                 kfree(dd);
500         }
501 }
502 EXPORT_SYMBOL(dm_put_device);
503
504 /*
505  * Checks to see if the target joins onto the end of the table.
506  */
507 static int adjoin(struct dm_table *table, struct dm_target *ti)
508 {
509         struct dm_target *prev;
510
511         if (!table->num_targets)
512                 return !ti->begin;
513
514         prev = &table->targets[table->num_targets - 1];
515         return (ti->begin == (prev->begin + prev->len));
516 }
517
518 /*
519  * Used to dynamically allocate the arg array.
520  *
521  * We do first allocation with GFP_NOIO because dm-mpath and dm-thin must
522  * process messages even if some device is suspended. These messages have a
523  * small fixed number of arguments.
524  *
525  * On the other hand, dm-switch needs to process bulk data using messages and
526  * excessive use of GFP_NOIO could cause trouble.
527  */
528 static char **realloc_argv(unsigned *size, char **old_argv)
529 {
530         char **argv;
531         unsigned new_size;
532         gfp_t gfp;
533
534         if (*size) {
535                 new_size = *size * 2;
536                 gfp = GFP_KERNEL;
537         } else {
538                 new_size = 8;
539                 gfp = GFP_NOIO;
540         }
541         argv = kmalloc_array(new_size, sizeof(*argv), gfp);
542         if (argv && old_argv)
543                 memcpy(argv, old_argv, *size * sizeof(*argv));
544         if (argv)
545                 *size = new_size;
546
547         kfree(old_argv);
548         return argv;
549 }
550
551 /*
552  * Destructively splits up the argument list to pass to ctr.
553  */
554 int dm_split_args(int *argc, char ***argvp, char *input)
555 {
556         char *start, *end = input, *out, **argv = NULL;
557         unsigned array_size = 0;
558
559         *argc = 0;
560
561         if (!input) {
562                 *argvp = NULL;
563                 return 0;
564         }
565
566         argv = realloc_argv(&array_size, argv);
567         if (!argv)
568                 return -ENOMEM;
569
570         while (1) {
571                 /* Skip whitespace */
572                 start = skip_spaces(end);
573
574                 if (!*start)
575                         break;  /* success, we hit the end */
576
577                 /* 'out' is used to remove any backslash escapes */
578                 end = out = start;
579                 while (*end) {
580                         /* Everything apart from '\0' can be quoted */
581                         /* Everything apart from '\0' can be escaped */
582                                 *out++ = *(end + 1);
583                                 end += 2;
584                                 continue;
585                         }
586
587                         if (isspace(*end))
588                                 break;  /* end of token */
589
590                         *out++ = *end++;
591                 }
592
593                 /* have we already filled the array? */
594                 if ((*argc + 1) > array_size) {
595                         argv = realloc_argv(&array_size, argv);
596                         if (!argv)
597                                 return -ENOMEM;
598                 }
599
600                 /* we know this is whitespace */
601                 if (*end)
602                         end++;
603
604                 /* terminate the string and put it in the array */
605                 *out = '\0';
606                 argv[*argc] = start;
607                 (*argc)++;
608         }
609
610         *argvp = argv;
611         return 0;
612 }
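/*
 * Example: the buffer "foo bar\ baz  qux" is rewritten in place to
 * yield argc == 3 and argv == { "foo", "bar baz", "qux" } -- the
 * backslash escapes the space and is dropped, and runs of whitespace
 * are skipped.
 */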
613
614 /*
615  * Impose necessary and sufficient conditions on a device's table such
616  * that any incoming bio which respects its logical_block_size can be
617  * processed successfully.  If it falls across the boundary between
618  * two or more targets, the size of each piece it gets split into must
619  * be compatible with the logical_block_size of the target processing it.
620  */
621 static int validate_hardware_logical_block_alignment(struct dm_table *table,
622                                                  struct queue_limits *limits)
623 {
624         /*
625          * This function uses arithmetic modulo the logical_block_size
626          * (in units of 512-byte sectors).
627          */
628         unsigned short device_logical_block_size_sects =
629                 limits->logical_block_size >> SECTOR_SHIFT;
630
631         /*
632          * Offset of the start of the next table entry, mod logical_block_size.
633          */
634         unsigned short next_target_start = 0;
635
636         /*
637          * Given an aligned bio that extends beyond the end of a
638          * target, how many sectors must the next target handle?
639          */
640         unsigned short remaining = 0;
641
642         struct dm_target *ti;
643         struct queue_limits ti_limits;
644         unsigned i;
645
646         /*
647          * Check each entry in the table in turn.
648          */
649         for (i = 0; i < dm_table_get_num_targets(table); i++) {
650                 ti = dm_table_get_target(table, i);
651
652                 blk_set_stacking_limits(&ti_limits);
653
654                 /* combine all target devices' limits */
655                 if (ti->type->iterate_devices)
656                         ti->type->iterate_devices(ti, dm_set_device_limits,
657                                                   &ti_limits);
658
659                 /*
660                  * If the remaining sectors fall entirely within this
661                  * table entry, are they compatible with its logical_block_size?
662                  */
663                 if (remaining < ti->len &&
664                     remaining & ((ti_limits.logical_block_size >>
665                                   SECTOR_SHIFT) - 1))
666                         break;  /* Error */
667
668                 next_target_start =
669                     (unsigned short) ((next_target_start + ti->len) &
670                                       (device_logical_block_size_sects - 1));
671                 remaining = next_target_start ?
672                     device_logical_block_size_sects - next_target_start : 0;
673         }
674
675         if (remaining) {
676                 DMWARN("%s: table line %u (start sect %llu len %llu) "
677                        "not aligned to h/w logical block size %u",
678                        dm_device_name(table->md), i,
679                        (unsigned long long) ti->begin,
680                        (unsigned long long) ti->len,
681                        limits->logical_block_size);
682                 return -EINVAL;
683         }
684
685         return 0;
686 }
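/*
 * Worked example, assuming a stacked logical_block_size of 4096 bytes
 * (8 sectors): a first target of 20 sectors ends mid-block, leaving
 * next_target_start == 20 & 7 == 4 and remaining == 8 - 4 == 4.  The
 * next target must then accept 4-sector I/O (e.g. its devices have a
 * 512-byte logical block size), and if it is 12 sectors long the
 * table ends block-aligned ((4 + 12) & 7 == 0), so validation
 * succeeds.
 */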
687
688 int dm_table_add_target(struct dm_table *t, const char *type,
689                         sector_t start, sector_t len, char *params)
690 {
691         int r = -EINVAL, argc;
692         char **argv;
693         struct dm_target *tgt;
694
695         if (t->singleton) {
696                 DMERR("%s: target type %s must appear alone in table",
697                       dm_device_name(t->md), t->targets->type->name);
698                 return -EINVAL;
699         }
700
701         BUG_ON(t->num_targets >= t->num_allocated);
702
703         tgt = t->targets + t->num_targets;
704         memset(tgt, 0, sizeof(*tgt));
705
706         if (!len) {
707                 DMERR("%s: zero-length target", dm_device_name(t->md));
708                 return -EINVAL;
709         }
710
711         tgt->type = dm_get_target_type(type);
712         if (!tgt->type) {
713                 DMERR("%s: %s: unknown target type", dm_device_name(t->md), type);
714                 return -EINVAL;
715         }
716
717         if (dm_target_needs_singleton(tgt->type)) {
718                 if (t->num_targets) {
719                         tgt->error = "singleton target type must appear alone in table";
720                         goto bad;
721                 }
722                 t->singleton = true;
723         }
724
725         if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) {
726                 tgt->error = "target type may not be included in a read-only table";
727                 goto bad;
728         }
729
730         if (t->immutable_target_type) {
731                 if (t->immutable_target_type != tgt->type) {
732                         tgt->error = "immutable target type cannot be mixed with other target types";
733                         goto bad;
734                 }
735         } else if (dm_target_is_immutable(tgt->type)) {
736                 if (t->num_targets) {
737                         tgt->error = "immutable target type cannot be mixed with other target types";
738                         goto bad;
739                 }
740                 t->immutable_target_type = tgt->type;
741         }
742
743         if (dm_target_has_integrity(tgt->type))
744                 t->integrity_added = 1;
745
746         tgt->table = t;
747         tgt->begin = start;
748         tgt->len = len;
749         tgt->error = "Unknown error";
750
751         /*
752          * Does this target adjoin the previous one?
753          */
754         if (!adjoin(t, tgt)) {
755                 tgt->error = "Gap in table";
756                 goto bad;
757         }
758
759         r = dm_split_args(&argc, &argv, params);
760         if (r) {
761                 tgt->error = "couldn't split parameters (insufficient memory)";
762                 goto bad;
763         }
764
765         r = tgt->type->ctr(tgt, argc, argv);
766         kfree(argv);
767         if (r)
768                 goto bad;
769
770         t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;
771
772         if (!tgt->num_discard_bios && tgt->discards_supported)
773                 DMWARN("%s: %s: ignoring discards_supported because num_discard_bios is zero.",
774                        dm_device_name(t->md), type);
775
776         return 0;
777
778  bad:
779         DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
780         dm_put_target_type(tgt->type);
781         return r;
782 }
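/*
 * Example: the dmsetup table line "0 409600 linear /dev/sdb 0" arrives
 * here as dm_table_add_target(t, "linear", 0, 409600, "/dev/sdb 0");
 * the params string is split with dm_split_args() and handed to the
 * linear target's .ctr.
 */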
783
784 /*
785  * Target argument parsing helpers.
786  */
787 static int validate_next_arg(const struct dm_arg *arg,
788                              struct dm_arg_set *arg_set,
789                              unsigned *value, char **error, unsigned grouped)
790 {
791         const char *arg_str = dm_shift_arg(arg_set);
792         char dummy;
793
794         if (!arg_str ||
795             (sscanf(arg_str, "%u%c", value, &dummy) != 1) ||
796             (*value < arg->min) ||
797             (*value > arg->max) ||
798             (grouped && arg_set->argc < *value)) {
799                 *error = arg->error;
800                 return -EINVAL;
801         }
802
803         return 0;
804 }
805
806 int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
807                 unsigned *value, char **error)
808 {
809         return validate_next_arg(arg, arg_set, value, error, 0);
810 }
811 EXPORT_SYMBOL(dm_read_arg);
812
813 int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
814                       unsigned *value, char **error)
815 {
816         return validate_next_arg(arg, arg_set, value, error, 1);
817 }
818 EXPORT_SYMBOL(dm_read_arg_group);
819
820 const char *dm_shift_arg(struct dm_arg_set *as)
821 {
822         char *r;
823
824         if (as->argc) {
825                 as->argc--;
826                 r = *as->argv;
827                 as->argv++;
828                 return r;
829         }
830
831         return NULL;
832 }
833 EXPORT_SYMBOL(dm_shift_arg);
834
835 void dm_consume_args(struct dm_arg_set *as, unsigned num_args)
836 {
837         BUG_ON(as->argc < num_args);
838         as->argc -= num_args;
839         as->argv += num_args;
840 }
841 EXPORT_SYMBOL(dm_consume_args);
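/*
 * Sketch of the parsing helpers in use (the feature-arg shape is
 * illustrative): a target taking "<#features> [<feature>...]" might do
 *
 *	static const struct dm_arg _args[] = {
 *		{ 0, 2, "Invalid number of feature args" },
 *	};
 *	unsigned num_features;
 *	const char *feature;
 *	int r;
 *
 *	r = dm_read_arg_group(_args, &as, &num_features, &ti->error);
 *	if (r)
 *		return r;
 *	while (num_features--)
 *		feature = dm_shift_arg(&as);
 *
 * dm_read_arg_group() additionally checks that at least num_features
 * arguments remain in the set before they are shifted off.
 */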
842
843 static bool __table_type_bio_based(enum dm_queue_mode table_type)
844 {
845         return (table_type == DM_TYPE_BIO_BASED ||
846                 table_type == DM_TYPE_DAX_BIO_BASED ||
847                 table_type == DM_TYPE_NVME_BIO_BASED);
848 }
849
850 static bool __table_type_request_based(enum dm_queue_mode table_type)
851 {
852         return table_type == DM_TYPE_REQUEST_BASED;
853 }
854
855 void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type)
856 {
857         t->type = type;
858 }
859 EXPORT_SYMBOL_GPL(dm_table_set_type);
860
861 /* validate the dax capability of the target device span */
862 int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
863                         sector_t start, sector_t len, void *data)
864 {
865         int blocksize = *(int *) data;
866
867         return generic_fsdax_supported(dev->dax_dev, dev->bdev, blocksize,
868                                        start, len);
869 }
870
871 /* Check devices support synchronous DAX */
872 static int device_dax_synchronous(struct dm_target *ti, struct dm_dev *dev,
873                                   sector_t start, sector_t len, void *data)
874 {
875         return dev->dax_dev && dax_synchronous(dev->dax_dev);
876 }
877
878 bool dm_table_supports_dax(struct dm_table *t,
879                            iterate_devices_callout_fn iterate_fn, int *blocksize)
880 {
881         struct dm_target *ti;
882         unsigned i;
883
884         /* Ensure that all targets support DAX. */
885         for (i = 0; i < dm_table_get_num_targets(t); i++) {
886                 ti = dm_table_get_target(t, i);
887
888                 if (!ti->type->direct_access)
889                         return false;
890
891                 if (!ti->type->iterate_devices ||
892                     !ti->type->iterate_devices(ti, iterate_fn, blocksize))
893                         return false;
894         }
895
896         return true;
897 }
898
899 static bool dm_table_does_not_support_partial_completion(struct dm_table *t);
900
901 static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev,
902                                   sector_t start, sector_t len, void *data)
903 {
904         struct block_device *bdev = dev->bdev;
905         struct request_queue *q = bdev_get_queue(bdev);
906
907         /* request-based cannot stack on partitions! */
908         if (bdev != bdev->bd_contains)
909                 return false;
910
911         return queue_is_mq(q);
912 }
913
914 static int dm_table_determine_type(struct dm_table *t)
915 {
916         unsigned i;
917         unsigned bio_based = 0, request_based = 0, hybrid = 0;
918         struct dm_target *tgt;
919         struct list_head *devices = dm_table_get_devices(t);
920         enum dm_queue_mode live_md_type = dm_get_md_type(t->md);
921         int page_size = PAGE_SIZE;
922
923         if (t->type != DM_TYPE_NONE) {
924                 /* target already set the table's type */
925                 if (t->type == DM_TYPE_BIO_BASED) {
926                         /* possibly upgrade to a variant of bio-based */
927                         goto verify_bio_based;
928                 }
929                 BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED);
930                 BUG_ON(t->type == DM_TYPE_NVME_BIO_BASED);
931                 goto verify_rq_based;
932         }
933
934         for (i = 0; i < t->num_targets; i++) {
935                 tgt = t->targets + i;
936                 if (dm_target_hybrid(tgt))
937                         hybrid = 1;
938                 else if (dm_target_request_based(tgt))
939                         request_based = 1;
940                 else
941                         bio_based = 1;
942
943                 if (bio_based && request_based) {
944                         DMERR("Inconsistent table: different target types"
945                               " can't be mixed up");
946                         return -EINVAL;
947                 }
948         }
949
950         if (hybrid && !bio_based && !request_based) {
951                 /*
952                  * The targets can work either way.
953                  * Determine the type from the live device.
954                  * Default to bio-based if device is new.
955                  */
956                 if (__table_type_request_based(live_md_type))
957                         request_based = 1;
958                 else
959                         bio_based = 1;
960         }
961
962         if (bio_based) {
963 verify_bio_based:
964                 /* We must use this table as bio-based */
965                 t->type = DM_TYPE_BIO_BASED;
966                 if (dm_table_supports_dax(t, device_supports_dax, &page_size) ||
967                     (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) {
968                         t->type = DM_TYPE_DAX_BIO_BASED;
969                 } else {
970                         /* Check if upgrading to NVMe bio-based is valid or required */
971                         tgt = dm_table_get_immutable_target(t);
972                         if (tgt && !tgt->max_io_len && dm_table_does_not_support_partial_completion(t)) {
973                                 t->type = DM_TYPE_NVME_BIO_BASED;
974                                 goto verify_rq_based; /* must be stacked directly on NVMe (blk-mq) */
975                         } else if (list_empty(devices) && live_md_type == DM_TYPE_NVME_BIO_BASED) {
976                                 t->type = DM_TYPE_NVME_BIO_BASED;
977                         }
978                 }
979                 return 0;
980         }
981
982         BUG_ON(!request_based); /* No targets in this table */
983
984         t->type = DM_TYPE_REQUEST_BASED;
985
986 verify_rq_based:
987         /*
988          * Request-based dm currently supports only tables that have a single target.
989          * To support multiple targets, request splitting support is needed,
990          * and that needs lots of changes in the block-layer.
991          * (e.g. request completion process for partial completion.)
992          */
993         if (t->num_targets > 1) {
994                 DMERR("%s DM doesn't support multiple targets",
995                       t->type == DM_TYPE_NVME_BIO_BASED ? "nvme bio-based" : "request-based");
996                 return -EINVAL;
997         }
998
999         if (list_empty(devices)) {
1000                 int srcu_idx;
1001                 struct dm_table *live_table = dm_get_live_table(t->md, &srcu_idx);
1002
1003                 /* inherit live table's type */
1004                 if (live_table)
1005                         t->type = live_table->type;
1006                 dm_put_live_table(t->md, srcu_idx);
1007                 return 0;
1008         }
1009
1010         tgt = dm_table_get_immutable_target(t);
1011         if (!tgt) {
1012                 DMERR("table load rejected: immutable target is required");
1013                 return -EINVAL;
1014         } else if (tgt->max_io_len) {
1015                 DMERR("table load rejected: immutable target that splits IO is not supported");
1016                 return -EINVAL;
1017         }
1018
1019         /* Non-request-stackable devices can't be used for request-based dm */
1020         if (!tgt->type->iterate_devices ||
1021             !tgt->type->iterate_devices(tgt, device_is_rq_stackable, NULL)) {
1022                 DMERR("table load rejected: including non-request-stackable devices");
1023                 return -EINVAL;
1024         }
1025
1026         return 0;
1027 }
1028
1029 enum dm_queue_mode dm_table_get_type(struct dm_table *t)
1030 {
1031         return t->type;
1032 }
1033
1034 struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)
1035 {
1036         return t->immutable_target_type;
1037 }
1038
1039 struct dm_target *dm_table_get_immutable_target(struct dm_table *t)
1040 {
1041         /* Immutable target is implicitly a singleton */
1042         if (t->num_targets > 1 ||
1043             !dm_target_is_immutable(t->targets[0].type))
1044                 return NULL;
1045
1046         return t->targets;
1047 }
1048
1049 struct dm_target *dm_table_get_wildcard_target(struct dm_table *t)
1050 {
1051         struct dm_target *ti;
1052         unsigned i;
1053
1054         for (i = 0; i < dm_table_get_num_targets(t); i++) {
1055                 ti = dm_table_get_target(t, i);
1056                 if (dm_target_is_wildcard(ti->type))
1057                         return ti;
1058         }
1059
1060         return NULL;
1061 }
1062
1063 bool dm_table_bio_based(struct dm_table *t)
1064 {
1065         return __table_type_bio_based(dm_table_get_type(t));
1066 }
1067
1068 bool dm_table_request_based(struct dm_table *t)
1069 {
1070         return __table_type_request_based(dm_table_get_type(t));
1071 }
1072
1073 static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
1074 {
1075         enum dm_queue_mode type = dm_table_get_type(t);
1076         unsigned per_io_data_size = 0;
1077         unsigned min_pool_size = 0;
1078         struct dm_target *ti;
1079         unsigned i;
1080
1081         if (unlikely(type == DM_TYPE_NONE)) {
1082                 DMWARN("no table type is set, can't allocate mempools");
1083                 return -EINVAL;
1084         }
1085
1086         if (__table_type_bio_based(type))
1087                 for (i = 0; i < t->num_targets; i++) {
1088                         ti = t->targets + i;
1089                         per_io_data_size = max(per_io_data_size, ti->per_io_data_size);
1090                         min_pool_size = max(min_pool_size, ti->num_flush_bios);
1091                 }
1092
1093         t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported,
1094                                            per_io_data_size, min_pool_size);
1095         if (!t->mempools)
1096                 return -ENOMEM;
1097
1098         return 0;
1099 }
1100
1101 void dm_table_free_md_mempools(struct dm_table *t)
1102 {
1103         dm_free_md_mempools(t->mempools);
1104         t->mempools = NULL;
1105 }
1106
1107 struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t)
1108 {
1109         return t->mempools;
1110 }
1111
1112 static int setup_indexes(struct dm_table *t)
1113 {
1114         int i;
1115         unsigned int total = 0;
1116         sector_t *indexes;
1117
1118         /* allocate the space for *all* the indexes */
1119         for (i = t->depth - 2; i >= 0; i--) {
1120                 t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
1121                 total += t->counts[i];
1122         }
1123
1124         indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE);
1125         if (!indexes)
1126                 return -ENOMEM;
1127
1128         /* set up internal nodes, bottom-up */
1129         for (i = t->depth - 2; i >= 0; i--) {
1130                 t->index[i] = indexes;
1131                 indexes += (KEYS_PER_NODE * t->counts[i]);
1132                 setup_btree_index(i, t);
1133         }
1134
1135         return 0;
1136 }
1137
1138 /*
1139  * Builds the btree to index the map.
1140  */
1141 static int dm_table_build_index(struct dm_table *t)
1142 {
1143         int r = 0;
1144         unsigned int leaf_nodes;
1145
1146         /* how many indexes will the btree have? */
1147         leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
1148         t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);
1149
1150         /* leaf layer has already been set up */
1151         t->counts[t->depth - 1] = leaf_nodes;
1152         t->index[t->depth - 1] = t->highs;
1153
1154         if (t->depth >= 2)
1155                 r = setup_indexes(t);
1156
1157         return r;
1158 }
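/*
 * Example, assuming 64-byte cache lines and an 8-byte sector_t (so
 * KEYS_PER_NODE == 8 and CHILDREN_PER_NODE == 9): 1000 targets need
 * dm_div_up(1000, 8) == 125 leaf nodes and int_log(125, 9) == 3
 * internal levels, giving depth == 4 with node counts 1, 2, 14 and
 * 125 from root to leaves.
 */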
1159
1160 static bool integrity_profile_exists(struct gendisk *disk)
1161 {
1162         return !!blk_get_integrity(disk);
1163 }
1164
1165 /*
1166  * Get a disk whose integrity profile reflects the table's profile.
1167  * Returns NULL if integrity support was inconsistent or unavailable.
1168  */
1169 static struct gendisk *dm_table_get_integrity_disk(struct dm_table *t)
1170 {
1171         struct list_head *devices = dm_table_get_devices(t);
1172         struct dm_dev_internal *dd = NULL;
1173         struct gendisk *prev_disk = NULL, *template_disk = NULL;
1174         unsigned i;
1175
1176         for (i = 0; i < dm_table_get_num_targets(t); i++) {
1177                 struct dm_target *ti = dm_table_get_target(t, i);
1178                 if (!dm_target_passes_integrity(ti->type))
1179                         goto no_integrity;
1180         }
1181
1182         list_for_each_entry(dd, devices, list) {
1183                 template_disk = dd->dm_dev->bdev->bd_disk;
1184                 if (!integrity_profile_exists(template_disk))
1185                         goto no_integrity;
1186                 else if (prev_disk &&
1187                          blk_integrity_compare(prev_disk, template_disk) < 0)
1188                         goto no_integrity;
1189                 prev_disk = template_disk;
1190         }
1191
1192         return template_disk;
1193
1194 no_integrity:
1195         if (prev_disk)
1196                 DMWARN("%s: integrity not set: %s and %s profile mismatch",
1197                        dm_device_name(t->md),
1198                        prev_disk->disk_name,
1199                        template_disk->disk_name);
1200         return NULL;
1201 }
1202
1203 /*
1204  * Register the mapped device for blk_integrity support if the
1205  * underlying devices have an integrity profile.  But all devices may
1206  * not have matching profiles (checking all devices isn't reliable
1207  * during table load because this table may use other DM device(s) which
1208  * must be resumed before they will have an initialized integrity
1209  * profile).  Consequently, stacked DM devices force a 2 stage integrity
1210  * profile validation: First pass during table load, final pass during
1211  * resume.
1212  */
1213 static int dm_table_register_integrity(struct dm_table *t)
1214 {
1215         struct mapped_device *md = t->md;
1216         struct gendisk *template_disk = NULL;
1217
1218         /* If target handles integrity itself do not register it here. */
1219         if (t->integrity_added)
1220                 return 0;
1221
1222         template_disk = dm_table_get_integrity_disk(t);
1223         if (!template_disk)
1224                 return 0;
1225
1226         if (!integrity_profile_exists(dm_disk(md))) {
1227                 t->integrity_supported = true;
1228                 /*
1229                  * Register integrity profile during table load; we can do
1230                  * this because the final profile must match during resume.
1231                  */
1232                 blk_integrity_register(dm_disk(md),
1233                                        blk_get_integrity(template_disk));
1234                 return 0;
1235         }
1236
1237         /*
1238          * If DM device already has an initialized integrity
1239          * profile the new profile should not conflict.
1240          */
1241         if (blk_integrity_compare(dm_disk(md), template_disk) < 0) {
1242                 DMWARN("%s: conflict with existing integrity profile: "
1243                        "%s profile mismatch",
1244                        dm_device_name(t->md),
1245                        template_disk->disk_name);
1246                 return 1;
1247         }
1248
1249         /* Preserve existing integrity profile */
1250         t->integrity_supported = true;
1251         return 0;
1252 }
1253
1254 /*
1255  * Prepares the table for use by building the indices,
1256  * setting the type, and allocating mempools.
1257  */
1258 int dm_table_complete(struct dm_table *t)
1259 {
1260         int r;
1261
1262         r = dm_table_determine_type(t);
1263         if (r) {
1264                 DMERR("unable to determine table type");
1265                 return r;
1266         }
1267
1268         r = dm_table_build_index(t);
1269         if (r) {
1270                 DMERR("unable to build btrees");
1271                 return r;
1272         }
1273
1274         r = dm_table_register_integrity(t);
1275         if (r) {
1276                 DMERR("could not register integrity profile");
1277                 return r;
1278         }
1279
1280         r = dm_table_alloc_md_mempools(t, t->md);
1281         if (r)
1282                 DMERR("unable to allocate mempools");
1283
1284         return r;
1285 }
1286
1287 static DEFINE_MUTEX(_event_lock);
1288 void dm_table_event_callback(struct dm_table *t,
1289                              void (*fn)(void *), void *context)
1290 {
1291         mutex_lock(&_event_lock);
1292         t->event_fn = fn;
1293         t->event_context = context;
1294         mutex_unlock(&_event_lock);
1295 }
1296
1297 void dm_table_event(struct dm_table *t)
1298 {
1299         /*
1300          * You can no longer call dm_table_event() from interrupt
1301          * context, use a bottom half instead.
1302          */
1303         BUG_ON(in_interrupt());
1304
1305         mutex_lock(&_event_lock);
1306         if (t->event_fn)
1307                 t->event_fn(t->event_context);
1308         mutex_unlock(&_event_lock);
1309 }
1310 EXPORT_SYMBOL(dm_table_event);
1311
1312 inline sector_t dm_table_get_size(struct dm_table *t)
1313 {
1314         return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
1315 }
1316 EXPORT_SYMBOL(dm_table_get_size);
1317
1318 struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
1319 {
1320         if (index >= t->num_targets)
1321                 return NULL;
1322
1323         return t->targets + index;
1324 }
1325
1326 /*
1327  * Search the btree for the correct target.
1328  *
1329  * Caller should check returned pointer for NULL
1330  * to trap I/O beyond end of device.
1331  */
1332 struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
1333 {
1334         unsigned int l, n = 0, k = 0;
1335         sector_t *node;
1336
1337         if (unlikely(sector >= dm_table_get_size(t)))
1338                 return NULL;
1339
1340         for (l = 0; l < t->depth; l++) {
1341                 n = get_child(n, k);
1342                 node = get_node(t, l, n);
1343
1344                 for (k = 0; k < KEYS_PER_NODE; k++)
1345                         if (node[k] >= sector)
1346                                 break;
1347         }
1348
1349         return &t->targets[(KEYS_PER_NODE * n) + k];
1350 }
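/*
 * With depth == 2 the walk is: scan the root node for the first key
 * >= sector to pick child k, descend (n becomes k), scan that leaf
 * the same way, and the matching entry of t->highs selects
 * t->targets[(KEYS_PER_NODE * n) + k].
 */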
1351
1352 static int count_device(struct dm_target *ti, struct dm_dev *dev,
1353                         sector_t start, sector_t len, void *data)
1354 {
1355         unsigned *num_devices = data;
1356
1357         (*num_devices)++;
1358
1359         return 0;
1360 }
1361
1362 /*
1363  * Check whether a table has no data devices attached using each
1364  * target's iterate_devices method.
1365  * Returns false if the result is unknown because a target doesn't
1366  * support iterate_devices.
1367  */
1368 bool dm_table_has_no_data_devices(struct dm_table *table)
1369 {
1370         struct dm_target *ti;
1371         unsigned i, num_devices;
1372
1373         for (i = 0; i < dm_table_get_num_targets(table); i++) {
1374                 ti = dm_table_get_target(table, i);
1375
1376                 if (!ti->type->iterate_devices)
1377                         return false;
1378
1379                 num_devices = 0;
1380                 ti->type->iterate_devices(ti, count_device, &num_devices);
1381                 if (num_devices)
1382                         return false;
1383         }
1384
1385         return true;
1386 }
1387
1388 static int device_is_zoned_model(struct dm_target *ti, struct dm_dev *dev,
1389                                  sector_t start, sector_t len, void *data)
1390 {
1391         struct request_queue *q = bdev_get_queue(dev->bdev);
1392         enum blk_zoned_model *zoned_model = data;
1393
1394         return q && blk_queue_zoned_model(q) == *zoned_model;
1395 }
1396
1397 static bool dm_table_supports_zoned_model(struct dm_table *t,
1398                                           enum blk_zoned_model zoned_model)
1399 {
1400         struct dm_target *ti;
1401         unsigned i;
1402
1403         for (i = 0; i < dm_table_get_num_targets(t); i++) {
1404                 ti = dm_table_get_target(t, i);
1405
1406                 if (zoned_model == BLK_ZONED_HM &&
1407                     !dm_target_supports_zoned_hm(ti->type))
1408                         return false;
1409
1410                 if (!ti->type->iterate_devices ||
1411                     !ti->type->iterate_devices(ti, device_is_zoned_model, &zoned_model))
1412                         return false;
1413         }
1414
1415         return true;
1416 }
1417
1418 static int device_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev,
1419                                        sector_t start, sector_t len, void *data)
1420 {
1421         struct request_queue *q = bdev_get_queue(dev->bdev);
1422         unsigned int *zone_sectors = data;
1423
1424         return q && blk_queue_zone_sectors(q) == *zone_sectors;
1425 }
1426
1427 static bool dm_table_matches_zone_sectors(struct dm_table *t,
1428                                           unsigned int zone_sectors)
1429 {
1430         struct dm_target *ti;
1431         unsigned i;
1432
1433         for (i = 0; i < dm_table_get_num_targets(t); i++) {
1434                 ti = dm_table_get_target(t, i);
1435
1436                 if (!ti->type->iterate_devices ||
1437                     !ti->type->iterate_devices(ti, device_matches_zone_sectors, &zone_sectors))
1438                         return false;
1439         }
1440
1441         return true;
1442 }
1443
1444 static int validate_hardware_zoned_model(struct dm_table *table,
1445                                          enum blk_zoned_model zoned_model,
1446                                          unsigned int zone_sectors)
1447 {
1448         if (zoned_model == BLK_ZONED_NONE)
1449                 return 0;
1450
1451         if (!dm_table_supports_zoned_model(table, zoned_model)) {
1452                 DMERR("%s: zoned model is not consistent across all devices",
1453                       dm_device_name(table->md));
1454                 return -EINVAL;
1455         }
1456
1457         /* Check zone size validity and compatibility */
1458         if (!zone_sectors || !is_power_of_2(zone_sectors))
1459                 return -EINVAL;
1460
1461         if (!dm_table_matches_zone_sectors(table, zone_sectors)) {
1462                 DMERR("%s: zone sectors is not consistent across all devices",
1463                       dm_device_name(table->md));
1464                 return -EINVAL;
1465         }
1466
1467         return 0;
1468 }
1469
1470 /*
1471  * Establish the new table's queue_limits and validate them.
1472  */
1473 int dm_calculate_queue_limits(struct dm_table *table,
1474                               struct queue_limits *limits)
1475 {
1476         struct dm_target *ti;
1477         struct queue_limits ti_limits;
1478         unsigned i;
1479         enum blk_zoned_model zoned_model = BLK_ZONED_NONE;
1480         unsigned int zone_sectors = 0;
1481
1482         blk_set_stacking_limits(limits);
1483
1484         for (i = 0; i < dm_table_get_num_targets(table); i++) {
1485                 blk_set_stacking_limits(&ti_limits);
1486
1487                 ti = dm_table_get_target(table, i);
1488
1489                 if (!ti->type->iterate_devices)
1490                         goto combine_limits;
1491
1492                 /*
1493                  * Combine queue limits of all the devices this target uses.
1494                  */
1495                 ti->type->iterate_devices(ti, dm_set_device_limits,
1496                                           &ti_limits);
1497
1498                 if (zoned_model == BLK_ZONED_NONE && ti_limits.zoned != BLK_ZONED_NONE) {
1499                         /*
1500                          * After stacking all limits, validate that all devices
1501                          * in the table support this zoned model and zone sectors.
1502                          */
1503                         zoned_model = ti_limits.zoned;
1504                         zone_sectors = ti_limits.chunk_sectors;
1505                 }
1506
1507                 /* Set I/O hints portion of queue limits */
1508                 if (ti->type->io_hints)
1509                         ti->type->io_hints(ti, &ti_limits);
1510
1511                 /*
1512                  * Check each device area is consistent with the target's
1513                  * overall queue limits.
1514                  */
1515                 if (ti->type->iterate_devices(ti, device_area_is_invalid,
1516                                               &ti_limits))
1517                         return -EINVAL;
1518
1519 combine_limits:
1520                 /*
1521                  * Merge this target's queue limits into the overall limits
1522                  * for the table.
1523                  */
1524                 if (blk_stack_limits(limits, &ti_limits, 0) < 0)
1525                         DMWARN("%s: adding target device "
1526                                "(start sect %llu len %llu) "
1527                                "caused an alignment inconsistency",
1528                                dm_device_name(table->md),
1529                                (unsigned long long) ti->begin,
1530                                (unsigned long long) ti->len);
1531
1532                 /*
1533                  * FIXME: this should likely be moved to blk_stack_limits(), would
1534                  * also eliminate limits->zoned stacking hack in dm_set_device_limits()
1535                  */
1536                 if (limits->zoned == BLK_ZONED_NONE && ti_limits.zoned != BLK_ZONED_NONE) {
1537                         /*
1538                          * By default, the stacked limits zoned model is set to
1539                          * BLK_ZONED_NONE in blk_set_stacking_limits(). Update
1540                          * this model using the first target model reported
1541                          * that is not BLK_ZONED_NONE. This will be either the
1542                          * first target device zoned model or the model reported
1543                          * by the target .io_hints.
1544                          */
1545                         limits->zoned = ti_limits.zoned;
1546                 }
1547         }
1548
1549         /*
1550          * Verify that the zoned model and zone sectors, as determined before
1551          * any .io_hints override, are the same across all devices in the table.
1552  * - this is especially relevant if .io_hints is emulating a drive-managed
1553          *   zoned model (aka BLK_ZONED_NONE) on host-managed zoned block devices.
1554          * BUT...
1555          */
1556         if (limits->zoned != BLK_ZONED_NONE) {
1557                 /*
1558                  * ...IF the above limits stacking determined a zoned model
1559                  * validate that all of the table's devices conform to it.
1560                  */
1561                 zoned_model = limits->zoned;
1562                 zone_sectors = limits->chunk_sectors;
1563         }
1564         if (validate_hardware_zoned_model(table, zoned_model, zone_sectors))
1565                 return -EINVAL;
1566
1567         return validate_hardware_logical_block_alignment(table, limits);
1568 }
1569
1570 /*
1571  * Verify that all devices have an integrity profile that matches the
1572  * DM device's registered integrity profile.  If the profiles don't
1573  * match then unregister the DM device's integrity profile.
1574  */
1575 static void dm_table_verify_integrity(struct dm_table *t)
1576 {
1577         struct gendisk *template_disk = NULL;
1578
1579         if (t->integrity_added)
1580                 return;
1581
1582         if (t->integrity_supported) {
1583                 /*
1584                  * Verify that the original integrity profile
1585                  * matches all the devices in this table.
1586                  */
1587                 template_disk = dm_table_get_integrity_disk(t);
1588                 if (template_disk &&
1589                     blk_integrity_compare(dm_disk(t->md), template_disk) >= 0)
1590                         return;
1591         }
1592
1593         if (integrity_profile_exists(dm_disk(t->md))) {
1594                 DMWARN("%s: unable to establish an integrity profile",
1595                        dm_device_name(t->md));
1596                 blk_integrity_unregister(dm_disk(t->md));
1597         }
1598 }
1599
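/*
 * iterate_devices callout: returns true if the device's request queue
 * has the flush capability passed in @data (a queue_flags mask built
 * from QUEUE_FLAG_WC or QUEUE_FLAG_FUA by dm_table_set_restrictions()).
 */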
1600 static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
1601                                 sector_t start, sector_t len, void *data)
1602 {
1603         unsigned long flush = (unsigned long) data;
1604         struct request_queue *q = bdev_get_queue(dev->bdev);
1605
1606         return q && (q->queue_flags & flush);
1607 }
1608
1609 static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush)
1610 {
1611         struct dm_target *ti;
1612         unsigned i;
1613
1614         /*
1615          * Require at least one underlying device to support flushes.
1616          * t->devices includes internal dm devices such as mirror logs
1617          * so we need to use iterate_devices here, which targets
1618          * supporting flushes must provide.
1619          */
1620         for (i = 0; i < dm_table_get_num_targets(t); i++) {
1621                 ti = dm_table_get_target(t, i);
1622
1623                 if (!ti->num_flush_bios)
1624                         continue;
1625
1626                 if (ti->flush_supported)
1627                         return true;
1628
1629                 if (ti->type->iterate_devices &&
1630                     ti->type->iterate_devices(ti, device_flush_capable, (void *) flush))
1631                         return true;
1632         }
1633
1634         return false;
1635 }
1636
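/*
 * iterate_devices callout: returns true if the device is fronted by a
 * DAX device whose write cache is enabled.
 */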
1637 static int device_dax_write_cache_enabled(struct dm_target *ti,
1638                                           struct dm_dev *dev, sector_t start,
1639                                           sector_t len, void *data)
1640 {
1641         struct dax_device *dax_dev = dev->dax_dev;
1642
1643         if (!dax_dev)
1644                 return false;
1645
1646         if (dax_write_cache_enabled(dax_dev))
1647                 return true;
1648         return false;
1649 }
1650
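/*
 * Returns true if any underlying DAX device has its write cache
 * enabled; dm_table_set_restrictions() then enables the write cache on
 * the mapped device's DAX device as well.
 */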
1651 static int dm_table_supports_dax_write_cache(struct dm_table *t)
1652 {
1653         struct dm_target *ti;
1654         unsigned i;
1655
1656         for (i = 0; i < dm_table_get_num_targets(t); i++) {
1657                 ti = dm_table_get_target(t, i);
1658
1659                 if (ti->type->iterate_devices &&
1660                     ti->type->iterate_devices(ti,
1661                                 device_dax_write_cache_enabled, NULL))
1662                         return true;
1663         }
1664
1665         return false;
1666 }
1667
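/*
 * iterate_devices callouts used with dm_table_all_devices_attribute():
 * device_is_nonrot() tests for non-rotational media and
 * device_is_not_random() for queues that do not feed the entropy pool.
 */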
1668 static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
1669                             sector_t start, sector_t len, void *data)
1670 {
1671         struct request_queue *q = bdev_get_queue(dev->bdev);
1672
1673         return q && blk_queue_nonrot(q);
1674 }
1675
1676 static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
1677                              sector_t start, sector_t len, void *data)
1678 {
1679         struct request_queue *q = bdev_get_queue(dev->bdev);
1680
1681         return q && !blk_queue_add_random(q);
1682 }
1683
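/*
 * Returns true only if every target provides iterate_devices and @func
 * holds for every underlying device; targets without iterate_devices
 * (and hence without data devices to check) make this return false.
 */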
1684 static bool dm_table_all_devices_attribute(struct dm_table *t,
1685                                            iterate_devices_callout_fn func)
1686 {
1687         struct dm_target *ti;
1688         unsigned i;
1689
1690         for (i = 0; i < dm_table_get_num_targets(t); i++) {
1691                 ti = dm_table_get_target(t, i);
1692
1693                 if (!ti->type->iterate_devices ||
1694                     !ti->type->iterate_devices(ti, func, NULL))
1695                         return false;
1696         }
1697
1698         return true;
1699 }
1700
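/*
 * A minimal sketch (hypothetical, not part of this table code) of a
 * callout suitable for dm_table_all_devices_attribute(); it has the
 * same shape as device_is_nonrot() above, testing one queue property
 * per device:
 *
 *	static int device_is_rotational(struct dm_target *ti,
 *					struct dm_dev *dev, sector_t start,
 *					sector_t len, void *data)
 *	{
 *		struct request_queue *q = bdev_get_queue(dev->bdev);
 *
 *		return q && !blk_queue_nonrot(q);
 *	}
 */
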
1701 static int device_no_partial_completion(struct dm_target *ti, struct dm_dev *dev,
1702                                         sector_t start, sector_t len, void *data)
1703 {
1704         char b[BDEVNAME_SIZE];
1705
1706         /* For now, NVMe devices are the only devices of this class */
1707         return (strncmp(bdevname(dev->bdev, b), "nvme", 4) == 0);
1708 }
1709
1710 static bool dm_table_does_not_support_partial_completion(struct dm_table *t)
1711 {
1712         return dm_table_all_devices_attribute(t, device_no_partial_completion);
1713 }
1714
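/*
 * Note the negated predicates below (device_not_*_capable): they return
 * true for a device that lacks the capability, so a true result from
 * iterate_devices() means at least one device disqualifies the table.
 */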
1715 static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
1716                                          sector_t start, sector_t len, void *data)
1717 {
1718         struct request_queue *q = bdev_get_queue(dev->bdev);
1719
1720         return q && !q->limits.max_write_same_sectors;
1721 }
1722
1723 static bool dm_table_supports_write_same(struct dm_table *t)
1724 {
1725         struct dm_target *ti;
1726         unsigned i;
1727
1728         for (i = 0; i < dm_table_get_num_targets(t); i++) {
1729                 ti = dm_table_get_target(t, i);
1730
1731                 if (!ti->num_write_same_bios)
1732                         return false;
1733
1734                 if (!ti->type->iterate_devices ||
1735                     ti->type->iterate_devices(ti, device_not_write_same_capable, NULL))
1736                         return false;
1737         }
1738
1739         return true;
1740 }
1741
1742 static int device_not_write_zeroes_capable(struct dm_target *ti, struct dm_dev *dev,
1743                                            sector_t start, sector_t len, void *data)
1744 {
1745         struct request_queue *q = bdev_get_queue(dev->bdev);
1746
1747         return q && !q->limits.max_write_zeroes_sectors;
1748 }
1749
1750 static bool dm_table_supports_write_zeroes(struct dm_table *t)
1751 {
1752         struct dm_target *ti;
1753         unsigned i;
1754 
1755         for (i = 0; i < dm_table_get_num_targets(t); i++) {
1756                 ti = dm_table_get_target(t, i);
1757
1758                 if (!ti->num_write_zeroes_bios)
1759                         return false;
1760
1761                 if (!ti->type->iterate_devices ||
1762                     ti->type->iterate_devices(ti, device_not_write_zeroes_capable, NULL))
1763                         return false;
1764         }
1765
1766         return true;
1767 }
1768
1769 static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev,
1770                                       sector_t start, sector_t len, void *data)
1771 {
1772         struct request_queue *q = bdev_get_queue(dev->bdev);
1773
1774         return q && !blk_queue_discard(q);
1775 }
1776
1777 static bool dm_table_supports_discards(struct dm_table *t)
1778 {
1779         struct dm_target *ti;
1780         unsigned i;
1781
1782         for (i = 0; i < dm_table_get_num_targets(t); i++) {
1783                 ti = dm_table_get_target(t, i);
1784
1785                 if (!ti->num_discard_bios)
1786                         return false;
1787
1788                 /*
1789                  * Either the target provides discard support (as implied by setting
1790                  * 'discards_supported') or it relies on _all_ data devices having
1791                  * discard support.
1792                  */
1793                 if (!ti->discards_supported &&
1794                     (!ti->type->iterate_devices ||
1795                      ti->type->iterate_devices(ti, device_not_discard_capable, NULL)))
1796                         return false;
1797         }
1798
1799         return true;
1800 }
1801
1802 static int device_not_secure_erase_capable(struct dm_target *ti,
1803                                            struct dm_dev *dev, sector_t start,
1804                                            sector_t len, void *data)
1805 {
1806         struct request_queue *q = bdev_get_queue(dev->bdev);
1807
1808         return q && !blk_queue_secure_erase(q);
1809 }
1810
1811 static bool dm_table_supports_secure_erase(struct dm_table *t)
1812 {
1813         struct dm_target *ti;
1814         unsigned int i;
1815
1816         for (i = 0; i < dm_table_get_num_targets(t); i++) {
1817                 ti = dm_table_get_target(t, i);
1818
1819                 if (!ti->num_secure_erase_bios)
1820                         return false;
1821
1822                 if (!ti->type->iterate_devices ||
1823                     ti->type->iterate_devices(ti, device_not_secure_erase_capable, NULL))
1824                         return false;
1825         }
1826
1827         return true;
1828 }
1829
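/*
 * iterate_devices callout: returns true if the device's backing device
 * reports that it requires stable pages during writeback.
 */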
1830 static int device_requires_stable_pages(struct dm_target *ti,
1831                                         struct dm_dev *dev, sector_t start,
1832                                         sector_t len, void *data)
1833 {
1834         struct request_queue *q = bdev_get_queue(dev->bdev);
1835
1836         return q && bdi_cap_stable_pages_required(q->backing_dev_info);
1837 }
1838
1839 /*
1840  * If any underlying device requires stable pages, a table must require
1841  * them as well.  Only targets that support iterate_devices are considered:
1842  * we don't want targets such as error or zero to require stable pages.
1843  */
1844 static bool dm_table_requires_stable_pages(struct dm_table *t)
1845 {
1846         struct dm_target *ti;
1847         unsigned i;
1848
1849         for (i = 0; i < dm_table_get_num_targets(t); i++) {
1850                 ti = dm_table_get_target(t, i);
1851
1852                 if (ti->type->iterate_devices &&
1853                     ti->type->iterate_devices(ti, device_requires_stable_pages, NULL))
1854                         return true;
1855         }
1856
1857         return false;
1858 }
1859
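/*
 * Apply the table's queue_limits and feature support (discard, secure
 * erase, flush/FUA, DAX, write same/zeroes, stable pages, entropy
 * contribution) to the mapped device's request_queue.
 */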
1860 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
1861                                struct queue_limits *limits)
1862 {
1863         bool wc = false, fua = false;
1864         int page_size = PAGE_SIZE;
1865
1866         /*
1867          * Copy table's limits to the DM device's request_queue
1868          */
1869         q->limits = *limits;
1870
1871         if (!dm_table_supports_discards(t)) {
1872                 blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
1873                 /* Must also clear discard limits... */
1874                 q->limits.max_discard_sectors = 0;
1875                 q->limits.max_hw_discard_sectors = 0;
1876                 q->limits.discard_granularity = 0;
1877                 q->limits.discard_alignment = 0;
1878                 q->limits.discard_misaligned = 0;
1879         } else
1880                 blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
1881
1882         if (dm_table_supports_secure_erase(t))
1883                 blk_queue_flag_set(QUEUE_FLAG_SECERASE, q);
1884
1885         if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
1886                 wc = true;
1887                 if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_FUA)))
1888                         fua = true;
1889         }
1890         blk_queue_write_cache(q, wc, fua);
1891
1892         if (dm_table_supports_dax(t, device_supports_dax, &page_size)) {
1893                 blk_queue_flag_set(QUEUE_FLAG_DAX, q);
1894                 if (dm_table_supports_dax(t, device_dax_synchronous, NULL))
1895                         set_dax_synchronous(t->md->dax_dev);
1896         } else
1897                 blk_queue_flag_clear(QUEUE_FLAG_DAX, q);
1899
1900         if (dm_table_supports_dax_write_cache(t))
1901                 dax_write_cache(t->md->dax_dev, true);
1902
1903         /* Advertise non-rotational only if all underlying devices are non-rotational. */
1904         if (dm_table_all_devices_attribute(t, device_is_nonrot))
1905                 blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
1906         else
1907                 blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
1908
1909         if (!dm_table_supports_write_same(t))
1910                 q->limits.max_write_same_sectors = 0;
1911         if (!dm_table_supports_write_zeroes(t))
1912                 q->limits.max_write_zeroes_sectors = 0;
1913
1914         dm_table_verify_integrity(t);
1915
1916         /*
1917          * Some devices don't use blk_integrity but still want stable pages
1918          * because they do their own checksumming.
1919          */
1920         if (dm_table_requires_stable_pages(t))
1921                 q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
1922         else
1923                 q->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES;
1924
1925         /*
1926          * Determine whether or not this queue's I/O timings contribute
1927          * to the entropy pool. Only request-based targets use this.
1928          * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
1929          * have it set.
1930          */
1931         if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
1932                 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
1933
1934         /*
1935          * For a zoned target, the number of zones must be updated so that the
1936          * correct value is exposed in sysfs queue/nr_zones. For a bio-based
1937          * target, this is all that is needed.
1938          */
1939 #ifdef CONFIG_BLK_DEV_ZONED
1940         if (blk_queue_is_zoned(q)) {
1941                 WARN_ON_ONCE(queue_is_mq(q));
1942                 q->nr_zones = blkdev_nr_zones(t->md->disk);
1943         }
1944 #endif
1945
1946         /* Allow reads to exceed readahead limits */
1947         q->backing_dev_info->io_pages = limits->max_sectors >> (PAGE_SHIFT - 9);
1948 }
1949
1950 unsigned int dm_table_get_num_targets(struct dm_table *t)
1951 {
1952         return t->num_targets;
1953 }
1954
1955 struct list_head *dm_table_get_devices(struct dm_table *t)
1956 {
1957         return &t->devices;
1958 }
1959
1960 fmode_t dm_table_get_mode(struct dm_table *t)
1961 {
1962         return t->mode;
1963 }
1964 EXPORT_SYMBOL(dm_table_get_mode);
1965
1966 enum suspend_mode {
1967         PRESUSPEND,
1968         PRESUSPEND_UNDO,
1969         POSTSUSPEND,
1970 };
1971
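/*
 * Walk all targets, invoking the suspend hook selected by @mode
 * (presuspend, presuspend_undo or postsuspend) on each target that
 * provides it.
 */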
1972 static void suspend_targets(struct dm_table *t, enum suspend_mode mode)
1973 {
1974         int i = t->num_targets;
1975         struct dm_target *ti = t->targets;
1976
1977         lockdep_assert_held(&t->md->suspend_lock);
1978
1979         while (i--) {
1980                 switch (mode) {
1981                 case PRESUSPEND:
1982                         if (ti->type->presuspend)
1983                                 ti->type->presuspend(ti);
1984                         break;
1985                 case PRESUSPEND_UNDO:
1986                         if (ti->type->presuspend_undo)
1987                                 ti->type->presuspend_undo(ti);
1988                         break;
1989                 case POSTSUSPEND:
1990                         if (ti->type->postsuspend)
1991                                 ti->type->postsuspend(ti);
1992                         break;
1993                 }
1994                 ti++;
1995         }
1996 }
1997
1998 void dm_table_presuspend_targets(struct dm_table *t)
1999 {
2000         if (!t)
2001                 return;
2002
2003         suspend_targets(t, PRESUSPEND);
2004 }
2005
2006 void dm_table_presuspend_undo_targets(struct dm_table *t)
2007 {
2008         if (!t)
2009                 return;
2010
2011         suspend_targets(t, PRESUSPEND_UNDO);
2012 }
2013
2014 void dm_table_postsuspend_targets(struct dm_table *t)
2015 {
2016         if (!t)
2017                 return;
2018
2019         suspend_targets(t, POSTSUSPEND);
2020 }
2021
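/*
 * Resume runs in two passes: every target's preresume must succeed
 * before any target's resume hook is called; a preresume failure
 * aborts the resume and is propagated to the caller.
 */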
2022 int dm_table_resume_targets(struct dm_table *t)
2023 {
2024         int i, r = 0;
2025
2026         lockdep_assert_held(&t->md->suspend_lock);
2027
2028         for (i = 0; i < t->num_targets; i++) {
2029                 struct dm_target *ti = t->targets + i;
2030
2031                 if (!ti->type->preresume)
2032                         continue;
2033
2034                 r = ti->type->preresume(ti);
2035                 if (r) {
2036                         DMERR("%s: %s: preresume failed, error = %d",
2037                               dm_device_name(t->md), ti->type->name, r);
2038                         return r;
2039                 }
2040         }
2041
2042         for (i = 0; i < t->num_targets; i++) {
2043                 struct dm_target *ti = t->targets + i;
2044
2045                 if (ti->type->resume)
2046                         ti->type->resume(ti);
2047         }
2048
2049         return 0;
2050 }
2051
2052 struct mapped_device *dm_table_get_md(struct dm_table *t)
2053 {
2054         return t->md;
2055 }
2056 EXPORT_SYMBOL(dm_table_get_md);
2057
2058 const char *dm_table_device_name(struct dm_table *t)
2059 {
2060         return dm_device_name(t->md);
2061 }
2062 EXPORT_SYMBOL_GPL(dm_table_device_name);
2063
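/*
 * For request-based tables only: asynchronously run the mapped
 * device's blk-mq hardware queues.
 */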
2064 void dm_table_run_md_queue_async(struct dm_table *t)
2065 {
2066         struct mapped_device *md;
2067         struct request_queue *queue;
2068
2069         if (!dm_table_request_based(t))
2070                 return;
2071
2072         md = dm_table_get_md(t);
2073         queue = dm_get_md_queue(md);
2074         if (queue)
2075                 blk_mq_run_hw_queues(queue, true);
2076 }
2077 EXPORT_SYMBOL(dm_table_run_md_queue_async);
2078