1 /*
2  * Copyright (C) 2001 Sistina Software (UK) Limited.
3  * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
4  *
5  * This file is released under the GPL.
6  */
7
8 #include "dm-core.h"
9
10 #include <linux/module.h>
11 #include <linux/vmalloc.h>
12 #include <linux/blkdev.h>
13 #include <linux/namei.h>
14 #include <linux/ctype.h>
15 #include <linux/string.h>
16 #include <linux/slab.h>
17 #include <linux/interrupt.h>
18 #include <linux/mutex.h>
19 #include <linux/delay.h>
20 #include <linux/atomic.h>
21 #include <linux/blk-mq.h>
22 #include <linux/mount.h>
23 #include <linux/dax.h>
24
25 #define DM_MSG_PREFIX "table"
26
27 #define MAX_DEPTH 16
28 #define NODE_SIZE L1_CACHE_BYTES
29 #define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
30 #define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)
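/*
 * Each btree node fills one L1 cache line with sector_t keys.  On a
 * typical configuration (64-byte cache lines, 8-byte sector_t) that
 * gives KEYS_PER_NODE = 8 and CHILDREN_PER_NODE = 9; the exact values
 * depend on the architecture.
 */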
31
32 struct dm_table {
33         struct mapped_device *md;
34         enum dm_queue_mode type;
35
36         /* btree table */
37         unsigned int depth;
38         unsigned int counts[MAX_DEPTH]; /* in nodes */
39         sector_t *index[MAX_DEPTH];
40
41         unsigned int num_targets;
42         unsigned int num_allocated;
43         sector_t *highs;
44         struct dm_target *targets;
45
46         struct target_type *immutable_target_type;
47
48         bool integrity_supported:1;
49         bool singleton:1;
50         unsigned integrity_added:1;
51
52         /*
53          * Indicates the rw permissions for the new logical
54          * device.  This should be a combination of FMODE_READ
55          * and FMODE_WRITE.
56          */
57         fmode_t mode;
58
59         /* a list of devices used by this table */
60         struct list_head devices;
61
62         /* events get handed up using this callback */
63         void (*event_fn)(void *);
64         void *event_context;
65
66         struct dm_md_mempools *mempools;
67 };
68
69 /*
70  * Similar to ceiling(log_base(n))
71  */
72 static unsigned int int_log(unsigned int n, unsigned int base)
73 {
74         int result = 0;
75
76         while (n > 1) {
77                 n = dm_div_up(n, base);
78                 result++;
79         }
80
81         return result;
82 }
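/*
 * For example, with base == 9, int_log(100, 9) steps 100 -> 12 -> 2 -> 1
 * and returns 3, matching ceil(log_9(100)).
 */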
83
84 /*
85  * Calculate the index of the child node for the k'th key of the n'th node.
86  */
87 static inline unsigned int get_child(unsigned int n, unsigned int k)
88 {
89         return (n * CHILDREN_PER_NODE) + k;
90 }
91
92 /*
93  * Return the n'th node of level l from table t.
94  */
95 static inline sector_t *get_node(struct dm_table *t,
96                                  unsigned int l, unsigned int n)
97 {
98         return t->index[l] + (n * KEYS_PER_NODE);
99 }
100
101 /*
102  * Return the highest key that you could look up from the n'th
103  * node on level l of the btree.
104  */
105 static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
106 {
107         for (; l < t->depth - 1; l++)
108                 n = get_child(n, CHILDREN_PER_NODE - 1);
109
110         if (n >= t->counts[l])
111                 return (sector_t) - 1;
112
113         return get_node(t, l, n)[KEYS_PER_NODE - 1];
114 }
115
116 /*
117  * Fills in a level of the btree based on the highs of the level
118  * below it.
119  */
120 static int setup_btree_index(unsigned int l, struct dm_table *t)
121 {
122         unsigned int n, k;
123         sector_t *node;
124
125         for (n = 0U; n < t->counts[l]; n++) {
126                 node = get_node(t, l, n);
127
128                 for (k = 0U; k < KEYS_PER_NODE; k++)
129                         node[k] = high(t, l + 1, get_child(n, k));
130         }
131
132         return 0;
133 }
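/*
 * After this has run for every internal level, key k of the n'th node on
 * level l holds the highest sector covered by that node's k'th child
 * subtree (or ~0 if the child does not exist), so a lookup can take the
 * first key >= sector at each level and descend; see
 * dm_table_find_target() below.
 */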
134
135 void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size)
136 {
137         unsigned long size;
138         void *addr;
139
140         /*
141          * Check that we're not going to overflow.
142          */
143         if (nmemb > (ULONG_MAX / elem_size))
144                 return NULL;
145
146         size = nmemb * elem_size;
147         addr = vzalloc(size);
148
149         return addr;
150 }
151 EXPORT_SYMBOL(dm_vcalloc);
152
153 /*
154  * highs and targets are managed as dynamic arrays during a
155  * table load.
156  */
157 static int alloc_targets(struct dm_table *t, unsigned int num)
158 {
159         sector_t *n_highs;
160         struct dm_target *n_targets;
161
162         /*
163          * Allocate both the target array and offset array at once.
164          */
165         n_highs = (sector_t *) dm_vcalloc(num, sizeof(struct dm_target) +
166                                           sizeof(sector_t));
167         if (!n_highs)
168                 return -ENOMEM;
169
170         n_targets = (struct dm_target *) (n_highs + num);
171
172         memset(n_highs, -1, sizeof(*n_highs) * num);
173         vfree(t->highs);
174
175         t->num_allocated = num;
176         t->highs = n_highs;
177         t->targets = n_targets;
178
179         return 0;
180 }
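/*
 * The resulting layout is a single vzalloc'd block:
 *
 *   [ num x sector_t highs ][ num x struct dm_target targets ]
 *
 * so t->highs[i] and t->targets[i] describe the same table entry.
 */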
181
182 int dm_table_create(struct dm_table **result, fmode_t mode,
183                     unsigned num_targets, struct mapped_device *md)
184 {
185         struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);
186
187         if (!t)
188                 return -ENOMEM;
189
190         INIT_LIST_HEAD(&t->devices);
191
192         if (!num_targets)
193                 num_targets = KEYS_PER_NODE;
194
195         num_targets = dm_round_up(num_targets, KEYS_PER_NODE);
196
197         if (!num_targets) {
198                 kfree(t);
199                 return -ENOMEM;
200         }
201
202         if (alloc_targets(t, num_targets)) {
203                 kfree(t);
204                 return -ENOMEM;
205         }
206
207         t->type = DM_TYPE_NONE;
208         t->mode = mode;
209         t->md = md;
210         *result = t;
211         return 0;
212 }
213
214 static void free_devices(struct list_head *devices, struct mapped_device *md)
215 {
216         struct list_head *tmp, *next;
217
218         list_for_each_safe(tmp, next, devices) {
219                 struct dm_dev_internal *dd =
220                     list_entry(tmp, struct dm_dev_internal, list);
221                 DMWARN("%s: dm_table_destroy: dm_put_device call missing for %s",
222                        dm_device_name(md), dd->dm_dev->name);
223                 dm_put_table_device(md, dd->dm_dev);
224                 kfree(dd);
225         }
226 }
227
228 void dm_table_destroy(struct dm_table *t)
229 {
230         unsigned int i;
231
232         if (!t)
233                 return;
234
235         /* free the indexes */
236         if (t->depth >= 2)
237                 vfree(t->index[t->depth - 2]);
238
239         /* free the targets */
240         for (i = 0; i < t->num_targets; i++) {
241                 struct dm_target *tgt = t->targets + i;
242
243                 if (tgt->type->dtr)
244                         tgt->type->dtr(tgt);
245
246                 dm_put_target_type(tgt->type);
247         }
248
249         vfree(t->highs);
250
251         /* free the device list */
252         free_devices(&t->devices, t->md);
253
254         dm_free_md_mempools(t->mempools);
255
256         kfree(t);
257 }
258
259 /*
260  * See if we've already got a device in the list.
261  */
262 static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
263 {
264         struct dm_dev_internal *dd;
265
266         list_for_each_entry (dd, l, list)
267                 if (dd->dm_dev->bdev->bd_dev == dev)
268                         return dd;
269
270         return NULL;
271 }
272
273 /*
274  * If possible, this checks whether an area of a destination device is invalid.
275  */
276 static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
277                                   sector_t start, sector_t len, void *data)
278 {
279         struct queue_limits *limits = data;
280         struct block_device *bdev = dev->bdev;
281         sector_t dev_size =
282                 i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
283         unsigned short logical_block_size_sectors =
284                 limits->logical_block_size >> SECTOR_SHIFT;
285         char b[BDEVNAME_SIZE];
286
287         if (!dev_size)
288                 return 0;
289
290         if ((start >= dev_size) || (start + len > dev_size)) {
291                 DMWARN("%s: %s too small for target: "
292                        "start=%llu, len=%llu, dev_size=%llu",
293                        dm_device_name(ti->table->md), bdevname(bdev, b),
294                        (unsigned long long)start,
295                        (unsigned long long)len,
296                        (unsigned long long)dev_size);
297                 return 1;
298         }
299
300         /*
301          * If the target is mapped to zoned block device(s), check
302          * that the zones are not partially mapped.
303          */
304         if (bdev_zoned_model(bdev) != BLK_ZONED_NONE) {
305                 unsigned int zone_sectors = bdev_zone_sectors(bdev);
306
307                 if (start & (zone_sectors - 1)) {
308                         DMWARN("%s: start=%llu not aligned to h/w zone size %u of %s",
309                                dm_device_name(ti->table->md),
310                                (unsigned long long)start,
311                                zone_sectors, bdevname(bdev, b));
312                         return 1;
313                 }
314
315                 /*
316                  * Note: The last zone of a zoned block device may be smaller
317                  * than other zones. So for a target mapping the end of a
318                  * zoned block device with such a zone, len would not be zone
319                  * aligned. We do not allow such last smaller zone to be part
320                  * of the mapping here to ensure that mappings with multiple
321                  * devices do not end up with a smaller zone in the middle of
322                  * the sector range.
323                  */
324                 if (len & (zone_sectors - 1)) {
325                         DMWARN("%s: len=%llu not aligned to h/w zone size %u of %s",
326                                dm_device_name(ti->table->md),
327                                (unsigned long long)len,
328                                zone_sectors, bdevname(bdev, b));
329                         return 1;
330                 }
331         }
332
333         if (logical_block_size_sectors <= 1)
334                 return 0;
335
336         if (start & (logical_block_size_sectors - 1)) {
337                 DMWARN("%s: start=%llu not aligned to h/w "
338                        "logical block size %u of %s",
339                        dm_device_name(ti->table->md),
340                        (unsigned long long)start,
341                        limits->logical_block_size, bdevname(bdev, b));
342                 return 1;
343         }
344
345         if (len & (logical_block_size_sectors - 1)) {
346                 DMWARN("%s: len=%llu not aligned to h/w "
347                        "logical block size %u of %s",
348                        dm_device_name(ti->table->md),
349                        (unsigned long long)len,
350                        limits->logical_block_size, bdevname(bdev, b));
351                 return 1;
352         }
353
354         return 0;
355 }
356
357 /*
358  * This upgrades the mode on an already open dm_dev, being
359  * careful to leave things as they were if we fail to reopen the
360  * device and not to touch the existing bdev field in case
361  * it is accessed concurrently.
362  */
363 static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
364                         struct mapped_device *md)
365 {
366         int r;
367         struct dm_dev *old_dev, *new_dev;
368
369         old_dev = dd->dm_dev;
370
371         r = dm_get_table_device(md, dd->dm_dev->bdev->bd_dev,
372                                 dd->dm_dev->mode | new_mode, &new_dev);
373         if (r)
374                 return r;
375
376         dd->dm_dev = new_dev;
377         dm_put_table_device(md, old_dev);
378
379         return 0;
380 }
381
382 /*
383  * Convert the path to a device
384  */
385 dev_t dm_get_dev_t(const char *path)
386 {
387         dev_t dev;
388         struct block_device *bdev;
389
390         bdev = lookup_bdev(path);
391         if (IS_ERR(bdev))
392                 dev = name_to_dev_t(path);
393         else {
394                 dev = bdev->bd_dev;
395                 bdput(bdev);
396         }
397
398         return dev;
399 }
400 EXPORT_SYMBOL_GPL(dm_get_dev_t);
401
402 /*
403  * Add a device to the list, or just increment the usage count if
404  * it's already present.
405  */
406 int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
407                   struct dm_dev **result)
408 {
409         int r;
410         dev_t dev;
411         struct dm_dev_internal *dd;
412         struct dm_table *t = ti->table;
413
414         BUG_ON(!t);
415
416         dev = dm_get_dev_t(path);
417         if (!dev)
418                 return -ENODEV;
419
420         dd = find_device(&t->devices, dev);
421         if (!dd) {
422                 dd = kmalloc(sizeof(*dd), GFP_KERNEL);
423                 if (!dd)
424                         return -ENOMEM;
425
426                 if ((r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev))) {
427                         kfree(dd);
428                         return r;
429                 }
430
431                 refcount_set(&dd->count, 1);
432                 list_add(&dd->list, &t->devices);
433                 goto out;
434
435         } else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) {
436                 r = upgrade_mode(dd, mode, t->md);
437                 if (r)
438                         return r;
439         }
440         refcount_inc(&dd->count);
441 out:
442         *result = dd->dm_dev;
443         return 0;
444 }
445 EXPORT_SYMBOL(dm_get_device);
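/*
 * Illustrative sketch (not part of this file): a simple target's
 * constructor typically looks up its backing device with dm_get_device()
 * and releases it in its destructor with dm_put_device().  The names
 * example_ctr, example_dtr and struct example_ctx below are hypothetical.
 *
 *	struct example_ctx { struct dm_dev *dev; };
 *
 *	static int example_ctr(struct dm_target *ti, unsigned argc, char **argv)
 *	{
 *		struct example_ctx *ec = kzalloc(sizeof(*ec), GFP_KERNEL);
 *
 *		if (!ec)
 *			return -ENOMEM;
 *		if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
 *				  &ec->dev)) {
 *			kfree(ec);
 *			return -EINVAL;
 *		}
 *		ti->private = ec;
 *		return 0;
 *	}
 *
 *	static void example_dtr(struct dm_target *ti)
 *	{
 *		struct example_ctx *ec = ti->private;
 *
 *		dm_put_device(ti, ec->dev);
 *		kfree(ec);
 *	}
 */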
446
447 static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
448                                 sector_t start, sector_t len, void *data)
449 {
450         struct queue_limits *limits = data;
451         struct block_device *bdev = dev->bdev;
452         struct request_queue *q = bdev_get_queue(bdev);
453         char b[BDEVNAME_SIZE];
454
455         if (unlikely(!q)) {
456                 DMWARN("%s: Cannot set limits for nonexistent device %s",
457                        dm_device_name(ti->table->md), bdevname(bdev, b));
458                 return 0;
459         }
460
461         if (blk_stack_limits(limits, &q->limits,
462                         get_start_sect(bdev) + start) < 0)
463                 DMWARN("%s: adding target device %s caused an alignment inconsistency: "
464                        "physical_block_size=%u, logical_block_size=%u, "
465                        "alignment_offset=%u, start=%llu",
466                        dm_device_name(ti->table->md), bdevname(bdev, b),
467                        q->limits.physical_block_size,
468                        q->limits.logical_block_size,
469                        q->limits.alignment_offset,
470                        (unsigned long long) start << SECTOR_SHIFT);
471         return 0;
472 }
473
474 /*
475  * Decrement a device's use count and remove it if necessary.
476  */
477 void dm_put_device(struct dm_target *ti, struct dm_dev *d)
478 {
479         int found = 0;
480         struct list_head *devices = &ti->table->devices;
481         struct dm_dev_internal *dd;
482
483         list_for_each_entry(dd, devices, list) {
484                 if (dd->dm_dev == d) {
485                         found = 1;
486                         break;
487                 }
488         }
489         if (!found) {
490                 DMWARN("%s: device %s not in table devices list",
491                        dm_device_name(ti->table->md), d->name);
492                 return;
493         }
494         if (refcount_dec_and_test(&dd->count)) {
495                 dm_put_table_device(ti->table->md, d);
496                 list_del(&dd->list);
497                 kfree(dd);
498         }
499 }
500 EXPORT_SYMBOL(dm_put_device);
501
502 /*
503  * Checks to see if the target joins onto the end of the table.
504  */
505 static int adjoin(struct dm_table *table, struct dm_target *ti)
506 {
507         struct dm_target *prev;
508
509         if (!table->num_targets)
510                 return !ti->begin;
511
512         prev = &table->targets[table->num_targets - 1];
513         return (ti->begin == (prev->begin + prev->len));
514 }
515
516 /*
517  * Used to dynamically allocate the arg array.
518  *
519  * We do first allocation with GFP_NOIO because dm-mpath and dm-thin must
520  * process messages even if some device is suspended. These messages have a
521  * small fixed number of arguments.
522  *
523  * On the other hand, dm-switch needs to process bulk data using messages and
524  * excessive use of GFP_NOIO could cause trouble.
525  */
526 static char **realloc_argv(unsigned *size, char **old_argv)
527 {
528         char **argv;
529         unsigned new_size;
530         gfp_t gfp;
531
532         if (*size) {
533                 new_size = *size * 2;
534                 gfp = GFP_KERNEL;
535         } else {
536                 new_size = 8;
537                 gfp = GFP_NOIO;
538         }
539         argv = kmalloc_array(new_size, sizeof(*argv), gfp);
540         if (argv && old_argv) {
541                 memcpy(argv, old_argv, *size * sizeof(*argv));
542                 *size = new_size;
543         }
544
545         kfree(old_argv);
546         return argv;
547 }
548
549 /*
550  * Destructively splits up the argument list to pass to ctr.
551  */
552 int dm_split_args(int *argc, char ***argvp, char *input)
553 {
554         char *start, *end = input, *out, **argv = NULL;
555         unsigned array_size = 0;
556
557         *argc = 0;
558
559         if (!input) {
560                 *argvp = NULL;
561                 return 0;
562         }
563
564         argv = realloc_argv(&array_size, argv);
565         if (!argv)
566                 return -ENOMEM;
567
568         while (1) {
569                 /* Skip whitespace */
570                 start = skip_spaces(end);
571
572                 if (!*start)
573                         break;  /* success, we hit the end */
574
575                 /* 'out' is used to remove any back-quotes */
576                 end = out = start;
577                 while (*end) {
578                         /* Everything apart from '\0' can be quoted */
579                         if (*end == '\\' && *(end + 1)) {
580                                 *out++ = *(end + 1);
581                                 end += 2;
582                                 continue;
583                         }
584
585                         if (isspace(*end))
586                                 break;  /* end of token */
587
588                         *out++ = *end++;
589                 }
590
591                 /* have we already filled the array ? */
592                 if ((*argc + 1) > array_size) {
593                         argv = realloc_argv(&array_size, argv);
594                         if (!argv)
595                                 return -ENOMEM;
596                 }
597
598                 /* we know this is whitespace */
599                 if (*end)
600                         end++;
601
602                 /* terminate the string and put it in the array */
603                 *out = '\0';
604                 argv[*argc] = start;
605                 (*argc)++;
606         }
607
608         *argvp = argv;
609         return 0;
610 }
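/*
 * For example, splitting the (writable) string "a b\ c  d" in place
 * yields argc = 3 with argv = { "a", "b c", "d" }: a backslash escapes
 * the following character, so "\ " becomes a literal space inside a
 * single argument, and runs of whitespace are collapsed.
 */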
611
612 /*
613  * Impose necessary and sufficient conditions on a device's table such
614  * that any incoming bio which respects its logical_block_size can be
615  * processed successfully.  If it falls across the boundary between
616  * two or more targets, the size of each piece it gets split into must
617  * be compatible with the logical_block_size of the target processing it.
618  */
619 static int validate_hardware_logical_block_alignment(struct dm_table *table,
620                                                  struct queue_limits *limits)
621 {
622         /*
623          * This function uses arithmetic modulo the logical_block_size
624          * (in units of 512-byte sectors).
625          */
626         unsigned short device_logical_block_size_sects =
627                 limits->logical_block_size >> SECTOR_SHIFT;
628
629         /*
630          * Offset of the start of the next table entry, mod logical_block_size.
631          */
632         unsigned short next_target_start = 0;
633
634         /*
635          * Given an aligned bio that extends beyond the end of a
636          * target, how many sectors must the next target handle?
637          */
638         unsigned short remaining = 0;
639
640         struct dm_target *ti;
641         struct queue_limits ti_limits;
642         unsigned i;
643
644         /*
645          * Check each entry in the table in turn.
646          */
647         for (i = 0; i < dm_table_get_num_targets(table); i++) {
648                 ti = dm_table_get_target(table, i);
649
650                 blk_set_stacking_limits(&ti_limits);
651
652                 /* combine all target devices' limits */
653                 if (ti->type->iterate_devices)
654                         ti->type->iterate_devices(ti, dm_set_device_limits,
655                                                   &ti_limits);
656
657                 /*
658                  * If the remaining sectors fall entirely within this
659                  * table entry, are they compatible with its logical_block_size?
660                  */
661                 if (remaining < ti->len &&
662                     remaining & ((ti_limits.logical_block_size >>
663                                   SECTOR_SHIFT) - 1))
664                         break;  /* Error */
665
666                 next_target_start =
667                     (unsigned short) ((next_target_start + ti->len) &
668                                       (device_logical_block_size_sects - 1));
669                 remaining = next_target_start ?
670                     device_logical_block_size_sects - next_target_start : 0;
671         }
672
673         if (remaining) {
674                 DMWARN("%s: table line %u (start sect %llu len %llu) "
675                        "not aligned to h/w logical block size %u",
676                        dm_device_name(table->md), i,
677                        (unsigned long long) ti->begin,
678                        (unsigned long long) ti->len,
679                        limits->logical_block_size);
680                 return -EINVAL;
681         }
682
683         return 0;
684 }
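/*
 * Worked example (assuming 4096-byte logical blocks, i.e. 8 sectors, on
 * both targets' devices): a target boundary at sector 1001 leaves
 * next_target_start = 1001 & 7 = 1, so the following target would have
 * to complete the remaining 7 sectors of that block.  7 & 7 is non-zero,
 * so the loop breaks with remaining set and the table is rejected.
 */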
685
686 int dm_table_add_target(struct dm_table *t, const char *type,
687                         sector_t start, sector_t len, char *params)
688 {
689         int r = -EINVAL, argc;
690         char **argv;
691         struct dm_target *tgt;
692
693         if (t->singleton) {
694                 DMERR("%s: target type %s must appear alone in table",
695                       dm_device_name(t->md), t->targets->type->name);
696                 return -EINVAL;
697         }
698
699         BUG_ON(t->num_targets >= t->num_allocated);
700
701         tgt = t->targets + t->num_targets;
702         memset(tgt, 0, sizeof(*tgt));
703
704         if (!len) {
705                 DMERR("%s: zero-length target", dm_device_name(t->md));
706                 return -EINVAL;
707         }
708
709         tgt->type = dm_get_target_type(type);
710         if (!tgt->type) {
711                 DMERR("%s: %s: unknown target type", dm_device_name(t->md), type);
712                 return -EINVAL;
713         }
714
715         if (dm_target_needs_singleton(tgt->type)) {
716                 if (t->num_targets) {
717                         tgt->error = "singleton target type must appear alone in table";
718                         goto bad;
719                 }
720                 t->singleton = true;
721         }
722
723         if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) {
724                 tgt->error = "target type may not be included in a read-only table";
725                 goto bad;
726         }
727
728         if (t->immutable_target_type) {
729                 if (t->immutable_target_type != tgt->type) {
730                         tgt->error = "immutable target type cannot be mixed with other target types";
731                         goto bad;
732                 }
733         } else if (dm_target_is_immutable(tgt->type)) {
734                 if (t->num_targets) {
735                         tgt->error = "immutable target type cannot be mixed with other target types";
736                         goto bad;
737                 }
738                 t->immutable_target_type = tgt->type;
739         }
740
741         if (dm_target_has_integrity(tgt->type))
742                 t->integrity_added = 1;
743
744         tgt->table = t;
745         tgt->begin = start;
746         tgt->len = len;
747         tgt->error = "Unknown error";
748
749         /*
750          * Does this target adjoin the previous one ?
751          */
752         if (!adjoin(t, tgt)) {
753                 tgt->error = "Gap in table";
754                 goto bad;
755         }
756
757         r = dm_split_args(&argc, &argv, params);
758         if (r) {
759                 tgt->error = "couldn't split parameters (insufficient memory)";
760                 goto bad;
761         }
762
763         r = tgt->type->ctr(tgt, argc, argv);
764         kfree(argv);
765         if (r)
766                 goto bad;
767
768         t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;
769
770         if (!tgt->num_discard_bios && tgt->discards_supported)
771                 DMWARN("%s: %s: ignoring discards_supported because num_discard_bios is zero.",
772                        dm_device_name(t->md), type);
773
774         return 0;
775
776  bad:
777         DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
778         dm_put_target_type(tgt->type);
779         return r;
780 }
781
782 /*
783  * Target argument parsing helpers.
784  */
785 static int validate_next_arg(const struct dm_arg *arg,
786                              struct dm_arg_set *arg_set,
787                              unsigned *value, char **error, unsigned grouped)
788 {
789         const char *arg_str = dm_shift_arg(arg_set);
790         char dummy;
791
792         if (!arg_str ||
793             (sscanf(arg_str, "%u%c", value, &dummy) != 1) ||
794             (*value < arg->min) ||
795             (*value > arg->max) ||
796             (grouped && arg_set->argc < *value)) {
797                 *error = arg->error;
798                 return -EINVAL;
799         }
800
801         return 0;
802 }
803
804 int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
805                 unsigned *value, char **error)
806 {
807         return validate_next_arg(arg, arg_set, value, error, 0);
808 }
809 EXPORT_SYMBOL(dm_read_arg);
810
811 int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
812                       unsigned *value, char **error)
813 {
814         return validate_next_arg(arg, arg_set, value, error, 1);
815 }
816 EXPORT_SYMBOL(dm_read_arg_group);
817
818 const char *dm_shift_arg(struct dm_arg_set *as)
819 {
820         char *r;
821
822         if (as->argc) {
823                 as->argc--;
824                 r = *as->argv;
825                 as->argv++;
826                 return r;
827         }
828
829         return NULL;
830 }
831 EXPORT_SYMBOL(dm_shift_arg);
832
833 void dm_consume_args(struct dm_arg_set *as, unsigned num_args)
834 {
835         BUG_ON(as->argc < num_args);
836         as->argc -= num_args;
837         as->argv += num_args;
838 }
839 EXPORT_SYMBOL(dm_consume_args);
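/*
 * Illustrative sketch (not part of this file): a target constructor
 * usually combines these helpers roughly as follows.  The _args table
 * and the surrounding names are hypothetical.
 *
 *	static const struct dm_arg _args[] = {
 *		{0, 4, "Invalid number of feature arguments"},
 *	};
 *	struct dm_arg_set as = { .argc = argc, .argv = argv };
 *	unsigned num_features;
 *	const char *arg;
 *
 *	if (dm_read_arg_group(_args, &as, &num_features, &ti->error))
 *		return -EINVAL;
 *	while (num_features--) {
 *		arg = dm_shift_arg(&as);
 *		... match and handle each feature word ...
 *	}
 */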
840
841 static bool __table_type_bio_based(enum dm_queue_mode table_type)
842 {
843         return (table_type == DM_TYPE_BIO_BASED ||
844                 table_type == DM_TYPE_DAX_BIO_BASED ||
845                 table_type == DM_TYPE_NVME_BIO_BASED);
846 }
847
848 static bool __table_type_request_based(enum dm_queue_mode table_type)
849 {
850         return table_type == DM_TYPE_REQUEST_BASED;
851 }
852
853 void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type)
854 {
855         t->type = type;
856 }
857 EXPORT_SYMBOL_GPL(dm_table_set_type);
858
859 /* validate the dax capability of the target device span */
860 int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
861                         sector_t start, sector_t len, void *data)
862 {
863         int blocksize = *(int *) data, id;
864         bool rc;
865
866         id = dax_read_lock();
867         rc = dax_supported(dev->dax_dev, dev->bdev, blocksize, start, len);
868         dax_read_unlock(id);
869
870         return rc;
871 }
872
873 /* Check devices support synchronous DAX */
874 static int device_dax_synchronous(struct dm_target *ti, struct dm_dev *dev,
875                                   sector_t start, sector_t len, void *data)
876 {
877         return dev->dax_dev && dax_synchronous(dev->dax_dev);
878 }
879
880 bool dm_table_supports_dax(struct dm_table *t,
881                            iterate_devices_callout_fn iterate_fn, int *blocksize)
882 {
883         struct dm_target *ti;
884         unsigned i;
885
886         /* Ensure that all targets support DAX. */
887         for (i = 0; i < dm_table_get_num_targets(t); i++) {
888                 ti = dm_table_get_target(t, i);
889
890                 if (!ti->type->direct_access)
891                         return false;
892
893                 if (!ti->type->iterate_devices ||
894                     !ti->type->iterate_devices(ti, iterate_fn, blocksize))
895                         return false;
896         }
897
898         return true;
899 }
900
901 static bool dm_table_does_not_support_partial_completion(struct dm_table *t);
902
903 static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev,
904                                   sector_t start, sector_t len, void *data)
905 {
906         struct block_device *bdev = dev->bdev;
907         struct request_queue *q = bdev_get_queue(bdev);
908
909         /* request-based cannot stack on partitions! */
910         if (bdev != bdev->bd_contains)
911                 return false;
912
913         return queue_is_mq(q);
914 }
915
916 static int dm_table_determine_type(struct dm_table *t)
917 {
918         unsigned i;
919         unsigned bio_based = 0, request_based = 0, hybrid = 0;
920         struct dm_target *tgt;
921         struct list_head *devices = dm_table_get_devices(t);
922         enum dm_queue_mode live_md_type = dm_get_md_type(t->md);
923         int page_size = PAGE_SIZE;
924
925         if (t->type != DM_TYPE_NONE) {
926                 /* target already set the table's type */
927                 if (t->type == DM_TYPE_BIO_BASED) {
928                         /* possibly upgrade to a variant of bio-based */
929                         goto verify_bio_based;
930                 }
931                 BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED);
932                 BUG_ON(t->type == DM_TYPE_NVME_BIO_BASED);
933                 goto verify_rq_based;
934         }
935
936         for (i = 0; i < t->num_targets; i++) {
937                 tgt = t->targets + i;
938                 if (dm_target_hybrid(tgt))
939                         hybrid = 1;
940                 else if (dm_target_request_based(tgt))
941                         request_based = 1;
942                 else
943                         bio_based = 1;
944
945                 if (bio_based && request_based) {
946                         DMERR("Inconsistent table: different target types"
947                               " can't be mixed up");
948                         return -EINVAL;
949                 }
950         }
951
952         if (hybrid && !bio_based && !request_based) {
953                 /*
954                  * The targets can work either way.
955                  * Determine the type from the live device.
956                  * Default to bio-based if device is new.
957                  */
958                 if (__table_type_request_based(live_md_type))
959                         request_based = 1;
960                 else
961                         bio_based = 1;
962         }
963
964         if (bio_based) {
965 verify_bio_based:
966                 /* We must use this table as bio-based */
967                 t->type = DM_TYPE_BIO_BASED;
968                 if (dm_table_supports_dax(t, device_supports_dax, &page_size) ||
969                     (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) {
970                         t->type = DM_TYPE_DAX_BIO_BASED;
971                 } else {
972                         /* Check if upgrading to NVMe bio-based is valid or required */
973                         tgt = dm_table_get_immutable_target(t);
974                         if (tgt && !tgt->max_io_len && dm_table_does_not_support_partial_completion(t)) {
975                                 t->type = DM_TYPE_NVME_BIO_BASED;
976                                 goto verify_rq_based; /* must be stacked directly on NVMe (blk-mq) */
977                         } else if (list_empty(devices) && live_md_type == DM_TYPE_NVME_BIO_BASED) {
978                                 t->type = DM_TYPE_NVME_BIO_BASED;
979                         }
980                 }
981                 return 0;
982         }
983
984         BUG_ON(!request_based); /* No targets in this table */
985
986         t->type = DM_TYPE_REQUEST_BASED;
987
988 verify_rq_based:
989         /*
990          * Request-based dm supports only tables that have a single target now.
991          * To support multiple targets, request splitting support is needed,
992          * and that needs lots of changes in the block-layer.
993          * (e.g. request completion process for partial completion.)
994          */
995         if (t->num_targets > 1) {
996                 DMERR("%s DM doesn't support multiple targets",
997                       t->type == DM_TYPE_NVME_BIO_BASED ? "nvme bio-based" : "request-based");
998                 return -EINVAL;
999         }
1000
1001         if (list_empty(devices)) {
1002                 int srcu_idx;
1003                 struct dm_table *live_table = dm_get_live_table(t->md, &srcu_idx);
1004
1005                 /* inherit live table's type */
1006                 if (live_table)
1007                         t->type = live_table->type;
1008                 dm_put_live_table(t->md, srcu_idx);
1009                 return 0;
1010         }
1011
1012         tgt = dm_table_get_immutable_target(t);
1013         if (!tgt) {
1014                 DMERR("table load rejected: immutable target is required");
1015                 return -EINVAL;
1016         } else if (tgt->max_io_len) {
1017                 DMERR("table load rejected: immutable target that splits IO is not supported");
1018                 return -EINVAL;
1019         }
1020
1021         /* Non-request-stackable devices can't be used for request-based dm */
1022         if (!tgt->type->iterate_devices ||
1023             !tgt->type->iterate_devices(tgt, device_is_rq_stackable, NULL)) {
1024                 DMERR("table load rejected: including non-request-stackable devices");
1025                 return -EINVAL;
1026         }
1027
1028         return 0;
1029 }
1030
1031 enum dm_queue_mode dm_table_get_type(struct dm_table *t)
1032 {
1033         return t->type;
1034 }
1035
1036 struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)
1037 {
1038         return t->immutable_target_type;
1039 }
1040
1041 struct dm_target *dm_table_get_immutable_target(struct dm_table *t)
1042 {
1043         /* Immutable target is implicitly a singleton */
1044         if (t->num_targets > 1 ||
1045             !dm_target_is_immutable(t->targets[0].type))
1046                 return NULL;
1047
1048         return t->targets;
1049 }
1050
1051 struct dm_target *dm_table_get_wildcard_target(struct dm_table *t)
1052 {
1053         struct dm_target *ti;
1054         unsigned i;
1055
1056         for (i = 0; i < dm_table_get_num_targets(t); i++) {
1057                 ti = dm_table_get_target(t, i);
1058                 if (dm_target_is_wildcard(ti->type))
1059                         return ti;
1060         }
1061
1062         return NULL;
1063 }
1064
1065 bool dm_table_bio_based(struct dm_table *t)
1066 {
1067         return __table_type_bio_based(dm_table_get_type(t));
1068 }
1069
1070 bool dm_table_request_based(struct dm_table *t)
1071 {
1072         return __table_type_request_based(dm_table_get_type(t));
1073 }
1074
1075 static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
1076 {
1077         enum dm_queue_mode type = dm_table_get_type(t);
1078         unsigned per_io_data_size = 0;
1079         unsigned min_pool_size = 0;
1080         struct dm_target *ti;
1081         unsigned i;
1082
1083         if (unlikely(type == DM_TYPE_NONE)) {
1084                 DMWARN("no table type is set, can't allocate mempools");
1085                 return -EINVAL;
1086         }
1087
1088         if (__table_type_bio_based(type))
1089                 for (i = 0; i < t->num_targets; i++) {
1090                         ti = t->targets + i;
1091                         per_io_data_size = max(per_io_data_size, ti->per_io_data_size);
1092                         min_pool_size = max(min_pool_size, ti->num_flush_bios);
1093                 }
1094
1095         t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported,
1096                                            per_io_data_size, min_pool_size);
1097         if (!t->mempools)
1098                 return -ENOMEM;
1099
1100         return 0;
1101 }
1102
1103 void dm_table_free_md_mempools(struct dm_table *t)
1104 {
1105         dm_free_md_mempools(t->mempools);
1106         t->mempools = NULL;
1107 }
1108
1109 struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t)
1110 {
1111         return t->mempools;
1112 }
1113
1114 static int setup_indexes(struct dm_table *t)
1115 {
1116         int i;
1117         unsigned int total = 0;
1118         sector_t *indexes;
1119
1120         /* allocate the space for *all* the indexes */
1121         for (i = t->depth - 2; i >= 0; i--) {
1122                 t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
1123                 total += t->counts[i];
1124         }
1125
1126         indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE);
1127         if (!indexes)
1128                 return -ENOMEM;
1129
1130         /* set up internal nodes, bottom-up */
1131         for (i = t->depth - 2; i >= 0; i--) {
1132                 t->index[i] = indexes;
1133                 indexes += (KEYS_PER_NODE * t->counts[i]);
1134                 setup_btree_index(i, t);
1135         }
1136
1137         return 0;
1138 }
1139
1140 /*
1141  * Builds the btree to index the map.
1142  */
1143 static int dm_table_build_index(struct dm_table *t)
1144 {
1145         int r = 0;
1146         unsigned int leaf_nodes;
1147
1148         /* how many indexes will the btree have ? */
1149         leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
1150         t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);
1151
1152         /* leaf layer has already been set up */
1153         t->counts[t->depth - 1] = leaf_nodes;
1154         t->index[t->depth - 1] = t->highs;
1155
1156         if (t->depth >= 2)
1157                 r = setup_indexes(t);
1158
1159         return r;
1160 }
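/*
 * Example: with 8 keys per node (64-byte cache line, 8-byte sector_t), a
 * 100-target table needs dm_div_up(100, 8) = 13 leaf nodes and a depth
 * of 1 + int_log(13, 9) = 3 levels.
 */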
1161
1162 static bool integrity_profile_exists(struct gendisk *disk)
1163 {
1164         return !!blk_get_integrity(disk);
1165 }
1166
1167 /*
1168  * Get a disk whose integrity profile reflects the table's profile.
1169  * Returns NULL if integrity support was inconsistent or unavailable.
1170  */
1171 static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t)
1172 {
1173         struct list_head *devices = dm_table_get_devices(t);
1174         struct dm_dev_internal *dd = NULL;
1175         struct gendisk *prev_disk = NULL, *template_disk = NULL;
1176         unsigned i;
1177
1178         for (i = 0; i < dm_table_get_num_targets(t); i++) {
1179                 struct dm_target *ti = dm_table_get_target(t, i);
1180                 if (!dm_target_passes_integrity(ti->type))
1181                         goto no_integrity;
1182         }
1183
1184         list_for_each_entry(dd, devices, list) {
1185                 template_disk = dd->dm_dev->bdev->bd_disk;
1186                 if (!integrity_profile_exists(template_disk))
1187                         goto no_integrity;
1188                 else if (prev_disk &&
1189                          blk_integrity_compare(prev_disk, template_disk) < 0)
1190                         goto no_integrity;
1191                 prev_disk = template_disk;
1192         }
1193
1194         return template_disk;
1195
1196 no_integrity:
1197         if (prev_disk)
1198                 DMWARN("%s: integrity not set: %s and %s profile mismatch",
1199                        dm_device_name(t->md),
1200                        prev_disk->disk_name,
1201                        template_disk->disk_name);
1202         return NULL;
1203 }
1204
1205 /*
1206  * Register the mapped device for blk_integrity support if the
1207  * underlying devices have an integrity profile.  But all devices may
1208  * not have matching profiles (checking all devices isn't reliable
1209  * during table load because this table may use other DM device(s) which
1210  * must be resumed before they will have an initialized integrity
1211  * profile).  Consequently, stacked DM devices force a 2 stage integrity
1212  * profile validation: First pass during table load, final pass during
1213  * resume.
1214  */
1215 static int dm_table_register_integrity(struct dm_table *t)
1216 {
1217         struct mapped_device *md = t->md;
1218         struct gendisk *template_disk = NULL;
1219
1220         /* If target handles integrity itself do not register it here. */
1221         if (t->integrity_added)
1222                 return 0;
1223
1224         template_disk = dm_table_get_integrity_disk(t);
1225         if (!template_disk)
1226                 return 0;
1227
1228         if (!integrity_profile_exists(dm_disk(md))) {
1229                 t->integrity_supported = true;
1230                 /*
1231                  * Register integrity profile during table load; we can do
1232                  * this because the final profile must match during resume.
1233                  */
1234                 blk_integrity_register(dm_disk(md),
1235                                        blk_get_integrity(template_disk));
1236                 return 0;
1237         }
1238
1239         /*
1240          * If DM device already has an initialized integrity
1241          * profile the new profile should not conflict.
1242          */
1243         if (blk_integrity_compare(dm_disk(md), template_disk) < 0) {
1244                 DMWARN("%s: conflict with existing integrity profile: "
1245                        "%s profile mismatch",
1246                        dm_device_name(t->md),
1247                        template_disk->disk_name);
1248                 return 1;
1249         }
1250
1251         /* Preserve existing integrity profile */
1252         t->integrity_supported = true;
1253         return 0;
1254 }
1255
1256 /*
1257  * Prepares the table for use by building the indices,
1258  * setting the type, and allocating mempools.
1259  */
1260 int dm_table_complete(struct dm_table *t)
1261 {
1262         int r;
1263
1264         r = dm_table_determine_type(t);
1265         if (r) {
1266                 DMERR("unable to determine table type");
1267                 return r;
1268         }
1269
1270         r = dm_table_build_index(t);
1271         if (r) {
1272                 DMERR("unable to build btrees");
1273                 return r;
1274         }
1275
1276         r = dm_table_register_integrity(t);
1277         if (r) {
1278                 DMERR("could not register integrity profile.");
1279                 return r;
1280         }
1281
1282         r = dm_table_alloc_md_mempools(t, t->md);
1283         if (r)
1284                 DMERR("unable to allocate mempools");
1285
1286         return r;
1287 }
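/*
 * Typical load sequence, as driven by the ioctl layer: dm_table_create(),
 * then dm_table_add_target() once per table line, then dm_table_complete()
 * before the table is bound to the device; dm_table_destroy() tears it
 * down again.
 */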
1288
1289 static DEFINE_MUTEX(_event_lock);
1290 void dm_table_event_callback(struct dm_table *t,
1291                              void (*fn)(void *), void *context)
1292 {
1293         mutex_lock(&_event_lock);
1294         t->event_fn = fn;
1295         t->event_context = context;
1296         mutex_unlock(&_event_lock);
1297 }
1298
1299 void dm_table_event(struct dm_table *t)
1300 {
1301         /*
1302          * You can no longer call dm_table_event() from interrupt
1303          * context, use a bottom half instead.
1304          */
1305         BUG_ON(in_interrupt());
1306
1307         mutex_lock(&_event_lock);
1308         if (t->event_fn)
1309                 t->event_fn(t->event_context);
1310         mutex_unlock(&_event_lock);
1311 }
1312 EXPORT_SYMBOL(dm_table_event);
1313
1314 inline sector_t dm_table_get_size(struct dm_table *t)
1315 {
1316         return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
1317 }
1318 EXPORT_SYMBOL(dm_table_get_size);
1319
1320 struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
1321 {
1322         if (index >= t->num_targets)
1323                 return NULL;
1324
1325         return t->targets + index;
1326 }
1327
1328 /*
1329  * Search the btree for the correct target.
1330  *
1331  * Caller should check returned pointer for NULL
1332  * to trap I/O beyond end of device.
1333  */
1334 struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
1335 {
1336         unsigned int l, n = 0, k = 0;
1337         sector_t *node;
1338
1339         if (unlikely(sector >= dm_table_get_size(t)))
1340                 return NULL;
1341
1342         for (l = 0; l < t->depth; l++) {
1343                 n = get_child(n, k);
1344                 node = get_node(t, l, n);
1345
1346                 for (k = 0; k < KEYS_PER_NODE; k++)
1347                         if (node[k] >= sector)
1348                                 break;
1349         }
1350
1351         return &t->targets[(KEYS_PER_NODE * n) + k];
1352 }
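/*
 * The lookup visits one node per level: at each node it takes the first
 * key that is >= sector and descends into the corresponding child, so a
 * matching leaf entry is found in at most depth * KEYS_PER_NODE key
 * comparisons.
 */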
1353
1354 static int count_device(struct dm_target *ti, struct dm_dev *dev,
1355                         sector_t start, sector_t len, void *data)
1356 {
1357         unsigned *num_devices = data;
1358
1359         (*num_devices)++;
1360
1361         return 0;
1362 }
1363
1364 /*
1365  * Check whether a table has no data devices attached using each
1366  * target's iterate_devices method.
1367  * Returns false if the result is unknown because a target doesn't
1368  * support iterate_devices.
1369  */
1370 bool dm_table_has_no_data_devices(struct dm_table *table)
1371 {
1372         struct dm_target *ti;
1373         unsigned i, num_devices;
1374
1375         for (i = 0; i < dm_table_get_num_targets(table); i++) {
1376                 ti = dm_table_get_target(table, i);
1377
1378                 if (!ti->type->iterate_devices)
1379                         return false;
1380
1381                 num_devices = 0;
1382                 ti->type->iterate_devices(ti, count_device, &num_devices);
1383                 if (num_devices)
1384                         return false;
1385         }
1386
1387         return true;
1388 }
1389
1390 static int device_is_zoned_model(struct dm_target *ti, struct dm_dev *dev,
1391                                  sector_t start, sector_t len, void *data)
1392 {
1393         struct request_queue *q = bdev_get_queue(dev->bdev);
1394         enum blk_zoned_model *zoned_model = data;
1395
1396         return q && blk_queue_zoned_model(q) == *zoned_model;
1397 }
1398
1399 static bool dm_table_supports_zoned_model(struct dm_table *t,
1400                                           enum blk_zoned_model zoned_model)
1401 {
1402         struct dm_target *ti;
1403         unsigned i;
1404
1405         for (i = 0; i < dm_table_get_num_targets(t); i++) {
1406                 ti = dm_table_get_target(t, i);
1407
1408                 if (zoned_model == BLK_ZONED_HM &&
1409                     !dm_target_supports_zoned_hm(ti->type))
1410                         return false;
1411
1412                 if (!ti->type->iterate_devices ||
1413                     !ti->type->iterate_devices(ti, device_is_zoned_model, &zoned_model))
1414                         return false;
1415         }
1416
1417         return true;
1418 }
1419
1420 static int device_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev,
1421                                        sector_t start, sector_t len, void *data)
1422 {
1423         struct request_queue *q = bdev_get_queue(dev->bdev);
1424         unsigned int *zone_sectors = data;
1425
1426         return q && blk_queue_zone_sectors(q) == *zone_sectors;
1427 }
1428
1429 static bool dm_table_matches_zone_sectors(struct dm_table *t,
1430                                           unsigned int zone_sectors)
1431 {
1432         struct dm_target *ti;
1433         unsigned i;
1434
1435         for (i = 0; i < dm_table_get_num_targets(t); i++) {
1436                 ti = dm_table_get_target(t, i);
1437
1438                 if (!ti->type->iterate_devices ||
1439                     !ti->type->iterate_devices(ti, device_matches_zone_sectors, &zone_sectors))
1440                         return false;
1441         }
1442
1443         return true;
1444 }
1445
1446 static int validate_hardware_zoned_model(struct dm_table *table,
1447                                          enum blk_zoned_model zoned_model,
1448                                          unsigned int zone_sectors)
1449 {
1450         if (zoned_model == BLK_ZONED_NONE)
1451                 return 0;
1452
1453         if (!dm_table_supports_zoned_model(table, zoned_model)) {
1454                 DMERR("%s: zoned model is not consistent across all devices",
1455                       dm_device_name(table->md));
1456                 return -EINVAL;
1457         }
1458
1459         /* Check zone size validity and compatibility */
1460         if (!zone_sectors || !is_power_of_2(zone_sectors))
1461                 return -EINVAL;
1462
1463         if (!dm_table_matches_zone_sectors(table, zone_sectors)) {
1464                 DMERR("%s: zone sectors is not consistent across all devices",
1465                       dm_device_name(table->md));
1466                 return -EINVAL;
1467         }
1468
1469         return 0;
1470 }
1471
1472 /*
1473  * Establish the new table's queue_limits and validate them.
1474  */
1475 int dm_calculate_queue_limits(struct dm_table *table,
1476                               struct queue_limits *limits)
1477 {
1478         struct dm_target *ti;
1479         struct queue_limits ti_limits;
1480         unsigned i;
1481         enum blk_zoned_model zoned_model = BLK_ZONED_NONE;
1482         unsigned int zone_sectors = 0;
1483
1484         blk_set_stacking_limits(limits);
1485
1486         for (i = 0; i < dm_table_get_num_targets(table); i++) {
1487                 blk_set_stacking_limits(&ti_limits);
1488
1489                 ti = dm_table_get_target(table, i);
1490
1491                 if (!ti->type->iterate_devices)
1492                         goto combine_limits;
1493
1494                 /*
1495                  * Combine queue limits of all the devices this target uses.
1496                  */
1497                 ti->type->iterate_devices(ti, dm_set_device_limits,
1498                                           &ti_limits);
1499
1500                 if (zoned_model == BLK_ZONED_NONE && ti_limits.zoned != BLK_ZONED_NONE) {
1501                         /*
1502                          * After stacking all limits, validate all devices
1503                          * in table support this zoned model and zone sectors.
1504                          */
1505                         zoned_model = ti_limits.zoned;
1506                         zone_sectors = ti_limits.chunk_sectors;
1507                 }
1508
1509                 /* Set I/O hints portion of queue limits */
1510                 if (ti->type->io_hints)
1511                         ti->type->io_hints(ti, &ti_limits);
1512
1513                 /*
1514                  * Check each device area is consistent with the target's
1515                  * overall queue limits.
1516                  */
1517                 if (ti->type->iterate_devices(ti, device_area_is_invalid,
1518                                               &ti_limits))
1519                         return -EINVAL;
1520
1521 combine_limits:
1522                 /*
1523                  * Merge this target's queue limits into the overall limits
1524                  * for the table.
1525                  */
1526                 if (blk_stack_limits(limits, &ti_limits, 0) < 0)
1527                         DMWARN("%s: adding target device "
1528                                "(start sect %llu len %llu) "
1529                                "caused an alignment inconsistency",
1530                                dm_device_name(table->md),
1531                                (unsigned long long) ti->begin,
1532                                (unsigned long long) ti->len);
1533         }
1534
1535         /*
1536          * Verify that the zoned model and zone sectors, as determined before
1537          * any .io_hints override, are the same across all devices in the table.
1538  * - this is especially relevant if .io_hints is emulating a drive-managed
1539          *   zoned model (aka BLK_ZONED_NONE) on host-managed zoned block devices.
1540          * BUT...
1541          */
1542         if (limits->zoned != BLK_ZONED_NONE) {
1543                 /*
1544                  * ...IF the above limits stacking determined a zoned model
1545                  * validate that all of the table's devices conform to it.
1546                  */
1547                 zoned_model = limits->zoned;
1548                 zone_sectors = limits->chunk_sectors;
1549         }
1550         if (validate_hardware_zoned_model(table, zoned_model, zone_sectors))
1551                 return -EINVAL;
1552
1553         return validate_hardware_logical_block_alignment(table, limits);
1554 }
1555
1556 /*
1557  * Verify that all devices have an integrity profile that matches the
1558  * DM device's registered integrity profile.  If the profiles don't
1559  * match then unregister the DM device's integrity profile.
1560  */
1561 static void dm_table_verify_integrity(struct dm_table *t)
1562 {
1563         struct gendisk *template_disk = NULL;
1564
1565         if (t->integrity_added)
1566                 return;
1567
1568         if (t->integrity_supported) {
1569                 /*
1570                  * Verify that the original integrity profile
1571                  * matches all the devices in this table.
1572                  */
1573                 template_disk = dm_table_get_integrity_disk(t);
1574                 if (template_disk &&
1575                     blk_integrity_compare(dm_disk(t->md), template_disk) >= 0)
1576                         return;
1577         }
1578
1579         if (integrity_profile_exists(dm_disk(t->md))) {
1580                 DMWARN("%s: unable to establish an integrity profile",
1581                        dm_device_name(t->md));
1582                 blk_integrity_unregister(dm_disk(t->md));
1583         }
1584 }
1585
1586 static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
1587                                 sector_t start, sector_t len, void *data)
1588 {
1589         unsigned long flush = (unsigned long) data;
1590         struct request_queue *q = bdev_get_queue(dev->bdev);
1591
1592         return q && (q->queue_flags & flush);
1593 }
1594
1595 static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush)
1596 {
1597         struct dm_target *ti;
1598         unsigned i;
1599
1600         /*
1601          * Require at least one underlying device to support flushes.
1602          * t->devices includes internal dm devices such as mirror logs
1603          * so we need to use iterate_devices here, which targets
1604          * supporting flushes must provide.
1605          */
1606         for (i = 0; i < dm_table_get_num_targets(t); i++) {
1607                 ti = dm_table_get_target(t, i);
1608
1609                 if (!ti->num_flush_bios)
1610                         continue;
1611
1612                 if (ti->flush_supported)
1613                         return true;
1614
1615                 if (ti->type->iterate_devices &&
1616                     ti->type->iterate_devices(ti, device_flush_capable, (void *) flush))
1617                         return true;
1618         }
1619
1620         return false;
1621 }
1622
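/*
 * iterate_devices callout: return true if the underlying device has a DAX
 * device with its write cache enabled.  dm_table_supports_dax_write_cache()
 * uses this to decide whether write-cache flushing must also be enabled on
 * the DM device's own dax_device.
 */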
1623 static int device_dax_write_cache_enabled(struct dm_target *ti,
1624                                           struct dm_dev *dev, sector_t start,
1625                                           sector_t len, void *data)
1626 {
1627         struct dax_device *dax_dev = dev->dax_dev;
1628
1629         if (!dax_dev)
1630                 return false;
1631
1632         if (dax_write_cache_enabled(dax_dev))
1633                 return true;
1634         return false;
1635 }
1636
1637 static int dm_table_supports_dax_write_cache(struct dm_table *t)
1638 {
1639         struct dm_target *ti;
1640         unsigned i;
1641
1642         for (i = 0; i < dm_table_get_num_targets(t); i++) {
1643                 ti = dm_table_get_target(t, i);
1644
1645                 if (ti->type->iterate_devices &&
1646                     ti->type->iterate_devices(ti,
1647                                 device_dax_write_cache_enabled, NULL))
1648                         return true;
1649         }
1650
1651         return false;
1652 }
1653
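/*
 * iterate_devices callout: true if the underlying device is non-rotational.
 */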
1654 static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
1655                             sector_t start, sector_t len, void *data)
1656 {
1657         struct request_queue *q = bdev_get_queue(dev->bdev);
1658
1659         return q && blk_queue_nonrot(q);
1660 }
1661
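/*
 * iterate_devices callout: true if the underlying device does not contribute
 * its I/O timings to the entropy pool (QUEUE_FLAG_ADD_RANDOM is clear).
 */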
1662 static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
1663                              sector_t start, sector_t len, void *data)
1664 {
1665         struct request_queue *q = bdev_get_queue(dev->bdev);
1666
1667         return q && !blk_queue_add_random(q);
1668 }
1669
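/*
 * Return true only if every target provides iterate_devices and all of its
 * data devices satisfy @func.
 */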
1670 static bool dm_table_all_devices_attribute(struct dm_table *t,
1671                                            iterate_devices_callout_fn func)
1672 {
1673         struct dm_target *ti;
1674         unsigned i;
1675
1676         for (i = 0; i < dm_table_get_num_targets(t); i++) {
1677                 ti = dm_table_get_target(t, i);
1678
1679                 if (!ti->type->iterate_devices ||
1680                     !ti->type->iterate_devices(ti, func, NULL))
1681                         return false;
1682         }
1683
1684         return true;
1685 }
1686
1687 static int device_no_partial_completion(struct dm_target *ti, struct dm_dev *dev,
1688                                         sector_t start, sector_t len, void *data)
1689 {
1690         char b[BDEVNAME_SIZE];
1691
1692         /* For now, NVMe devices are the only devices of this class */
1693         return (strncmp(bdevname(dev->bdev, b), "nvme", 4) == 0);
1694 }
1695
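/*
 * True only if every data device belongs to a class that never completes a
 * request partially (see device_no_partial_completion() above).
 */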
1696 static bool dm_table_does_not_support_partial_completion(struct dm_table *t)
1697 {
1698         return dm_table_all_devices_attribute(t, device_no_partial_completion);
1699 }
1700
1701 static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
1702                                          sector_t start, sector_t len, void *data)
1703 {
1704         struct request_queue *q = bdev_get_queue(dev->bdev);
1705
1706         return q && !q->limits.max_write_same_sectors;
1707 }
1708
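/*
 * WRITE SAME is supported only if every target sets num_write_same_bios and
 * none of its data devices reports max_write_same_sectors == 0.
 */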
1709 static bool dm_table_supports_write_same(struct dm_table *t)
1710 {
1711         struct dm_target *ti;
1712         unsigned i;
1713
1714         for (i = 0; i < dm_table_get_num_targets(t); i++) {
1715                 ti = dm_table_get_target(t, i);
1716
1717                 if (!ti->num_write_same_bios)
1718                         return false;
1719
1720                 if (!ti->type->iterate_devices ||
1721                     ti->type->iterate_devices(ti, device_not_write_same_capable, NULL))
1722                         return false;
1723         }
1724
1725         return true;
1726 }
1727
1728 static int device_not_write_zeroes_capable(struct dm_target *ti, struct dm_dev *dev,
1729                                            sector_t start, sector_t len, void *data)
1730 {
1731         struct request_queue *q = bdev_get_queue(dev->bdev);
1732
1733         return q && !q->limits.max_write_zeroes_sectors;
1734 }
1735
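/*
 * Same pattern as WRITE SAME above: every target must set
 * num_write_zeroes_bios and none of its data devices may lack
 * max_write_zeroes_sectors.
 */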
1736 static bool dm_table_supports_write_zeroes(struct dm_table *t)
1737 {
1738         struct dm_target *ti;
1739         unsigned i;
1740
1741         for (i = 0; i < dm_table_get_num_targets(t); i++) {
1742                 ti = dm_table_get_target(t, i);
1743
1744                 if (!ti->num_write_zeroes_bios)
1745                         return false;
1746
1747                 if (!ti->type->iterate_devices ||
1748                     ti->type->iterate_devices(ti, device_not_write_zeroes_capable, NULL))
1749                         return false;
1750         }
1751
1752         return true;
1753 }
1754
1755 static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev,
1756                                       sector_t start, sector_t len, void *data)
1757 {
1758         struct request_queue *q = bdev_get_queue(dev->bdev);
1759
1760         return q && !blk_queue_discard(q);
1761 }
1762
1763 static bool dm_table_supports_discards(struct dm_table *t)
1764 {
1765         struct dm_target *ti;
1766         unsigned i;
1767
1768         for (i = 0; i < dm_table_get_num_targets(t); i++) {
1769                 ti = dm_table_get_target(t, i);
1770
1771                 if (!ti->num_discard_bios)
1772                         return false;
1773
1774                 /*
1775                  * Either the target provides discard support (as implied by setting
1776                  * 'discards_supported') or it relies on _all_ data devices having
1777                  * discard support.
1778                  */
1779                 if (!ti->discards_supported &&
1780                     (!ti->type->iterate_devices ||
1781                      ti->type->iterate_devices(ti, device_not_discard_capable, NULL)))
1782                         return false;
1783         }
1784
1785         return true;
1786 }
1787
1788 static int device_not_secure_erase_capable(struct dm_target *ti,
1789                                            struct dm_dev *dev, sector_t start,
1790                                            sector_t len, void *data)
1791 {
1792         struct request_queue *q = bdev_get_queue(dev->bdev);
1793
1794         return q && !blk_queue_secure_erase(q);
1795 }
1796
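/*
 * Secure erase is supported only if every target sets num_secure_erase_bios
 * and all of its data devices have QUEUE_FLAG_SECERASE set.
 */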
1797 static bool dm_table_supports_secure_erase(struct dm_table *t)
1798 {
1799         struct dm_target *ti;
1800         unsigned int i;
1801
1802         for (i = 0; i < dm_table_get_num_targets(t); i++) {
1803                 ti = dm_table_get_target(t, i);
1804
1805                 if (!ti->num_secure_erase_bios)
1806                         return false;
1807
1808                 if (!ti->type->iterate_devices ||
1809                     ti->type->iterate_devices(ti, device_not_secure_erase_capable, NULL))
1810                         return false;
1811         }
1812
1813         return true;
1814 }
1815
1816 static int device_requires_stable_pages(struct dm_target *ti,
1817                                         struct dm_dev *dev, sector_t start,
1818                                         sector_t len, void *data)
1819 {
1820         struct request_queue *q = bdev_get_queue(dev->bdev);
1821
1822         return q && bdi_cap_stable_pages_required(q->backing_dev_info);
1823 }
1824
1825 /*
1826  * If any underlying device requires stable pages, a table must require
1827  * them as well.  Only targets that support iterate_devices are considered:
1828  * we don't want error, zero, etc. to require stable pages.
1829  */
1830 static bool dm_table_requires_stable_pages(struct dm_table *t)
1831 {
1832         struct dm_target *ti;
1833         unsigned i;
1834
1835         for (i = 0; i < dm_table_get_num_targets(t); i++) {
1836                 ti = dm_table_get_target(t, i);
1837
1838                 if (ti->type->iterate_devices &&
1839                     ti->type->iterate_devices(ti, device_requires_stable_pages, NULL))
1840                         return true;
1841         }
1842
1843         return false;
1844 }
1845
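/*
 * Apply the table's calculated queue limits and feature capabilities
 * (discard, secure erase, write cache/FUA, DAX, non-rotational, write
 * same/zeroes, integrity, stable pages, entropy contribution) to the mapped
 * device's request_queue @q.
 */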
1846 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
1847                                struct queue_limits *limits)
1848 {
1849         bool wc = false, fua = false;
1850         int page_size = PAGE_SIZE;
1851
1852         /*
1853          * Copy table's limits to the DM device's request_queue
1854          */
1855         q->limits = *limits;
1856
1857         if (!dm_table_supports_discards(t)) {
1858                 blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
1859                 /* Must also clear discard limits... */
1860                 q->limits.max_discard_sectors = 0;
1861                 q->limits.max_hw_discard_sectors = 0;
1862                 q->limits.discard_granularity = 0;
1863                 q->limits.discard_alignment = 0;
1864                 q->limits.discard_misaligned = 0;
1865         } else
1866                 blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
1867
1868         if (dm_table_supports_secure_erase(t))
1869                 blk_queue_flag_set(QUEUE_FLAG_SECERASE, q);
1870
1871         if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
1872                 wc = true;
1873                 if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_FUA)))
1874                         fua = true;
1875         }
1876         blk_queue_write_cache(q, wc, fua);
1877
1878         if (dm_table_supports_dax(t, device_supports_dax, &page_size)) {
1879                 blk_queue_flag_set(QUEUE_FLAG_DAX, q);
1880                 if (dm_table_supports_dax(t, device_dax_synchronous, NULL))
1881                         set_dax_synchronous(t->md->dax_dev);
1882         } else
1884                 blk_queue_flag_clear(QUEUE_FLAG_DAX, q);
1885
1886         if (dm_table_supports_dax_write_cache(t))
1887                 dax_write_cache(t->md->dax_dev, true);
1888
1889         /* Ensure that all underlying devices are non-rotational. */
1890         if (dm_table_all_devices_attribute(t, device_is_nonrot))
1891                 blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
1892         else
1893                 blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
1894
1895         if (!dm_table_supports_write_same(t))
1896                 q->limits.max_write_same_sectors = 0;
1897         if (!dm_table_supports_write_zeroes(t))
1898                 q->limits.max_write_zeroes_sectors = 0;
1899
1900         dm_table_verify_integrity(t);
1901
1902         /*
1903          * Some devices don't use blk_integrity but still want stable pages
1904          * because they do their own checksumming.
1905          */
1906         if (dm_table_requires_stable_pages(t))
1907                 q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
1908         else
1909                 q->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES;
1910
1911         /*
1912          * Determine whether or not this queue's I/O timings contribute
1913          * to the entropy pool.  Only request-based targets use this.
1914          * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
1915          * have it set.
1916          */
1917         if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
1918                 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
1919
1920         /*
1921          * For a zoned target, the number of zones should be updated for the
1922          * correct value to be exposed in sysfs queue/nr_zones. For a BIO based
1923          * target, this is all that is needed.
1924          */
1925 #ifdef CONFIG_BLK_DEV_ZONED
1926         if (blk_queue_is_zoned(q)) {
1927                 WARN_ON_ONCE(queue_is_mq(q));
1928                 q->nr_zones = blkdev_nr_zones(t->md->disk);
1929         }
1930 #endif
1931
1932         /* Allow reads to exceed readahead limits */
1933         q->backing_dev_info->io_pages = limits->max_sectors >> (PAGE_SHIFT - 9);
1934 }
1935
1936 unsigned int dm_table_get_num_targets(struct dm_table *t)
1937 {
1938         return t->num_targets;
1939 }
1940
1941 struct list_head *dm_table_get_devices(struct dm_table *t)
1942 {
1943         return &t->devices;
1944 }
1945
1946 fmode_t dm_table_get_mode(struct dm_table *t)
1947 {
1948         return t->mode;
1949 }
1950 EXPORT_SYMBOL(dm_table_get_mode);
1951
1952 enum suspend_mode {
1953         PRESUSPEND,
1954         PRESUSPEND_UNDO,
1955         POSTSUSPEND,
1956 };
1957
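/*
 * Walk the targets in table order and invoke the presuspend, presuspend_undo
 * or postsuspend hook selected by @mode.  The caller must hold
 * md->suspend_lock (asserted via lockdep below).
 */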
1958 static void suspend_targets(struct dm_table *t, enum suspend_mode mode)
1959 {
1960         int i = t->num_targets;
1961         struct dm_target *ti = t->targets;
1962
1963         lockdep_assert_held(&t->md->suspend_lock);
1964
1965         while (i--) {
1966                 switch (mode) {
1967                 case PRESUSPEND:
1968                         if (ti->type->presuspend)
1969                                 ti->type->presuspend(ti);
1970                         break;
1971                 case PRESUSPEND_UNDO:
1972                         if (ti->type->presuspend_undo)
1973                                 ti->type->presuspend_undo(ti);
1974                         break;
1975                 case POSTSUSPEND:
1976                         if (ti->type->postsuspend)
1977                                 ti->type->postsuspend(ti);
1978                         break;
1979                 }
1980                 ti++;
1981         }
1982 }
1983
1984 void dm_table_presuspend_targets(struct dm_table *t)
1985 {
1986         if (!t)
1987                 return;
1988
1989         suspend_targets(t, PRESUSPEND);
1990 }
1991
1992 void dm_table_presuspend_undo_targets(struct dm_table *t)
1993 {
1994         if (!t)
1995                 return;
1996
1997         suspend_targets(t, PRESUSPEND_UNDO);
1998 }
1999
2000 void dm_table_postsuspend_targets(struct dm_table *t)
2001 {
2002         if (!t)
2003                 return;
2004
2005         suspend_targets(t, POSTSUSPEND);
2006 }
2007
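/*
 * Run every target's preresume hook first, failing the resume on the first
 * error, and only then call the resume hooks.
 */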
2008 int dm_table_resume_targets(struct dm_table *t)
2009 {
2010         int i, r = 0;
2011
2012         lockdep_assert_held(&t->md->suspend_lock);
2013
2014         for (i = 0; i < t->num_targets; i++) {
2015                 struct dm_target *ti = t->targets + i;
2016
2017                 if (!ti->type->preresume)
2018                         continue;
2019
2020                 r = ti->type->preresume(ti);
2021                 if (r) {
2022                         DMERR("%s: %s: preresume failed, error = %d",
2023                               dm_device_name(t->md), ti->type->name, r);
2024                         return r;
2025                 }
2026         }
2027
2028         for (i = 0; i < t->num_targets; i++) {
2029                 struct dm_target *ti = t->targets + i;
2030
2031                 if (ti->type->resume)
2032                         ti->type->resume(ti);
2033         }
2034
2035         return 0;
2036 }
2037
2038 struct mapped_device *dm_table_get_md(struct dm_table *t)
2039 {
2040         return t->md;
2041 }
2042 EXPORT_SYMBOL(dm_table_get_md);
2043
2044 const char *dm_table_device_name(struct dm_table *t)
2045 {
2046         return dm_device_name(t->md);
2047 }
2048 EXPORT_SYMBOL_GPL(dm_table_device_name);
2049
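/*
 * Asynchronously kick the mapped device's blk-mq hardware queues.  This is a
 * no-op for bio-based tables.
 */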
2050 void dm_table_run_md_queue_async(struct dm_table *t)
2051 {
2052         struct mapped_device *md;
2053         struct request_queue *queue;
2054
2055         if (!dm_table_request_based(t))
2056                 return;
2057
2058         md = dm_table_get_md(t);
2059         queue = dm_get_md_queue(md);
2060         if (queue)
2061                 blk_mq_run_hw_queues(queue, true);
2062 }
2063 EXPORT_SYMBOL(dm_table_run_md_queue_async);
2064