drivers/md/dm-table.c
1 /*
2  * Copyright (C) 2001 Sistina Software (UK) Limited.
3  * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
4  *
5  * This file is released under the GPL.
6  */
7
8 #include "dm-core.h"
9
10 #include <linux/module.h>
11 #include <linux/vmalloc.h>
12 #include <linux/blkdev.h>
13 #include <linux/namei.h>
14 #include <linux/ctype.h>
15 #include <linux/string.h>
16 #include <linux/slab.h>
17 #include <linux/interrupt.h>
18 #include <linux/mutex.h>
19 #include <linux/delay.h>
20 #include <linux/atomic.h>
21 #include <linux/blk-mq.h>
22 #include <linux/mount.h>
23 #include <linux/dax.h>
24
25 #define DM_MSG_PREFIX "table"
26
27 #define MAX_DEPTH 16
28 #define NODE_SIZE L1_CACHE_BYTES
29 #define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
30 #define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)
31
32 struct dm_table {
33         struct mapped_device *md;
34         enum dm_queue_mode type;
35
36         /* btree table */
37         unsigned int depth;
38         unsigned int counts[MAX_DEPTH]; /* in nodes */
39         sector_t *index[MAX_DEPTH];
40
41         unsigned int num_targets;
42         unsigned int num_allocated;
43         sector_t *highs;
44         struct dm_target *targets;
45
46         struct target_type *immutable_target_type;
47
48         bool integrity_supported:1;
49         bool singleton:1;
50         unsigned integrity_added:1;
51
52         /*
53          * Indicates the rw permissions for the new logical
54          * device.  This should be a combination of FMODE_READ
55          * and FMODE_WRITE.
56          */
57         fmode_t mode;
58
59         /* a list of devices used by this table */
60         struct list_head devices;
61
62         /* events get handed up using this callback */
63         void (*event_fn)(void *);
64         void *event_context;
65
66         struct dm_md_mempools *mempools;
67 };
68
69 /*
70  * Similar to ceiling(log_base(n))
71  */
72 static unsigned int int_log(unsigned int n, unsigned int base)
73 {
74         int result = 0;
75
76         while (n > 1) {
77                 n = dm_div_up(n, base);
78                 result++;
79         }
80
81         return result;
82 }
83
84 /*
85  * Calculate the index of the child node for the k'th key of the n'th node.
86  */
87 static inline unsigned int get_child(unsigned int n, unsigned int k)
88 {
89         return (n * CHILDREN_PER_NODE) + k;
90 }
91
92 /*
93  * Return the n'th node of level l from table t.
94  */
95 static inline sector_t *get_node(struct dm_table *t,
96                                  unsigned int l, unsigned int n)
97 {
98         return t->index[l] + (n * KEYS_PER_NODE);
99 }
100
101 /*
102  * Return the highest key that you could lookup from the n'th
103  * node on level l of the btree.
104  */
105 static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
106 {
107         for (; l < t->depth - 1; l++)
108                 n = get_child(n, CHILDREN_PER_NODE - 1);
109
110         if (n >= t->counts[l])
111                 return (sector_t) - 1;
112
113         return get_node(t, l, n)[KEYS_PER_NODE - 1];
114 }
115
116 /*
117  * Fills in a level of the btree based on the highs of the level
118  * below it.
119  */
120 static int setup_btree_index(unsigned int l, struct dm_table *t)
121 {
122         unsigned int n, k;
123         sector_t *node;
124
125         for (n = 0U; n < t->counts[l]; n++) {
126                 node = get_node(t, l, n);
127
128                 for (k = 0U; k < KEYS_PER_NODE; k++)
129                         node[k] = high(t, l + 1, get_child(n, k));
130         }
131
132         return 0;
133 }
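/*
 * Illustrative worked example of the node geometry, assuming
 * L1_CACHE_BYTES == 64 and sizeof(sector_t) == 8 (both are
 * architecture-dependent):
 *
 *	NODE_SIZE         = 64 bytes
 *	KEYS_PER_NODE     = 64 / 8 = 8 keys per node
 *	CHILDREN_PER_NODE = 8 + 1  = 9 children per node
 *
 * so get_child(n, k) == 9 * n + k (e.g. get_child(2, 4) == 22) and
 * get_node(t, l, n) points 8 * n keys into level l's index.  high()
 * repeatedly takes the rightmost child (k == CHILDREN_PER_NODE - 1)
 * until it reaches the leaf level, yielding the largest key reachable
 * from the given internal node.
 */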
134
135 void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size)
136 {
137         unsigned long size;
138         void *addr;
139
140         /*
141          * Check that we're not going to overflow.
142          */
143         if (nmemb > (ULONG_MAX / elem_size))
144                 return NULL;
145
146         size = nmemb * elem_size;
147         addr = vzalloc(size);
148
149         return addr;
150 }
151 EXPORT_SYMBOL(dm_vcalloc);
152
153 /*
154  * highs and targets are managed as dynamic arrays during a
155  * table load.
156  */
157 static int alloc_targets(struct dm_table *t, unsigned int num)
158 {
159         sector_t *n_highs;
160         struct dm_target *n_targets;
161
162         /*
163          * Allocate both the target array and offset array at once.
164          */
165         n_highs = (sector_t *) dm_vcalloc(num, sizeof(struct dm_target) +
166                                           sizeof(sector_t));
167         if (!n_highs)
168                 return -ENOMEM;
169
170         n_targets = (struct dm_target *) (n_highs + num);
171
172         memset(n_highs, -1, sizeof(*n_highs) * num);
173         vfree(t->highs);
174
175         t->num_allocated = num;
176         t->highs = n_highs;
177         t->targets = n_targets;
178
179         return 0;
180 }
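/*
 * Illustrative sketch of the layout built by alloc_targets(), assuming
 * num == 4 and an 8-byte sector_t.  Both arrays share one vzalloc'd
 * region:
 *
 *	n_highs   -> [ sector_t ][ sector_t ][ sector_t ][ sector_t ]
 *	n_targets -> [ dm_target ][ dm_target ][ dm_target ][ dm_target ]
 *
 * n_targets begins immediately after n_highs[num - 1], which is why a
 * single dm_vcalloc() of num * (sizeof(struct dm_target) +
 * sizeof(sector_t)) bytes suffices and only t->highs is ever vfree'd.
 */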
181
182 int dm_table_create(struct dm_table **result, fmode_t mode,
183                     unsigned num_targets, struct mapped_device *md)
184 {
185         struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);
186
187         if (!t)
188                 return -ENOMEM;
189
190         INIT_LIST_HEAD(&t->devices);
191
192         if (!num_targets)
193                 num_targets = KEYS_PER_NODE;
194
195         num_targets = dm_round_up(num_targets, KEYS_PER_NODE);
196
197         if (!num_targets) {
198                 kfree(t);
199                 return -ENOMEM;
200         }
201
202         if (alloc_targets(t, num_targets)) {
203                 kfree(t);
204                 return -ENOMEM;
205         }
206
207         t->type = DM_TYPE_NONE;
208         t->mode = mode;
209         t->md = md;
210         *result = t;
211         return 0;
212 }
213
214 static void free_devices(struct list_head *devices, struct mapped_device *md)
215 {
216         struct list_head *tmp, *next;
217
218         list_for_each_safe(tmp, next, devices) {
219                 struct dm_dev_internal *dd =
220                     list_entry(tmp, struct dm_dev_internal, list);
221                 DMWARN("%s: dm_table_destroy: dm_put_device call missing for %s",
222                        dm_device_name(md), dd->dm_dev->name);
223                 dm_put_table_device(md, dd->dm_dev);
224                 kfree(dd);
225         }
226 }
227
228 void dm_table_destroy(struct dm_table *t)
229 {
230         unsigned int i;
231
232         if (!t)
233                 return;
234
235         /* free the indexes */
236         if (t->depth >= 2)
237                 vfree(t->index[t->depth - 2]);
238
239         /* free the targets */
240         for (i = 0; i < t->num_targets; i++) {
241                 struct dm_target *tgt = t->targets + i;
242
243                 if (tgt->type->dtr)
244                         tgt->type->dtr(tgt);
245
246                 dm_put_target_type(tgt->type);
247         }
248
249         vfree(t->highs);
250
251         /* free the device list */
252         free_devices(&t->devices, t->md);
253
254         dm_free_md_mempools(t->mempools);
255
256         kfree(t);
257 }
258
259 /*
260  * See if we've already got a device in the list.
261  */
262 static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
263 {
264         struct dm_dev_internal *dd;
265
266         list_for_each_entry (dd, l, list)
267                 if (dd->dm_dev->bdev->bd_dev == dev)
268                         return dd;
269
270         return NULL;
271 }
272
273 /*
274  * If possible, this checks whether an area of a destination device is invalid.
275  */
276 static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
277                                   sector_t start, sector_t len, void *data)
278 {
279         struct queue_limits *limits = data;
280         struct block_device *bdev = dev->bdev;
281         sector_t dev_size =
282                 i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
283         unsigned short logical_block_size_sectors =
284                 limits->logical_block_size >> SECTOR_SHIFT;
285         char b[BDEVNAME_SIZE];
286
287         if (!dev_size)
288                 return 0;
289
290         if ((start >= dev_size) || (start + len > dev_size)) {
291                 DMWARN("%s: %s too small for target: "
292                        "start=%llu, len=%llu, dev_size=%llu",
293                        dm_device_name(ti->table->md), bdevname(bdev, b),
294                        (unsigned long long)start,
295                        (unsigned long long)len,
296                        (unsigned long long)dev_size);
297                 return 1;
298         }
299
300         /*
301          * If the target is mapped to zoned block device(s), check
302          * that the zones are not partially mapped.
303          */
304         if (bdev_zoned_model(bdev) != BLK_ZONED_NONE) {
305                 unsigned int zone_sectors = bdev_zone_sectors(bdev);
306
307                 if (start & (zone_sectors - 1)) {
308                         DMWARN("%s: start=%llu not aligned to h/w zone size %u of %s",
309                                dm_device_name(ti->table->md),
310                                (unsigned long long)start,
311                                zone_sectors, bdevname(bdev, b));
312                         return 1;
313                 }
314
315                 /*
316                  * Note: The last zone of a zoned block device may be smaller
317                  * than other zones. So for a target mapping the end of a
318                  * zoned block device with such a zone, len would not be zone
319                  * aligned. We do not allow such last smaller zone to be part
320                  * of the mapping here to ensure that mappings with multiple
321                  * devices do not end up with a smaller zone in the middle of
322                  * the sector range.
323                  */
324                 if (len & (zone_sectors - 1)) {
325                         DMWARN("%s: len=%llu not aligned to h/w zone size %u of %s",
326                                dm_device_name(ti->table->md),
327                                (unsigned long long)len,
328                                zone_sectors, bdevname(bdev, b));
329                         return 1;
330                 }
331         }
332
333         if (logical_block_size_sectors <= 1)
334                 return 0;
335
336         if (start & (logical_block_size_sectors - 1)) {
337                 DMWARN("%s: start=%llu not aligned to h/w "
338                        "logical block size %u of %s",
339                        dm_device_name(ti->table->md),
340                        (unsigned long long)start,
341                        limits->logical_block_size, bdevname(bdev, b));
342                 return 1;
343         }
344
345         if (len & (logical_block_size_sectors - 1)) {
346                 DMWARN("%s: len=%llu not aligned to h/w "
347                        "logical block size %u of %s",
348                        dm_device_name(ti->table->md),
349                        (unsigned long long)len,
350                        limits->logical_block_size, bdevname(bdev, b));
351                 return 1;
352         }
353
354         return 0;
355 }
356
357 /*
358  * This upgrades the mode on an already open dm_dev, being
359  * careful to leave things as they were if we fail to reopen the
360  * device and not to touch the existing bdev field in case
361  * it is accessed concurrently.
362  */
363 static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
364                         struct mapped_device *md)
365 {
366         int r;
367         struct dm_dev *old_dev, *new_dev;
368
369         old_dev = dd->dm_dev;
370
371         r = dm_get_table_device(md, dd->dm_dev->bdev->bd_dev,
372                                 dd->dm_dev->mode | new_mode, &new_dev);
373         if (r)
374                 return r;
375
376         dd->dm_dev = new_dev;
377         dm_put_table_device(md, old_dev);
378
379         return 0;
380 }
381
382 /*
383  * Convert the path to a device
384  */
385 dev_t dm_get_dev_t(const char *path)
386 {
387         dev_t dev;
388         struct block_device *bdev;
389
390         bdev = lookup_bdev(path);
391         if (IS_ERR(bdev))
392                 dev = name_to_dev_t(path);
393         else {
394                 dev = bdev->bd_dev;
395                 bdput(bdev);
396         }
397
398         return dev;
399 }
400 EXPORT_SYMBOL_GPL(dm_get_dev_t);
401
402 /*
403  * Add a device to the list, or just increment the usage count if
404  * it's already present.
405  */
406 int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
407                   struct dm_dev **result)
408 {
409         int r;
410         dev_t dev;
411         struct dm_dev_internal *dd;
412         struct dm_table *t = ti->table;
413
414         BUG_ON(!t);
415
416         dev = dm_get_dev_t(path);
417         if (!dev)
418                 return -ENODEV;
419
420         dd = find_device(&t->devices, dev);
421         if (!dd) {
422                 dd = kmalloc(sizeof(*dd), GFP_KERNEL);
423                 if (!dd)
424                         return -ENOMEM;
425
426                 if ((r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev))) {
427                         kfree(dd);
428                         return r;
429                 }
430
431                 refcount_set(&dd->count, 1);
432                 list_add(&dd->list, &t->devices);
433                 goto out;
434
435         } else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) {
436                 r = upgrade_mode(dd, mode, t->md);
437                 if (r)
438                         return r;
439         }
440         refcount_inc(&dd->count);
441 out:
442         *result = dd->dm_dev;
443         return 0;
444 }
445 EXPORT_SYMBOL(dm_get_device);
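/*
 * Illustrative sketch of the usual pairing of dm_get_device() and
 * dm_put_device() in a target's constructor and destructor.  The
 * "example" target and its use of ti->private are hypothetical; only
 * the dm_get_device()/dm_table_get_mode()/dm_put_device() calls are
 * taken from the real API.
 *
 *	static int example_ctr(struct dm_target *ti, unsigned argc, char **argv)
 *	{
 *		struct dm_dev *dev;
 *		int r;
 *
 *		r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dev);
 *		if (r) {
 *			ti->error = "Device lookup failed";
 *			return r;
 *		}
 *		ti->private = dev;
 *		return 0;
 *	}
 *
 *	static void example_dtr(struct dm_target *ti)
 *	{
 *		dm_put_device(ti, ti->private);
 *	}
 */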
446
447 static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
448                                 sector_t start, sector_t len, void *data)
449 {
450         struct queue_limits *limits = data;
451         struct block_device *bdev = dev->bdev;
452         struct request_queue *q = bdev_get_queue(bdev);
453         char b[BDEVNAME_SIZE];
454
455         if (unlikely(!q)) {
456                 DMWARN("%s: Cannot set limits for nonexistent device %s",
457                        dm_device_name(ti->table->md), bdevname(bdev, b));
458                 return 0;
459         }
460
461         if (blk_stack_limits(limits, &q->limits,
462                         get_start_sect(bdev) + start) < 0)
463                 DMWARN("%s: adding target device %s caused an alignment inconsistency: "
464                        "physical_block_size=%u, logical_block_size=%u, "
465                        "alignment_offset=%u, start=%llu",
466                        dm_device_name(ti->table->md), bdevname(bdev, b),
467                        q->limits.physical_block_size,
468                        q->limits.logical_block_size,
469                        q->limits.alignment_offset,
470                        (unsigned long long) start << SECTOR_SHIFT);
471         return 0;
472 }
473
474 /*
475  * Decrement a device's use count and remove it if necessary.
476  */
477 void dm_put_device(struct dm_target *ti, struct dm_dev *d)
478 {
479         int found = 0;
480         struct list_head *devices = &ti->table->devices;
481         struct dm_dev_internal *dd;
482
483         list_for_each_entry(dd, devices, list) {
484                 if (dd->dm_dev == d) {
485                         found = 1;
486                         break;
487                 }
488         }
489         if (!found) {
490                 DMWARN("%s: device %s not in table devices list",
491                        dm_device_name(ti->table->md), d->name);
492                 return;
493         }
494         if (refcount_dec_and_test(&dd->count)) {
495                 dm_put_table_device(ti->table->md, d);
496                 list_del(&dd->list);
497                 kfree(dd);
498         }
499 }
500 EXPORT_SYMBOL(dm_put_device);
501
502 /*
503  * Checks to see if the target joins onto the end of the table.
504  */
505 static int adjoin(struct dm_table *table, struct dm_target *ti)
506 {
507         struct dm_target *prev;
508
509         if (!table->num_targets)
510                 return !ti->begin;
511
512         prev = &table->targets[table->num_targets - 1];
513         return (ti->begin == (prev->begin + prev->len));
514 }
515
516 /*
517  * Used to dynamically allocate the arg array.
518  *
519  * We do first allocation with GFP_NOIO because dm-mpath and dm-thin must
520  * process messages even if some device is suspended. These messages have a
521  * small fixed number of arguments.
522  *
523  * On the other hand, dm-switch needs to process bulk data using messages and
524  * excessive use of GFP_NOIO could cause trouble.
525  */
526 static char **realloc_argv(unsigned *size, char **old_argv)
527 {
528         char **argv;
529         unsigned new_size;
530         gfp_t gfp;
531
532         if (*size) {
533                 new_size = *size * 2;
534                 gfp = GFP_KERNEL;
535         } else {
536                 new_size = 8;
537                 gfp = GFP_NOIO;
538         }
539         argv = kmalloc_array(new_size, sizeof(*argv), gfp);
540         if (argv && old_argv) {
541                 memcpy(argv, old_argv, *size * sizeof(*argv));
542                 *size = new_size;
543         }
544
545         kfree(old_argv);
546         return argv;
547 }
548
549 /*
550  * Destructively splits up the argument list to pass to ctr.
551  */
552 int dm_split_args(int *argc, char ***argvp, char *input)
553 {
554         char *start, *end = input, *out, **argv = NULL;
555         unsigned array_size = 0;
556
557         *argc = 0;
558
559         if (!input) {
560                 *argvp = NULL;
561                 return 0;
562         }
563
564         argv = realloc_argv(&array_size, argv);
565         if (!argv)
566                 return -ENOMEM;
567
568         while (1) {
569                 /* Skip whitespace */
570                 start = skip_spaces(end);
571
572                 if (!*start)
573                         break;  /* success, we hit the end */
574
575                 /* 'out' is used to remove any back-quotes */
576                 end = out = start;
577                 while (*end) {
578                         /* Everything apart from '\0' can be quoted */
579                         if (*end == '\\' && *(end + 1)) {
580                                 *out++ = *(end + 1);
581                                 end += 2;
582                                 continue;
583                         }
584
585                         if (isspace(*end))
586                                 break;  /* end of token */
587
588                         *out++ = *end++;
589                 }
590
591                 /* have we already filled the array ? */
592                 if ((*argc + 1) > array_size) {
593                         argv = realloc_argv(&array_size, argv);
594                         if (!argv)
595                                 return -ENOMEM;
596                 }
597
598                 /* we know this is whitespace */
599                 if (*end)
600                         end++;
601
602                 /* terminate the string and put it in the array */
603                 *out = '\0';
604                 argv[*argc] = start;
605                 (*argc)++;
606         }
607
608         *argvp = argv;
609         return 0;
610 }
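/*
 * Illustrative example: given the hypothetical parameter string
 *
 *	"foo bar\ baz   qux"
 *
 * dm_split_args() rewrites it in place and returns argc == 3 with the
 * arguments "foo", "bar baz" and "qux": the backslash quotes the
 * following space, and runs of unquoted whitespace are skipped by
 * skip_spaces().
 */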
611
612 /*
613  * Impose necessary and sufficient conditions on a device's table such
614  * that any incoming bio which respects its logical_block_size can be
615  * processed successfully.  If it falls across the boundary between
616  * two or more targets, the size of each piece it gets split into must
617  * be compatible with the logical_block_size of the target processing it.
618  */
619 static int validate_hardware_logical_block_alignment(struct dm_table *table,
620                                                  struct queue_limits *limits)
621 {
622         /*
623          * This function uses arithmetic modulo the logical_block_size
624          * (in units of 512-byte sectors).
625          */
626         unsigned short device_logical_block_size_sects =
627                 limits->logical_block_size >> SECTOR_SHIFT;
628
629         /*
630          * Offset of the start of the next table entry, mod logical_block_size.
631          */
632         unsigned short next_target_start = 0;
633
634         /*
635          * Given an aligned bio that extends beyond the end of a
636          * target, how many sectors must the next target handle?
637          */
638         unsigned short remaining = 0;
639
640         struct dm_target *ti;
641         struct queue_limits ti_limits;
642         unsigned i;
643
644         /*
645          * Check each entry in the table in turn.
646          */
647         for (i = 0; i < dm_table_get_num_targets(table); i++) {
648                 ti = dm_table_get_target(table, i);
649
650                 blk_set_stacking_limits(&ti_limits);
651
652                 /* combine all target devices' limits */
653                 if (ti->type->iterate_devices)
654                         ti->type->iterate_devices(ti, dm_set_device_limits,
655                                                   &ti_limits);
656
657                 /*
658                  * If the remaining sectors fall entirely within this
659                  * table entry, are they compatible with its logical_block_size?
660                  */
661                 if (remaining < ti->len &&
662                     remaining & ((ti_limits.logical_block_size >>
663                                   SECTOR_SHIFT) - 1))
664                         break;  /* Error */
665
666                 next_target_start =
667                     (unsigned short) ((next_target_start + ti->len) &
668                                       (device_logical_block_size_sects - 1));
669                 remaining = next_target_start ?
670                     device_logical_block_size_sects - next_target_start : 0;
671         }
672
673         if (remaining) {
674                 DMWARN("%s: table line %u (start sect %llu len %llu) "
675                        "not aligned to h/w logical block size %u",
676                        dm_device_name(table->md), i,
677                        (unsigned long long) ti->begin,
678                        (unsigned long long) ti->len,
679                        limits->logical_block_size);
680                 return -EINVAL;
681         }
682
683         return 0;
684 }
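/*
 * Illustrative worked example of the check above, assuming a stacked
 * logical_block_size of 4096 bytes, i.e.
 * device_logical_block_size_sects == 8, for every target:
 *
 *	target 0: len = 1001 sectors -> next_target_start = 1001 & 7 = 1,
 *	                                remaining = 8 - 1 = 7
 *	target 1: remaining (7) is not a multiple of 8, so the loop breaks
 *	          and -EINVAL is returned: an aligned 4 KiB bio crossing
 *	          the boundary would hand target 1 a fragment smaller than
 *	          its logical_block_size.
 */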
685
686 int dm_table_add_target(struct dm_table *t, const char *type,
687                         sector_t start, sector_t len, char *params)
688 {
689         int r = -EINVAL, argc;
690         char **argv;
691         struct dm_target *tgt;
692
693         if (t->singleton) {
694                 DMERR("%s: target type %s must appear alone in table",
695                       dm_device_name(t->md), t->targets->type->name);
696                 return -EINVAL;
697         }
698
699         BUG_ON(t->num_targets >= t->num_allocated);
700
701         tgt = t->targets + t->num_targets;
702         memset(tgt, 0, sizeof(*tgt));
703
704         if (!len) {
705                 DMERR("%s: zero-length target", dm_device_name(t->md));
706                 return -EINVAL;
707         }
708
709         tgt->type = dm_get_target_type(type);
710         if (!tgt->type) {
711                 DMERR("%s: %s: unknown target type", dm_device_name(t->md), type);
712                 return -EINVAL;
713         }
714
715         if (dm_target_needs_singleton(tgt->type)) {
716                 if (t->num_targets) {
717                         tgt->error = "singleton target type must appear alone in table";
718                         goto bad;
719                 }
720                 t->singleton = true;
721         }
722
723         if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) {
724                 tgt->error = "target type may not be included in a read-only table";
725                 goto bad;
726         }
727
728         if (t->immutable_target_type) {
729                 if (t->immutable_target_type != tgt->type) {
730                         tgt->error = "immutable target type cannot be mixed with other target types";
731                         goto bad;
732                 }
733         } else if (dm_target_is_immutable(tgt->type)) {
734                 if (t->num_targets) {
735                         tgt->error = "immutable target type cannot be mixed with other target types";
736                         goto bad;
737                 }
738                 t->immutable_target_type = tgt->type;
739         }
740
741         if (dm_target_has_integrity(tgt->type))
742                 t->integrity_added = 1;
743
744         tgt->table = t;
745         tgt->begin = start;
746         tgt->len = len;
747         tgt->error = "Unknown error";
748
749         /*
750          * Does this target adjoin the previous one ?
751          */
752         if (!adjoin(t, tgt)) {
753                 tgt->error = "Gap in table";
754                 goto bad;
755         }
756
757         r = dm_split_args(&argc, &argv, params);
758         if (r) {
759                 tgt->error = "couldn't split parameters (insufficient memory)";
760                 goto bad;
761         }
762
763         r = tgt->type->ctr(tgt, argc, argv);
764         kfree(argv);
765         if (r)
766                 goto bad;
767
768         t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;
769
770         if (!tgt->num_discard_bios && tgt->discards_supported)
771                 DMWARN("%s: %s: ignoring discards_supported because num_discard_bios is zero.",
772                        dm_device_name(t->md), type);
773
774         return 0;
775
776  bad:
777         DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
778         dm_put_target_type(tgt->type);
779         return r;
780 }
781
782 /*
783  * Target argument parsing helpers.
784  */
785 static int validate_next_arg(const struct dm_arg *arg,
786                              struct dm_arg_set *arg_set,
787                              unsigned *value, char **error, unsigned grouped)
788 {
789         const char *arg_str = dm_shift_arg(arg_set);
790         char dummy;
791
792         if (!arg_str ||
793             (sscanf(arg_str, "%u%c", value, &dummy) != 1) ||
794             (*value < arg->min) ||
795             (*value > arg->max) ||
796             (grouped && arg_set->argc < *value)) {
797                 *error = arg->error;
798                 return -EINVAL;
799         }
800
801         return 0;
802 }
803
804 int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
805                 unsigned *value, char **error)
806 {
807         return validate_next_arg(arg, arg_set, value, error, 0);
808 }
809 EXPORT_SYMBOL(dm_read_arg);
810
811 int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
812                       unsigned *value, char **error)
813 {
814         return validate_next_arg(arg, arg_set, value, error, 1);
815 }
816 EXPORT_SYMBOL(dm_read_arg_group);
817
818 const char *dm_shift_arg(struct dm_arg_set *as)
819 {
820         char *r;
821
822         if (as->argc) {
823                 as->argc--;
824                 r = *as->argv;
825                 as->argv++;
826                 return r;
827         }
828
829         return NULL;
830 }
831 EXPORT_SYMBOL(dm_shift_arg);
832
833 void dm_consume_args(struct dm_arg_set *as, unsigned num_args)
834 {
835         BUG_ON(as->argc < num_args);
836         as->argc -= num_args;
837         as->argv += num_args;
838 }
839 EXPORT_SYMBOL(dm_consume_args);
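/*
 * Illustrative sketch of the usual pattern for these argument helpers
 * in a target constructor.  The _args table, the "writethrough"
 * feature name and the error strings are hypothetical; the
 * dm_read_arg_group()/dm_shift_arg() calls follow the real API.
 *
 *	static const struct dm_arg _args[] = {
 *		{0, 2, "Invalid number of feature arguments"},
 *	};
 *
 *	static int parse_features(struct dm_arg_set *as, struct dm_target *ti)
 *	{
 *		unsigned argc;
 *		const char *arg_name;
 *		int r;
 *
 *		r = dm_read_arg_group(_args, as, &argc, &ti->error);
 *		if (r)
 *			return r;
 *
 *		while (argc--) {
 *			arg_name = dm_shift_arg(as);
 *			if (!strcasecmp(arg_name, "writethrough"))
 *				continue;
 *			ti->error = "Unrecognised feature argument";
 *			return -EINVAL;
 *		}
 *		return 0;
 *	}
 */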
840
841 static bool __table_type_bio_based(enum dm_queue_mode table_type)
842 {
843         return (table_type == DM_TYPE_BIO_BASED ||
844                 table_type == DM_TYPE_DAX_BIO_BASED ||
845                 table_type == DM_TYPE_NVME_BIO_BASED);
846 }
847
848 static bool __table_type_request_based(enum dm_queue_mode table_type)
849 {
850         return table_type == DM_TYPE_REQUEST_BASED;
851 }
852
853 void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type)
854 {
855         t->type = type;
856 }
857 EXPORT_SYMBOL_GPL(dm_table_set_type);
858
859 /* validate the dax capability of the target device span */
860 int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
861                         sector_t start, sector_t len, void *data)
862 {
863         int blocksize = *(int *) data;
864
865         return generic_fsdax_supported(dev->dax_dev, dev->bdev, blocksize,
866                                        start, len);
867 }
868
869 /* Check devices support synchronous DAX */
870 static int device_dax_synchronous(struct dm_target *ti, struct dm_dev *dev,
871                                   sector_t start, sector_t len, void *data)
872 {
873         return dev->dax_dev && dax_synchronous(dev->dax_dev);
874 }
875
876 bool dm_table_supports_dax(struct dm_table *t,
877                            iterate_devices_callout_fn iterate_fn, int *blocksize)
878 {
879         struct dm_target *ti;
880         unsigned i;
881
882         /* Ensure that all targets support DAX. */
883         for (i = 0; i < dm_table_get_num_targets(t); i++) {
884                 ti = dm_table_get_target(t, i);
885
886                 if (!ti->type->direct_access)
887                         return false;
888
889                 if (!ti->type->iterate_devices ||
890                     !ti->type->iterate_devices(ti, iterate_fn, blocksize))
891                         return false;
892         }
893
894         return true;
895 }
896
897 static bool dm_table_does_not_support_partial_completion(struct dm_table *t);
898
899 static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev,
900                                   sector_t start, sector_t len, void *data)
901 {
902         struct block_device *bdev = dev->bdev;
903         struct request_queue *q = bdev_get_queue(bdev);
904
905         /* request-based cannot stack on partitions! */
906         if (bdev != bdev->bd_contains)
907                 return false;
908
909         return queue_is_mq(q);
910 }
911
912 static int dm_table_determine_type(struct dm_table *t)
913 {
914         unsigned i;
915         unsigned bio_based = 0, request_based = 0, hybrid = 0;
916         struct dm_target *tgt;
917         struct list_head *devices = dm_table_get_devices(t);
918         enum dm_queue_mode live_md_type = dm_get_md_type(t->md);
919         int page_size = PAGE_SIZE;
920
921         if (t->type != DM_TYPE_NONE) {
922                 /* target already set the table's type */
923                 if (t->type == DM_TYPE_BIO_BASED) {
924                         /* possibly upgrade to a variant of bio-based */
925                         goto verify_bio_based;
926                 }
927                 BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED);
928                 BUG_ON(t->type == DM_TYPE_NVME_BIO_BASED);
929                 goto verify_rq_based;
930         }
931
932         for (i = 0; i < t->num_targets; i++) {
933                 tgt = t->targets + i;
934                 if (dm_target_hybrid(tgt))
935                         hybrid = 1;
936                 else if (dm_target_request_based(tgt))
937                         request_based = 1;
938                 else
939                         bio_based = 1;
940
941                 if (bio_based && request_based) {
942                         DMERR("Inconsistent table: different target types"
943                               " can't be mixed up");
944                         return -EINVAL;
945                 }
946         }
947
948         if (hybrid && !bio_based && !request_based) {
949                 /*
950                  * The targets can work either way.
951                  * Determine the type from the live device.
952                  * Default to bio-based if device is new.
953                  */
954                 if (__table_type_request_based(live_md_type))
955                         request_based = 1;
956                 else
957                         bio_based = 1;
958         }
959
960         if (bio_based) {
961 verify_bio_based:
962                 /* We must use this table as bio-based */
963                 t->type = DM_TYPE_BIO_BASED;
964                 if (dm_table_supports_dax(t, device_supports_dax, &page_size) ||
965                     (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) {
966                         t->type = DM_TYPE_DAX_BIO_BASED;
967                 } else {
968                         /* Check if upgrading to NVMe bio-based is valid or required */
969                         tgt = dm_table_get_immutable_target(t);
970                         if (tgt && !tgt->max_io_len && dm_table_does_not_support_partial_completion(t)) {
971                                 t->type = DM_TYPE_NVME_BIO_BASED;
972                                 goto verify_rq_based; /* must be stacked directly on NVMe (blk-mq) */
973                         } else if (list_empty(devices) && live_md_type == DM_TYPE_NVME_BIO_BASED) {
974                                 t->type = DM_TYPE_NVME_BIO_BASED;
975                         }
976                 }
977                 return 0;
978         }
979
980         BUG_ON(!request_based); /* No targets in this table */
981
982         t->type = DM_TYPE_REQUEST_BASED;
983
984 verify_rq_based:
985         /*
986          * Request-based dm supports only tables that have a single target now.
987          * To support multiple targets, request splitting support is needed,
988          * and that needs lots of changes in the block-layer.
989          * (e.g. request completion process for partial completion.)
990          */
991         if (t->num_targets > 1) {
992                 DMERR("%s DM doesn't support multiple targets",
993                       t->type == DM_TYPE_NVME_BIO_BASED ? "nvme bio-based" : "request-based");
994                 return -EINVAL;
995         }
996
997         if (list_empty(devices)) {
998                 int srcu_idx;
999                 struct dm_table *live_table = dm_get_live_table(t->md, &srcu_idx);
1000
1001                 /* inherit live table's type */
1002                 if (live_table)
1003                         t->type = live_table->type;
1004                 dm_put_live_table(t->md, srcu_idx);
1005                 return 0;
1006         }
1007
1008         tgt = dm_table_get_immutable_target(t);
1009         if (!tgt) {
1010                 DMERR("table load rejected: immutable target is required");
1011                 return -EINVAL;
1012         } else if (tgt->max_io_len) {
1013                 DMERR("table load rejected: immutable target that splits IO is not supported");
1014                 return -EINVAL;
1015         }
1016
1017         /* Non-request-stackable devices can't be used for request-based dm */
1018         if (!tgt->type->iterate_devices ||
1019             !tgt->type->iterate_devices(tgt, device_is_rq_stackable, NULL)) {
1020                 DMERR("table load rejected: including non-request-stackable devices");
1021                 return -EINVAL;
1022         }
1023
1024         return 0;
1025 }
1026
1027 enum dm_queue_mode dm_table_get_type(struct dm_table *t)
1028 {
1029         return t->type;
1030 }
1031
1032 struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)
1033 {
1034         return t->immutable_target_type;
1035 }
1036
1037 struct dm_target *dm_table_get_immutable_target(struct dm_table *t)
1038 {
1039         /* Immutable target is implicitly a singleton */
1040         if (t->num_targets > 1 ||
1041             !dm_target_is_immutable(t->targets[0].type))
1042                 return NULL;
1043
1044         return t->targets;
1045 }
1046
1047 struct dm_target *dm_table_get_wildcard_target(struct dm_table *t)
1048 {
1049         struct dm_target *ti;
1050         unsigned i;
1051
1052         for (i = 0; i < dm_table_get_num_targets(t); i++) {
1053                 ti = dm_table_get_target(t, i);
1054                 if (dm_target_is_wildcard(ti->type))
1055                         return ti;
1056         }
1057
1058         return NULL;
1059 }
1060
1061 bool dm_table_bio_based(struct dm_table *t)
1062 {
1063         return __table_type_bio_based(dm_table_get_type(t));
1064 }
1065
1066 bool dm_table_request_based(struct dm_table *t)
1067 {
1068         return __table_type_request_based(dm_table_get_type(t));
1069 }
1070
1071 static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
1072 {
1073         enum dm_queue_mode type = dm_table_get_type(t);
1074         unsigned per_io_data_size = 0;
1075         unsigned min_pool_size = 0;
1076         struct dm_target *ti;
1077         unsigned i;
1078
1079         if (unlikely(type == DM_TYPE_NONE)) {
1080                 DMWARN("no table type is set, can't allocate mempools");
1081                 return -EINVAL;
1082         }
1083
1084         if (__table_type_bio_based(type))
1085                 for (i = 0; i < t->num_targets; i++) {
1086                         ti = t->targets + i;
1087                         per_io_data_size = max(per_io_data_size, ti->per_io_data_size);
1088                         min_pool_size = max(min_pool_size, ti->num_flush_bios);
1089                 }
1090
1091         t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported,
1092                                            per_io_data_size, min_pool_size);
1093         if (!t->mempools)
1094                 return -ENOMEM;
1095
1096         return 0;
1097 }
1098
1099 void dm_table_free_md_mempools(struct dm_table *t)
1100 {
1101         dm_free_md_mempools(t->mempools);
1102         t->mempools = NULL;
1103 }
1104
1105 struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t)
1106 {
1107         return t->mempools;
1108 }
1109
1110 static int setup_indexes(struct dm_table *t)
1111 {
1112         int i;
1113         unsigned int total = 0;
1114         sector_t *indexes;
1115
1116         /* allocate the space for *all* the indexes */
1117         for (i = t->depth - 2; i >= 0; i--) {
1118                 t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
1119                 total += t->counts[i];
1120         }
1121
1122         indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE);
1123         if (!indexes)
1124                 return -ENOMEM;
1125
1126         /* set up internal nodes, bottom-up */
1127         for (i = t->depth - 2; i >= 0; i--) {
1128                 t->index[i] = indexes;
1129                 indexes += (KEYS_PER_NODE * t->counts[i]);
1130                 setup_btree_index(i, t);
1131         }
1132
1133         return 0;
1134 }
1135
1136 /*
1137  * Builds the btree to index the map.
1138  */
1139 static int dm_table_build_index(struct dm_table *t)
1140 {
1141         int r = 0;
1142         unsigned int leaf_nodes;
1143
1144         /* how many indexes will the btree have ? */
1145         leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
1146         t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);
1147
1148         /* leaf layer has already been set up */
1149         t->counts[t->depth - 1] = leaf_nodes;
1150         t->index[t->depth - 1] = t->highs;
1151
1152         if (t->depth >= 2)
1153                 r = setup_indexes(t);
1154
1155         return r;
1156 }
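/*
 * Illustrative worked example of the index build, assuming
 * KEYS_PER_NODE == 8 and CHILDREN_PER_NODE == 9 (see the defines at the
 * top of this file).  For a table with 100 targets:
 *
 *	leaf_nodes = dm_div_up(100, 8)   = 13
 *	depth      = 1 + int_log(13, 9)  = 3
 *	counts[2]  = 13  (leaf level, backed directly by t->highs)
 *	counts[1]  = dm_div_up(13, 9)    = 2
 *	counts[0]  = dm_div_up(2, 9)     = 1
 *
 * setup_indexes() then allocates the 1 + 2 = 3 internal nodes with
 * dm_vcalloc() and fills them bottom-up from the children's high keys.
 */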
1157
1158 static bool integrity_profile_exists(struct gendisk *disk)
1159 {
1160         return !!blk_get_integrity(disk);
1161 }
1162
1163 /*
1164  * Get a disk whose integrity profile reflects the table's profile.
1165  * Returns NULL if integrity support was inconsistent or unavailable.
1166  */
1167 static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t)
1168 {
1169         struct list_head *devices = dm_table_get_devices(t);
1170         struct dm_dev_internal *dd = NULL;
1171         struct gendisk *prev_disk = NULL, *template_disk = NULL;
1172         unsigned i;
1173
1174         for (i = 0; i < dm_table_get_num_targets(t); i++) {
1175                 struct dm_target *ti = dm_table_get_target(t, i);
1176                 if (!dm_target_passes_integrity(ti->type))
1177                         goto no_integrity;
1178         }
1179
1180         list_for_each_entry(dd, devices, list) {
1181                 template_disk = dd->dm_dev->bdev->bd_disk;
1182                 if (!integrity_profile_exists(template_disk))
1183                         goto no_integrity;
1184                 else if (prev_disk &&
1185                          blk_integrity_compare(prev_disk, template_disk) < 0)
1186                         goto no_integrity;
1187                 prev_disk = template_disk;
1188         }
1189
1190         return template_disk;
1191
1192 no_integrity:
1193         if (prev_disk)
1194                 DMWARN("%s: integrity not set: %s and %s profile mismatch",
1195                        dm_device_name(t->md),
1196                        prev_disk->disk_name,
1197                        template_disk->disk_name);
1198         return NULL;
1199 }
1200
1201 /*
1202  * Register the mapped device for blk_integrity support if the
1203  * underlying devices have an integrity profile.  But all devices may
1204  * not have matching profiles (checking all devices isn't reliable
1205  * during table load because this table may use other DM device(s) which
1206  * must be resumed before they will have an initialized integrity
1207  * profile).  Consequently, stacked DM devices force a 2 stage integrity
1208  * profile validation: First pass during table load, final pass during
1209  * resume.
1210  */
1211 static int dm_table_register_integrity(struct dm_table *t)
1212 {
1213         struct mapped_device *md = t->md;
1214         struct gendisk *template_disk = NULL;
1215
1216         /* If target handles integrity itself do not register it here. */
1217         if (t->integrity_added)
1218                 return 0;
1219
1220         template_disk = dm_table_get_integrity_disk(t);
1221         if (!template_disk)
1222                 return 0;
1223
1224         if (!integrity_profile_exists(dm_disk(md))) {
1225                 t->integrity_supported = true;
1226                 /*
1227                  * Register integrity profile during table load; we can do
1228                  * this because the final profile must match during resume.
1229                  */
1230                 blk_integrity_register(dm_disk(md),
1231                                        blk_get_integrity(template_disk));
1232                 return 0;
1233         }
1234
1235         /*
1236          * If DM device already has an initialized integrity
1237          * profile the new profile should not conflict.
1238          */
1239         if (blk_integrity_compare(dm_disk(md), template_disk) < 0) {
1240                 DMWARN("%s: conflict with existing integrity profile: "
1241                        "%s profile mismatch",
1242                        dm_device_name(t->md),
1243                        template_disk->disk_name);
1244                 return 1;
1245         }
1246
1247         /* Preserve existing integrity profile */
1248         t->integrity_supported = true;
1249         return 0;
1250 }
1251
1252 /*
1253  * Prepares the table for use by building the indices,
1254  * setting the type, and allocating mempools.
1255  */
1256 int dm_table_complete(struct dm_table *t)
1257 {
1258         int r;
1259
1260         r = dm_table_determine_type(t);
1261         if (r) {
1262                 DMERR("unable to determine table type");
1263                 return r;
1264         }
1265
1266         r = dm_table_build_index(t);
1267         if (r) {
1268                 DMERR("unable to build btrees");
1269                 return r;
1270         }
1271
1272         r = dm_table_register_integrity(t);
1273         if (r) {
1274                 DMERR("could not register integrity profile.");
1275                 return r;
1276         }
1277
1278         r = dm_table_alloc_md_mempools(t, t->md);
1279         if (r)
1280                 DMERR("unable to allocate mempools");
1281
1282         return r;
1283 }
1284
1285 static DEFINE_MUTEX(_event_lock);
1286 void dm_table_event_callback(struct dm_table *t,
1287                              void (*fn)(void *), void *context)
1288 {
1289         mutex_lock(&_event_lock);
1290         t->event_fn = fn;
1291         t->event_context = context;
1292         mutex_unlock(&_event_lock);
1293 }
1294
1295 void dm_table_event(struct dm_table *t)
1296 {
1297         /*
1298          * You can no longer call dm_table_event() from interrupt
1299          * context; use a bottom half instead.
1300          */
1301         BUG_ON(in_interrupt());
1302
1303         mutex_lock(&_event_lock);
1304         if (t->event_fn)
1305                 t->event_fn(t->event_context);
1306         mutex_unlock(&_event_lock);
1307 }
1308 EXPORT_SYMBOL(dm_table_event);
1309
1310 inline sector_t dm_table_get_size(struct dm_table *t)
1311 {
1312         return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
1313 }
1314 EXPORT_SYMBOL(dm_table_get_size);
1315
1316 struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
1317 {
1318         if (index >= t->num_targets)
1319                 return NULL;
1320
1321         return t->targets + index;
1322 }
1323
1324 /*
1325  * Search the btree for the correct target.
1326  *
1327  * Caller should check returned pointer for NULL
1328  * to trap I/O beyond end of device.
1329  */
1330 struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
1331 {
1332         unsigned int l, n = 0, k = 0;
1333         sector_t *node;
1334
1335         if (unlikely(sector >= dm_table_get_size(t)))
1336                 return NULL;
1337
1338         for (l = 0; l < t->depth; l++) {
1339                 n = get_child(n, k);
1340                 node = get_node(t, l, n);
1341
1342                 for (k = 0; k < KEYS_PER_NODE; k++)
1343                         if (node[k] >= sector)
1344                                 break;
1345         }
1346
1347         return &t->targets[(KEYS_PER_NODE * n) + k];
1348 }
1349
1350 static int count_device(struct dm_target *ti, struct dm_dev *dev,
1351                         sector_t start, sector_t len, void *data)
1352 {
1353         unsigned *num_devices = data;
1354
1355         (*num_devices)++;
1356
1357         return 0;
1358 }
1359
1360 /*
1361  * Check whether a table has no data devices attached using each
1362  * target's iterate_devices method.
1363  * Returns false if the result is unknown because a target doesn't
1364  * support iterate_devices.
1365  */
1366 bool dm_table_has_no_data_devices(struct dm_table *table)
1367 {
1368         struct dm_target *ti;
1369         unsigned i, num_devices;
1370
1371         for (i = 0; i < dm_table_get_num_targets(table); i++) {
1372                 ti = dm_table_get_target(table, i);
1373
1374                 if (!ti->type->iterate_devices)
1375                         return false;
1376
1377                 num_devices = 0;
1378                 ti->type->iterate_devices(ti, count_device, &num_devices);
1379                 if (num_devices)
1380                         return false;
1381         }
1382
1383         return true;
1384 }
1385
1386 static int device_is_zoned_model(struct dm_target *ti, struct dm_dev *dev,
1387                                  sector_t start, sector_t len, void *data)
1388 {
1389         struct request_queue *q = bdev_get_queue(dev->bdev);
1390         enum blk_zoned_model *zoned_model = data;
1391
1392         return q && blk_queue_zoned_model(q) == *zoned_model;
1393 }
1394
1395 static bool dm_table_supports_zoned_model(struct dm_table *t,
1396                                           enum blk_zoned_model zoned_model)
1397 {
1398         struct dm_target *ti;
1399         unsigned i;
1400
1401         for (i = 0; i < dm_table_get_num_targets(t); i++) {
1402                 ti = dm_table_get_target(t, i);
1403
1404                 if (zoned_model == BLK_ZONED_HM &&
1405                     !dm_target_supports_zoned_hm(ti->type))
1406                         return false;
1407
1408                 if (!ti->type->iterate_devices ||
1409                     !ti->type->iterate_devices(ti, device_is_zoned_model, &zoned_model))
1410                         return false;
1411         }
1412
1413         return true;
1414 }
1415
1416 static int device_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev,
1417                                        sector_t start, sector_t len, void *data)
1418 {
1419         struct request_queue *q = bdev_get_queue(dev->bdev);
1420         unsigned int *zone_sectors = data;
1421
1422         return q && blk_queue_zone_sectors(q) == *zone_sectors;
1423 }
1424
1425 static bool dm_table_matches_zone_sectors(struct dm_table *t,
1426                                           unsigned int zone_sectors)
1427 {
1428         struct dm_target *ti;
1429         unsigned i;
1430
1431         for (i = 0; i < dm_table_get_num_targets(t); i++) {
1432                 ti = dm_table_get_target(t, i);
1433
1434                 if (!ti->type->iterate_devices ||
1435                     !ti->type->iterate_devices(ti, device_matches_zone_sectors, &zone_sectors))
1436                         return false;
1437         }
1438
1439         return true;
1440 }
1441
1442 static int validate_hardware_zoned_model(struct dm_table *table,
1443                                          enum blk_zoned_model zoned_model,
1444                                          unsigned int zone_sectors)
1445 {
1446         if (zoned_model == BLK_ZONED_NONE)
1447                 return 0;
1448
1449         if (!dm_table_supports_zoned_model(table, zoned_model)) {
1450                 DMERR("%s: zoned model is not consistent across all devices",
1451                       dm_device_name(table->md));
1452                 return -EINVAL;
1453         }
1454
1455         /* Check zone size validity and compatibility */
1456         if (!zone_sectors || !is_power_of_2(zone_sectors))
1457                 return -EINVAL;
1458
1459         if (!dm_table_matches_zone_sectors(table, zone_sectors)) {
1460                 DMERR("%s: zone sectors is not consistent across all devices",
1461                       dm_device_name(table->md));
1462                 return -EINVAL;
1463         }
1464
1465         return 0;
1466 }
1467
1468 /*
1469  * Establish the new table's queue_limits and validate them.
1470  */
1471 int dm_calculate_queue_limits(struct dm_table *table,
1472                               struct queue_limits *limits)
1473 {
1474         struct dm_target *ti;
1475         struct queue_limits ti_limits;
1476         unsigned i;
1477         enum blk_zoned_model zoned_model = BLK_ZONED_NONE;
1478         unsigned int zone_sectors = 0;
1479
1480         blk_set_stacking_limits(limits);
1481
1482         for (i = 0; i < dm_table_get_num_targets(table); i++) {
1483                 blk_set_stacking_limits(&ti_limits);
1484
1485                 ti = dm_table_get_target(table, i);
1486
1487                 if (!ti->type->iterate_devices)
1488                         goto combine_limits;
1489
1490                 /*
1491                  * Combine queue limits of all the devices this target uses.
1492                  */
1493                 ti->type->iterate_devices(ti, dm_set_device_limits,
1494                                           &ti_limits);
1495
1496                 if (zoned_model == BLK_ZONED_NONE && ti_limits.zoned != BLK_ZONED_NONE) {
1497                         /*
1498                          * After stacking all limits, validate all devices
1499                          * in table support this zoned model and zone sectors.
1500                          */
1501                         zoned_model = ti_limits.zoned;
1502                         zone_sectors = ti_limits.chunk_sectors;
1503                 }
1504
1505                 /* Set I/O hints portion of queue limits */
1506                 if (ti->type->io_hints)
1507                         ti->type->io_hints(ti, &ti_limits);
1508
1509                 /*
1510                  * Check each device area is consistent with the target's
1511                  * overall queue limits.
1512                  */
1513                 if (ti->type->iterate_devices(ti, device_area_is_invalid,
1514                                               &ti_limits))
1515                         return -EINVAL;
1516
1517 combine_limits:
1518                 /*
1519                  * Merge this target's queue limits into the overall limits
1520                  * for the table.
1521                  */
1522                 if (blk_stack_limits(limits, &ti_limits, 0) < 0)
1523                         DMWARN("%s: adding target device "
1524                                "(start sect %llu len %llu) "
1525                                "caused an alignment inconsistency",
1526                                dm_device_name(table->md),
1527                                (unsigned long long) ti->begin,
1528                                (unsigned long long) ti->len);
1529         }
1530
1531         /*
1532          * Verify that the zoned model and zone sectors, as determined before
1533          * any .io_hints override, are the same across all devices in the table.
1534          * - this is especially relevant if .io_hints is emulating a disk-managed
1535          *   zoned model (aka BLK_ZONED_NONE) on host-managed zoned block devices.
1536          * BUT...
1537          */
1538         if (limits->zoned != BLK_ZONED_NONE) {
1539                 /*
1540                  * ...IF the above limits stacking determined a zoned model
1541                  * validate that all of the table's devices conform to it.
1542                  */
1543                 zoned_model = limits->zoned;
1544                 zone_sectors = limits->chunk_sectors;
1545         }
1546         if (validate_hardware_zoned_model(table, zoned_model, zone_sectors))
1547                 return -EINVAL;
1548
1549         return validate_hardware_logical_block_alignment(table, limits);
1550 }
1551
1552 /*
1553  * Verify that all devices have an integrity profile that matches the
1554  * DM device's registered integrity profile.  If the profiles don't
1555  * match then unregister the DM device's integrity profile.
1556  */
1557 static void dm_table_verify_integrity(struct dm_table *t)
1558 {
1559         struct gendisk *template_disk = NULL;
1560
1561         if (t->integrity_added)
1562                 return;
1563
1564         if (t->integrity_supported) {
1565                 /*
1566                  * Verify that the original integrity profile
1567                  * matches all the devices in this table.
1568                  */
1569                 template_disk = dm_table_get_integrity_disk(t);
1570                 if (template_disk &&
1571                     blk_integrity_compare(dm_disk(t->md), template_disk) >= 0)
1572                         return;
1573         }
1574
1575         if (integrity_profile_exists(dm_disk(t->md))) {
1576                 DMWARN("%s: unable to establish an integrity profile",
1577                        dm_device_name(t->md));
1578                 blk_integrity_unregister(dm_disk(t->md));
1579         }
1580 }
1581
1582 static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
1583                                 sector_t start, sector_t len, void *data)
1584 {
1585         unsigned long flush = (unsigned long) data;
1586         struct request_queue *q = bdev_get_queue(dev->bdev);
1587
1588         return q && (q->queue_flags & flush);
1589 }
1590
1591 static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush)
1592 {
1593         struct dm_target *ti;
1594         unsigned i;
1595
1596         /*
1597          * Require at least one underlying device to support flushes.
1598          * t->devices includes internal dm devices such as mirror logs
1599          * so we need to use iterate_devices here, which targets
1600          * supporting flushes must provide.
1601          */
1602         for (i = 0; i < dm_table_get_num_targets(t); i++) {
1603                 ti = dm_table_get_target(t, i);
1604
1605                 if (!ti->num_flush_bios)
1606                         continue;
1607
1608                 if (ti->flush_supported)
1609                         return true;
1610
1611                 if (ti->type->iterate_devices &&
1612                     ti->type->iterate_devices(ti, device_flush_capable, (void *) flush))
1613                         return true;
1614         }
1615
1616         return false;
1617 }
1618
1619 static int device_dax_write_cache_enabled(struct dm_target *ti,
1620                                           struct dm_dev *dev, sector_t start,
1621                                           sector_t len, void *data)
1622 {
1623         struct dax_device *dax_dev = dev->dax_dev;
1624
1625         if (!dax_dev)
1626                 return false;
1627
1628         if (dax_write_cache_enabled(dax_dev))
1629                 return true;
1630         return false;
1631 }
1632
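/*
 * Returns true if any underlying device exposes a DAX write cache that is
 * enabled; dm_table_set_restrictions() then marks the DM dax_device as
 * write-cached too.
 */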
1633 static int dm_table_supports_dax_write_cache(struct dm_table *t)
1634 {
1635         struct dm_target *ti;
1636         unsigned i;
1637
1638         for (i = 0; i < dm_table_get_num_targets(t); i++) {
1639                 ti = dm_table_get_target(t, i);
1640
1641                 if (ti->type->iterate_devices &&
1642                     ti->type->iterate_devices(ti,
1643                                 device_dax_write_cache_enabled, NULL))
1644                         return true;
1645         }
1646
1647         return false;
1648 }
1649
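/*
 * iterate_devices callouts for dm_table_all_devices_attribute(): whether the
 * underlying queue is non-rotational, and whether it is excluded from
 * contributing to the entropy pool.
 */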
1650 static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
1651                             sector_t start, sector_t len, void *data)
1652 {
1653         struct request_queue *q = bdev_get_queue(dev->bdev);
1654
1655         return q && blk_queue_nonrot(q);
1656 }
1657
1658 static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
1659                              sector_t start, sector_t len, void *data)
1660 {
1661         struct request_queue *q = bdev_get_queue(dev->bdev);
1662
1663         return q && !blk_queue_add_random(q);
1664 }
1665
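/*
 * Returns true only if every target implements iterate_devices and @func
 * returns true for every underlying device; a single target without
 * iterate_devices (e.g. error or zero) makes the result false.
 */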
1666 static bool dm_table_all_devices_attribute(struct dm_table *t,
1667                                            iterate_devices_callout_fn func)
1668 {
1669         struct dm_target *ti;
1670         unsigned i;
1671
1672         for (i = 0; i < dm_table_get_num_targets(t); i++) {
1673                 ti = dm_table_get_target(t, i);
1674
1675                 if (!ti->type->iterate_devices ||
1676                     !ti->type->iterate_devices(ti, func, NULL))
1677                         return false;
1678         }
1679
1680         return true;
1681 }
1682
1683 static int device_no_partial_completion(struct dm_target *ti, struct dm_dev *dev,
1684                                         sector_t start, sector_t len, void *data)
1685 {
1686         char b[BDEVNAME_SIZE];
1687
1688         /* For now, NVMe devices are the only devices of this class */
1689         return (strncmp(bdevname(dev->bdev, b), "nvme", 4) == 0);
1690 }
1691
1692 static bool dm_table_does_not_support_partial_completion(struct dm_table *t)
1693 {
1694         return dm_table_all_devices_attribute(t, device_no_partial_completion);
1695 }
1696
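/*
 * The device_not_*_capable() callouts below report devices that LACK a
 * capability; the corresponding dm_table_supports_*() helper returns false
 * as soon as iterate_devices() finds such a device.
 */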
1697 static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
1698                                          sector_t start, sector_t len, void *data)
1699 {
1700         struct request_queue *q = bdev_get_queue(dev->bdev);
1701
1702         return q && !q->limits.max_write_same_sectors;
1703 }
1704
1705 static bool dm_table_supports_write_same(struct dm_table *t)
1706 {
1707         struct dm_target *ti;
1708         unsigned i;
1709
1710         for (i = 0; i < dm_table_get_num_targets(t); i++) {
1711                 ti = dm_table_get_target(t, i);
1712
1713                 if (!ti->num_write_same_bios)
1714                         return false;
1715
1716                 if (!ti->type->iterate_devices ||
1717                     ti->type->iterate_devices(ti, device_not_write_same_capable, NULL))
1718                         return false;
1719         }
1720
1721         return true;
1722 }
1723
1724 static int device_not_write_zeroes_capable(struct dm_target *ti, struct dm_dev *dev,
1725                                            sector_t start, sector_t len, void *data)
1726 {
1727         struct request_queue *q = bdev_get_queue(dev->bdev);
1728
1729         return q && !q->limits.max_write_zeroes_sectors;
1730 }
1731
1732 static bool dm_table_supports_write_zeroes(struct dm_table *t)
1733 {
1734         struct dm_target *ti;
1735         unsigned i;
1736
1737         for (i = 0; i < dm_table_get_num_targets(t); i++) {
1738                 ti = dm_table_get_target(t, i);
1739
1740                 if (!ti->num_write_zeroes_bios)
1741                         return false;
1742
1743                 if (!ti->type->iterate_devices ||
1744                     ti->type->iterate_devices(ti, device_not_write_zeroes_capable, NULL))
1745                         return false;
1746         }
1747
1748         return true;
1749 }
1750
1751 static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev,
1752                                       sector_t start, sector_t len, void *data)
1753 {
1754         struct request_queue *q = bdev_get_queue(dev->bdev);
1755
1756         return q && !blk_queue_discard(q);
1757 }
1758
1759 static bool dm_table_supports_discards(struct dm_table *t)
1760 {
1761         struct dm_target *ti;
1762         unsigned i;
1763
1764         for (i = 0; i < dm_table_get_num_targets(t); i++) {
1765                 ti = dm_table_get_target(t, i);
1766
1767                 if (!ti->num_discard_bios)
1768                         return false;
1769
1770                 /*
1771                  * Either the target provides discard support (as implied by setting
1772                  * 'discards_supported') or it relies on _all_ data devices having
1773                  * discard support.
1774                  */
1775                 if (!ti->discards_supported &&
1776                     (!ti->type->iterate_devices ||
1777                      ti->type->iterate_devices(ti, device_not_discard_capable, NULL)))
1778                         return false;
1779         }
1780
1781         return true;
1782 }
1783
1784 static int device_not_secure_erase_capable(struct dm_target *ti,
1785                                            struct dm_dev *dev, sector_t start,
1786                                            sector_t len, void *data)
1787 {
1788         struct request_queue *q = bdev_get_queue(dev->bdev);
1789
1790         return q && !blk_queue_secure_erase(q);
1791 }
1792
1793 static bool dm_table_supports_secure_erase(struct dm_table *t)
1794 {
1795         struct dm_target *ti;
1796         unsigned int i;
1797
1798         for (i = 0; i < dm_table_get_num_targets(t); i++) {
1799                 ti = dm_table_get_target(t, i);
1800
1801                 if (!ti->num_secure_erase_bios)
1802                         return false;
1803
1804                 if (!ti->type->iterate_devices ||
1805                     ti->type->iterate_devices(ti, device_not_secure_erase_capable, NULL))
1806                         return false;
1807         }
1808
1809         return true;
1810 }
1811
1812 static int device_requires_stable_pages(struct dm_target *ti,
1813                                         struct dm_dev *dev, sector_t start,
1814                                         sector_t len, void *data)
1815 {
1816         struct request_queue *q = bdev_get_queue(dev->bdev);
1817
1818         return q && bdi_cap_stable_pages_required(q->backing_dev_info);
1819 }
1820
1821 /*
1822  * If any underlying device requires stable pages, a table must require
1823  * them as well.  Only targets that support iterate_devices are considered:
1824          * we don't want error, zero, etc. to require stable pages.
1825  */
1826 static bool dm_table_requires_stable_pages(struct dm_table *t)
1827 {
1828         struct dm_target *ti;
1829         unsigned i;
1830
1831         for (i = 0; i < dm_table_get_num_targets(t); i++) {
1832                 ti = dm_table_get_target(t, i);
1833
1834                 if (ti->type->iterate_devices &&
1835                     ti->type->iterate_devices(ti, device_requires_stable_pages, NULL))
1836                         return true;
1837         }
1838
1839         return false;
1840 }
1841
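/*
 * Apply the validated queue_limits and per-feature flags (discard, secure
 * erase, write cache/FUA, DAX, non-rotational, WRITE SAME/ZEROES, integrity,
 * stable pages, entropy contribution, zone count) to the mapped device's
 * request_queue.
 */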
1842 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
1843                                struct queue_limits *limits)
1844 {
1845         bool wc = false, fua = false;
1846         int page_size = PAGE_SIZE;
1847
1848         /*
1849          * Copy table's limits to the DM device's request_queue
1850          */
1851         q->limits = *limits;
1852
1853         if (!dm_table_supports_discards(t)) {
1854                 blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
1855                 /* Must also clear discard limits... */
1856                 q->limits.max_discard_sectors = 0;
1857                 q->limits.max_hw_discard_sectors = 0;
1858                 q->limits.discard_granularity = 0;
1859                 q->limits.discard_alignment = 0;
1860                 q->limits.discard_misaligned = 0;
1861         } else
1862                 blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
1863
1864         if (dm_table_supports_secure_erase(t))
1865                 blk_queue_flag_set(QUEUE_FLAG_SECERASE, q);
1866
1867         if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
1868                 wc = true;
1869                 if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_FUA)))
1870                         fua = true;
1871         }
1872         blk_queue_write_cache(q, wc, fua);
1873
1874         if (dm_table_supports_dax(t, device_supports_dax, &page_size)) {
1875                 blk_queue_flag_set(QUEUE_FLAG_DAX, q);
1876                 if (dm_table_supports_dax(t, device_dax_synchronous, NULL))
1877                         set_dax_synchronous(t->md->dax_dev);
1878         }
1879         else
1880                 blk_queue_flag_clear(QUEUE_FLAG_DAX, q);
1881
1882         if (dm_table_supports_dax_write_cache(t))
1883                 dax_write_cache(t->md->dax_dev, true);
1884
1885         /* Ensure that all underlying devices are non-rotational. */
1886         if (dm_table_all_devices_attribute(t, device_is_nonrot))
1887                 blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
1888         else
1889                 blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
1890
1891         if (!dm_table_supports_write_same(t))
1892                 q->limits.max_write_same_sectors = 0;
1893         if (!dm_table_supports_write_zeroes(t))
1894                 q->limits.max_write_zeroes_sectors = 0;
1895
1896         dm_table_verify_integrity(t);
1897
1898         /*
1899          * Some devices don't use blk_integrity but still want stable pages
1900          * because they do their own checksumming.
1901          */
1902         if (dm_table_requires_stable_pages(t))
1903                 q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
1904         else
1905                 q->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES;
1906
1907         /*
1908          * Determine whether or not this queue's I/O timings contribute
1909          * to the entropy pool, Only request-based targets use this.
1910          * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
1911          * have it set.
1912          */
1913         if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
1914                 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
1915
1916         /*
1917          * For a zoned target, update the number of zones so that the correct
1918          * value is exposed in sysfs queue/nr_zones. For a BIO based target,
1919          * this is all that is needed.
1920          */
1921 #ifdef CONFIG_BLK_DEV_ZONED
1922         if (blk_queue_is_zoned(q)) {
1923                 WARN_ON_ONCE(queue_is_mq(q));
1924                 q->nr_zones = blkdev_nr_zones(t->md->disk);
1925         }
1926 #endif
1927
1928         /* Allow reads to exceed readahead limits */
1929         q->backing_dev_info->io_pages = limits->max_sectors >> (PAGE_SHIFT - 9);
1930 }
1931
1932 unsigned int dm_table_get_num_targets(struct dm_table *t)
1933 {
1934         return t->num_targets;
1935 }
1936
1937 struct list_head *dm_table_get_devices(struct dm_table *t)
1938 {
1939         return &t->devices;
1940 }
1941
1942 fmode_t dm_table_get_mode(struct dm_table *t)
1943 {
1944         return t->mode;
1945 }
1946 EXPORT_SYMBOL(dm_table_get_mode);
1947
1948 enum suspend_mode {
1949         PRESUSPEND,
1950         PRESUSPEND_UNDO,
1951         POSTSUSPEND,
1952 };
1953
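/*
 * Walk all targets and call the hook matching @mode (presuspend,
 * presuspend_undo or postsuspend).  The caller must hold md->suspend_lock.
 */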
1954 static void suspend_targets(struct dm_table *t, enum suspend_mode mode)
1955 {
1956         int i = t->num_targets;
1957         struct dm_target *ti = t->targets;
1958
1959         lockdep_assert_held(&t->md->suspend_lock);
1960
1961         while (i--) {
1962                 switch (mode) {
1963                 case PRESUSPEND:
1964                         if (ti->type->presuspend)
1965                                 ti->type->presuspend(ti);
1966                         break;
1967                 case PRESUSPEND_UNDO:
1968                         if (ti->type->presuspend_undo)
1969                                 ti->type->presuspend_undo(ti);
1970                         break;
1971                 case POSTSUSPEND:
1972                         if (ti->type->postsuspend)
1973                                 ti->type->postsuspend(ti);
1974                         break;
1975                 }
1976                 ti++;
1977         }
1978 }
1979
1980 void dm_table_presuspend_targets(struct dm_table *t)
1981 {
1982         if (!t)
1983                 return;
1984
1985         suspend_targets(t, PRESUSPEND);
1986 }
1987
1988 void dm_table_presuspend_undo_targets(struct dm_table *t)
1989 {
1990         if (!t)
1991                 return;
1992
1993         suspend_targets(t, PRESUSPEND_UNDO);
1994 }
1995
1996 void dm_table_postsuspend_targets(struct dm_table *t)
1997 {
1998         if (!t)
1999                 return;
2000
2001         suspend_targets(t, POSTSUSPEND);
2002 }
2003
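/*
 * Two passes: first call every target's preresume and fail on the first
 * error, then call resume on all targets.
 */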
2004 int dm_table_resume_targets(struct dm_table *t)
2005 {
2006         int i, r = 0;
2007
2008         lockdep_assert_held(&t->md->suspend_lock);
2009
2010         for (i = 0; i < t->num_targets; i++) {
2011                 struct dm_target *ti = t->targets + i;
2012
2013                 if (!ti->type->preresume)
2014                         continue;
2015
2016                 r = ti->type->preresume(ti);
2017                 if (r) {
2018                         DMERR("%s: %s: preresume failed, error = %d",
2019                               dm_device_name(t->md), ti->type->name, r);
2020                         return r;
2021                 }
2022         }
2023
2024         for (i = 0; i < t->num_targets; i++) {
2025                 struct dm_target *ti = t->targets + i;
2026
2027                 if (ti->type->resume)
2028                         ti->type->resume(ti);
2029         }
2030
2031         return 0;
2032 }
2033
2034 struct mapped_device *dm_table_get_md(struct dm_table *t)
2035 {
2036         return t->md;
2037 }
2038 EXPORT_SYMBOL(dm_table_get_md);
2039
2040 const char *dm_table_device_name(struct dm_table *t)
2041 {
2042         return dm_device_name(t->md);
2043 }
2044 EXPORT_SYMBOL_GPL(dm_table_device_name);
2045
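/*
 * Asynchronously kick the blk-mq hardware queues of a request-based DM
 * device; a no-op for bio-based tables.
 */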
2046 void dm_table_run_md_queue_async(struct dm_table *t)
2047 {
2048         struct mapped_device *md;
2049         struct request_queue *queue;
2050
2051         if (!dm_table_request_based(t))
2052                 return;
2053
2054         md = dm_table_get_md(t);
2055         queue = dm_get_md_queue(md);
2056         if (queue)
2057                 blk_mq_run_hw_queues(queue, true);
2058 }
2059 EXPORT_SYMBOL(dm_table_run_md_queue_async);
2060