/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>
#include <linux/mount.h>
#include <linux/dax.h>

#define DM_MSG_PREFIX "table"

#define NODE_SIZE L1_CACHE_BYTES
#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)

/*
 * Similar to ceiling(log_base(n))
 */
static unsigned int int_log(unsigned int n, unsigned int base)
{
        int result = 0;

        while (n > 1) {
                n = dm_div_up(n, base);
                result++;
        }

        return result;
}
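
/*
 * Editor's illustration (not part of the upstream file): with a 64-byte
 * cache line and an 8-byte sector_t, KEYS_PER_NODE is 8 and
 * CHILDREN_PER_NODE is 9, so int_log(100, 9) returns 3 because the
 * rounded-up division walks 100 -> 12 -> 2 -> 1.
 */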

/*
 * Calculate the index of the child node for the k'th key of the n'th node.
 */
static inline unsigned int get_child(unsigned int n, unsigned int k)
{
        return (n * CHILDREN_PER_NODE) + k;
}

/*
 * Return the n'th node of level l from table t.
 */
static inline sector_t *get_node(struct dm_table *t,
                                 unsigned int l, unsigned int n)
{
        return t->index[l] + (n * KEYS_PER_NODE);
}

/*
 * Return the highest key that you could lookup from the n'th
 * node on level l of the btree.
 */
static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
{
        for (; l < t->depth - 1; l++)
                n = get_child(n, CHILDREN_PER_NODE - 1);

        if (n >= t->counts[l])
                return (sector_t) -1;

        return get_node(t, l, n)[KEYS_PER_NODE - 1];
}
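
/*
 * Editor's illustration (assumes CHILDREN_PER_NODE == 9, KEYS_PER_NODE == 8):
 * the btree is stored as flat per-level arrays, so node n's children on the
 * next level are nodes get_child(n, 0) .. get_child(n, 8), i.e. 9n .. 9n+8.
 * high(t, l, n) descends along each node's last child until it reaches the
 * leaf level and reports that leaf's last key, or (sector_t) -1 if the
 * subtree lies beyond the populated part of the level.
 */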

/*
 * Fills in a level of the btree based on the highs of the level
 * below it.
 */
static int setup_btree_index(unsigned int l, struct dm_table *t)
{
        unsigned int n, k;
        sector_t *node;

        for (n = 0U; n < t->counts[l]; n++) {
                node = get_node(t, l, n);

                for (k = 0U; k < KEYS_PER_NODE; k++)
                        node[k] = high(t, l + 1, get_child(n, k));
        }

        return 0;
}

/*
 * highs and targets are managed as dynamic arrays during a
 * table load.
 */
static int alloc_targets(struct dm_table *t, unsigned int num)
{
        sector_t *n_highs;
        struct dm_target *n_targets;

        /*
         * Allocate both the target array and offset array at once.
         */
        n_highs = kvcalloc(num, sizeof(struct dm_target) + sizeof(sector_t),
                           GFP_KERNEL);
        if (!n_highs)
                return -ENOMEM;

        n_targets = (struct dm_target *) (n_highs + num);

        memset(n_highs, -1, sizeof(*n_highs) * num);
        kvfree(t->highs);

        t->num_allocated = num;
        t->highs = n_highs;
        t->targets = n_targets;

        return 0;
}
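
/*
 * Editor's sketch of the combined allocation above (not upstream text):
 *
 *   n_highs                          n_targets
 *   |<----- num * sizeof(sector_t) ----->|<-- num * sizeof(struct dm_target) -->|
 *
 * One kvcalloc() call backs both arrays; n_targets simply points just past
 * the num sector_t slots, and every high is pre-set to (sector_t) -1 by the
 * memset.
 */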

int dm_table_create(struct dm_table **result, fmode_t mode,
                    unsigned num_targets, struct mapped_device *md)
{
        struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);

        if (!t)
                return -ENOMEM;

        INIT_LIST_HEAD(&t->devices);

        if (!num_targets)
                num_targets = KEYS_PER_NODE;

        num_targets = dm_round_up(num_targets, KEYS_PER_NODE);

        if (!num_targets) {
                kfree(t);
                return -ENOMEM;
        }

        if (alloc_targets(t, num_targets)) {
                kfree(t);
                return -ENOMEM;
        }

        t->type = DM_TYPE_NONE;
        t->mode = mode;
        t->md = md;
        *result = t;
        return 0;
}
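
/*
 * Editor's note with a worked example (assumes KEYS_PER_NODE == 8):
 * dm_table_create() rounds the requested target count up to a whole number
 * of btree leaf nodes, so num_targets == 10 becomes 16.  Since num_targets
 * is non-zero before rounding, dm_round_up() can only yield 0 on unsigned
 * overflow, which is why a zero result is treated as an allocation failure.
 */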

static void free_devices(struct list_head *devices, struct mapped_device *md)
{
        struct list_head *tmp, *next;

        list_for_each_safe(tmp, next, devices) {
                struct dm_dev_internal *dd =
                    list_entry(tmp, struct dm_dev_internal, list);
                DMWARN("%s: dm_table_destroy: dm_put_device call missing for %s",
                       dm_device_name(md), dd->dm_dev->name);
                dm_put_table_device(md, dd->dm_dev);
                kfree(dd);
        }
}

static void dm_table_destroy_crypto_profile(struct dm_table *t);

void dm_table_destroy(struct dm_table *t)
{
        unsigned int i;

        if (!t)
                return;

        /* free the indexes */
        if (t->depth >= 2)
                kvfree(t->index[t->depth - 2]);

        /* free the targets */
        for (i = 0; i < t->num_targets; i++) {
                struct dm_target *tgt = t->targets + i;

                if (tgt->type->dtr)
                        tgt->type->dtr(tgt);

                dm_put_target_type(tgt->type);
        }

        kvfree(t->highs);

        /* free the device list */
        free_devices(&t->devices, t->md);

        dm_free_md_mempools(t->mempools);

        dm_table_destroy_crypto_profile(t);

        kfree(t);
}

/*
 * See if we've already got a device in the list.
 */
static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
{
        struct dm_dev_internal *dd;

        list_for_each_entry(dd, l, list)
                if (dd->dm_dev->bdev->bd_dev == dev)
                        return dd;

        return NULL;
}

/*
 * If possible, this checks whether an area of the destination device
 * is invalid.
 */
static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
                                  sector_t start, sector_t len, void *data)
{
        struct queue_limits *limits = data;
        struct block_device *bdev = dev->bdev;
        sector_t dev_size = bdev_nr_sectors(bdev);
        unsigned short logical_block_size_sectors =
                limits->logical_block_size >> SECTOR_SHIFT;
        char b[BDEVNAME_SIZE];

        if (!dev_size)
                return 0;

        if ((start >= dev_size) || (start + len > dev_size)) {
                DMWARN("%s: %s too small for target: "
                       "start=%llu, len=%llu, dev_size=%llu",
                       dm_device_name(ti->table->md), bdevname(bdev, b),
                       (unsigned long long)start,
                       (unsigned long long)len,
                       (unsigned long long)dev_size);
                return 1;
        }

        /*
         * If the target is mapped to zoned block device(s), check
         * that the zones are not partially mapped.
         */
        if (bdev_is_zoned(bdev)) {
                unsigned int zone_sectors = bdev_zone_sectors(bdev);

                if (start & (zone_sectors - 1)) {
                        DMWARN("%s: start=%llu not aligned to h/w zone size %u of %s",
                               dm_device_name(ti->table->md),
                               (unsigned long long)start,
                               zone_sectors, bdevname(bdev, b));
                        return 1;
                }

                /*
                 * Note: The last zone of a zoned block device may be smaller
                 * than other zones. So for a target mapping the end of a
                 * zoned block device with such a zone, len would not be zone
                 * aligned. We do not allow such last smaller zone to be part
                 * of the mapping here to ensure that mappings with multiple
                 * devices do not end up with a smaller zone in the middle of
                 * the sector range.
                 */
                if (len & (zone_sectors - 1)) {
                        DMWARN("%s: len=%llu not aligned to h/w zone size %u of %s",
                               dm_device_name(ti->table->md),
                               (unsigned long long)len,
                               zone_sectors, bdevname(bdev, b));
                        return 1;
                }
        }

        if (logical_block_size_sectors <= 1)
                return 0;

        if (start & (logical_block_size_sectors - 1)) {
                DMWARN("%s: start=%llu not aligned to h/w "
                       "logical block size %u of %s",
                       dm_device_name(ti->table->md),
                       (unsigned long long)start,
                       limits->logical_block_size, bdevname(bdev, b));
                return 1;
        }

        if (len & (logical_block_size_sectors - 1)) {
                DMWARN("%s: len=%llu not aligned to h/w "
                       "logical block size %u of %s",
                       dm_device_name(ti->table->md),
                       (unsigned long long)len,
                       limits->logical_block_size, bdevname(bdev, b));
                return 1;
        }

        return 0;
}

/*
 * This upgrades the mode on an already open dm_dev, being
 * careful to leave things as they were if we fail to reopen the
 * device and not to touch the existing bdev field in case
 * it is accessed concurrently.
 */
static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
                        struct mapped_device *md)
{
        int r;
        struct dm_dev *old_dev, *new_dev;

        old_dev = dd->dm_dev;

        r = dm_get_table_device(md, dd->dm_dev->bdev->bd_dev,
                                dd->dm_dev->mode | new_mode, &new_dev);
        if (r)
                return r;

        dd->dm_dev = new_dev;
        dm_put_table_device(md, old_dev);

        return 0;
}

/*
 * Convert the path to a device
 */
dev_t dm_get_dev_t(const char *path)
{
        dev_t dev;

        if (lookup_bdev(path, &dev))
                dev = name_to_dev_t(path);
        return dev;
}
EXPORT_SYMBOL_GPL(dm_get_dev_t);

/*
 * Add a device to the list, or just increment the usage count if
 * it's already present.
 */
int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
                  struct dm_dev **result)
{
        int r;
        dev_t dev;
        unsigned int major, minor;
        char dummy;
        struct dm_dev_internal *dd;
        struct dm_table *t = ti->table;

        BUG_ON(!t);

        if (sscanf(path, "%u:%u%c", &major, &minor, &dummy) == 2) {
                /* Extract the major/minor numbers */
                dev = MKDEV(major, minor);
                if (MAJOR(dev) != major || MINOR(dev) != minor)
                        return -EOVERFLOW;
        } else {
                dev = dm_get_dev_t(path);
                if (!dev)
                        return -ENODEV;
        }

        dd = find_device(&t->devices, dev);
        if (!dd) {
                dd = kmalloc(sizeof(*dd), GFP_KERNEL);
                if (!dd)
                        return -ENOMEM;

                r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev);
                if (r) {
                        kfree(dd);
                        return r;
                }

                refcount_set(&dd->count, 1);
                list_add(&dd->list, &t->devices);
                goto out;

        } else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) {
                r = upgrade_mode(dd, mode, t->md);
                if (r)
                        return r;
        }
        refcount_inc(&dd->count);
out:
        *result = dd->dm_dev;
        return 0;
}
EXPORT_SYMBOL(dm_get_device);
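
/*
 * Editor's usage sketch (hypothetical target, not from this file): a
 * typical constructor grabs its backing device with dm_get_device() and
 * releases it from the destructor with dm_put_device().  "example_ctx"
 * and "example_ctr" are illustrative names only.
 *
 *	static int example_ctr(struct dm_target *ti, unsigned argc, char **argv)
 *	{
 *		struct example_ctx *ec = ...;
 *		int r;
 *
 *		r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
 *				  &ec->dev);
 *		if (r) {
 *			ti->error = "Device lookup failed";
 *			return r;
 *		}
 *		return 0;
 *	}
 */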

static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
                                sector_t start, sector_t len, void *data)
{
        struct queue_limits *limits = data;
        struct block_device *bdev = dev->bdev;
        struct request_queue *q = bdev_get_queue(bdev);
        char b[BDEVNAME_SIZE];

        if (unlikely(!q)) {
                DMWARN("%s: Cannot set limits for nonexistent device %s",
                       dm_device_name(ti->table->md), bdevname(bdev, b));
                return 0;
        }

        if (blk_stack_limits(limits, &q->limits,
                        get_start_sect(bdev) + start) < 0)
                DMWARN("%s: adding target device %s caused an alignment inconsistency: "
                       "physical_block_size=%u, logical_block_size=%u, "
                       "alignment_offset=%u, start=%llu",
                       dm_device_name(ti->table->md), bdevname(bdev, b),
                       q->limits.physical_block_size,
                       q->limits.logical_block_size,
                       q->limits.alignment_offset,
                       (unsigned long long) start << SECTOR_SHIFT);
        return 0;
}

/*
 * Decrement a device's use count and remove it if necessary.
 */
void dm_put_device(struct dm_target *ti, struct dm_dev *d)
{
        int found = 0;
        struct list_head *devices = &ti->table->devices;
        struct dm_dev_internal *dd;

        list_for_each_entry(dd, devices, list) {
                if (dd->dm_dev == d) {
                        found = 1;
                        break;
                }
        }
        if (!found) {
                DMWARN("%s: device %s not in table devices list",
                       dm_device_name(ti->table->md), d->name);
                return;
        }
        if (refcount_dec_and_test(&dd->count)) {
                dm_put_table_device(ti->table->md, d);
                list_del(&dd->list);
                kfree(dd);
        }
}
EXPORT_SYMBOL(dm_put_device);

/*
 * Checks to see if the target joins onto the end of the table.
 */
static int adjoin(struct dm_table *table, struct dm_target *ti)
{
        struct dm_target *prev;

        if (!table->num_targets)
                return !ti->begin;

        prev = &table->targets[table->num_targets - 1];
        return (ti->begin == (prev->begin + prev->len));
}

/*
 * Used to dynamically allocate the arg array.
 *
 * We do the first allocation with GFP_NOIO because dm-mpath and dm-thin must
 * process messages even if some device is suspended. These messages have a
 * small fixed number of arguments.
 *
 * On the other hand, dm-switch needs to process bulk data using messages and
 * excessive use of GFP_NOIO could cause trouble.
 */
static char **realloc_argv(unsigned *size, char **old_argv)
{
        char **argv;
        unsigned new_size;
        gfp_t gfp;

        if (*size) {
                new_size = *size * 2;
                gfp = GFP_KERNEL;
        } else {
                new_size = 8;
                gfp = GFP_NOIO;
        }
        argv = kmalloc_array(new_size, sizeof(*argv), gfp);
        if (argv) {
                if (old_argv)
                        memcpy(argv, old_argv, *size * sizeof(*argv));
                /* Update the size on every successful (re)allocation. */
                *size = new_size;
        }

        kfree(old_argv);
        return argv;
}

/*
 * Destructively splits up the argument list to pass to ctr.
 */
int dm_split_args(int *argc, char ***argvp, char *input)
{
        char *start, *end = input, *out, **argv = NULL;
        unsigned array_size = 0;

        *argc = 0;

        if (!input) {
                *argvp = NULL;
                return 0;
        }

        argv = realloc_argv(&array_size, argv);
        if (!argv)
                return -ENOMEM;

        while (1) {
                /* Skip whitespace */
                start = skip_spaces(end);

                if (!*start)
                        break;  /* success, we hit the end */

                /* 'out' is used to remove any back-quotes */
                end = out = start;
                while (*end) {
                        /* Everything apart from '\0' can be quoted */
                        if (*end == '\\' && *(end + 1)) {
                                *out++ = *(end + 1);
                                end += 2;
                                continue;
                        }

                        if (isspace(*end))
                                break;  /* end of token */

                        *out++ = *end++;
                }

                /* have we already filled the array ? */
                if ((*argc + 1) > array_size) {
                        argv = realloc_argv(&array_size, argv);
                        if (!argv)
                                return -ENOMEM;
                }

                /* we know this is whitespace */
                if (*end)
                        end++;

                /* terminate the string and put it in the array */
                *out = '\0';
                argv[*argc] = start;
                (*argc)++;
        }

        *argvp = argv;
        return 0;
}
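
/*
 * Editor's illustration (not from upstream): given a writable buffer
 * containing "0 1024 linear /dev/sdb 0", dm_split_args() rewrites it in
 * place and yields argc == 5 with argv == { "0", "1024", "linear",
 * "/dev/sdb", "0" }.  A backslash quotes the following byte, so "a\ b"
 * becomes the single token "a b".  The caller owns argv and frees it
 * with kfree() when done.
 */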

/*
 * Impose necessary and sufficient conditions on a device's table such
 * that any incoming bio which respects its logical_block_size can be
 * processed successfully.  If it falls across the boundary between
 * two or more targets, the size of each piece it gets split into must
 * be compatible with the logical_block_size of the target processing it.
 */
static int validate_hardware_logical_block_alignment(struct dm_table *table,
                                                 struct queue_limits *limits)
{
        /*
         * This function uses arithmetic modulo the logical_block_size
         * (in units of 512-byte sectors).
         */
        unsigned short device_logical_block_size_sects =
                limits->logical_block_size >> SECTOR_SHIFT;

        /*
         * Offset of the start of the next table entry, mod logical_block_size.
         */
        unsigned short next_target_start = 0;

        /*
         * Given an aligned bio that extends beyond the end of a
         * target, how many sectors must the next target handle?
         */
        unsigned short remaining = 0;

        struct dm_target *ti;
        struct queue_limits ti_limits;
        unsigned i;

        /*
         * Check each entry in the table in turn.
         */
        for (i = 0; i < dm_table_get_num_targets(table); i++) {
                ti = dm_table_get_target(table, i);

                blk_set_stacking_limits(&ti_limits);

                /* combine all target devices' limits */
                if (ti->type->iterate_devices)
                        ti->type->iterate_devices(ti, dm_set_device_limits,
                                                  &ti_limits);

                /*
                 * If the remaining sectors fall entirely within this
                 * table entry, are they compatible with its logical_block_size?
                 */
                if (remaining < ti->len &&
                    remaining & ((ti_limits.logical_block_size >>
                                  SECTOR_SHIFT) - 1))
                        break;  /* Error */

                next_target_start =
                    (unsigned short) ((next_target_start + ti->len) &
                                      (device_logical_block_size_sects - 1));
                remaining = next_target_start ?
                    device_logical_block_size_sects - next_target_start : 0;
        }

        if (remaining) {
                DMWARN("%s: table line %u (start sect %llu len %llu) "
                       "not aligned to h/w logical block size %u",
                       dm_device_name(table->md), i,
                       (unsigned long long) ti->begin,
                       (unsigned long long) ti->len,
                       limits->logical_block_size);
                return -EINVAL;
        }

        return 0;
}
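
/*
 * Editor's worked example (assumes a table-wide logical_block_size of
 * 4096 bytes, i.e. 8 sectors): for targets of 4 and 12 sectors, the first
 * boundary lands at sector 4, so next_target_start == 4 and the second
 * target must absorb remaining == 4 spill-over sectors.  If that target's
 * own devices also use 4096-byte blocks, 4 & 7 is non-zero and the loop
 * breaks with -EINVAL; if they use 512-byte blocks, the 4-sector piece is
 * acceptable and validation continues.
 */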

int dm_table_add_target(struct dm_table *t, const char *type,
                        sector_t start, sector_t len, char *params)
{
        int r = -EINVAL, argc;
        char **argv;
        struct dm_target *tgt;

        if (t->singleton) {
                DMERR("%s: target type %s must appear alone in table",
                      dm_device_name(t->md), t->targets->type->name);
                return -EINVAL;
        }

        BUG_ON(t->num_targets >= t->num_allocated);

        tgt = t->targets + t->num_targets;
        memset(tgt, 0, sizeof(*tgt));

        if (!len) {
                DMERR("%s: zero-length target", dm_device_name(t->md));
                return -EINVAL;
        }

        tgt->type = dm_get_target_type(type);
        if (!tgt->type) {
                DMERR("%s: %s: unknown target type", dm_device_name(t->md), type);
                return -EINVAL;
        }

        if (dm_target_needs_singleton(tgt->type)) {
                if (t->num_targets) {
                        tgt->error = "singleton target type must appear alone in table";
                        goto bad;
                }
                t->singleton = true;
        }

        if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) {
                tgt->error = "target type may not be included in a read-only table";
                goto bad;
        }

        if (t->immutable_target_type) {
                if (t->immutable_target_type != tgt->type) {
                        tgt->error = "immutable target type cannot be mixed with other target types";
                        goto bad;
                }
        } else if (dm_target_is_immutable(tgt->type)) {
                if (t->num_targets) {
                        tgt->error = "immutable target type cannot be mixed with other target types";
                        goto bad;
                }
                t->immutable_target_type = tgt->type;
        }

        if (dm_target_has_integrity(tgt->type))
                t->integrity_added = 1;

        tgt->table = t;
        tgt->begin = start;
        tgt->len = len;
        tgt->error = "Unknown error";

        /*
         * Does this target adjoin the previous one ?
         */
        if (!adjoin(t, tgt)) {
                tgt->error = "Gap in table";
                goto bad;
        }

        r = dm_split_args(&argc, &argv, params);
        if (r) {
                tgt->error = "couldn't split parameters";
                goto bad;
        }

        r = tgt->type->ctr(tgt, argc, argv);
        kfree(argv);
        if (r)
                goto bad;

        t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;

        if (!tgt->num_discard_bios && tgt->discards_supported)
                DMWARN("%s: %s: ignoring discards_supported because num_discard_bios is zero.",
                       dm_device_name(t->md), type);

        return 0;

 bad:
        DMERR("%s: %s: %s (%pe)", dm_device_name(t->md), type, tgt->error, ERR_PTR(r));
        dm_put_target_type(tgt->type);
        return r;
}

/*
 * Target argument parsing helpers.
 */
static int validate_next_arg(const struct dm_arg *arg,
                             struct dm_arg_set *arg_set,
                             unsigned *value, char **error, unsigned grouped)
{
        const char *arg_str = dm_shift_arg(arg_set);
        char dummy;

        if (!arg_str ||
            (sscanf(arg_str, "%u%c", value, &dummy) != 1) ||
            (*value < arg->min) ||
            (*value > arg->max) ||
            (grouped && arg_set->argc < *value)) {
                *error = arg->error;
                return -EINVAL;
        }

        return 0;
}

int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
                unsigned *value, char **error)
{
        return validate_next_arg(arg, arg_set, value, error, 0);
}
EXPORT_SYMBOL(dm_read_arg);

int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
                      unsigned *value, char **error)
{
        return validate_next_arg(arg, arg_set, value, error, 1);
}
EXPORT_SYMBOL(dm_read_arg_group);

const char *dm_shift_arg(struct dm_arg_set *as)
{
        char *r;

        if (as->argc) {
                as->argc--;
                r = *as->argv;
                as->argv++;
                return r;
        }

        return NULL;
}
EXPORT_SYMBOL(dm_shift_arg);

void dm_consume_args(struct dm_arg_set *as, unsigned num_args)
{
        BUG_ON(as->argc < num_args);
        as->argc -= num_args;
        as->argv += num_args;
}
EXPORT_SYMBOL(dm_consume_args);
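
/*
 * Editor's usage sketch (hypothetical target ctr, not from this file): a
 * target typically bounds-checks a count argument with dm_read_arg_group(),
 * which also verifies that enough grouped arguments remain, and then
 * consumes them one by one.
 *
 *	static const struct dm_arg _args[] = {
 *		{ 0, 8, "invalid number of feature args" },
 *	};
 *	struct dm_arg_set as = { .argc = argc, .argv = argv };
 *	unsigned num_features;
 *	char *error;
 *
 *	if (dm_read_arg_group(_args, &as, &num_features, &error))
 *		return -EINVAL;	// error now points at _args[0].error
 *	while (num_features--) {
 *		const char *feature = dm_shift_arg(&as);
 *		...
 *	}
 */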

static bool __table_type_bio_based(enum dm_queue_mode table_type)
{
        return (table_type == DM_TYPE_BIO_BASED ||
                table_type == DM_TYPE_DAX_BIO_BASED);
}

static bool __table_type_request_based(enum dm_queue_mode table_type)
{
        return table_type == DM_TYPE_REQUEST_BASED;
}

void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type)
{
        t->type = type;
}
EXPORT_SYMBOL_GPL(dm_table_set_type);

/* validate the dax capability of the target device span */
static int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev,
                        sector_t start, sector_t len, void *data)
{
        if (dev->dax_dev)
                return false;

        DMDEBUG("%pg: error: dax unsupported by block device", dev->bdev);
        return true;
}

/* Check devices support synchronous DAX */
static int device_not_dax_synchronous_capable(struct dm_target *ti, struct dm_dev *dev,
                                              sector_t start, sector_t len, void *data)
{
        return !dev->dax_dev || !dax_synchronous(dev->dax_dev);
}

static bool dm_table_supports_dax(struct dm_table *t,
                           iterate_devices_callout_fn iterate_fn)
{
        struct dm_target *ti;
        unsigned i;

        /* Ensure that all targets support DAX. */
        for (i = 0; i < dm_table_get_num_targets(t); i++) {
                ti = dm_table_get_target(t, i);

                if (!ti->type->direct_access)
                        return false;

                if (!ti->type->iterate_devices ||
                    ti->type->iterate_devices(ti, iterate_fn, NULL))
                        return false;
        }

        return true;
}

static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev,
                                  sector_t start, sector_t len, void *data)
{
        struct block_device *bdev = dev->bdev;
        struct request_queue *q = bdev_get_queue(bdev);

        /* request-based cannot stack on partitions! */
        if (bdev_is_partition(bdev))
                return false;

        return queue_is_mq(q);
}

static int dm_table_determine_type(struct dm_table *t)
{
        unsigned i;
        unsigned bio_based = 0, request_based = 0, hybrid = 0;
        struct dm_target *tgt;
        struct list_head *devices = dm_table_get_devices(t);
        enum dm_queue_mode live_md_type = dm_get_md_type(t->md);

        if (t->type != DM_TYPE_NONE) {
                /* target already set the table's type */
                if (t->type == DM_TYPE_BIO_BASED) {
                        /* possibly upgrade to a variant of bio-based */
                        goto verify_bio_based;
                }
                BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED);
                goto verify_rq_based;
        }

        for (i = 0; i < t->num_targets; i++) {
                tgt = t->targets + i;
                if (dm_target_hybrid(tgt))
                        hybrid = 1;
                else if (dm_target_request_based(tgt))
                        request_based = 1;
                else
                        bio_based = 1;

                if (bio_based && request_based) {
                        DMERR("Inconsistent table: different target types can't be mixed up");
                        return -EINVAL;
                }
        }

        if (hybrid && !bio_based && !request_based) {
                /*
                 * The targets can work either way.
                 * Determine the type from the live device.
                 * Default to bio-based if device is new.
                 */
                if (__table_type_request_based(live_md_type))
                        request_based = 1;
                else
                        bio_based = 1;
        }

        if (bio_based) {
verify_bio_based:
                /* We must use this table as bio-based */
                t->type = DM_TYPE_BIO_BASED;
                if (dm_table_supports_dax(t, device_not_dax_capable) ||
                    (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) {
                        t->type = DM_TYPE_DAX_BIO_BASED;
                }
                return 0;
        }

        BUG_ON(!request_based); /* No targets in this table */

        t->type = DM_TYPE_REQUEST_BASED;

verify_rq_based:
        /*
         * Request-based dm supports only tables that have a single target now.
         * To support multiple targets, request splitting support is needed,
         * and that needs lots of changes in the block-layer.
         * (e.g. request completion process for partial completion.)
         */
        if (t->num_targets > 1) {
                DMERR("request-based DM doesn't support multiple targets");
                return -EINVAL;
        }

        if (list_empty(devices)) {
                int srcu_idx;
                struct dm_table *live_table = dm_get_live_table(t->md, &srcu_idx);

                /* inherit live table's type */
                if (live_table)
                        t->type = live_table->type;
                dm_put_live_table(t->md, srcu_idx);
                return 0;
        }

        tgt = dm_table_get_immutable_target(t);
        if (!tgt) {
                DMERR("table load rejected: immutable target is required");
                return -EINVAL;
        } else if (tgt->max_io_len) {
                DMERR("table load rejected: immutable target that splits IO is not supported");
                return -EINVAL;
        }

        /* Non-request-stackable devices can't be used for request-based dm */
        if (!tgt->type->iterate_devices ||
            !tgt->type->iterate_devices(tgt, device_is_rq_stackable, NULL)) {
                DMERR("table load rejected: including non-request-stackable devices");
                return -EINVAL;
        }

        return 0;
}

enum dm_queue_mode dm_table_get_type(struct dm_table *t)
{
        return t->type;
}

struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)
{
        return t->immutable_target_type;
}

struct dm_target *dm_table_get_immutable_target(struct dm_table *t)
{
        /* Immutable target is implicitly a singleton */
        if (t->num_targets > 1 ||
            !dm_target_is_immutable(t->targets[0].type))
                return NULL;

        return t->targets;
}

struct dm_target *dm_table_get_wildcard_target(struct dm_table *t)
{
        struct dm_target *ti;
        unsigned i;

        for (i = 0; i < dm_table_get_num_targets(t); i++) {
                ti = dm_table_get_target(t, i);
                if (dm_target_is_wildcard(ti->type))
                        return ti;
        }

        return NULL;
}

bool dm_table_bio_based(struct dm_table *t)
{
        return __table_type_bio_based(dm_table_get_type(t));
}

bool dm_table_request_based(struct dm_table *t)
{
        return __table_type_request_based(dm_table_get_type(t));
}

static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
{
        enum dm_queue_mode type = dm_table_get_type(t);
        unsigned per_io_data_size = 0;
        unsigned min_pool_size = 0;
        struct dm_target *ti;
        unsigned i;

        if (unlikely(type == DM_TYPE_NONE)) {
                DMWARN("no table type is set, can't allocate mempools");
                return -EINVAL;
        }

        if (__table_type_bio_based(type))
                for (i = 0; i < t->num_targets; i++) {
                        ti = t->targets + i;
                        per_io_data_size = max(per_io_data_size, ti->per_io_data_size);
                        min_pool_size = max(min_pool_size, ti->num_flush_bios);
                }

        t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported,
                                           per_io_data_size, min_pool_size);
        if (!t->mempools)
                return -ENOMEM;

        return 0;
}

void dm_table_free_md_mempools(struct dm_table *t)
{
        dm_free_md_mempools(t->mempools);
        t->mempools = NULL;
}

struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t)
{
        return t->mempools;
}

static int setup_indexes(struct dm_table *t)
{
        int i;
        unsigned int total = 0;
        sector_t *indexes;

        /* allocate the space for *all* the indexes */
        for (i = t->depth - 2; i >= 0; i--) {
                t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
                total += t->counts[i];
        }

        indexes = kvcalloc(total, NODE_SIZE, GFP_KERNEL);
        if (!indexes)
                return -ENOMEM;

        /* set up internal nodes, bottom-up */
        for (i = t->depth - 2; i >= 0; i--) {
                t->index[i] = indexes;
                indexes += (KEYS_PER_NODE * t->counts[i]);
                setup_btree_index(i, t);
        }

        return 0;
}

/*
 * Builds the btree to index the map.
 */
static int dm_table_build_index(struct dm_table *t)
{
        int r = 0;
        unsigned int leaf_nodes;

        /* how many indexes will the btree have ? */
        leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
        t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);

        /* leaf layer has already been set up */
        t->counts[t->depth - 1] = leaf_nodes;
        t->index[t->depth - 1] = t->highs;

        if (t->depth >= 2)
                r = setup_indexes(t);

        return r;
}
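
/*
 * Editor's sizing example (assumes KEYS_PER_NODE == 8, so
 * CHILDREN_PER_NODE == 9): a table with 100 targets needs
 * dm_div_up(100, 8) == 13 leaf nodes, giving depth
 * 1 + int_log(13, 9) == 3.  setup_indexes() then sizes the internal
 * levels bottom-up as counts[1] == dm_div_up(13, 9) == 2 and
 * counts[0] == 1, allocating all three internal nodes with one
 * kvcalloc() call.
 */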

static bool integrity_profile_exists(struct gendisk *disk)
{
        return !!blk_get_integrity(disk);
}

/*
 * Get a disk whose integrity profile reflects the table's profile.
 * Returns NULL if integrity support was inconsistent or unavailable.
 */
static struct gendisk *dm_table_get_integrity_disk(struct dm_table *t)
{
        struct list_head *devices = dm_table_get_devices(t);
        struct dm_dev_internal *dd = NULL;
        struct gendisk *prev_disk = NULL, *template_disk = NULL;
        unsigned i;

        for (i = 0; i < dm_table_get_num_targets(t); i++) {
                struct dm_target *ti = dm_table_get_target(t, i);
                if (!dm_target_passes_integrity(ti->type))
                        goto no_integrity;
        }

        list_for_each_entry(dd, devices, list) {
                template_disk = dd->dm_dev->bdev->bd_disk;
                if (!integrity_profile_exists(template_disk))
                        goto no_integrity;
                else if (prev_disk &&
                         blk_integrity_compare(prev_disk, template_disk) < 0)
                        goto no_integrity;
                prev_disk = template_disk;
        }

        return template_disk;

no_integrity:
        if (prev_disk)
                DMWARN("%s: integrity not set: %s and %s profile mismatch",
                       dm_device_name(t->md),
                       prev_disk->disk_name,
                       template_disk->disk_name);
        return NULL;
}

/*
 * Register the mapped device for blk_integrity support if the
 * underlying devices have an integrity profile.  But all devices may
 * not have matching profiles (checking all devices isn't reliable
 * during table load because this table may use other DM device(s) which
 * must be resumed before they will have an initialized integrity
 * profile).  Consequently, stacked DM devices force a 2 stage integrity
 * profile validation: First pass during table load, final pass during
 * resume.
 */
static int dm_table_register_integrity(struct dm_table *t)
{
        struct mapped_device *md = t->md;
        struct gendisk *template_disk = NULL;

        /* If target handles integrity itself do not register it here. */
        if (t->integrity_added)
                return 0;

        template_disk = dm_table_get_integrity_disk(t);
        if (!template_disk)
                return 0;

        if (!integrity_profile_exists(dm_disk(md))) {
                t->integrity_supported = true;
                /*
                 * Register integrity profile during table load; we can do
                 * this because the final profile must match during resume.
                 */
                blk_integrity_register(dm_disk(md),
                                       blk_get_integrity(template_disk));
                return 0;
        }

        /*
         * If DM device already has an initialized integrity
         * profile the new profile should not conflict.
         */
        if (blk_integrity_compare(dm_disk(md), template_disk) < 0) {
                DMWARN("%s: conflict with existing integrity profile: "
                       "%s profile mismatch",
                       dm_device_name(t->md),
                       template_disk->disk_name);
                return 1;
        }

        /* Preserve existing integrity profile */
        t->integrity_supported = true;
        return 0;
}

#ifdef CONFIG_BLK_INLINE_ENCRYPTION

struct dm_crypto_profile {
        struct blk_crypto_profile profile;
        struct mapped_device *md;
};

struct dm_keyslot_evict_args {
        const struct blk_crypto_key *key;
        int err;
};

static int dm_keyslot_evict_callback(struct dm_target *ti, struct dm_dev *dev,
                                     sector_t start, sector_t len, void *data)
{
        struct dm_keyslot_evict_args *args = data;
        int err;

        err = blk_crypto_evict_key(bdev_get_queue(dev->bdev), args->key);
        if (!args->err)
                args->err = err;
        /* Always try to evict the key from all devices. */
        return 0;
}

/*
 * When an inline encryption key is evicted from a device-mapper device, evict
 * it from all the underlying devices.
 */
static int dm_keyslot_evict(struct blk_crypto_profile *profile,
                            const struct blk_crypto_key *key, unsigned int slot)
{
        struct mapped_device *md =
                container_of(profile, struct dm_crypto_profile, profile)->md;
        struct dm_keyslot_evict_args args = { key };
        struct dm_table *t;
        int srcu_idx;
        int i;
        struct dm_target *ti;

        t = dm_get_live_table(md, &srcu_idx);
        if (!t)
                return 0;
        for (i = 0; i < dm_table_get_num_targets(t); i++) {
                ti = dm_table_get_target(t, i);
                if (!ti->type->iterate_devices)
                        continue;
                ti->type->iterate_devices(ti, dm_keyslot_evict_callback, &args);
        }
        dm_put_live_table(md, srcu_idx);
        return args.err;
}

static int
device_intersect_crypto_capabilities(struct dm_target *ti, struct dm_dev *dev,
                                     sector_t start, sector_t len, void *data)
{
        struct blk_crypto_profile *parent = data;
        struct blk_crypto_profile *child =
                bdev_get_queue(dev->bdev)->crypto_profile;

        blk_crypto_intersect_capabilities(parent, child);
        return 0;
}

void dm_destroy_crypto_profile(struct blk_crypto_profile *profile)
{
        struct dm_crypto_profile *dmcp;

        if (!profile)
                return;

        dmcp = container_of(profile, struct dm_crypto_profile, profile);
        blk_crypto_profile_destroy(profile);
        kfree(dmcp);
}

static void dm_table_destroy_crypto_profile(struct dm_table *t)
{
        dm_destroy_crypto_profile(t->crypto_profile);
        t->crypto_profile = NULL;
}

/*
 * Constructs and initializes t->crypto_profile with a crypto profile that
 * represents the common set of crypto capabilities of the devices described by
 * the dm_table.  However, if the constructed crypto profile doesn't support all
 * crypto capabilities that are supported by the current mapped_device, it
 * returns an error instead, since we don't support removing crypto capabilities
 * on table changes.  Finally, if the constructed crypto profile is "empty" (has
 * no crypto capabilities at all), it just sets t->crypto_profile to NULL.
 */
static int dm_table_construct_crypto_profile(struct dm_table *t)
{
        struct dm_crypto_profile *dmcp;
        struct blk_crypto_profile *profile;
        struct dm_target *ti;
        unsigned int i;
        bool empty_profile = true;

        dmcp = kmalloc(sizeof(*dmcp), GFP_KERNEL);
        if (!dmcp)
                return -ENOMEM;
        dmcp->md = t->md;

        profile = &dmcp->profile;
        blk_crypto_profile_init(profile, 0);
        profile->ll_ops.keyslot_evict = dm_keyslot_evict;
        profile->max_dun_bytes_supported = UINT_MAX;
        memset(profile->modes_supported, 0xFF,
               sizeof(profile->modes_supported));

        for (i = 0; i < dm_table_get_num_targets(t); i++) {
                ti = dm_table_get_target(t, i);

                if (!dm_target_passes_crypto(ti->type)) {
                        blk_crypto_intersect_capabilities(profile, NULL);
                        break;
                }
                if (!ti->type->iterate_devices)
                        continue;
                ti->type->iterate_devices(ti,
                                          device_intersect_crypto_capabilities,
                                          profile);
        }

        if (t->md->queue &&
            !blk_crypto_has_capabilities(profile,
                                         t->md->queue->crypto_profile)) {
                DMWARN("Inline encryption capabilities of new DM table were more restrictive than the old table's. This is not supported!");
                dm_destroy_crypto_profile(profile);
                return -EINVAL;
        }

        /*
         * If the new profile doesn't actually support any crypto capabilities,
         * we may as well represent it with a NULL profile.
         */
        for (i = 0; i < ARRAY_SIZE(profile->modes_supported); i++) {
                if (profile->modes_supported[i]) {
                        empty_profile = false;
                        break;
                }
        }

        if (empty_profile) {
                dm_destroy_crypto_profile(profile);
                profile = NULL;
        }

        /*
         * t->crypto_profile is only set temporarily while the table is being
         * set up, and it gets set to NULL after the profile has been
         * transferred to the request_queue.
         */
        t->crypto_profile = profile;

        return 0;
}

static void dm_update_crypto_profile(struct request_queue *q,
                                     struct dm_table *t)
{
        if (!t->crypto_profile)
                return;

        /* Make the crypto profile less restrictive. */
        if (!q->crypto_profile) {
                blk_crypto_register(t->crypto_profile, q);
        } else {
                blk_crypto_update_capabilities(q->crypto_profile,
                                               t->crypto_profile);
                dm_destroy_crypto_profile(t->crypto_profile);
        }
        t->crypto_profile = NULL;
}

#else /* CONFIG_BLK_INLINE_ENCRYPTION */

static int dm_table_construct_crypto_profile(struct dm_table *t)
{
        return 0;
}

void dm_destroy_crypto_profile(struct blk_crypto_profile *profile)
{
}

static void dm_table_destroy_crypto_profile(struct dm_table *t)
{
}

static void dm_update_crypto_profile(struct request_queue *q,
                                     struct dm_table *t)
{
}

#endif /* !CONFIG_BLK_INLINE_ENCRYPTION */

/*
 * Prepares the table for use by building the indices,
 * setting the type, and allocating mempools.
 */
int dm_table_complete(struct dm_table *t)
{
        int r;

        r = dm_table_determine_type(t);
        if (r) {
                DMERR("unable to determine table type");
                return r;
        }

        r = dm_table_build_index(t);
        if (r) {
                DMERR("unable to build btrees");
                return r;
        }

        r = dm_table_register_integrity(t);
        if (r) {
                DMERR("could not register integrity profile.");
                return r;
        }

        r = dm_table_construct_crypto_profile(t);
        if (r) {
                DMERR("could not construct crypto profile.");
                return r;
        }

        r = dm_table_alloc_md_mempools(t, t->md);
        if (r)
                DMERR("unable to allocate mempools");

        return r;
}

static DEFINE_MUTEX(_event_lock);
void dm_table_event_callback(struct dm_table *t,
                             void (*fn)(void *), void *context)
{
        mutex_lock(&_event_lock);
        t->event_fn = fn;
        t->event_context = context;
        mutex_unlock(&_event_lock);
}

void dm_table_event(struct dm_table *t)
{
        mutex_lock(&_event_lock);
        if (t->event_fn)
                t->event_fn(t->event_context);
        mutex_unlock(&_event_lock);
}
EXPORT_SYMBOL(dm_table_event);

inline sector_t dm_table_get_size(struct dm_table *t)
{
        return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
}
EXPORT_SYMBOL(dm_table_get_size);

struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
{
        if (index >= t->num_targets)
                return NULL;

        return t->targets + index;
}

/*
 * Search the btree for the correct target.
 *
 * Caller should check returned pointer for NULL
 * to trap I/O beyond end of device.
 */
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
{
        unsigned int l, n = 0, k = 0;
        sector_t *node;

        if (unlikely(sector >= dm_table_get_size(t)))
                return NULL;

        for (l = 0; l < t->depth; l++) {
                n = get_child(n, k);
                node = get_node(t, l, n);

                for (k = 0; k < KEYS_PER_NODE; k++)
                        if (node[k] >= sector)
                                break;
        }

        return &t->targets[(KEYS_PER_NODE * n) + k];
}
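
/*
 * Editor's lookup walk (assumes KEYS_PER_NODE == 8 and a depth-1 table,
 * i.e. the leaf level is t->highs itself): for targets covering sectors
 * [0,99], [100,199] and [200,299], highs holds { 99, 199, 299, -1, ... }.
 * Looking up sector 150 scans the single leaf node, stops at k == 1
 * because 199 >= 150, and returns &t->targets[1].  Deeper tables repeat
 * the same scan once per level, following get_child(n, k) downwards.
 */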

/*
 * type->iterate_devices() should be called when the sanity check needs to
 * iterate and check all underlying data devices. iterate_devices() will
 * iterate all underlying data devices until it encounters a non-zero return
 * code, returned either by the supplied iterate_devices_callout_fn or by
 * iterate_devices() itself internally.
 *
 * For some target type (e.g. dm-stripe), one call of iterate_devices() may
 * iterate multiple underlying devices internally, in which case a non-zero
 * return code returned by iterate_devices_callout_fn will stop the iteration
 * in advance.
 *
 * Cases requiring _any_ underlying device supporting some kind of attribute
 * should use an iteration structure like dm_table_any_dev_attr(), or call
 * it directly. @func should handle semantics of positive examples, e.g.
 * capable of something.
 *
 * Cases requiring _all_ underlying devices supporting some kind of attribute
 * should use an iteration structure like dm_table_supports_nowait() or
 * dm_table_supports_discards(). Or introduce dm_table_all_devs_attr() that
 * uses an @anti_func that handles semantics of counter examples, e.g. not
 * capable of something. So: return !dm_table_any_dev_attr(t, anti_func, data);
 */
static bool dm_table_any_dev_attr(struct dm_table *t,
                                  iterate_devices_callout_fn func, void *data)
{
        struct dm_target *ti;
        unsigned int i;

        for (i = 0; i < dm_table_get_num_targets(t); i++) {
                ti = dm_table_get_target(t, i);

                if (ti->type->iterate_devices &&
                    ti->type->iterate_devices(ti, func, data))
                        return true;
        }

        return false;
}

static int count_device(struct dm_target *ti, struct dm_dev *dev,
                        sector_t start, sector_t len, void *data)
{
        unsigned *num_devices = data;

        (*num_devices)++;

        return 0;
}

/*
 * Check whether a table has no data devices attached using each
 * target's iterate_devices method.
 * Returns false if the result is unknown because a target doesn't
 * support iterate_devices.
 */
bool dm_table_has_no_data_devices(struct dm_table *table)
{
        struct dm_target *ti;
        unsigned i, num_devices;

        for (i = 0; i < dm_table_get_num_targets(table); i++) {
                ti = dm_table_get_target(table, i);

                if (!ti->type->iterate_devices)
                        return false;

                num_devices = 0;
                ti->type->iterate_devices(ti, count_device, &num_devices);
                if (num_devices)
                        return false;
        }

        return true;
}

static int device_not_zoned_model(struct dm_target *ti, struct dm_dev *dev,
                                  sector_t start, sector_t len, void *data)
{
        struct request_queue *q = bdev_get_queue(dev->bdev);
        enum blk_zoned_model *zoned_model = data;

        return blk_queue_zoned_model(q) != *zoned_model;
}

/*
 * Check the device zoned model based on the target feature flag. If the target
 * has the DM_TARGET_ZONED_HM feature flag set, host-managed zoned devices are
 * also accepted but all devices must have the same zoned model. If the target
 * has the DM_TARGET_MIXED_ZONED_MODEL feature set, the devices can have any
 * zoned model with all zoned devices having the same zone size.
 */
static bool dm_table_supports_zoned_model(struct dm_table *t,
                                          enum blk_zoned_model zoned_model)
{
        struct dm_target *ti;
        unsigned i;

        for (i = 0; i < dm_table_get_num_targets(t); i++) {
                ti = dm_table_get_target(t, i);

                if (dm_target_supports_zoned_hm(ti->type)) {
                        if (!ti->type->iterate_devices ||
                            ti->type->iterate_devices(ti, device_not_zoned_model,
                                                      &zoned_model))
                                return false;
                } else if (!dm_target_supports_mixed_zoned_model(ti->type)) {
                        if (zoned_model == BLK_ZONED_HM)
                                return false;
                }
        }

        return true;
}

static int device_not_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev,
                                           sector_t start, sector_t len, void *data)
{
        struct request_queue *q = bdev_get_queue(dev->bdev);
        unsigned int *zone_sectors = data;

        if (!blk_queue_is_zoned(q))
                return 0;

        return blk_queue_zone_sectors(q) != *zone_sectors;
}

/*
 * Check consistency of zoned model and zone sectors across all targets. For
 * zone sectors, if the destination device is a zoned block device, it shall
 * have the specified zone_sectors.
 */
static int validate_hardware_zoned_model(struct dm_table *table,
                                         enum blk_zoned_model zoned_model,
                                         unsigned int zone_sectors)
{
        if (zoned_model == BLK_ZONED_NONE)
                return 0;

        if (!dm_table_supports_zoned_model(table, zoned_model)) {
                DMERR("%s: zoned model is not consistent across all devices",
                      dm_device_name(table->md));
                return -EINVAL;
        }

        /* Check zone size validity and compatibility */
        if (!zone_sectors || !is_power_of_2(zone_sectors))
                return -EINVAL;

        if (dm_table_any_dev_attr(table, device_not_matches_zone_sectors, &zone_sectors)) {
                DMERR("%s: zone sectors is not consistent across all zoned devices",
                      dm_device_name(table->md));
                return -EINVAL;
        }

        return 0;
}
1643
1644 /*
1645  * Establish the new table's queue_limits and validate them.
1646  */
1647 int dm_calculate_queue_limits(struct dm_table *table,
1648                               struct queue_limits *limits)
1649 {
1650         struct dm_target *ti;
1651         struct queue_limits ti_limits;
1652         unsigned i;
1653         enum blk_zoned_model zoned_model = BLK_ZONED_NONE;
1654         unsigned int zone_sectors = 0;
1655
1656         blk_set_stacking_limits(limits);
1657
1658         for (i = 0; i < dm_table_get_num_targets(table); i++) {
1659                 blk_set_stacking_limits(&ti_limits);
1660
1661                 ti = dm_table_get_target(table, i);
1662
1663                 if (!ti->type->iterate_devices)
1664                         goto combine_limits;
1665
1666                 /*
1667                  * Combine queue limits of all the devices this target uses.
1668                  */
1669                 ti->type->iterate_devices(ti, dm_set_device_limits,
1670                                           &ti_limits);
1671
1672                 if (zoned_model == BLK_ZONED_NONE && ti_limits.zoned != BLK_ZONED_NONE) {
1673                         /*
1674                          * After stacking all limits, validate that all devices
1675                          * in the table support this zoned model and zone sectors.
1676                          */
1677                         zoned_model = ti_limits.zoned;
1678                         zone_sectors = ti_limits.chunk_sectors;
1679                 }
1680
1681                 /* Set I/O hints portion of queue limits */
1682                 if (ti->type->io_hints)
1683                         ti->type->io_hints(ti, &ti_limits);
1684
1685                 /*
1686                  * Check each device area is consistent with the target's
1687                  * overall queue limits.
1688                  */
1689                 if (ti->type->iterate_devices(ti, device_area_is_invalid,
1690                                               &ti_limits))
1691                         return -EINVAL;
1692
1693 combine_limits:
1694                 /*
1695                  * Merge this target's queue limits into the overall limits
1696                  * for the table.
1697                  */
1698                 if (blk_stack_limits(limits, &ti_limits, 0) < 0)
1699                         DMWARN("%s: adding target device "
1700                                "(start sect %llu len %llu) "
1701                                "caused an alignment inconsistency",
1702                                dm_device_name(table->md),
1703                                (unsigned long long) ti->begin,
1704                                (unsigned long long) ti->len);
1705         }
1706
1707         /*
1708          * Verify that the zoned model and zone sectors, as determined before
1709          * any .io_hints override, are the same across all devices in the table.
1710          * - this is especially relevant if .io_hints is emulating a drive-managed
1711          *   zoned model (aka BLK_ZONED_NONE) on host-managed zoned block devices.
1712          * BUT...
1713          */
1714         if (limits->zoned != BLK_ZONED_NONE) {
1715                 /*
1716                  * ...IF the above limits stacking determined a zoned model
1717                  * validate that all of the table's devices conform to it.
1718                  */
1719                 zoned_model = limits->zoned;
1720                 zone_sectors = limits->chunk_sectors;
1721         }
1722         if (validate_hardware_zoned_model(table, zoned_model, zone_sectors))
1723                 return -EINVAL;
1724
1725         return validate_hardware_logical_block_alignment(table, limits);
1726 }
1727
1728 /*
1729  * Verify that all devices have an integrity profile that matches the
1730  * DM device's registered integrity profile.  If the profiles don't
1731  * match then unregister the DM device's integrity profile.
1732  */
1733 static void dm_table_verify_integrity(struct dm_table *t)
1734 {
1735         struct gendisk *template_disk = NULL;
1736
1737         if (t->integrity_added)
1738                 return;
1739
1740         if (t->integrity_supported) {
1741                 /*
1742                  * Verify that the original integrity profile
1743                  * matches all the devices in this table.
1744                  */
1745                 template_disk = dm_table_get_integrity_disk(t);
1746                 if (template_disk &&
1747                     blk_integrity_compare(dm_disk(t->md), template_disk) >= 0)
1748                         return;
1749         }
1750
1751         if (integrity_profile_exists(dm_disk(t->md))) {
1752                 DMWARN("%s: unable to establish an integrity profile",
1753                        dm_device_name(t->md));
1754                 blk_integrity_unregister(dm_disk(t->md));
1755         }
1756 }
1757
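/*
 * iterate_devices callout: returns non-zero if @dev's queue has any of the
 * queue flags passed in @data set (used with QUEUE_FLAG_WC and QUEUE_FLAG_FUA).
 */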
1758 static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
1759                                 sector_t start, sector_t len, void *data)
1760 {
1761         unsigned long flush = (unsigned long) data;
1762         struct request_queue *q = bdev_get_queue(dev->bdev);
1763
1764         return (q->queue_flags & flush);
1765 }
1766
1767 static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush)
1768 {
1769         struct dm_target *ti;
1770         unsigned i;
1771
1772         /*
1773          * Require at least one underlying device to support flushes.
1774          * t->devices includes internal dm devices such as mirror logs
1775          * so we need to use iterate_devices here, which targets
1776          * supporting flushes must provide.
1777          */
1778         for (i = 0; i < dm_table_get_num_targets(t); i++) {
1779                 ti = dm_table_get_target(t, i);
1780
1781                 if (!ti->num_flush_bios)
1782                         continue;
1783
1784                 if (ti->flush_supported)
1785                         return true;
1786
1787                 if (ti->type->iterate_devices &&
1788                     ti->type->iterate_devices(ti, device_flush_capable, (void *) flush))
1789                         return true;
1790         }
1791
1792         return false;
1793 }
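/*
 * Example (illustrative only, not taken from any specific target): a
 * hypothetical target that implements flushes itself would typically set,
 * in its ctr,
 *
 *	ti->num_flush_bios = 1;
 *	ti->flush_supported = true;
 *
 * which lets dm_table_supports_flush() succeed without consulting the
 * underlying devices.
 */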
1794
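/*
 * iterate_devices callout: returns true if @dev has a DAX device with its
 * write cache enabled.
 */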
1795 static int device_dax_write_cache_enabled(struct dm_target *ti,
1796                                           struct dm_dev *dev, sector_t start,
1797                                           sector_t len, void *data)
1798 {
1799         struct dax_device *dax_dev = dev->dax_dev;
1800
1801         if (!dax_dev)
1802                 return false;
1803
1804         if (dax_write_cache_enabled(dax_dev))
1805                 return true;
1806         return false;
1807 }
1808
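/* iterate_devices callout: returns non-zero if @dev is a rotational device. */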
1809 static int device_is_rotational(struct dm_target *ti, struct dm_dev *dev,
1810                                 sector_t start, sector_t len, void *data)
1811 {
1812         struct request_queue *q = bdev_get_queue(dev->bdev);
1813
1814         return !blk_queue_nonrot(q);
1815 }
1816
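/*
 * iterate_devices callout: returns non-zero if @dev's I/O timings do not
 * contribute to the random entropy pool.
 */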
1817 static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
1818                              sector_t start, sector_t len, void *data)
1819 {
1820         struct request_queue *q = bdev_get_queue(dev->bdev);
1821
1822         return !blk_queue_add_random(q);
1823 }
1824
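/* iterate_devices callout: returns non-zero if @dev lacks WRITE SAME support. */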
1825 static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
1826                                          sector_t start, sector_t len, void *data)
1827 {
1828         struct request_queue *q = bdev_get_queue(dev->bdev);
1829
1830         return !q->limits.max_write_same_sectors;
1831 }
1832
1833 static bool dm_table_supports_write_same(struct dm_table *t)
1834 {
1835         struct dm_target *ti;
1836         unsigned i;
1837
1838         for (i = 0; i < dm_table_get_num_targets(t); i++) {
1839                 ti = dm_table_get_target(t, i);
1840
1841                 if (!ti->num_write_same_bios)
1842                         return false;
1843
1844                 if (!ti->type->iterate_devices ||
1845                     ti->type->iterate_devices(ti, device_not_write_same_capable, NULL))
1846                         return false;
1847         }
1848
1849         return true;
1850 }
1851
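/* iterate_devices callout: returns non-zero if @dev cannot handle WRITE ZEROES. */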
1852 static int device_not_write_zeroes_capable(struct dm_target *ti, struct dm_dev *dev,
1853                                            sector_t start, sector_t len, void *data)
1854 {
1855         struct request_queue *q = bdev_get_queue(dev->bdev);
1856
1857         return !q->limits.max_write_zeroes_sectors;
1858 }
1859
1860 static bool dm_table_supports_write_zeroes(struct dm_table *t)
1861 {
1862         struct dm_target *ti;
1863         unsigned i = 0;
1864
1865         while (i < dm_table_get_num_targets(t)) {
1866                 ti = dm_table_get_target(t, i++);
1867
1868                 if (!ti->num_write_zeroes_bios)
1869                         return false;
1870
1871                 if (!ti->type->iterate_devices ||
1872                     ti->type->iterate_devices(ti, device_not_write_zeroes_capable, NULL))
1873                         return false;
1874         }
1875
1876         return true;
1877 }
1878
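/* iterate_devices callout: returns non-zero if @dev does not support REQ_NOWAIT. */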
1879 static int device_not_nowait_capable(struct dm_target *ti, struct dm_dev *dev,
1880                                      sector_t start, sector_t len, void *data)
1881 {
1882         struct request_queue *q = bdev_get_queue(dev->bdev);
1883
1884         return !blk_queue_nowait(q);
1885 }
1886
1887 static bool dm_table_supports_nowait(struct dm_table *t)
1888 {
1889         struct dm_target *ti;
1890         unsigned i = 0;
1891
1892         while (i < dm_table_get_num_targets(t)) {
1893                 ti = dm_table_get_target(t, i++);
1894
1895                 if (!dm_target_supports_nowait(ti->type))
1896                         return false;
1897
1898                 if (!ti->type->iterate_devices ||
1899                     ti->type->iterate_devices(ti, device_not_nowait_capable, NULL))
1900                         return false;
1901         }
1902
1903         return true;
1904 }
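/*
 * Example (illustrative only): a target opts in to the nowait check above
 * by declaring the DM_TARGET_NOWAIT feature in its target_type, e.g.
 *
 *	static struct target_type example_target = {
 *		.name     = "example",
 *		.features = DM_TARGET_NOWAIT,
 *		...
 *	};
 *
 * where "example" is a hypothetical target name.
 */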
1905
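/* iterate_devices callout: returns non-zero if @dev does not support discards. */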
1906 static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev,
1907                                       sector_t start, sector_t len, void *data)
1908 {
1909         struct request_queue *q = bdev_get_queue(dev->bdev);
1910
1911         return !blk_queue_discard(q);
1912 }
1913
1914 static bool dm_table_supports_discards(struct dm_table *t)
1915 {
1916         struct dm_target *ti;
1917         unsigned i;
1918
1919         for (i = 0; i < dm_table_get_num_targets(t); i++) {
1920                 ti = dm_table_get_target(t, i);
1921
1922                 if (!ti->num_discard_bios)
1923                         return false;
1924
1925                 /*
1926                  * Either the target provides discard support (as implied by setting
1927                  * 'discards_supported') or it relies on _all_ data devices having
1928                  * discard support.
1929                  */
1930                 if (!ti->discards_supported &&
1931                     (!ti->type->iterate_devices ||
1932                      ti->type->iterate_devices(ti, device_not_discard_capable, NULL)))
1933                         return false;
1934         }
1935
1936         return true;
1937 }
1938
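/* iterate_devices callout: returns non-zero if @dev lacks secure erase support. */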
1939 static int device_not_secure_erase_capable(struct dm_target *ti,
1940                                            struct dm_dev *dev, sector_t start,
1941                                            sector_t len, void *data)
1942 {
1943         struct request_queue *q = bdev_get_queue(dev->bdev);
1944
1945         return !blk_queue_secure_erase(q);
1946 }
1947
1948 static bool dm_table_supports_secure_erase(struct dm_table *t)
1949 {
1950         struct dm_target *ti;
1951         unsigned int i;
1952
1953         for (i = 0; i < dm_table_get_num_targets(t); i++) {
1954                 ti = dm_table_get_target(t, i);
1955
1956                 if (!ti->num_secure_erase_bios)
1957                         return false;
1958
1959                 if (!ti->type->iterate_devices ||
1960                     ti->type->iterate_devices(ti, device_not_secure_erase_capable, NULL))
1961                         return false;
1962         }
1963
1964         return true;
1965 }
1966
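/*
 * iterate_devices callout: returns non-zero if @dev requires pages to stay
 * stable while they are under writeback.
 */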
1967 static int device_requires_stable_pages(struct dm_target *ti,
1968                                         struct dm_dev *dev, sector_t start,
1969                                         sector_t len, void *data)
1970 {
1971         struct request_queue *q = bdev_get_queue(dev->bdev);
1972
1973         return blk_queue_stable_writes(q);
1974 }
1975
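/*
 * Apply the table's queue_limits and the feature flags derived from its
 * targets and their underlying devices (nowait, discard, secure erase,
 * flush/FUA, DAX, rotational, stable writes, entropy, zoned) to the
 * mapped device's request_queue.
 */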
1976 int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
1977                               struct queue_limits *limits)
1978 {
1979         bool wc = false, fua = false;
1980         int r;
1981
1982         /*
1983          * Copy table's limits to the DM device's request_queue
1984          */
1985         q->limits = *limits;
1986
1987         if (dm_table_supports_nowait(t))
1988                 blk_queue_flag_set(QUEUE_FLAG_NOWAIT, q);
1989         else
1990                 blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, q);
1991
1992         if (!dm_table_supports_discards(t)) {
1993                 blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
1994                 /* Must also clear discard limits... */
1995                 q->limits.max_discard_sectors = 0;
1996                 q->limits.max_hw_discard_sectors = 0;
1997                 q->limits.discard_granularity = 0;
1998                 q->limits.discard_alignment = 0;
1999                 q->limits.discard_misaligned = 0;
2000         } else
2001                 blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
2002
2003         if (dm_table_supports_secure_erase(t))
2004                 blk_queue_flag_set(QUEUE_FLAG_SECERASE, q);
2005
2006         if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
2007                 wc = true;
2008                 if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_FUA)))
2009                         fua = true;
2010         }
2011         blk_queue_write_cache(q, wc, fua);
2012
2013         if (dm_table_supports_dax(t, device_not_dax_capable)) {
2014                 blk_queue_flag_set(QUEUE_FLAG_DAX, q);
2015                 if (dm_table_supports_dax(t, device_not_dax_synchronous_capable))
2016                         set_dax_synchronous(t->md->dax_dev);
2017         } else
2018                 blk_queue_flag_clear(QUEUE_FLAG_DAX, q);
2020
2021         if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled, NULL))
2022                 dax_write_cache(t->md->dax_dev, true);
2023
2024         /* Ensure that all underlying devices are non-rotational. */
2025         if (dm_table_any_dev_attr(t, device_is_rotational, NULL))
2026                 blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
2027         else
2028                 blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
2029
2030         if (!dm_table_supports_write_same(t))
2031                 q->limits.max_write_same_sectors = 0;
2032         if (!dm_table_supports_write_zeroes(t))
2033                 q->limits.max_write_zeroes_sectors = 0;
2034
2035         dm_table_verify_integrity(t);
2036
2037         /*
2038          * Some devices don't use blk_integrity but still want stable pages
2039          * because they do their own checksumming.
2040          * If any underlying device requires stable pages, a table must require
2041          * them as well.  Only targets that support iterate_devices are considered:
2042          * don't want error, zero, etc to require stable pages.
2043          * we don't want error, zero, etc. to require stable pages.
2044         if (dm_table_any_dev_attr(t, device_requires_stable_pages, NULL))
2045                 blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
2046         else
2047                 blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, q);
2048
2049         /*
2050          * Determine whether or not this queue's I/O timings contribute
2051          * to the entropy pool. Only request-based targets use this.
2052          * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
2053          * have it set.
2054          */
2055         if (blk_queue_add_random(q) &&
2056             dm_table_any_dev_attr(t, device_is_not_random, NULL))
2057                 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
2058
2059         /*
2060          * For a zoned target, set up the zone-related queue attributes
2061          * and, if needed, the resources for zone append emulation.
2062          */
2063         if (blk_queue_is_zoned(q)) {
2064                 r = dm_set_zones_restrictions(t, q);
2065                 if (r)
2066                         return r;
2067         }
2068
2069         dm_update_crypto_profile(q, t);
2070         disk_update_readahead(t->md->disk);
2071
2072         return 0;
2073 }
2074
2075 unsigned int dm_table_get_num_targets(struct dm_table *t)
2076 {
2077         return t->num_targets;
2078 }
2079
2080 struct list_head *dm_table_get_devices(struct dm_table *t)
2081 {
2082         return &t->devices;
2083 }
2084
2085 fmode_t dm_table_get_mode(struct dm_table *t)
2086 {
2087         return t->mode;
2088 }
2089 EXPORT_SYMBOL(dm_table_get_mode);
2090
2091 enum suspend_mode {
2092         PRESUSPEND,
2093         PRESUSPEND_UNDO,
2094         POSTSUSPEND,
2095 };
2096
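/*
 * Invoke the presuspend, presuspend_undo or postsuspend hook of every
 * target in the table, skipping targets that do not implement the hook.
 */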
2097 static void suspend_targets(struct dm_table *t, enum suspend_mode mode)
2098 {
2099         int i = t->num_targets;
2100         struct dm_target *ti = t->targets;
2101
2102         lockdep_assert_held(&t->md->suspend_lock);
2103
2104         while (i--) {
2105                 switch (mode) {
2106                 case PRESUSPEND:
2107                         if (ti->type->presuspend)
2108                                 ti->type->presuspend(ti);
2109                         break;
2110                 case PRESUSPEND_UNDO:
2111                         if (ti->type->presuspend_undo)
2112                                 ti->type->presuspend_undo(ti);
2113                         break;
2114                 case POSTSUSPEND:
2115                         if (ti->type->postsuspend)
2116                                 ti->type->postsuspend(ti);
2117                         break;
2118                 }
2119                 ti++;
2120         }
2121 }
2122
2123 void dm_table_presuspend_targets(struct dm_table *t)
2124 {
2125         if (!t)
2126                 return;
2127
2128         suspend_targets(t, PRESUSPEND);
2129 }
2130
2131 void dm_table_presuspend_undo_targets(struct dm_table *t)
2132 {
2133         if (!t)
2134                 return;
2135
2136         suspend_targets(t, PRESUSPEND_UNDO);
2137 }
2138
2139 void dm_table_postsuspend_targets(struct dm_table *t)
2140 {
2141         if (!t)
2142                 return;
2143
2144         suspend_targets(t, POSTSUSPEND);
2145 }
2146
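/*
 * Resume in two passes: first give every target's preresume hook a chance
 * to fail the resume, then invoke the resume hooks proper.
 */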
2147 int dm_table_resume_targets(struct dm_table *t)
2148 {
2149         int i, r = 0;
2150
2151         lockdep_assert_held(&t->md->suspend_lock);
2152
2153         for (i = 0; i < t->num_targets; i++) {
2154                 struct dm_target *ti = t->targets + i;
2155
2156                 if (!ti->type->preresume)
2157                         continue;
2158
2159                 r = ti->type->preresume(ti);
2160                 if (r) {
2161                         DMERR("%s: %s: preresume failed, error = %d",
2162                               dm_device_name(t->md), ti->type->name, r);
2163                         return r;
2164                 }
2165         }
2166
2167         for (i = 0; i < t->num_targets; i++) {
2168                 struct dm_target *ti = t->targets + i;
2169
2170                 if (ti->type->resume)
2171                         ti->type->resume(ti);
2172         }
2173
2174         return 0;
2175 }
2176
2177 struct mapped_device *dm_table_get_md(struct dm_table *t)
2178 {
2179         return t->md;
2180 }
2181 EXPORT_SYMBOL(dm_table_get_md);
2182
2183 const char *dm_table_device_name(struct dm_table *t)
2184 {
2185         return dm_device_name(t->md);
2186 }
2187 EXPORT_SYMBOL_GPL(dm_table_device_name);
2188
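/*
 * For request-based tables, asynchronously kick the mapped device's blk-mq
 * hardware queues. This is a no-op for bio-based tables.
 */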
2189 void dm_table_run_md_queue_async(struct dm_table *t)
2190 {
2191         if (!dm_table_request_based(t))
2192                 return;
2193
2194         if (t->md->queue)
2195                 blk_mq_run_hw_queues(t->md->queue, true);
2196 }
2197 EXPORT_SYMBOL(dm_table_run_md_queue_async);
2198