drivers/md/dm-table.c
/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>
#include <linux/mount.h>
#include <linux/dax.h>

#define DM_MSG_PREFIX "table"

#define NODE_SIZE L1_CACHE_BYTES
#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)

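/*
 * Illustrative sizing (an assumption, not normative): with a 64-byte L1
 * cache line and an 8-byte sector_t, KEYS_PER_NODE = 64 / 8 = 8 and
 * CHILDREN_PER_NODE = 9, i.e. each btree node holds 8 keys and fans out
 * to 9 children.
 */
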
/*
 * Similar to ceiling(log_base(n)): returns the number of btree levels
 * needed to index n leaf nodes with the given fan-out.
 */
static unsigned int int_log(unsigned int n, unsigned int base)
{
        int result = 0;

        while (n > 1) {
                n = dm_div_up(n, base);
                result++;
        }

        return result;
}

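/*
 * Example (illustrative): int_log(64, 8) divides 64 -> 8 -> 1 and
 * returns 2, matching ceil(log8(64)); because dm_div_up() rounds up,
 * non-exact inputs round toward the next level, e.g. int_log(65, 8)
 * returns 3.
 */
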
/*
 * Calculate the index of the child node of the n'th node k'th key.
 */
static inline unsigned int get_child(unsigned int n, unsigned int k)
{
        return (n * CHILDREN_PER_NODE) + k;
}

/*
 * Return the n'th node of level l from table t.
 */
static inline sector_t *get_node(struct dm_table *t,
                                 unsigned int l, unsigned int n)
{
        return t->index[l] + (n * KEYS_PER_NODE);
}

/*
 * Return the highest key that you could lookup from the n'th
 * node on level l of the btree.
 */
static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
{
        for (; l < t->depth - 1; l++)
                n = get_child(n, CHILDREN_PER_NODE - 1);

        if (n >= t->counts[l])
                return (sector_t) - 1;

        return get_node(t, l, n)[KEYS_PER_NODE - 1];
}

/*
 * Fills in a level of the btree based on the highs of the level
 * below it.
 */
static int setup_btree_index(unsigned int l, struct dm_table *t)
{
        unsigned int n, k;
        sector_t *node;

        for (n = 0U; n < t->counts[l]; n++) {
                node = get_node(t, l, n);

                for (k = 0U; k < KEYS_PER_NODE; k++)
                        node[k] = high(t, l + 1, get_child(n, k));
        }

        return 0;
}

/*
 * highs and targets are managed as dynamic arrays during a
 * table load.
 */
static int alloc_targets(struct dm_table *t, unsigned int num)
{
        sector_t *n_highs;
        struct dm_target *n_targets;

        /*
         * Allocate both the target array and offset array at once.
         */
        n_highs = kvcalloc(num, sizeof(struct dm_target) + sizeof(sector_t),
                           GFP_KERNEL);
        if (!n_highs)
                return -ENOMEM;

        n_targets = (struct dm_target *) (n_highs + num);

        memset(n_highs, -1, sizeof(*n_highs) * num);
        kvfree(t->highs);

        t->num_allocated = num;
        t->highs = n_highs;
        t->targets = n_targets;

        return 0;
}

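/*
 * Layout of the single allocation above (shown for clarity): num
 * sector_t "highs" slots followed immediately by num dm_target slots.
 * The memset of -1 makes every unused high equal to (sector_t)-1, the
 * maximum sector value, so unfilled slots sort after any real target.
 */
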
int dm_table_create(struct dm_table **result, fmode_t mode,
                    unsigned num_targets, struct mapped_device *md)
{
        struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);

        if (!t)
                return -ENOMEM;

        INIT_LIST_HEAD(&t->devices);

        if (!num_targets)
                num_targets = KEYS_PER_NODE;

        num_targets = dm_round_up(num_targets, KEYS_PER_NODE);

        if (!num_targets) {
                kfree(t);
                return -ENOMEM;
        }

        if (alloc_targets(t, num_targets)) {
                kfree(t);
                return -ENOMEM;
        }

        t->type = DM_TYPE_NONE;
        t->mode = mode;
        t->md = md;
        *result = t;
        return 0;
}

static void free_devices(struct list_head *devices, struct mapped_device *md)
{
        struct list_head *tmp, *next;

        list_for_each_safe(tmp, next, devices) {
                struct dm_dev_internal *dd =
                    list_entry(tmp, struct dm_dev_internal, list);
                DMWARN("%s: dm_table_destroy: dm_put_device call missing for %s",
                       dm_device_name(md), dd->dm_dev->name);
                dm_put_table_device(md, dd->dm_dev);
                kfree(dd);
        }
}

static void dm_table_destroy_crypto_profile(struct dm_table *t);

void dm_table_destroy(struct dm_table *t)
{
        unsigned int i;

        if (!t)
                return;

        /* free the indexes */
        if (t->depth >= 2)
                kvfree(t->index[t->depth - 2]);

        /* free the targets */
        for (i = 0; i < t->num_targets; i++) {
                struct dm_target *tgt = t->targets + i;

                if (tgt->type->dtr)
                        tgt->type->dtr(tgt);

                dm_put_target_type(tgt->type);
        }

        kvfree(t->highs);

        /* free the device list */
        free_devices(&t->devices, t->md);

        dm_free_md_mempools(t->mempools);

        dm_table_destroy_crypto_profile(t);

        kfree(t);
}

/*
 * See if we've already got a device in the list.
 */
static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
{
        struct dm_dev_internal *dd;

        list_for_each_entry(dd, l, list)
                if (dd->dm_dev->bdev->bd_dev == dev)
                        return dd;

        return NULL;
}

/*
 * If possible, this checks whether an area of the destination device
 * is invalid.
 */
static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
                                  sector_t start, sector_t len, void *data)
{
        struct queue_limits *limits = data;
        struct block_device *bdev = dev->bdev;
        sector_t dev_size = bdev_nr_sectors(bdev);
        unsigned short logical_block_size_sectors =
                limits->logical_block_size >> SECTOR_SHIFT;

        if (!dev_size)
                return 0;

        if ((start >= dev_size) || (start + len > dev_size)) {
                DMWARN("%s: %pg too small for target: "
                       "start=%llu, len=%llu, dev_size=%llu",
                       dm_device_name(ti->table->md), bdev,
                       (unsigned long long)start,
                       (unsigned long long)len,
                       (unsigned long long)dev_size);
                return 1;
        }

        /*
         * If the target is mapped to zoned block device(s), check
         * that the zones are not partially mapped.
         */
        if (bdev_is_zoned(bdev)) {
                unsigned int zone_sectors = bdev_zone_sectors(bdev);

                if (start & (zone_sectors - 1)) {
                        DMWARN("%s: start=%llu not aligned to h/w zone size %u of %pg",
                               dm_device_name(ti->table->md),
                               (unsigned long long)start,
                               zone_sectors, bdev);
                        return 1;
                }

                /*
                 * Note: The last zone of a zoned block device may be smaller
                 * than other zones. So for a target mapping the end of a
                 * zoned block device with such a zone, len would not be zone
                 * aligned. We do not allow such last smaller zone to be part
                 * of the mapping here to ensure that mappings with multiple
                 * devices do not end up with a smaller zone in the middle of
                 * the sector range.
                 */
                if (len & (zone_sectors - 1)) {
                        DMWARN("%s: len=%llu not aligned to h/w zone size %u of %pg",
                               dm_device_name(ti->table->md),
                               (unsigned long long)len,
                               zone_sectors, bdev);
                        return 1;
                }
        }

        if (logical_block_size_sectors <= 1)
                return 0;

        if (start & (logical_block_size_sectors - 1)) {
                DMWARN("%s: start=%llu not aligned to h/w "
                       "logical block size %u of %pg",
                       dm_device_name(ti->table->md),
                       (unsigned long long)start,
                       limits->logical_block_size, bdev);
                return 1;
        }

        if (len & (logical_block_size_sectors - 1)) {
                DMWARN("%s: len=%llu not aligned to h/w "
                       "logical block size %u of %pg",
                       dm_device_name(ti->table->md),
                       (unsigned long long)len,
                       limits->logical_block_size, bdev);
                return 1;
        }

        return 0;
}

/*
 * This upgrades the mode on an already open dm_dev, being
 * careful to leave things as they were if we fail to reopen the
 * device and not to touch the existing bdev field in case
 * it is accessed concurrently.
 */
static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
                        struct mapped_device *md)
{
        int r;
        struct dm_dev *old_dev, *new_dev;

        old_dev = dd->dm_dev;

        r = dm_get_table_device(md, dd->dm_dev->bdev->bd_dev,
                                dd->dm_dev->mode | new_mode, &new_dev);
        if (r)
                return r;

        dd->dm_dev = new_dev;
        dm_put_table_device(md, old_dev);

        return 0;
}

/*
 * Convert the path to a device
 */
dev_t dm_get_dev_t(const char *path)
{
        dev_t dev;

        if (lookup_bdev(path, &dev))
                dev = name_to_dev_t(path);
        return dev;
}
EXPORT_SYMBOL_GPL(dm_get_dev_t);

/*
 * Add a device to the list, or just increment the usage count if
 * it's already present.
 */
int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
                  struct dm_dev **result)
{
        int r;
        dev_t dev;
        unsigned int major, minor;
        char dummy;
        struct dm_dev_internal *dd;
        struct dm_table *t = ti->table;

        BUG_ON(!t);

        if (sscanf(path, "%u:%u%c", &major, &minor, &dummy) == 2) {
                /* Extract the major/minor numbers */
                dev = MKDEV(major, minor);
                if (MAJOR(dev) != major || MINOR(dev) != minor)
                        return -EOVERFLOW;
        } else {
                dev = dm_get_dev_t(path);
                if (!dev)
                        return -ENODEV;
        }

        dd = find_device(&t->devices, dev);
        if (!dd) {
                dd = kmalloc(sizeof(*dd), GFP_KERNEL);
                if (!dd)
                        return -ENOMEM;

                r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev);
                if (r) {
                        kfree(dd);
                        return r;
                }

                refcount_set(&dd->count, 1);
                list_add(&dd->list, &t->devices);
                goto out;

        } else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) {
                r = upgrade_mode(dd, mode, t->md);
                if (r)
                        return r;
        }
        refcount_inc(&dd->count);
out:
        *result = dd->dm_dev;
        return 0;
}
EXPORT_SYMBOL(dm_get_device);

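/*
 * For reference (illustrative), the path argument accepts either form
 * used on table lines:
 *
 *   "8:16"      - explicit major:minor numbers
 *   "/dev/sdb"  - a path resolved through dm_get_dev_t()
 */
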
static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
                                sector_t start, sector_t len, void *data)
{
        struct queue_limits *limits = data;
        struct block_device *bdev = dev->bdev;
        struct request_queue *q = bdev_get_queue(bdev);

        if (unlikely(!q)) {
                DMWARN("%s: Cannot set limits for nonexistent device %pg",
                       dm_device_name(ti->table->md), bdev);
                return 0;
        }

        if (blk_stack_limits(limits, &q->limits,
                        get_start_sect(bdev) + start) < 0)
                DMWARN("%s: adding target device %pg caused an alignment inconsistency: "
                       "physical_block_size=%u, logical_block_size=%u, "
                       "alignment_offset=%u, start=%llu",
                       dm_device_name(ti->table->md), bdev,
                       q->limits.physical_block_size,
                       q->limits.logical_block_size,
                       q->limits.alignment_offset,
                       (unsigned long long) start << SECTOR_SHIFT);
        return 0;
}

/*
 * Decrement a device's use count and remove it if necessary.
 */
void dm_put_device(struct dm_target *ti, struct dm_dev *d)
{
        int found = 0;
        struct list_head *devices = &ti->table->devices;
        struct dm_dev_internal *dd;

        list_for_each_entry(dd, devices, list) {
                if (dd->dm_dev == d) {
                        found = 1;
                        break;
                }
        }
        if (!found) {
                DMWARN("%s: device %s not in table devices list",
                       dm_device_name(ti->table->md), d->name);
                return;
        }
        if (refcount_dec_and_test(&dd->count)) {
                dm_put_table_device(ti->table->md, d);
                list_del(&dd->list);
                kfree(dd);
        }
}
EXPORT_SYMBOL(dm_put_device);

/*
 * Checks to see if the target joins onto the end of the table.
 */
static int adjoin(struct dm_table *table, struct dm_target *ti)
{
        struct dm_target *prev;

        if (!table->num_targets)
                return !ti->begin;

        prev = &table->targets[table->num_targets - 1];
        return (ti->begin == (prev->begin + prev->len));
}

/*
 * Used to dynamically allocate the arg array.
 *
 * We do first allocation with GFP_NOIO because dm-mpath and dm-thin must
 * process messages even if some device is suspended. These messages have a
 * small fixed number of arguments.
 *
 * On the other hand, dm-switch needs to process bulk data using messages and
 * excessive use of GFP_NOIO could cause trouble.
 */
static char **realloc_argv(unsigned *size, char **old_argv)
{
        char **argv;
        unsigned new_size;
        gfp_t gfp;

        if (*size) {
                new_size = *size * 2;
                gfp = GFP_KERNEL;
        } else {
                new_size = 8;
                gfp = GFP_NOIO;
        }
        argv = kmalloc_array(new_size, sizeof(*argv), gfp);
        if (argv && old_argv) {
                memcpy(argv, old_argv, *size * sizeof(*argv));
                *size = new_size;
        }

        kfree(old_argv);
        return argv;
}

/*
 * Destructively splits up the argument list to pass to ctr.
 */
int dm_split_args(int *argc, char ***argvp, char *input)
{
        char *start, *end = input, *out, **argv = NULL;
        unsigned array_size = 0;

        *argc = 0;

        if (!input) {
                *argvp = NULL;
                return 0;
        }

        argv = realloc_argv(&array_size, argv);
        if (!argv)
                return -ENOMEM;

        while (1) {
                /* Skip whitespace */
                start = skip_spaces(end);

                if (!*start)
                        break;  /* success, we hit the end */

                /* 'out' is used to remove any backslash escapes */
                end = out = start;
                while (*end) {
                        /* Everything apart from '\0' can be quoted */
                        if (*end == '\\' && *(end + 1)) {
                                *out++ = *(end + 1);
                                end += 2;
                                continue;
                        }

                        if (isspace(*end))
                                break;  /* end of token */

                        *out++ = *end++;
                }

                /* have we already filled the array ? */
                if ((*argc + 1) > array_size) {
                        argv = realloc_argv(&array_size, argv);
                        if (!argv)
                                return -ENOMEM;
                }

                /* we know this is whitespace */
                if (*end)
                        end++;

                /* terminate the string and put it in the array */
                *out = '\0';
                argv[*argc] = start;
                (*argc)++;
        }

        *argvp = argv;
        return 0;
}

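/*
 * Example (illustrative): dm_split_args() on the input "foo bar\ baz"
 * yields argc == 2 with argv[0] == "foo" and argv[1] == "bar baz",
 * since a backslash escapes the character that follows it.
 */
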
/*
 * Impose necessary and sufficient conditions on a device's table such
 * that any incoming bio which respects its logical_block_size can be
 * processed successfully.  If it falls across the boundary between
 * two or more targets, the size of each piece it gets split into must
 * be compatible with the logical_block_size of the target processing it.
 */
static int validate_hardware_logical_block_alignment(struct dm_table *table,
                                                 struct queue_limits *limits)
{
        /*
         * This function uses arithmetic modulo the logical_block_size
         * (in units of 512-byte sectors).
         */
        unsigned short device_logical_block_size_sects =
                limits->logical_block_size >> SECTOR_SHIFT;

        /*
         * Offset of the start of the next table entry, mod logical_block_size.
         */
        unsigned short next_target_start = 0;

        /*
         * Given an aligned bio that extends beyond the end of a
         * target, how many sectors must the next target handle?
         */
        unsigned short remaining = 0;

        struct dm_target *ti;
        struct queue_limits ti_limits;
        unsigned i;

        /*
         * Check each entry in the table in turn.
         */
        for (i = 0; i < dm_table_get_num_targets(table); i++) {
                ti = dm_table_get_target(table, i);

                blk_set_stacking_limits(&ti_limits);

                /* combine all target devices' limits */
                if (ti->type->iterate_devices)
                        ti->type->iterate_devices(ti, dm_set_device_limits,
                                                  &ti_limits);

                /*
                 * If the remaining sectors fall entirely within this
                 * table entry, are they compatible with its logical_block_size?
                 */
                if (remaining < ti->len &&
                    remaining & ((ti_limits.logical_block_size >>
                                  SECTOR_SHIFT) - 1))
                        break;  /* Error */

                next_target_start =
                    (unsigned short) ((next_target_start + ti->len) &
                                      (device_logical_block_size_sects - 1));
                remaining = next_target_start ?
                    device_logical_block_size_sects - next_target_start : 0;
        }

        if (remaining) {
                DMWARN("%s: table line %u (start sect %llu len %llu) "
                       "not aligned to h/w logical block size %u",
                       dm_device_name(table->md), i,
                       (unsigned long long) ti->begin,
                       (unsigned long long) ti->len,
                       limits->logical_block_size);
                return -EINVAL;
        }

        return 0;
}

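/*
 * Worked example (illustrative): with a 4096-byte logical_block_size,
 * device_logical_block_size_sects == 8.  A cumulative target boundary
 * at sector 1000 gives next_target_start == 1000 % 8 == 0, so nothing
 * carries over; a boundary at sector 1001 leaves remaining == 7
 * sectors, which only passes if 7 is a multiple of the next target's
 * logical block size in sectors.
 */
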
int dm_table_add_target(struct dm_table *t, const char *type,
                        sector_t start, sector_t len, char *params)
{
        int r = -EINVAL, argc;
        char **argv;
        struct dm_target *tgt;

        if (t->singleton) {
                DMERR("%s: target type %s must appear alone in table",
                      dm_device_name(t->md), t->targets->type->name);
                return -EINVAL;
        }

        BUG_ON(t->num_targets >= t->num_allocated);

        tgt = t->targets + t->num_targets;
        memset(tgt, 0, sizeof(*tgt));

        if (!len) {
                DMERR("%s: zero-length target", dm_device_name(t->md));
                return -EINVAL;
        }

        tgt->type = dm_get_target_type(type);
        if (!tgt->type) {
                DMERR("%s: %s: unknown target type", dm_device_name(t->md), type);
                return -EINVAL;
        }

        if (dm_target_needs_singleton(tgt->type)) {
                if (t->num_targets) {
                        tgt->error = "singleton target type must appear alone in table";
                        goto bad;
                }
                t->singleton = true;
        }

        if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) {
                tgt->error = "target type may not be included in a read-only table";
                goto bad;
        }

        if (t->immutable_target_type) {
                if (t->immutable_target_type != tgt->type) {
                        tgt->error = "immutable target type cannot be mixed with other target types";
                        goto bad;
                }
        } else if (dm_target_is_immutable(tgt->type)) {
                if (t->num_targets) {
                        tgt->error = "immutable target type cannot be mixed with other target types";
                        goto bad;
                }
                t->immutable_target_type = tgt->type;
        }

        if (dm_target_has_integrity(tgt->type))
                t->integrity_added = 1;

        tgt->table = t;
        tgt->begin = start;
        tgt->len = len;
        tgt->error = "Unknown error";

        /*
         * Does this target adjoin the previous one ?
         */
        if (!adjoin(t, tgt)) {
                tgt->error = "Gap in table";
                goto bad;
        }

        r = dm_split_args(&argc, &argv, params);
        if (r) {
                tgt->error = "couldn't split parameters";
                goto bad;
        }

        r = tgt->type->ctr(tgt, argc, argv);
        kfree(argv);
        if (r)
                goto bad;

        t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;

        if (!tgt->num_discard_bios && tgt->discards_supported)
                DMWARN("%s: %s: ignoring discards_supported because num_discard_bios is zero.",
                       dm_device_name(t->md), type);

        if (tgt->limit_swap_bios && !static_key_enabled(&swap_bios_enabled.key))
                static_branch_enable(&swap_bios_enabled);

        return 0;

 bad:
        DMERR("%s: %s: %s (%pe)", dm_device_name(t->md), type, tgt->error, ERR_PTR(r));
        dm_put_target_type(tgt->type);
        return r;
}

/*
 * Target argument parsing helpers.
 */
static int validate_next_arg(const struct dm_arg *arg,
                             struct dm_arg_set *arg_set,
                             unsigned *value, char **error, unsigned grouped)
{
        const char *arg_str = dm_shift_arg(arg_set);
        char dummy;

        if (!arg_str ||
            (sscanf(arg_str, "%u%c", value, &dummy) != 1) ||
            (*value < arg->min) ||
            (*value > arg->max) ||
            (grouped && arg_set->argc < *value)) {
                *error = arg->error;
                return -EINVAL;
        }

        return 0;
}

int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
                unsigned *value, char **error)
{
        return validate_next_arg(arg, arg_set, value, error, 0);
}
EXPORT_SYMBOL(dm_read_arg);

int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
                      unsigned *value, char **error)
{
        return validate_next_arg(arg, arg_set, value, error, 1);
}
EXPORT_SYMBOL(dm_read_arg_group);

const char *dm_shift_arg(struct dm_arg_set *as)
{
        char *r;

        if (as->argc) {
                as->argc--;
                r = *as->argv;
                as->argv++;
                return r;
        }

        return NULL;
}
EXPORT_SYMBOL(dm_shift_arg);

void dm_consume_args(struct dm_arg_set *as, unsigned num_args)
{
        BUG_ON(as->argc < num_args);
        as->argc -= num_args;
        as->argv += num_args;
}
EXPORT_SYMBOL(dm_consume_args);

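/*
 * Constructor-side usage sketch (illustrative only; the names below are
 * hypothetical and not taken from any specific target):
 *
 *	static const struct dm_arg count_arg = { 0, 16, "invalid count" };
 *	struct dm_arg_set as = { .argc = argc, .argv = argv };
 *	unsigned count;
 *
 *	r = dm_read_arg(&count_arg, &as, &count, &ti->error);
 *	(then dm_shift_arg(&as) / dm_consume_args(&as, n) for the rest)
 */
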
static bool __table_type_bio_based(enum dm_queue_mode table_type)
{
        return (table_type == DM_TYPE_BIO_BASED ||
                table_type == DM_TYPE_DAX_BIO_BASED);
}

static bool __table_type_request_based(enum dm_queue_mode table_type)
{
        return table_type == DM_TYPE_REQUEST_BASED;
}

void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type)
{
        t->type = type;
}
EXPORT_SYMBOL_GPL(dm_table_set_type);

/* validate the dax capability of the target device span */
static int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev,
                        sector_t start, sector_t len, void *data)
{
        if (dev->dax_dev)
                return false;

        DMDEBUG("%pg: error: dax unsupported by block device", dev->bdev);
        return true;
}

/* Check devices support synchronous DAX */
static int device_not_dax_synchronous_capable(struct dm_target *ti, struct dm_dev *dev,
                                              sector_t start, sector_t len, void *data)
{
        return !dev->dax_dev || !dax_synchronous(dev->dax_dev);
}

static bool dm_table_supports_dax(struct dm_table *t,
                           iterate_devices_callout_fn iterate_fn)
{
        struct dm_target *ti;
        unsigned i;

        /* Ensure that all targets support DAX. */
        for (i = 0; i < dm_table_get_num_targets(t); i++) {
                ti = dm_table_get_target(t, i);

                if (!ti->type->direct_access)
                        return false;

                if (!ti->type->iterate_devices ||
                    ti->type->iterate_devices(ti, iterate_fn, NULL))
                        return false;
        }

        return true;
}

static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev,
                                  sector_t start, sector_t len, void *data)
{
        struct block_device *bdev = dev->bdev;
        struct request_queue *q = bdev_get_queue(bdev);

        /* request-based cannot stack on partitions! */
        if (bdev_is_partition(bdev))
                return false;

        return queue_is_mq(q);
}

static int dm_table_determine_type(struct dm_table *t)
{
        unsigned i;
        unsigned bio_based = 0, request_based = 0, hybrid = 0;
        struct dm_target *tgt;
        struct list_head *devices = dm_table_get_devices(t);
        enum dm_queue_mode live_md_type = dm_get_md_type(t->md);

        if (t->type != DM_TYPE_NONE) {
                /* target already set the table's type */
                if (t->type == DM_TYPE_BIO_BASED) {
                        /* possibly upgrade to a variant of bio-based */
                        goto verify_bio_based;
                }
                BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED);
                goto verify_rq_based;
        }

        for (i = 0; i < t->num_targets; i++) {
                tgt = t->targets + i;
                if (dm_target_hybrid(tgt))
                        hybrid = 1;
                else if (dm_target_request_based(tgt))
                        request_based = 1;
                else
                        bio_based = 1;

                if (bio_based && request_based) {
                        DMERR("Inconsistent table: different target types can't be mixed up");
                        return -EINVAL;
                }
        }

        if (hybrid && !bio_based && !request_based) {
                /*
                 * The targets can work either way.
                 * Determine the type from the live device.
                 * Default to bio-based if device is new.
                 */
                if (__table_type_request_based(live_md_type))
                        request_based = 1;
                else
                        bio_based = 1;
        }

        if (bio_based) {
verify_bio_based:
                /* We must use this table as bio-based */
                t->type = DM_TYPE_BIO_BASED;
                if (dm_table_supports_dax(t, device_not_dax_capable) ||
                    (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) {
                        t->type = DM_TYPE_DAX_BIO_BASED;
                }
                return 0;
        }

        BUG_ON(!request_based); /* No targets in this table */

        t->type = DM_TYPE_REQUEST_BASED;

verify_rq_based:
        /*
         * Request-based dm supports only tables that have a single target now.
         * To support multiple targets, request splitting support is needed,
         * and that needs lots of changes in the block-layer.
         * (e.g. request completion process for partial completion.)
         */
        if (t->num_targets > 1) {
                DMERR("request-based DM doesn't support multiple targets");
                return -EINVAL;
        }

        if (list_empty(devices)) {
                int srcu_idx;
                struct dm_table *live_table = dm_get_live_table(t->md, &srcu_idx);

                /* inherit live table's type */
                if (live_table)
                        t->type = live_table->type;
                dm_put_live_table(t->md, srcu_idx);
                return 0;
        }

        tgt = dm_table_get_immutable_target(t);
        if (!tgt) {
                DMERR("table load rejected: immutable target is required");
                return -EINVAL;
        } else if (tgt->max_io_len) {
                DMERR("table load rejected: immutable target that splits IO is not supported");
                return -EINVAL;
        }

        /* Non-request-stackable devices can't be used for request-based dm */
        if (!tgt->type->iterate_devices ||
            !tgt->type->iterate_devices(tgt, device_is_rq_stackable, NULL)) {
                DMERR("table load rejected: including non-request-stackable devices");
                return -EINVAL;
        }

        return 0;
}

enum dm_queue_mode dm_table_get_type(struct dm_table *t)
{
        return t->type;
}

struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)
{
        return t->immutable_target_type;
}

struct dm_target *dm_table_get_immutable_target(struct dm_table *t)
{
        /* Immutable target is implicitly a singleton */
        if (t->num_targets > 1 ||
            !dm_target_is_immutable(t->targets[0].type))
                return NULL;

        return t->targets;
}

struct dm_target *dm_table_get_wildcard_target(struct dm_table *t)
{
        struct dm_target *ti;
        unsigned i;

        for (i = 0; i < dm_table_get_num_targets(t); i++) {
                ti = dm_table_get_target(t, i);
                if (dm_target_is_wildcard(ti->type))
                        return ti;
        }

        return NULL;
}

bool dm_table_bio_based(struct dm_table *t)
{
        return __table_type_bio_based(dm_table_get_type(t));
}

bool dm_table_request_based(struct dm_table *t)
{
        return __table_type_request_based(dm_table_get_type(t));
}

static bool dm_table_supports_poll(struct dm_table *t);

static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
{
        enum dm_queue_mode type = dm_table_get_type(t);
        unsigned per_io_data_size = 0;
        unsigned min_pool_size = 0;
        struct dm_target *ti;
        unsigned i;
        bool poll_supported = false;

        if (unlikely(type == DM_TYPE_NONE)) {
                DMWARN("no table type is set, can't allocate mempools");
                return -EINVAL;
        }

        if (__table_type_bio_based(type)) {
                for (i = 0; i < t->num_targets; i++) {
                        ti = t->targets + i;
                        per_io_data_size = max(per_io_data_size, ti->per_io_data_size);
                        min_pool_size = max(min_pool_size, ti->num_flush_bios);
                }
                poll_supported = dm_table_supports_poll(t);
        }

        t->mempools = dm_alloc_md_mempools(md, type, per_io_data_size, min_pool_size,
                                           t->integrity_supported, poll_supported);
        if (!t->mempools)
                return -ENOMEM;

        return 0;
}

void dm_table_free_md_mempools(struct dm_table *t)
{
        dm_free_md_mempools(t->mempools);
        t->mempools = NULL;
}

struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t)
{
        return t->mempools;
}

static int setup_indexes(struct dm_table *t)
{
        int i;
        unsigned int total = 0;
        sector_t *indexes;

        /* allocate the space for *all* the indexes */
        for (i = t->depth - 2; i >= 0; i--) {
                t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
                total += t->counts[i];
        }

        indexes = kvcalloc(total, NODE_SIZE, GFP_KERNEL);
        if (!indexes)
                return -ENOMEM;

        /* set up internal nodes, bottom-up */
        for (i = t->depth - 2; i >= 0; i--) {
                t->index[i] = indexes;
                indexes += (KEYS_PER_NODE * t->counts[i]);
                setup_btree_index(i, t);
        }

        return 0;
}

/*
 * Builds the btree to index the map.
 */
static int dm_table_build_index(struct dm_table *t)
{
        int r = 0;
        unsigned int leaf_nodes;

        /* how many indexes will the btree have ? */
        leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
        t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);

        /* leaf layer has already been set up */
        t->counts[t->depth - 1] = leaf_nodes;
        t->index[t->depth - 1] = t->highs;

        if (t->depth >= 2)
                r = setup_indexes(t);

        return r;
}

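/*
 * Sizing example (illustrative, with KEYS_PER_NODE == 8 and
 * CHILDREN_PER_NODE == 9): a table of 100 targets needs
 * leaf_nodes == dm_div_up(100, 8) == 13, so depth == 1 + int_log(13, 9)
 * == 3, and setup_indexes() allocates counts[1] == 2 plus counts[0] == 1
 * internal nodes above the leaf level.
 */
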
static bool integrity_profile_exists(struct gendisk *disk)
{
        return !!blk_get_integrity(disk);
}

/*
 * Get a disk whose integrity profile reflects the table's profile.
 * Returns NULL if integrity support was inconsistent or unavailable.
 */
static struct gendisk *dm_table_get_integrity_disk(struct dm_table *t)
{
        struct list_head *devices = dm_table_get_devices(t);
        struct dm_dev_internal *dd = NULL;
        struct gendisk *prev_disk = NULL, *template_disk = NULL;
        unsigned i;

        for (i = 0; i < dm_table_get_num_targets(t); i++) {
                struct dm_target *ti = dm_table_get_target(t, i);
                if (!dm_target_passes_integrity(ti->type))
                        goto no_integrity;
        }

        list_for_each_entry(dd, devices, list) {
                template_disk = dd->dm_dev->bdev->bd_disk;
                if (!integrity_profile_exists(template_disk))
                        goto no_integrity;
                else if (prev_disk &&
                         blk_integrity_compare(prev_disk, template_disk) < 0)
                        goto no_integrity;
                prev_disk = template_disk;
        }

        return template_disk;

no_integrity:
        if (prev_disk)
                DMWARN("%s: integrity not set: %s and %s profile mismatch",
                       dm_device_name(t->md),
                       prev_disk->disk_name,
                       template_disk->disk_name);
        return NULL;
}

/*
 * Register the mapped device for blk_integrity support if the
 * underlying devices have an integrity profile.  But all devices may
 * not have matching profiles (checking all devices isn't reliable
 * during table load because this table may use other DM device(s) which
 * must be resumed before they will have an initialized integrity
 * profile).  Consequently, stacked DM devices force a two-stage
 * integrity profile validation: first pass during table load, final
 * pass during resume.
 */
static int dm_table_register_integrity(struct dm_table *t)
{
        struct mapped_device *md = t->md;
        struct gendisk *template_disk = NULL;

        /* If target handles integrity itself do not register it here. */
        if (t->integrity_added)
                return 0;

        template_disk = dm_table_get_integrity_disk(t);
        if (!template_disk)
                return 0;

        if (!integrity_profile_exists(dm_disk(md))) {
                t->integrity_supported = true;
                /*
                 * Register integrity profile during table load; we can do
                 * this because the final profile must match during resume.
                 */
                blk_integrity_register(dm_disk(md),
                                       blk_get_integrity(template_disk));
                return 0;
        }

        /*
         * If DM device already has an initialized integrity
         * profile the new profile should not conflict.
         */
        if (blk_integrity_compare(dm_disk(md), template_disk) < 0) {
                DMWARN("%s: conflict with existing integrity profile: "
                       "%s profile mismatch",
                       dm_device_name(t->md),
                       template_disk->disk_name);
                return 1;
        }

        /* Preserve existing integrity profile */
        t->integrity_supported = true;
        return 0;
}

#ifdef CONFIG_BLK_INLINE_ENCRYPTION

struct dm_crypto_profile {
        struct blk_crypto_profile profile;
        struct mapped_device *md;
};

struct dm_keyslot_evict_args {
        const struct blk_crypto_key *key;
        int err;
};

static int dm_keyslot_evict_callback(struct dm_target *ti, struct dm_dev *dev,
                                     sector_t start, sector_t len, void *data)
{
        struct dm_keyslot_evict_args *args = data;
        int err;

        err = blk_crypto_evict_key(bdev_get_queue(dev->bdev), args->key);
        if (!args->err)
                args->err = err;
        /* Always try to evict the key from all devices. */
        return 0;
}

/*
 * When an inline encryption key is evicted from a device-mapper device, evict
 * it from all the underlying devices.
 */
static int dm_keyslot_evict(struct blk_crypto_profile *profile,
                            const struct blk_crypto_key *key, unsigned int slot)
{
        struct mapped_device *md =
                container_of(profile, struct dm_crypto_profile, profile)->md;
        struct dm_keyslot_evict_args args = { key };
        struct dm_table *t;
        int srcu_idx;
        int i;
        struct dm_target *ti;

        t = dm_get_live_table(md, &srcu_idx);
        if (!t)
                return 0;
        for (i = 0; i < dm_table_get_num_targets(t); i++) {
                ti = dm_table_get_target(t, i);
                if (!ti->type->iterate_devices)
                        continue;
                ti->type->iterate_devices(ti, dm_keyslot_evict_callback, &args);
        }
        dm_put_live_table(md, srcu_idx);
        return args.err;
}

static int
device_intersect_crypto_capabilities(struct dm_target *ti, struct dm_dev *dev,
                                     sector_t start, sector_t len, void *data)
{
        struct blk_crypto_profile *parent = data;
        struct blk_crypto_profile *child =
                bdev_get_queue(dev->bdev)->crypto_profile;

        blk_crypto_intersect_capabilities(parent, child);
        return 0;
}

void dm_destroy_crypto_profile(struct blk_crypto_profile *profile)
{
        struct dm_crypto_profile *dmcp = container_of(profile,
                                                      struct dm_crypto_profile,
                                                      profile);

        if (!profile)
                return;

        blk_crypto_profile_destroy(profile);
        kfree(dmcp);
}

static void dm_table_destroy_crypto_profile(struct dm_table *t)
{
        dm_destroy_crypto_profile(t->crypto_profile);
        t->crypto_profile = NULL;
}

/*
 * Constructs and initializes t->crypto_profile with a crypto profile that
 * represents the common set of crypto capabilities of the devices described by
 * the dm_table.  However, if the constructed crypto profile doesn't support all
 * crypto capabilities that are supported by the current mapped_device, it
 * returns an error instead, since we don't support removing crypto capabilities
 * on table changes.  Finally, if the constructed crypto profile is "empty" (has
 * no crypto capabilities at all), it just sets t->crypto_profile to NULL.
 */
static int dm_table_construct_crypto_profile(struct dm_table *t)
{
        struct dm_crypto_profile *dmcp;
        struct blk_crypto_profile *profile;
        struct dm_target *ti;
        unsigned int i;
        bool empty_profile = true;

        dmcp = kmalloc(sizeof(*dmcp), GFP_KERNEL);
        if (!dmcp)
                return -ENOMEM;
        dmcp->md = t->md;

        profile = &dmcp->profile;
        blk_crypto_profile_init(profile, 0);
        profile->ll_ops.keyslot_evict = dm_keyslot_evict;
        profile->max_dun_bytes_supported = UINT_MAX;
        memset(profile->modes_supported, 0xFF,
               sizeof(profile->modes_supported));

        for (i = 0; i < dm_table_get_num_targets(t); i++) {
                ti = dm_table_get_target(t, i);

                if (!dm_target_passes_crypto(ti->type)) {
                        blk_crypto_intersect_capabilities(profile, NULL);
                        break;
                }
                if (!ti->type->iterate_devices)
                        continue;
                ti->type->iterate_devices(ti,
                                          device_intersect_crypto_capabilities,
                                          profile);
        }

        if (t->md->queue &&
            !blk_crypto_has_capabilities(profile,
                                         t->md->queue->crypto_profile)) {
                DMWARN("Inline encryption capabilities of new DM table were more restrictive than the old table's. This is not supported!");
                dm_destroy_crypto_profile(profile);
                return -EINVAL;
        }

        /*
         * If the new profile doesn't actually support any crypto capabilities,
         * we may as well represent it with a NULL profile.
         */
        for (i = 0; i < ARRAY_SIZE(profile->modes_supported); i++) {
                if (profile->modes_supported[i]) {
                        empty_profile = false;
                        break;
                }
        }

        if (empty_profile) {
                dm_destroy_crypto_profile(profile);
                profile = NULL;
        }

        /*
         * t->crypto_profile is only set temporarily while the table is being
         * set up, and it gets set to NULL after the profile has been
         * transferred to the request_queue.
         */
        t->crypto_profile = profile;

        return 0;
}

static void dm_update_crypto_profile(struct request_queue *q,
                                     struct dm_table *t)
{
        if (!t->crypto_profile)
                return;

        /* Make the crypto profile less restrictive. */
        if (!q->crypto_profile) {
                blk_crypto_register(t->crypto_profile, q);
        } else {
                blk_crypto_update_capabilities(q->crypto_profile,
                                               t->crypto_profile);
                dm_destroy_crypto_profile(t->crypto_profile);
        }
        t->crypto_profile = NULL;
}

#else /* CONFIG_BLK_INLINE_ENCRYPTION */

static int dm_table_construct_crypto_profile(struct dm_table *t)
{
        return 0;
}

void dm_destroy_crypto_profile(struct blk_crypto_profile *profile)
{
}

static void dm_table_destroy_crypto_profile(struct dm_table *t)
{
}

static void dm_update_crypto_profile(struct request_queue *q,
                                     struct dm_table *t)
{
}

#endif /* !CONFIG_BLK_INLINE_ENCRYPTION */

/*
 * Prepares the table for use by building the indices,
 * setting the type, and allocating mempools.
 */
int dm_table_complete(struct dm_table *t)
{
        int r;

        r = dm_table_determine_type(t);
        if (r) {
                DMERR("unable to determine table type");
                return r;
        }

        r = dm_table_build_index(t);
        if (r) {
                DMERR("unable to build btrees");
                return r;
        }

        r = dm_table_register_integrity(t);
        if (r) {
                DMERR("could not register integrity profile.");
                return r;
        }

        r = dm_table_construct_crypto_profile(t);
        if (r) {
                DMERR("could not construct crypto profile.");
                return r;
        }

        r = dm_table_alloc_md_mempools(t, t->md);
        if (r)
                DMERR("unable to allocate mempools");

        return r;
}

static DEFINE_MUTEX(_event_lock);
void dm_table_event_callback(struct dm_table *t,
                             void (*fn)(void *), void *context)
{
        mutex_lock(&_event_lock);
        t->event_fn = fn;
        t->event_context = context;
        mutex_unlock(&_event_lock);
}

void dm_table_event(struct dm_table *t)
{
        mutex_lock(&_event_lock);
        if (t->event_fn)
                t->event_fn(t->event_context);
        mutex_unlock(&_event_lock);
}
EXPORT_SYMBOL(dm_table_event);

inline sector_t dm_table_get_size(struct dm_table *t)
{
        return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
}
EXPORT_SYMBOL(dm_table_get_size);

struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
{
        if (index >= t->num_targets)
                return NULL;

        return t->targets + index;
}

/*
 * Search the btree for the correct target.
 *
 * Caller should check returned pointer for NULL
 * to trap I/O beyond end of device.
 */
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
{
        unsigned int l, n = 0, k = 0;
        sector_t *node;

        if (unlikely(sector >= dm_table_get_size(t)))
                return NULL;

        for (l = 0; l < t->depth; l++) {
                n = get_child(n, k);
                node = get_node(t, l, n);

                for (k = 0; k < KEYS_PER_NODE; k++)
                        if (node[k] >= sector)
                                break;
        }

        return &t->targets[(KEYS_PER_NODE * n) + k];
}

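/*
 * Lookup sketch (illustrative): dm_table_find_target() walks one node
 * per level, scanning up to KEYS_PER_NODE keys at each node for the
 * first key >= sector; with depth == 3 that is at most 3 nodes touched,
 * ending at leaf index (KEYS_PER_NODE * n) + k into t->targets.
 */
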
static int device_not_poll_capable(struct dm_target *ti, struct dm_dev *dev,
                                   sector_t start, sector_t len, void *data)
{
        struct request_queue *q = bdev_get_queue(dev->bdev);

        return !test_bit(QUEUE_FLAG_POLL, &q->queue_flags);
}

/*
 * type->iterate_devices() should be called when the sanity check needs to
 * iterate and check all underlying data devices. iterate_devices() will
 * iterate all underlying data devices until it encounters a non-zero return
 * code, returned either by the supplied iterate_devices_callout_fn or by
 * iterate_devices() itself internally.
 *
 * For some target types (e.g. dm-stripe), one call of iterate_devices() may
 * iterate multiple underlying devices internally, in which case a non-zero
 * return code from iterate_devices_callout_fn stops the iteration early.
 *
 * Cases requiring _any_ underlying device to support some kind of attribute
 * should use an iteration structure like dm_table_any_dev_attr(), or call
 * it directly. @func should handle the semantics of positive examples, e.g.
 * capable of something.
 *
 * Cases requiring _all_ underlying devices to support some kind of attribute
 * should use an iteration structure like dm_table_supports_nowait() or
 * dm_table_supports_discards(). Or introduce dm_table_all_devs_attr() that
 * uses an @anti_func that handles the semantics of counter examples, e.g. not
 * capable of something. So: return !dm_table_any_dev_attr(t, anti_func, data);
 */
1523 static bool dm_table_any_dev_attr(struct dm_table *t,
1524                                   iterate_devices_callout_fn func, void *data)
1525 {
1526         struct dm_target *ti;
1527         unsigned int i;
1528
1529         for (i = 0; i < dm_table_get_num_targets(t); i++) {
1530                 ti = dm_table_get_target(t, i);
1531
1532                 if (ti->type->iterate_devices &&
1533                     ti->type->iterate_devices(ti, func, data))
1534                         return true;
1535         }
1536
1537         return false;
1538 }
1539
1540 static int count_device(struct dm_target *ti, struct dm_dev *dev,
1541                         sector_t start, sector_t len, void *data)
1542 {
1543         unsigned *num_devices = data;
1544
1545         (*num_devices)++;
1546
1547         return 0;
1548 }
1549
1550 static bool dm_table_supports_poll(struct dm_table *t)
1551 {
1552         struct dm_target *ti;
1553         unsigned i = 0;
1554
1555         while (i < dm_table_get_num_targets(t)) {
1556                 ti = dm_table_get_target(t, i++);
1557
1558                 if (!ti->type->iterate_devices ||
1559                     ti->type->iterate_devices(ti, device_not_poll_capable, NULL))
1560                         return false;
1561         }
1562
1563         return true;
1564 }
1565
1566 /*
1567  * Check whether a table has no data devices attached using each
1568  * target's iterate_devices method.
1569  * Returns false if the result is unknown because a target doesn't
1570  * support iterate_devices.
1571  */
1572 bool dm_table_has_no_data_devices(struct dm_table *table)
1573 {
1574         struct dm_target *ti;
1575         unsigned i, num_devices;
1576
1577         for (i = 0; i < dm_table_get_num_targets(table); i++) {
1578                 ti = dm_table_get_target(table, i);
1579
1580                 if (!ti->type->iterate_devices)
1581                         return false;
1582
1583                 num_devices = 0;
1584                 ti->type->iterate_devices(ti, count_device, &num_devices);
1585                 if (num_devices)
1586                         return false;
1587         }
1588
1589         return true;
1590 }
1591
1592 static int device_not_zoned_model(struct dm_target *ti, struct dm_dev *dev,
1593                                   sector_t start, sector_t len, void *data)
1594 {
1595         struct request_queue *q = bdev_get_queue(dev->bdev);
1596         enum blk_zoned_model *zoned_model = data;
1597
1598         return blk_queue_zoned_model(q) != *zoned_model;
1599 }
1600
1601 /*
1602  * Check the device zoned model based on the target feature flag. If the target
1603  * has the DM_TARGET_ZONED_HM feature flag set, host-managed zoned devices are
1604  * also accepted but all devices must have the same zoned model. If the target
1605  * has the DM_TARGET_MIXED_ZONED_MODEL feature set, the devices can have any
1606  * zoned model with all zoned devices having the same zone size.
1607  */
1608 static bool dm_table_supports_zoned_model(struct dm_table *t,
1609                                           enum blk_zoned_model zoned_model)
1610 {
1611         struct dm_target *ti;
1612         unsigned i;
1613
1614         for (i = 0; i < dm_table_get_num_targets(t); i++) {
1615                 ti = dm_table_get_target(t, i);
1616
1617                 if (dm_target_supports_zoned_hm(ti->type)) {
1618                         if (!ti->type->iterate_devices ||
1619                             ti->type->iterate_devices(ti, device_not_zoned_model,
1620                                                       &zoned_model))
1621                                 return false;
1622                 } else if (!dm_target_supports_mixed_zoned_model(ti->type)) {
1623                         if (zoned_model == BLK_ZONED_HM)
1624                                 return false;
1625                 }
1626         }
1627
1628         return true;
1629 }
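
/*
 * Hedged illustration (not part of this file; names other than the feature
 * flags are hypothetical): a target advertises native host-managed zone
 * support through its target_type feature mask, which is what
 * dm_target_supports_zoned_hm() above tests.  A real target would also set
 * .ctr, .dtr, .map, etc.
 */
static struct target_type zoned_example_target __maybe_unused = {
        .name     = "zoned-example",
        .version  = {1, 0, 0},
        .module   = THIS_MODULE,
        .features = DM_TARGET_ZONED_HM, /* or DM_TARGET_MIXED_ZONED_MODEL */
};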
1630
1631 static int device_not_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev,
1632                                            sector_t start, sector_t len, void *data)
1633 {
1634         struct request_queue *q = bdev_get_queue(dev->bdev);
1635         unsigned int *zone_sectors = data;
1636
1637         if (!blk_queue_is_zoned(q))
1638                 return 0;
1639
1640         return blk_queue_zone_sectors(q) != *zone_sectors;
1641 }
1642
1643 /*
1644  * Check consistency of zoned model and zone sectors across all targets. For
1645  * zone sectors, if the destination device is a zoned block device, it shall
1646  * have the specified zone_sectors.
1647  */
1648 static int validate_hardware_zoned_model(struct dm_table *table,
1649                                          enum blk_zoned_model zoned_model,
1650                                          unsigned int zone_sectors)
1651 {
1652         if (zoned_model == BLK_ZONED_NONE)
1653                 return 0;
1654
1655         if (!dm_table_supports_zoned_model(table, zoned_model)) {
1656                 DMERR("%s: zoned model is not consistent across all devices",
1657                       dm_device_name(table->md));
1658                 return -EINVAL;
1659         }
1660
1661         /* Check zone size validity and cross-device compatibility (the block layer requires a power-of-2 zone size) */
1662         if (!zone_sectors || !is_power_of_2(zone_sectors))
1663                 return -EINVAL;
1664
1665         if (dm_table_any_dev_attr(table, device_not_matches_zone_sectors, &zone_sectors)) {
1666                 DMERR("%s: zone sectors is not consistent across all zoned devices",
1667                       dm_device_name(table->md));
1668                 return -EINVAL;
1669         }
1670
1671         return 0;
1672 }
1673
1674 /*
1675  * Establish the new table's queue_limits and validate them.
1676  */
1677 int dm_calculate_queue_limits(struct dm_table *table,
1678                               struct queue_limits *limits)
1679 {
1680         struct dm_target *ti;
1681         struct queue_limits ti_limits;
1682         unsigned i;
1683         enum blk_zoned_model zoned_model = BLK_ZONED_NONE;
1684         unsigned int zone_sectors = 0;
1685
1686         blk_set_stacking_limits(limits);
1687
1688         for (i = 0; i < dm_table_get_num_targets(table); i++) {
1689                 blk_set_stacking_limits(&ti_limits);
1690
1691                 ti = dm_table_get_target(table, i);
1692
1693                 if (!ti->type->iterate_devices)
1694                         goto combine_limits;
1695
1696                 /*
1697                  * Combine queue limits of all the devices this target uses.
1698                  */
1699                 ti->type->iterate_devices(ti, dm_set_device_limits,
1700                                           &ti_limits);
1701
1702                 if (zoned_model == BLK_ZONED_NONE && ti_limits.zoned != BLK_ZONED_NONE) {
1703                         /*
1704                  * After stacking all limits, validate that all devices
1705                  * in the table support this zoned model and zone sectors.
1706                          */
1707                         zoned_model = ti_limits.zoned;
1708                         zone_sectors = ti_limits.chunk_sectors;
1709                 }
1710
1711                 /* Set I/O hints portion of queue limits */
1712                 if (ti->type->io_hints)
1713                         ti->type->io_hints(ti, &ti_limits);
1714
1715                 /*
1716                  * Check each device area is consistent with the target's
1717                  * overall queue limits.
1718                  */
1719                 if (ti->type->iterate_devices(ti, device_area_is_invalid,
1720                                               &ti_limits))
1721                         return -EINVAL;
1722
1723 combine_limits:
1724                 /*
1725                  * Merge this target's queue limits into the overall limits
1726                  * for the table.
1727                  */
1728                 if (blk_stack_limits(limits, &ti_limits, 0) < 0)
1729                         DMWARN("%s: adding target device "
1730                                "(start sect %llu len %llu) "
1731                                "caused an alignment inconsistency",
1732                                dm_device_name(table->md),
1733                                (unsigned long long) ti->begin,
1734                                (unsigned long long) ti->len);
1735         }
1736
1737         /*
1738          * Verify that the zoned model and zone sectors, as determined before
1739          * any .io_hints override, are the same across all devices in the table.
1740  * - this is especially relevant if .io_hints is emulating a drive-managed
1741          *   zoned model (aka BLK_ZONED_NONE) on host-managed zoned block devices.
1742          * BUT...
1743          */
1744         if (limits->zoned != BLK_ZONED_NONE) {
1745                 /*
1746                  * ...IF the above limits stacking determined a zoned model,
1747                  * validate that all of the table's devices conform to it.
1748                  */
1749                 zoned_model = limits->zoned;
1750                 zone_sectors = limits->chunk_sectors;
1751         }
1752         if (validate_hardware_zoned_model(table, zoned_model, zone_sectors))
1753                 return -EINVAL;
1754
1755         return validate_hardware_logical_block_alignment(table, limits);
1756 }
1757
1758 /*
1759  * Verify that all devices have an integrity profile that matches the
1760  * DM device's registered integrity profile.  If the profiles don't
1761  * match then unregister the DM device's integrity profile.
1762  */
1763 static void dm_table_verify_integrity(struct dm_table *t)
1764 {
1765         struct gendisk *template_disk = NULL;
1766
1767         if (t->integrity_added)
1768                 return;
1769
1770         if (t->integrity_supported) {
1771                 /*
1772                  * Verify that the original integrity profile
1773                  * matches all the devices in this table.
1774                  */
1775                 template_disk = dm_table_get_integrity_disk(t);
1776                 if (template_disk &&
1777                     blk_integrity_compare(dm_disk(t->md), template_disk) >= 0)
1778                         return;
1779         }
1780
1781         if (integrity_profile_exists(dm_disk(t->md))) {
1782                 DMWARN("%s: unable to establish an integrity profile",
1783                        dm_device_name(t->md));
1784                 blk_integrity_unregister(dm_disk(t->md));
1785         }
1786 }
1787
1788 static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
1789                                 sector_t start, sector_t len, void *data)
1790 {
1791         unsigned long flush = (unsigned long) data;
1792         struct request_queue *q = bdev_get_queue(dev->bdev);
1793
1794         return (q->queue_flags & flush);
1795 }
1796
1797 static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush)
1798 {
1799         struct dm_target *ti;
1800         unsigned i;
1801
1802         /*
1803          * Require at least one underlying device to support flushes.
1804          * t->devices includes internal dm devices such as mirror logs
1805          * so we need to use iterate_devices here, which targets
1806          * supporting flushes must provide.
1807          */
1808         for (i = 0; i < dm_table_get_num_targets(t); i++) {
1809                 ti = dm_table_get_target(t, i);
1810
1811                 if (!ti->num_flush_bios)
1812                         continue;
1813
1814                 if (ti->flush_supported)
1815                         return true;
1816
1817                 if (ti->type->iterate_devices &&
1818                     ti->type->iterate_devices(ti, device_flush_capable, (void *) flush))
1819                         return true;
1820         }
1821
1822         return false;
1823 }
1824
1825 static int device_dax_write_cache_enabled(struct dm_target *ti,
1826                                           struct dm_dev *dev, sector_t start,
1827                                           sector_t len, void *data)
1828 {
1829         struct dax_device *dax_dev = dev->dax_dev;
1830
1831         return dax_dev && dax_write_cache_enabled(dax_dev);
1837 }
1838
1839 static int device_is_rotational(struct dm_target *ti, struct dm_dev *dev,
1840                                 sector_t start, sector_t len, void *data)
1841 {
1842         return !bdev_nonrot(dev->bdev);
1843 }
1844
1845 static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
1846                                 sector_t start, sector_t len, void *data)
1847 {
1848         struct request_queue *q = bdev_get_queue(dev->bdev);
1849
1850         return !blk_queue_add_random(q);
1851 }
1852
1853 static int device_not_write_zeroes_capable(struct dm_target *ti, struct dm_dev *dev,
1854                                            sector_t start, sector_t len, void *data)
1855 {
1856         struct request_queue *q = bdev_get_queue(dev->bdev);
1857
1858         return !q->limits.max_write_zeroes_sectors;
1859 }
1860
1861 static bool dm_table_supports_write_zeroes(struct dm_table *t)
1862 {
1863         struct dm_target *ti;
1864         unsigned i = 0;
1865
1866         while (i < dm_table_get_num_targets(t)) {
1867                 ti = dm_table_get_target(t, i++);
1868
1869                 if (!ti->num_write_zeroes_bios)
1870                         return false;
1871
1872                 if (!ti->type->iterate_devices ||
1873                     ti->type->iterate_devices(ti, device_not_write_zeroes_capable, NULL))
1874                         return false;
1875         }
1876
1877         return true;
1878 }
1879
1880 static int device_not_nowait_capable(struct dm_target *ti, struct dm_dev *dev,
1881                                      sector_t start, sector_t len, void *data)
1882 {
1883         struct request_queue *q = bdev_get_queue(dev->bdev);
1884
1885         return !blk_queue_nowait(q);
1886 }
1887
1888 static bool dm_table_supports_nowait(struct dm_table *t)
1889 {
1890         struct dm_target *ti;
1891         unsigned i = 0;
1892
1893         while (i < dm_table_get_num_targets(t)) {
1894                 ti = dm_table_get_target(t, i++);
1895
1896                 if (!dm_target_supports_nowait(ti->type))
1897                         return false;
1898
1899                 if (!ti->type->iterate_devices ||
1900                     ti->type->iterate_devices(ti, device_not_nowait_capable, NULL))
1901                         return false;
1902         }
1903
1904         return true;
1905 }
1906
1907 static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev,
1908                                       sector_t start, sector_t len, void *data)
1909 {
1910         return !bdev_max_discard_sectors(dev->bdev);
1911 }
1912
1913 static bool dm_table_supports_discards(struct dm_table *t)
1914 {
1915         struct dm_target *ti;
1916         unsigned i;
1917
1918         for (i = 0; i < dm_table_get_num_targets(t); i++) {
1919                 ti = dm_table_get_target(t, i);
1920
1921                 if (!ti->num_discard_bios)
1922                         return false;
1923
1924                 /*
1925                  * Either the target provides discard support (as implied by setting
1926                  * 'discards_supported') or it relies on _all_ data devices having
1927                  * discard support.
1928                  */
1929                 if (!ti->discards_supported &&
1930                     (!ti->type->iterate_devices ||
1931                      ti->type->iterate_devices(ti, device_not_discard_capable, NULL)))
1932                         return false;
1933         }
1934
1935         return true;
1936 }
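
/*
 * Hedged sketch of the producer side (hypothetical ctr fragment, not in
 * this file): a target opts in to discards through these dm_target fields,
 * which dm_table_supports_discards() above consults.
 */
static int example_discard_ctr(struct dm_target *ti, unsigned int argc,
                               char **argv)
{
        /* Accept REQ_OP_DISCARD bios; 0 means the target never sees them. */
        ti->num_discard_bios = 1;
        /*
         * The target handles discards itself, so the table does not require
         * every underlying data device to be discard capable.
         */
        ti->discards_supported = true;
        return 0;
}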
1937
1938 static int device_not_secure_erase_capable(struct dm_target *ti,
1939                                            struct dm_dev *dev, sector_t start,
1940                                            sector_t len, void *data)
1941 {
1942         return !bdev_max_secure_erase_sectors(dev->bdev);
1943 }
1944
1945 static bool dm_table_supports_secure_erase(struct dm_table *t)
1946 {
1947         struct dm_target *ti;
1948         unsigned int i;
1949
1950         for (i = 0; i < dm_table_get_num_targets(t); i++) {
1951                 ti = dm_table_get_target(t, i);
1952
1953                 if (!ti->num_secure_erase_bios)
1954                         return false;
1955
1956                 if (!ti->type->iterate_devices ||
1957                     ti->type->iterate_devices(ti, device_not_secure_erase_capable, NULL))
1958                         return false;
1959         }
1960
1961         return true;
1962 }
1963
1964 static int device_requires_stable_pages(struct dm_target *ti,
1965                                         struct dm_dev *dev, sector_t start,
1966                                         sector_t len, void *data)
1967 {
1968         return bdev_stable_writes(dev->bdev);
1969 }
1970
1971 int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
1972                               struct queue_limits *limits)
1973 {
1974         bool wc = false, fua = false;
1975         int r;
1976
1977         /*
1978          * Copy table's limits to the DM device's request_queue
1979          */
1980         q->limits = *limits;
1981
1982         if (dm_table_supports_nowait(t))
1983                 blk_queue_flag_set(QUEUE_FLAG_NOWAIT, q);
1984         else
1985                 blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, q);
1986
1987         if (!dm_table_supports_discards(t)) {
1988                 q->limits.max_discard_sectors = 0;
1989                 q->limits.max_hw_discard_sectors = 0;
1990                 q->limits.discard_granularity = 0;
1991                 q->limits.discard_alignment = 0;
1992                 q->limits.discard_misaligned = 0;
1993         }
1994
1995         if (!dm_table_supports_secure_erase(t))
1996                 q->limits.max_secure_erase_sectors = 0;
1997
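        /*
         * A volatile write cache (WC) is a precondition for FUA, hence FUA
         * support is only probed once WC support has been established.
         */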
1998         if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
1999                 wc = true;
2000                 if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_FUA)))
2001                         fua = true;
2002         }
2003         blk_queue_write_cache(q, wc, fua);
2004
2005         if (dm_table_supports_dax(t, device_not_dax_capable)) {
2006                 blk_queue_flag_set(QUEUE_FLAG_DAX, q);
2007                 if (dm_table_supports_dax(t, device_not_dax_synchronous_capable))
2008                         set_dax_synchronous(t->md->dax_dev);
2009         } else
2010                 blk_queue_flag_clear(QUEUE_FLAG_DAX, q);
2012
2013         if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled, NULL))
2014                 dax_write_cache(t->md->dax_dev, true);
2015
2016         /* Advertise the queue as non-rotational only if all underlying devices are. */
2017         if (dm_table_any_dev_attr(t, device_is_rotational, NULL))
2018                 blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
2019         else
2020                 blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
2021
2022         if (!dm_table_supports_write_zeroes(t))
2023                 q->limits.max_write_zeroes_sectors = 0;
2024
2025         dm_table_verify_integrity(t);
2026
2027         /*
2028          * Some devices don't use blk_integrity but still want stable pages
2029          * because they do their own checksumming.
2030          * If any underlying device requires stable pages, a table must require
2031          * them as well.  Only targets that support iterate_devices are considered:
2032          * we don't want error, zero, etc. to require stable pages.
2033          */
2034         if (dm_table_any_dev_attr(t, device_requires_stable_pages, NULL))
2035                 blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
2036         else
2037                 blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, q);
2038
2039         /*
2040          * Determine whether or not this queue's I/O timings contribute
2041          * to the entropy pool. Only request-based targets use this.
2042          * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
2043          * have it set.
2044          */
2045         if (blk_queue_add_random(q) &&
2046             dm_table_any_dev_attr(t, device_is_not_random, NULL))
2047                 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
2048
2049         /*
2050          * For a zoned target, set up the zone-related queue attributes
2051          * and, if needed, the resources for zone append emulation.
2052          */
2053         if (blk_queue_is_zoned(q)) {
2054                 r = dm_set_zones_restrictions(t, q);
2055                 if (r)
2056                         return r;
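                /*
                 * Flip the global static key so dm's bio submission fast
                 * path starts taking its zoned-device branches; once set
                 * it is never cleared.
                 */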
2057                 if (!static_key_enabled(&zoned_enabled.key))
2058                         static_branch_enable(&zoned_enabled);
2059         }
2060
2061         dm_update_crypto_profile(q, t);
2062         disk_update_readahead(t->md->disk);
2063
2064         /*
2065          * The check for a request-based device is left to
2066          * dm_mq_init_request_queue()->blk_mq_init_allocated_queue().
2067          *
2068          * For a bio-based device, only set QUEUE_FLAG_POLL when all
2069          * underlying devices support polling.
2070          */
2071         if (__table_type_bio_based(t->type)) {
2072                 if (dm_table_supports_poll(t))
2073                         blk_queue_flag_set(QUEUE_FLAG_POLL, q);
2074                 else
2075                         blk_queue_flag_clear(QUEUE_FLAG_POLL, q);
2076         }
2077
2078         return 0;
2079 }
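
/*
 * Hedged usage sketch (illustrative only; the function name is
 * hypothetical): how the two entry points above combine when dm core binds
 * a table to a device, roughly following the dm_swap_table() path in dm.c
 * with locking and error recovery trimmed.
 */
static int example_bind_table(struct dm_table *t, struct request_queue *q)
{
        struct queue_limits limits;
        int r;

        r = dm_calculate_queue_limits(t, &limits);
        if (r)
                return r;

        return dm_table_set_restrictions(t, q, &limits);
}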
2080
2081 unsigned int dm_table_get_num_targets(struct dm_table *t)
2082 {
2083         return t->num_targets;
2084 }
2085
2086 struct list_head *dm_table_get_devices(struct dm_table *t)
2087 {
2088         return &t->devices;
2089 }
2090
2091 fmode_t dm_table_get_mode(struct dm_table *t)
2092 {
2093         return t->mode;
2094 }
2095 EXPORT_SYMBOL(dm_table_get_mode);
2096
2097 enum suspend_mode {
2098         PRESUSPEND,
2099         PRESUSPEND_UNDO,
2100         POSTSUSPEND,
2101 };
2102
2103 static void suspend_targets(struct dm_table *t, enum suspend_mode mode)
2104 {
2105         int i = t->num_targets;
2106         struct dm_target *ti = t->targets;
2107
2108         lockdep_assert_held(&t->md->suspend_lock);
2109
2110         while (i--) {
2111                 switch (mode) {
2112                 case PRESUSPEND:
2113                         if (ti->type->presuspend)
2114                                 ti->type->presuspend(ti);
2115                         break;
2116                 case PRESUSPEND_UNDO:
2117                         if (ti->type->presuspend_undo)
2118                                 ti->type->presuspend_undo(ti);
2119                         break;
2120                 case POSTSUSPEND:
2121                         if (ti->type->postsuspend)
2122                                 ti->type->postsuspend(ti);
2123                         break;
2124                 }
2125                 ti++;
2126         }
2127 }
2128
2129 void dm_table_presuspend_targets(struct dm_table *t)
2130 {
2131         if (!t)
2132                 return;
2133
2134         suspend_targets(t, PRESUSPEND);
2135 }
2136
2137 void dm_table_presuspend_undo_targets(struct dm_table *t)
2138 {
2139         if (!t)
2140                 return;
2141
2142         suspend_targets(t, PRESUSPEND_UNDO);
2143 }
2144
2145 void dm_table_postsuspend_targets(struct dm_table *t)
2146 {
2147         if (!t)
2148                 return;
2149
2150         suspend_targets(t, POSTSUSPEND);
2151 }
2152
2153 int dm_table_resume_targets(struct dm_table *t)
2154 {
2155         int i, r = 0;
2156
2157         lockdep_assert_held(&t->md->suspend_lock);
2158
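        /*
         * Run every target's preresume hook first: if any fails, resume is
         * aborted before any target's resume hook has run.
         */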
2159         for (i = 0; i < t->num_targets; i++) {
2160                 struct dm_target *ti = t->targets + i;
2161
2162                 if (!ti->type->preresume)
2163                         continue;
2164
2165                 r = ti->type->preresume(ti);
2166                 if (r) {
2167                         DMERR("%s: %s: preresume failed, error = %d",
2168                               dm_device_name(t->md), ti->type->name, r);
2169                         return r;
2170                 }
2171         }
2172
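        /* All preresume hooks succeeded; now actually resume the targets. */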
2173         for (i = 0; i < t->num_targets; i++) {
2174                 struct dm_target *ti = t->targets + i;
2175
2176                 if (ti->type->resume)
2177                         ti->type->resume(ti);
2178         }
2179
2180         return 0;
2181 }
2182
2183 struct mapped_device *dm_table_get_md(struct dm_table *t)
2184 {
2185         return t->md;
2186 }
2187 EXPORT_SYMBOL(dm_table_get_md);
2188
2189 const char *dm_table_device_name(struct dm_table *t)
2190 {
2191         return dm_device_name(t->md);
2192 }
2193 EXPORT_SYMBOL_GPL(dm_table_device_name);
2194
2195 void dm_table_run_md_queue_async(struct dm_table *t)
2196 {
2197         if (!dm_table_request_based(t))
2198                 return;
2199
2200         if (t->md->queue)
2201                 blk_mq_run_hw_queues(t->md->queue, true);
2202 }
2203 EXPORT_SYMBOL(dm_table_run_md_queue_async);
2204