// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/module.h>
#include <linux/device.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "pmem.h"
#include "pfn.h"
#include "nd.h"

static void namespace_io_release(struct device *dev)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

	kfree(nsio);
}

static void namespace_pmem_release(struct device *dev)
{
	struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);

	if (nspm->id >= 0)
		ida_simple_remove(&nd_region->ns_ida, nspm->id);
	kfree(nspm->alt_name);
	kfree(nspm->uuid);
	kfree(nspm);
}

static void namespace_blk_release(struct device *dev)
{
	struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);

	if (nsblk->id >= 0)
		ida_simple_remove(&nd_region->ns_ida, nsblk->id);
	kfree(nsblk->alt_name);
	kfree(nsblk->uuid);
	kfree(nsblk->res);
	kfree(nsblk);
}

static bool is_namespace_pmem(const struct device *dev);
static bool is_namespace_blk(const struct device *dev);
static bool is_namespace_io(const struct device *dev);

static int is_uuid_busy(struct device *dev, void *data)
{
	uuid_t *uuid1 = data, *uuid2 = NULL;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		uuid2 = nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		uuid2 = nsblk->uuid;
	} else if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		uuid2 = nd_btt->uuid;
	} else if (is_nd_pfn(dev)) {
		struct nd_pfn *nd_pfn = to_nd_pfn(dev);

		uuid2 = nd_pfn->uuid;
	}

	if (uuid2 && uuid_equal(uuid1, uuid2))
		return -EBUSY;

	return 0;
}

static int is_namespace_uuid_busy(struct device *dev, void *data)
{
	if (is_nd_region(dev))
		return device_for_each_child(dev, data, is_uuid_busy);
	return 0;
}

/**
 * nd_is_uuid_unique - verify that no other namespace has @uuid
 * @dev: any device on a nvdimm_bus
 * @uuid: uuid to check
 */
bool nd_is_uuid_unique(struct device *dev, uuid_t *uuid)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return false;
	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm_bus->dev));
	if (device_for_each_child(&nvdimm_bus->dev, uuid,
				is_namespace_uuid_busy) != 0)
		return false;
	return true;
}
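
/*
 * Illustration: the uniqueness check walks two levels of the device
 * tree: the bus's region children (is_namespace_uuid_busy()) and each
 * region's namespace/btt/pfn children (is_uuid_busy()).  A non-zero
 * (-EBUSY) return from the callback short-circuits
 * device_for_each_child(), stopping at the first match.
 */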

bool pmem_should_map_pages(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_namespace_common *ndns = to_ndns(dev);
	struct nd_namespace_io *nsio;

	if (!IS_ENABLED(CONFIG_ZONE_DEVICE))
		return false;

	if (!test_bit(ND_REGION_PAGEMAP, &nd_region->flags))
		return false;

	if (is_nd_pfn(dev) || is_nd_btt(dev))
		return false;

	if (ndns->force_raw)
		return false;

	nsio = to_nd_namespace_io(dev);
	if (region_intersects(nsio->res.start, resource_size(&nsio->res),
				IORESOURCE_SYSTEM_RAM,
				IORES_DESC_NONE) == REGION_MIXED)
		return false;

	return ARCH_MEMREMAP_PMEM == MEMREMAP_WB;
}
EXPORT_SYMBOL(pmem_should_map_pages);

unsigned int pmem_sector_size(struct nd_namespace_common *ndns)
{
	if (is_namespace_pmem(&ndns->dev)) {
		struct nd_namespace_pmem *nspm;

		nspm = to_nd_namespace_pmem(&ndns->dev);
		if (nspm->lbasize == 0 || nspm->lbasize == 512)
			/* default */;
		else if (nspm->lbasize == 4096)
			return 4096;
		else
			dev_WARN(&ndns->dev, "unsupported sector size: %lu\n",
					nspm->lbasize);
	}

	/*
	 * There is no namespace label (is_namespace_io()), or the label
	 * indicates the default sector size.
	 */
	return 512;
}
EXPORT_SYMBOL(pmem_sector_size);

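/*
 * Resulting names, for illustration: "pmem1" for region 1's 0th pmem
 * namespace, "pmem1.2" for namespace id 2, "ndblk0.3" for blk namespace
 * id 3 in region 0, with an "s" suffix (e.g. "pmem1s") when the
 * namespace is claimed by a BTT.
 */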
const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
		char *name)
{
	struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
	const char *suffix = NULL;

	if (ndns->claim && is_nd_btt(ndns->claim))
		suffix = "s";

	if (is_namespace_pmem(&ndns->dev) || is_namespace_io(&ndns->dev)) {
		int nsidx = 0;

		if (is_namespace_pmem(&ndns->dev)) {
			struct nd_namespace_pmem *nspm;

			nspm = to_nd_namespace_pmem(&ndns->dev);
			nsidx = nspm->id;
		}

		if (nsidx)
			sprintf(name, "pmem%d.%d%s", nd_region->id, nsidx,
					suffix ? suffix : "");
		else
			sprintf(name, "pmem%d%s", nd_region->id,
					suffix ? suffix : "");
	} else if (is_namespace_blk(&ndns->dev)) {
		struct nd_namespace_blk *nsblk;

		nsblk = to_nd_namespace_blk(&ndns->dev);
		sprintf(name, "ndblk%d.%d%s", nd_region->id, nsblk->id,
				suffix ? suffix : "");
	} else {
		return NULL;
	}

	return name;
}
EXPORT_SYMBOL(nvdimm_namespace_disk_name);

const uuid_t *nd_dev_to_uuid(struct device *dev)
{
	if (!dev)
		return &uuid_null;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		return nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		return nsblk->uuid;
	} else
		return &uuid_null;
}
EXPORT_SYMBOL(nd_dev_to_uuid);

static ssize_t nstype_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);

	return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);

static ssize_t __alt_name_store(struct device *dev, const char *buf,
		const size_t len)
{
	char *input, *pos, *alt_name, **ns_altname;
	ssize_t rc;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		ns_altname = &nspm->alt_name;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		ns_altname = &nsblk->alt_name;
	} else
		return -ENXIO;

	if (dev->driver || to_ndns(dev)->claim)
		return -EBUSY;

	input = kstrndup(buf, len, GFP_KERNEL);
	if (!input)
		return -ENOMEM;

	pos = strim(input);
	if (strlen(pos) + 1 > NSLABEL_NAME_LEN) {
		rc = -EINVAL;
		goto out;
	}

	alt_name = kzalloc(NSLABEL_NAME_LEN, GFP_KERNEL);
	if (!alt_name) {
		rc = -ENOMEM;
		goto out;
	}
	kfree(*ns_altname);
	*ns_altname = alt_name;
	sprintf(*ns_altname, "%s", pos);
	rc = len;

out:
	kfree(input);
	return rc;
}

static resource_size_t nd_namespace_blk_size(struct nd_namespace_blk *nsblk)
{
	struct nd_region *nd_region = to_nd_region(nsblk->common.dev.parent);
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_label_id label_id;
	resource_size_t size = 0;
	struct resource *res;

	if (!nsblk->uuid)
		return 0;
	nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id.id) == 0)
			size += resource_size(res);
	return size;
}

static bool __nd_namespace_blk_validate(struct nd_namespace_blk *nsblk)
{
	struct nd_region *nd_region = to_nd_region(nsblk->common.dev.parent);
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_label_id label_id;
	struct resource *res;
	int count, i;

	if (!nsblk->uuid || !nsblk->lbasize || !ndd)
		return false;

	count = 0;
	nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
	for_each_dpa_resource(ndd, res) {
		if (strcmp(res->name, label_id.id) != 0)
			continue;
		/*
		 * Resources with unacknowledged adjustments indicate a
		 * failure to update labels
		 */
		if (res->flags & DPA_RESOURCE_ADJUSTED)
			return false;
		count++;
	}

	/* These values match after a successful label update */
	if (count != nsblk->num_resources)
		return false;

	for (i = 0; i < nsblk->num_resources; i++) {
		struct resource *found = NULL;

		for_each_dpa_resource(ndd, res)
			if (res == nsblk->res[i]) {
				found = res;
				break;
			}
		/* stale resource */
		if (!found)
			return false;
	}

	return true;
}

resource_size_t nd_namespace_blk_validate(struct nd_namespace_blk *nsblk)
{
	resource_size_t size;

	nvdimm_bus_lock(&nsblk->common.dev);
	size = __nd_namespace_blk_validate(nsblk);
	nvdimm_bus_unlock(&nsblk->common.dev);

	return size;
}
EXPORT_SYMBOL(nd_namespace_blk_validate);

static int nd_namespace_label_update(struct nd_region *nd_region,
		struct device *dev)
{
	dev_WARN_ONCE(dev, dev->driver || to_ndns(dev)->claim,
			"namespace must be idle during label update\n");
	if (dev->driver || to_ndns(dev)->claim)
		return 0;

	/*
	 * Only allow label writes that will result in a valid namespace
	 * or deletion of an existing namespace.
	 */
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
		resource_size_t size = resource_size(&nspm->nsio.res);

		if (size == 0 && nspm->uuid)
			/* delete allocation */;
		else if (!nspm->uuid)
			return 0;

		return nd_pmem_namespace_label_update(nd_region, nspm, size);
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
		resource_size_t size = nd_namespace_blk_size(nsblk);

		if (size == 0 && nsblk->uuid)
			/* delete allocation */;
		else if (!nsblk->uuid || !nsblk->lbasize)
			return 0;

		return nd_blk_namespace_label_update(nd_region, nsblk, size);
	} else
		return -ENXIO;
}

static ssize_t alt_name_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	ssize_t rc;

	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = __alt_name_store(dev, buf, len);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc);
	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	return rc < 0 ? rc : len;
}

static ssize_t alt_name_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	char *ns_altname;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		ns_altname = nspm->alt_name;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		ns_altname = nsblk->alt_name;
	} else
		return -ENXIO;

	return sprintf(buf, "%s\n", ns_altname ? ns_altname : "");
}
static DEVICE_ATTR_RW(alt_name);

static int scan_free(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
		resource_size_t n)
{
	bool is_blk = strncmp(label_id->id, "blk", 3) == 0;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	int rc = 0;

	while (n) {
		struct resource *res, *last;
		resource_size_t new_start;

		last = NULL;
		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, label_id->id) == 0)
				last = res;
		res = last;
		if (!res)
			return 0;

		if (n >= resource_size(res)) {
			n -= resource_size(res);
			nd_dbg_dpa(nd_region, ndd, res, "delete %d\n", rc);
			nvdimm_free_dpa(ndd, res);
			/* retry with last resource deleted */
			continue;
		}

		/*
		 * Keep BLK allocations relegated to high DPA as much as
		 * possible
		 */
		if (is_blk)
			new_start = res->start + n;
		else
			new_start = res->start;

		rc = adjust_resource(res, new_start, resource_size(res) - n);
		if (rc == 0)
			res->flags |= DPA_RESOURCE_ADJUSTED;
		nd_dbg_dpa(nd_region, ndd, res, "shrink %d\n", rc);
		break;
	}

	return rc;
}

/**
 * shrink_dpa_allocation - for each dimm in region free n bytes for label_id
 * @nd_region: the set of dimms to reclaim @n bytes from
 * @label_id: unique identifier for the namespace consuming this dpa range
 * @n: number of bytes per-dimm to release
 *
 * Assumes resources are ordered.  Starting from the end try to
 * adjust_resource() the allocation to @n, but if @n is larger than the
 * allocation delete it and find the 'new' last allocation in the label
 * set.
 */
static int shrink_dpa_allocation(struct nd_region *nd_region,
		struct nd_label_id *label_id, resource_size_t n)
{
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		int rc;

		rc = scan_free(nd_region, nd_mapping, label_id, n);
		if (rc)
			return rc;
	}

	return 0;
}

static resource_size_t init_dpa_allocation(struct nd_label_id *label_id,
		struct nd_region *nd_region, struct nd_mapping *nd_mapping,
		resource_size_t n)
{
	bool is_blk = strncmp(label_id->id, "blk", 3) == 0;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	resource_size_t first_dpa;
	struct resource *res;
	int rc = 0;

	/* allocate blk from highest dpa first */
	if (is_blk)
		first_dpa = nd_mapping->start + nd_mapping->size - n;
	else
		first_dpa = nd_mapping->start;

	/* first resource allocation for this label-id or dimm */
	res = nvdimm_allocate_dpa(ndd, label_id, first_dpa, n);
	if (!res)
		rc = -EBUSY;

	nd_dbg_dpa(nd_region, ndd, res, "init %d\n", rc);
	return rc ? n : 0;
}
519
520 /**
521  * space_valid() - validate free dpa space against constraints
522  * @nd_region: hosting region of the free space
523  * @ndd: dimm device data for debug
524  * @label_id: namespace id to allocate space
525  * @prev: potential allocation that precedes free space
526  * @next: allocation that follows the given free space range
527  * @exist: first allocation with same id in the mapping
528  * @n: range that must satisfied for pmem allocations
529  * @valid: free space range to validate
530  *
531  * BLK-space is valid as long as it does not precede a PMEM
532  * allocation in a given region. PMEM-space must be contiguous
533  * and adjacent to an existing existing allocation (if one
534  * exists).  If reserving PMEM any space is valid.
535  */
static void space_valid(struct nd_region *nd_region, struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id, struct resource *prev,
		struct resource *next, struct resource *exist,
		resource_size_t n, struct resource *valid)
{
	bool is_reserve = strcmp(label_id->id, "pmem-reserve") == 0;
	bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
	unsigned long align;

	align = nd_region->align / nd_region->ndr_mappings;
	valid->start = ALIGN(valid->start, align);
	valid->end = ALIGN_DOWN(valid->end + 1, align) - 1;

	if (valid->start >= valid->end)
		goto invalid;

	if (is_reserve)
		return;

	if (!is_pmem) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];
		struct nvdimm_bus *nvdimm_bus;
		struct blk_alloc_info info = {
			.nd_mapping = nd_mapping,
			.available = nd_mapping->size,
			.res = valid,
		};

		WARN_ON(!is_nd_blk(&nd_region->dev));
		nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
		device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy);
		return;
	}

	/* allocation needs to be contiguous, so this is all or nothing */
	if (resource_size(valid) < n)
		goto invalid;

	/* we've got all the space we need and no existing allocation */
	if (!exist)
		return;

	/* allocation needs to be contiguous with the existing namespace */
	if (valid->start == exist->end + 1
			|| valid->end == exist->start - 1)
		return;

 invalid:
	/* truncate @valid size to 0 */
	valid->end = valid->start - 1;
}
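
/*
 * Illustration of the PMEM checks above:
 *
 *	|--exist--|......... @valid .........|--next--|
 *
 * @valid survives only if it spans at least @n contiguous bytes and
 * abuts @exist (valid->start == exist->end + 1, or valid->end ==
 * exist->start - 1); otherwise it is truncated to zero size.
 */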

enum alloc_loc {
	ALLOC_ERR = 0, ALLOC_BEFORE, ALLOC_MID, ALLOC_AFTER,
};
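
/*
 * Placement of free space relative to a scanned resource @res in
 * scan_allocate() below:
 *
 *	ALLOC_BEFORE: between nd_mapping->start and the first resource
 *	ALLOC_MID:    between @res and its sibling
 *	ALLOC_AFTER:  between the last resource and the end of the mapping
 */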

static resource_size_t scan_allocate(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
		resource_size_t n)
{
	resource_size_t mapping_end = nd_mapping->start + nd_mapping->size - 1;
	bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res, *exist = NULL, valid;
	const resource_size_t to_allocate = n;
	int first;

	for_each_dpa_resource(ndd, res)
		if (strcmp(label_id->id, res->name) == 0)
			exist = res;

	valid.start = nd_mapping->start;
	valid.end = mapping_end;
	valid.name = "free space";
 retry:
	first = 0;
	for_each_dpa_resource(ndd, res) {
		struct resource *next = res->sibling, *new_res = NULL;
		resource_size_t allocate, available = 0;
		enum alloc_loc loc = ALLOC_ERR;
		const char *action;
		int rc = 0;

		/* ignore resources outside this nd_mapping */
		if (res->start > mapping_end)
			continue;
		if (res->end < nd_mapping->start)
			continue;

		/* space at the beginning of the mapping */
		if (!first++ && res->start > nd_mapping->start) {
			valid.start = nd_mapping->start;
			valid.end = res->start - 1;
			space_valid(nd_region, ndd, label_id, NULL, next, exist,
					to_allocate, &valid);
			available = resource_size(&valid);
			if (available)
				loc = ALLOC_BEFORE;
		}

		/* space between allocations */
		if (!loc && next) {
			valid.start = res->start + resource_size(res);
			valid.end = min(mapping_end, next->start - 1);
			space_valid(nd_region, ndd, label_id, res, next, exist,
					to_allocate, &valid);
			available = resource_size(&valid);
			if (available)
				loc = ALLOC_MID;
		}

		/* space at the end of the mapping */
		if (!loc && !next) {
			valid.start = res->start + resource_size(res);
			valid.end = mapping_end;
			space_valid(nd_region, ndd, label_id, res, next, exist,
					to_allocate, &valid);
			available = resource_size(&valid);
			if (available)
				loc = ALLOC_AFTER;
		}

		if (!loc || !available)
			continue;
		allocate = min(available, n);
		switch (loc) {
		case ALLOC_BEFORE:
			if (strcmp(res->name, label_id->id) == 0) {
				/* adjust current resource up */
				rc = adjust_resource(res, res->start - allocate,
						resource_size(res) + allocate);
				action = "cur grow up";
			} else
				action = "allocate";
			break;
		case ALLOC_MID:
			if (strcmp(next->name, label_id->id) == 0) {
				/* adjust next resource up */
				rc = adjust_resource(next, next->start
						- allocate, resource_size(next)
						+ allocate);
				new_res = next;
				action = "next grow up";
			} else if (strcmp(res->name, label_id->id) == 0) {
				action = "grow down";
			} else
				action = "allocate";
			break;
		case ALLOC_AFTER:
			if (strcmp(res->name, label_id->id) == 0)
				action = "grow down";
			else
				action = "allocate";
			break;
		default:
			return n;
		}

		if (strcmp(action, "allocate") == 0) {
			/* BLK allocate bottom up */
			if (!is_pmem)
				valid.start += available - allocate;

			new_res = nvdimm_allocate_dpa(ndd, label_id,
					valid.start, allocate);
			if (!new_res)
				rc = -EBUSY;
		} else if (strcmp(action, "grow down") == 0) {
			/* adjust current resource down */
			rc = adjust_resource(res, res->start, resource_size(res)
					+ allocate);
			if (rc == 0)
				res->flags |= DPA_RESOURCE_ADJUSTED;
		}

		if (!new_res)
			new_res = res;

		nd_dbg_dpa(nd_region, ndd, new_res, "%s(%d) %d\n",
				action, loc, rc);

		if (rc)
			return n;

		n -= allocate;
		if (n) {
			/*
			 * Retry scan with newly inserted resources.
			 * For example, if we did an ALLOC_BEFORE
			 * insertion there may also have been space
			 * available for an ALLOC_AFTER insertion, so we
			 * need to check this same resource again
			 */
			goto retry;
		} else
			return 0;
	}

	/*
	 * If we allocated nothing in the BLK case it may be because we are in
	 * an initial "pmem-reserve pass".  Only do an initial BLK allocation
	 * when none of the DPA space is reserved.
	 */
	if ((is_pmem || !ndd->dpa.child) && n == to_allocate)
		return init_dpa_allocation(label_id, nd_region, nd_mapping, n);
	return n;
}

static int merge_dpa(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_label_id *label_id)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res;

	if (strncmp("pmem", label_id->id, 4) == 0)
		return 0;
 retry:
	for_each_dpa_resource(ndd, res) {
		int rc;
		struct resource *next = res->sibling;
		resource_size_t end = res->start + resource_size(res);

		if (!next || strcmp(res->name, label_id->id) != 0
				|| strcmp(next->name, label_id->id) != 0
				|| end != next->start)
			continue;
		end += resource_size(next);
		nvdimm_free_dpa(ndd, next);
		rc = adjust_resource(res, res->start, end - res->start);
		nd_dbg_dpa(nd_region, ndd, res, "merge %d\n", rc);
		if (rc)
			return rc;
		res->flags |= DPA_RESOURCE_ADJUSTED;
		goto retry;
	}

	return 0;
}
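
/*
 * Example, with hypothetical addresses: two adjacent extents of the
 * same blk label id, [0x1000-0x1fff] and [0x2000-0x2fff], collapse into
 * a single [0x1000-0x2fff] resource.  The survivor is flagged
 * DPA_RESOURCE_ADJUSTED so that the next label update rewrites the
 * on-media extent list.
 */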

int __reserve_free_pmem(struct device *dev, void *data)
{
	struct nvdimm *nvdimm = data;
	struct nd_region *nd_region;
	struct nd_label_id label_id;
	int i;

	if (!is_memory(dev))
		return 0;

	nd_region = to_nd_region(dev);
	if (nd_region->ndr_mappings == 0)
		return 0;

	memset(&label_id, 0, sizeof(label_id));
	strcat(label_id.id, "pmem-reserve");
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		resource_size_t n, rem = 0;

		if (nd_mapping->nvdimm != nvdimm)
			continue;

		n = nd_pmem_available_dpa(nd_region, nd_mapping, &rem);
		if (n == 0)
			return 0;
		rem = scan_allocate(nd_region, nd_mapping, &label_id, n);
		dev_WARN_ONCE(&nd_region->dev, rem,
				"pmem reserve underrun: %#llx of %#llx bytes\n",
				(unsigned long long) n - rem,
				(unsigned long long) n);
		return rem ? -ENXIO : 0;
	}

	return 0;
}

void release_free_pmem(struct nvdimm_bus *nvdimm_bus,
		struct nd_mapping *nd_mapping)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res, *_res;

	for_each_dpa_resource_safe(ndd, res, _res)
		if (strcmp(res->name, "pmem-reserve") == 0)
			nvdimm_free_dpa(ndd, res);
}

static int reserve_free_pmem(struct nvdimm_bus *nvdimm_bus,
		struct nd_mapping *nd_mapping)
{
	struct nvdimm *nvdimm = nd_mapping->nvdimm;
	int rc;

	rc = device_for_each_child(&nvdimm_bus->dev, nvdimm,
			__reserve_free_pmem);
	if (rc)
		release_free_pmem(nvdimm_bus, nd_mapping);
	return rc;
}

/**
 * grow_dpa_allocation - for each dimm allocate n bytes for @label_id
 * @nd_region: the set of dimms to allocate @n more bytes from
 * @label_id: unique identifier for the namespace consuming this dpa range
 * @n: number of bytes per-dimm to add to the existing allocation
 *
 * Assumes resources are ordered.  For BLK regions, first consume
 * BLK-only available DPA free space, then consume PMEM-aliased DPA
 * space starting at the highest DPA.  For PMEM regions start
 * allocations from the start of an interleave set and end at the first
 * BLK allocation or the end of the interleave set, whichever comes
 * first.
 */
static int grow_dpa_allocation(struct nd_region *nd_region,
		struct nd_label_id *label_id, resource_size_t n)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
	bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		resource_size_t rem = n;
		int rc, j;

		/*
		 * In the BLK case try once with all unallocated PMEM
		 * reserved, and once without
		 */
		for (j = is_pmem; j < 2; j++) {
			bool blk_only = j == 0;

			if (blk_only) {
				rc = reserve_free_pmem(nvdimm_bus, nd_mapping);
				if (rc)
					return rc;
			}
			rem = scan_allocate(nd_region, nd_mapping,
					label_id, rem);
			if (blk_only)
				release_free_pmem(nvdimm_bus, nd_mapping);

			/* try again and allow encroachments into PMEM */
			if (rem == 0)
				break;
		}

		dev_WARN_ONCE(&nd_region->dev, rem,
				"allocation underrun: %#llx of %#llx bytes\n",
				(unsigned long long) n - rem,
				(unsigned long long) n);
		if (rem)
			return -ENXIO;

		rc = merge_dpa(nd_region, nd_mapping, label_id);
		if (rc)
			return rc;
	}

	return 0;
}

static void nd_namespace_pmem_set_resource(struct nd_region *nd_region,
		struct nd_namespace_pmem *nspm, resource_size_t size)
{
	struct resource *res = &nspm->nsio.res;
	resource_size_t offset = 0;

	if (size && !nspm->uuid) {
		WARN_ON_ONCE(1);
		size = 0;
	}

	if (size && nspm->uuid) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nd_label_id label_id;
		struct resource *res;

		if (!ndd) {
			size = 0;
			goto out;
		}

		nd_label_gen_id(&label_id, nspm->uuid, 0);

		/* calculate a spa offset from the dpa allocation offset */
		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, label_id.id) == 0) {
				offset = (res->start - nd_mapping->start)
					* nd_region->ndr_mappings;
				goto out;
			}

		WARN_ON_ONCE(1);
		size = 0;
	}

 out:
	res->start = nd_region->ndr_start + offset;
	res->end = res->start + size - 1;
}

static bool uuid_not_set(const uuid_t *uuid, struct device *dev,
			 const char *where)
{
	if (!uuid) {
		dev_dbg(dev, "%s: uuid not set\n", where);
		return true;
	}
	return false;
}

static ssize_t __size_store(struct device *dev, unsigned long long val)
{
	resource_size_t allocated = 0, available = 0;
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_namespace_common *ndns = to_ndns(dev);
	struct nd_mapping *nd_mapping;
	struct nvdimm_drvdata *ndd;
	struct nd_label_id label_id;
	u32 flags = 0, remainder;
	int rc, i, id = -1;
	uuid_t *uuid = NULL;

	if (dev->driver || ndns->claim)
		return -EBUSY;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		uuid = nspm->uuid;
		id = nspm->id;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		uuid = nsblk->uuid;
		flags = NSLABEL_FLAG_LOCAL;
		id = nsblk->id;
	}

	/*
	 * We need a uuid for the allocation-label and dimm(s) on which
	 * to store the label.
	 */
	if (uuid_not_set(uuid, dev, __func__))
		return -ENXIO;
	if (nd_region->ndr_mappings == 0) {
		dev_dbg(dev, "not associated with dimm(s)\n");
		return -ENXIO;
	}

	div_u64_rem(val, nd_region->align, &remainder);
	if (remainder) {
		dev_dbg(dev, "%llu is not %luK aligned\n", val,
				nd_region->align / SZ_1K);
		return -EINVAL;
	}

	nd_label_gen_id(&label_id, uuid, flags);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		nd_mapping = &nd_region->mapping[i];
		ndd = to_ndd(nd_mapping);

		/*
		 * All dimms in an interleave set, or the base dimm for a blk
		 * region, need to be enabled for the size to be changed.
		 */
		if (!ndd)
			return -ENXIO;

		allocated += nvdimm_allocated_dpa(ndd, &label_id);
	}
	available = nd_region_allocatable_dpa(nd_region);

	if (val > available + allocated)
		return -ENOSPC;

	if (val == allocated)
		return 0;

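	/*
	 * @val and @allocated are region-wide totals; divide by the
	 * number of mappings to get the per-dimm dpa amounts.  For
	 * example, a 2G request on a 2-way interleave set grows each
	 * dimm's allocation to 1G.
	 */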
	val = div_u64(val, nd_region->ndr_mappings);
	allocated = div_u64(allocated, nd_region->ndr_mappings);
	if (val < allocated)
		rc = shrink_dpa_allocation(nd_region, &label_id,
				allocated - val);
	else
		rc = grow_dpa_allocation(nd_region, &label_id, val - allocated);

	if (rc)
		return rc;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		nd_namespace_pmem_set_resource(nd_region, nspm,
				val * nd_region->ndr_mappings);
	}

	/*
	 * Try to delete the namespace if we deleted all of its
	 * allocation, this is not the seed or 0th device for the
	 * region, and it is not actively claimed by a btt, pfn, or dax
	 * instance.
	 */
	if (val == 0 && id != 0 && nd_region->ns_seed != dev && !ndns->claim)
		nd_device_unregister(dev, ND_ASYNC);

	return rc;
}

static ssize_t size_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	unsigned long long val;
	uuid_t **uuid = NULL;
	int rc;

	rc = kstrtoull(buf, 0, &val);
	if (rc)
		return rc;

	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = __size_store(dev, val);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		uuid = &nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		uuid = &nsblk->uuid;
	}

	if (rc == 0 && val == 0 && uuid) {
		/* setting size zero == 'delete namespace' */
		kfree(*uuid);
		*uuid = NULL;
	}

	dev_dbg(dev, "%llx %s (%d)\n", val, rc < 0 ? "fail" : "success", rc);

	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	return rc < 0 ? rc : len;
}

resource_size_t __nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
{
	struct device *dev = &ndns->dev;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		return resource_size(&nspm->nsio.res);
	} else if (is_namespace_blk(dev)) {
		return nd_namespace_blk_size(to_nd_namespace_blk(dev));
	} else if (is_namespace_io(dev)) {
		struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

		return resource_size(&nsio->res);
	} else
		WARN_ONCE(1, "unknown namespace type\n");
	return 0;
}

resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
{
	resource_size_t size;

	nvdimm_bus_lock(&ndns->dev);
	size = __nvdimm_namespace_capacity(ndns);
	nvdimm_bus_unlock(&ndns->dev);

	return size;
}
EXPORT_SYMBOL(nvdimm_namespace_capacity);

bool nvdimm_namespace_locked(struct nd_namespace_common *ndns)
{
	int i;
	bool locked = false;
	struct device *dev = &ndns->dev;
	struct nd_region *nd_region = to_nd_region(dev->parent);

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		if (test_bit(NDD_LOCKED, &nvdimm->flags)) {
			dev_dbg(dev, "%s locked\n", nvdimm_name(nvdimm));
			locked = true;
		}
	}
	return locked;
}
EXPORT_SYMBOL(nvdimm_namespace_locked);

static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%llu\n", (unsigned long long)
			nvdimm_namespace_capacity(to_ndns(dev)));
}
static DEVICE_ATTR(size, 0444, size_show, size_store);

static uuid_t *namespace_to_uuid(struct device *dev)
{
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		return nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		return nsblk->uuid;
	} else
		return ERR_PTR(-ENXIO);
}

static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	uuid_t *uuid = namespace_to_uuid(dev);

	if (IS_ERR(uuid))
		return PTR_ERR(uuid);
	if (uuid)
		return sprintf(buf, "%pUb\n", uuid);
	return sprintf(buf, "\n");
}

/**
 * namespace_update_uuid - check for a unique uuid and whether we're "renaming"
 * @nd_region: parent region so we can update all dimms in the set
 * @dev: namespace type for generating label_id
 * @new_uuid: incoming uuid
 * @old_uuid: reference to the uuid storage location in the namespace object
 */
static int namespace_update_uuid(struct nd_region *nd_region,
				 struct device *dev, uuid_t *new_uuid,
				 uuid_t **old_uuid)
{
	u32 flags = is_namespace_blk(dev) ? NSLABEL_FLAG_LOCAL : 0;
	struct nd_label_id old_label_id;
	struct nd_label_id new_label_id;
	int i;

	if (!nd_is_uuid_unique(dev, new_uuid))
		return -EINVAL;

	if (*old_uuid == NULL)
		goto out;

	/*
	 * If we've already written a label with this uuid, then it's
	 * too late to rename because we can't reliably update the uuid
	 * without losing the old namespace.  Userspace must delete this
	 * namespace to abandon the old uuid.
	 */
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];

		/*
		 * This check by itself is sufficient because old_uuid
		 * would be NULL above if this uuid did not exist in the
		 * currently written set.
		 *
		 * FIXME: can we delete uuid with zero dpa allocated?
		 */
		if (list_empty(&nd_mapping->labels))
			return -EBUSY;
	}

	nd_label_gen_id(&old_label_id, *old_uuid, flags);
	nd_label_gen_id(&new_label_id, new_uuid, flags);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nd_label_ent *label_ent;
		struct resource *res;

		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, old_label_id.id) == 0)
				sprintf((void *) res->name, "%s",
						new_label_id.id);

		mutex_lock(&nd_mapping->lock);
		list_for_each_entry(label_ent, &nd_mapping->labels, list) {
			struct nd_namespace_label *nd_label = label_ent->label;
			struct nd_label_id label_id;
			uuid_t uuid;

			if (!nd_label)
				continue;
			nsl_get_uuid(ndd, nd_label, &uuid);
			nd_label_gen_id(&label_id, &uuid,
					nsl_get_flags(ndd, nd_label));
			if (strcmp(old_label_id.id, label_id.id) == 0)
				set_bit(ND_LABEL_REAP, &label_ent->flags);
		}
		mutex_unlock(&nd_mapping->lock);
	}
	kfree(*old_uuid);
 out:
	*old_uuid = new_uuid;
	return 0;
}

static ssize_t uuid_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	uuid_t *uuid = NULL;
	uuid_t **ns_uuid;
	ssize_t rc = 0;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		ns_uuid = &nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		ns_uuid = &nsblk->uuid;
	} else
		return -ENXIO;

	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	if (to_ndns(dev)->claim)
		rc = -EBUSY;
	if (rc >= 0)
		rc = nd_uuid_store(dev, &uuid, buf, len);
	if (rc >= 0)
		rc = namespace_update_uuid(nd_region, dev, uuid, ns_uuid);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	else
		kfree(uuid);
	dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
			buf[len - 1] == '\n' ? "" : "\n");
	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	return rc < 0 ? rc : len;
}
static DEVICE_ATTR_RW(uuid);

static ssize_t resource_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct resource *res;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		res = &nspm->nsio.res;
	} else if (is_namespace_io(dev)) {
		struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

		res = &nsio->res;
	} else
		return -ENXIO;

	/* no address to convey if the namespace has no allocation */
	if (resource_size(res) == 0)
		return -ENXIO;
	return sprintf(buf, "%#llx\n", (unsigned long long) res->start);
}
static DEVICE_ATTR_ADMIN_RO(resource);

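/*
 * Zero-terminated tables of supported logical block sizes.  The
 * non-power-of-two blk sizes (520, 528, 4104, ...) presumably leave
 * room for per-sector metadata alongside the data payload.
 */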
static const unsigned long blk_lbasize_supported[] = { 512, 520, 528,
	4096, 4104, 4160, 4224, 0 };

static const unsigned long pmem_lbasize_supported[] = { 512, 4096, 0 };

static ssize_t sector_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		return nd_size_select_show(nsblk->lbasize,
				blk_lbasize_supported, buf);
	}

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		return nd_size_select_show(nspm->lbasize,
				pmem_lbasize_supported, buf);
	}
	return -ENXIO;
}

static ssize_t sector_size_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	const unsigned long *supported;
	unsigned long *lbasize;
	ssize_t rc = 0;

	if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		lbasize = &nsblk->lbasize;
		supported = blk_lbasize_supported;
	} else if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		lbasize = &nspm->lbasize;
		supported = pmem_lbasize_supported;
	} else
		return -ENXIO;

	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	if (to_ndns(dev)->claim)
		rc = -EBUSY;
	if (rc >= 0)
		rc = nd_size_select_store(dev, buf, lbasize, supported);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	dev_dbg(dev, "result: %zd %s: %s%s", rc, rc < 0 ? "tried" : "wrote",
			buf, buf[len - 1] == '\n' ? "" : "\n");
	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	return rc ? rc : len;
}
static DEVICE_ATTR_RW(sector_size);

static ssize_t dpa_extents_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_label_id label_id;
	uuid_t *uuid = NULL;
	int count = 0, i;
	u32 flags = 0;

	nvdimm_bus_lock(dev);
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		uuid = nspm->uuid;
		flags = 0;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		uuid = nsblk->uuid;
		flags = NSLABEL_FLAG_LOCAL;
	}

	if (!uuid)
		goto out;

	nd_label_gen_id(&label_id, uuid, flags);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct resource *res;

		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, label_id.id) == 0)
				count++;
	}
 out:
	nvdimm_bus_unlock(dev);

	return sprintf(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(dpa_extents);

static int btt_claim_class(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	int i, loop_bitmask = 0;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nd_namespace_index *nsindex;

		/*
		 * If any of the DIMMs do not support labels the only
		 * possible BTT format is v1.
		 */
		if (!ndd) {
			loop_bitmask = 0;
			break;
		}

		nsindex = to_namespace_index(ndd, ndd->ns_current);
		if (nsindex == NULL)
			loop_bitmask |= 1;
		else {
			/* check whether existing labels are v1.1 or v1.2 */
			if (__le16_to_cpu(nsindex->major) == 1
					&& __le16_to_cpu(nsindex->minor) == 1)
				loop_bitmask |= 2;
			else
				loop_bitmask |= 4;
		}
	}
	/*
	 * If nsindex is null loop_bitmask's bit 0 will be set, and if an index
	 * block is found, a v1.1 label for any mapping will set bit 1, and a
	 * v1.2 label will set bit 2.
	 *
	 * At the end of the loop, at most one of the three bits must be set.
	 * If multiple bits were set, it means the different mappings disagree
	 * about their labels, and this must be cleaned up first.
	 *
	 * If all the label index blocks are found to agree, nsindex of NULL
	 * implies labels haven't been initialized yet; when they are, they
	 * will use the v1.2 format, so we can assume BTT2.0
	 *
	 * If 1.1 labels are found, we enforce BTT1.1, and if 1.2 labels are
	 * found, we enforce BTT2.0
	 *
	 * If the loop was never entered, default to BTT1.1 (legacy namespaces)
	 */
1469         switch (loop_bitmask) {
1470         case 0:
1471         case 2:
1472                 return NVDIMM_CCLASS_BTT;
1473         case 1:
1474         case 4:
1475                 return NVDIMM_CCLASS_BTT2;
1476         default:
1477                 return -ENXIO;
1478         }
1479 }
1480
1481 static ssize_t holder_show(struct device *dev,
1482                 struct device_attribute *attr, char *buf)
1483 {
1484         struct nd_namespace_common *ndns = to_ndns(dev);
1485         ssize_t rc;
1486
1487         nd_device_lock(dev);
1488         rc = sprintf(buf, "%s\n", ndns->claim ? dev_name(ndns->claim) : "");
1489         nd_device_unlock(dev);
1490
1491         return rc;
1492 }
1493 static DEVICE_ATTR_RO(holder);
1494
1495 static int __holder_class_store(struct device *dev, const char *buf)
1496 {
1497         struct nd_namespace_common *ndns = to_ndns(dev);
1498
1499         if (dev->driver || ndns->claim)
1500                 return -EBUSY;
1501
1502         if (sysfs_streq(buf, "btt")) {
1503                 int rc = btt_claim_class(dev);
1504
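                     /*
                      * btt_claim_class() returns a negative errno when the
                      * mappings disagree about their label versions; pass
                      * that failure back to userspace.
                      */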
1505                 if (rc < NVDIMM_CCLASS_NONE)
1506                         return rc;
1507                 ndns->claim_class = rc;
1508         } else if (sysfs_streq(buf, "pfn"))
1509                 ndns->claim_class = NVDIMM_CCLASS_PFN;
1510         else if (sysfs_streq(buf, "dax"))
1511                 ndns->claim_class = NVDIMM_CCLASS_DAX;
1512         else if (sysfs_streq(buf, ""))
1513                 ndns->claim_class = NVDIMM_CCLASS_NONE;
1514         else
1515                 return -EINVAL;
1516
1517         return 0;
1518 }
1519
1520 static ssize_t holder_class_store(struct device *dev,
1521                 struct device_attribute *attr, const char *buf, size_t len)
1522 {
1523         struct nd_region *nd_region = to_nd_region(dev->parent);
1524         int rc;
1525
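             /*
              * Hold the device and bus locks and let in-flight probing
              * settle so the claim-class update and label write happen
              * against a stable namespace configuration.
              */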
1526         nd_device_lock(dev);
1527         nvdimm_bus_lock(dev);
1528         wait_nvdimm_bus_probe_idle(dev);
1529         rc = __holder_class_store(dev, buf);
1530         if (rc >= 0)
1531                 rc = nd_namespace_label_update(nd_region, dev);
1532         dev_dbg(dev, "%s(%d)\n", rc < 0 ? "fail " : "", rc);
1533         nvdimm_bus_unlock(dev);
1534         nd_device_unlock(dev);
1535
1536         return rc < 0 ? rc : len;
1537 }
1538
1539 static ssize_t holder_class_show(struct device *dev,
1540                 struct device_attribute *attr, char *buf)
1541 {
1542         struct nd_namespace_common *ndns = to_ndns(dev);
1543         ssize_t rc;
1544
1545         nd_device_lock(dev);
1546         if (ndns->claim_class == NVDIMM_CCLASS_NONE)
1547                 rc = sprintf(buf, "\n");
1548         else if ((ndns->claim_class == NVDIMM_CCLASS_BTT) ||
1549                         (ndns->claim_class == NVDIMM_CCLASS_BTT2))
1550                 rc = sprintf(buf, "btt\n");
1551         else if (ndns->claim_class == NVDIMM_CCLASS_PFN)
1552                 rc = sprintf(buf, "pfn\n");
1553         else if (ndns->claim_class == NVDIMM_CCLASS_DAX)
1554                 rc = sprintf(buf, "dax\n");
1555         else
1556                 rc = sprintf(buf, "<unknown>\n");
1557         nd_device_unlock(dev);
1558
1559         return rc;
1560 }
1561 static DEVICE_ATTR_RW(holder_class);
1562
1563 static ssize_t mode_show(struct device *dev,
1564                 struct device_attribute *attr, char *buf)
1565 {
1566         struct nd_namespace_common *ndns = to_ndns(dev);
1567         struct device *claim;
1568         char *mode;
1569         ssize_t rc;
1570
1571         nd_device_lock(dev);
1572         claim = ndns->claim;
1573         if (claim && is_nd_btt(claim))
1574                 mode = "safe";
1575         else if (claim && is_nd_pfn(claim))
1576                 mode = "memory";
1577         else if (claim && is_nd_dax(claim))
1578                 mode = "dax";
1579         else if (!claim && pmem_should_map_pages(dev))
1580                 mode = "memory";
1581         else
1582                 mode = "raw";
1583         rc = sprintf(buf, "%s\n", mode);
1584         nd_device_unlock(dev);
1585
1586         return rc;
1587 }
1588 static DEVICE_ATTR_RO(mode);
1589
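     /*
      * 'force_raw' takes a boolean, e.g.:
      *   echo 1 > /sys/bus/nd/devices/namespace0.0/force_raw
      * ...requesting that the namespace be driven in raw mode at its next
      * probe.
      */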
1590 static ssize_t force_raw_store(struct device *dev,
1591                 struct device_attribute *attr, const char *buf, size_t len)
1592 {
1593         bool force_raw;
1594         int rc = strtobool(buf, &force_raw);
1595
1596         if (rc)
1597                 return rc;
1598
1599         to_ndns(dev)->force_raw = force_raw;
1600         return len;
1601 }
1602
1603 static ssize_t force_raw_show(struct device *dev,
1604                 struct device_attribute *attr, char *buf)
1605 {
1606         return sprintf(buf, "%d\n", to_ndns(dev)->force_raw);
1607 }
1608 static DEVICE_ATTR_RW(force_raw);
1609
1610 static struct attribute *nd_namespace_attributes[] = {
1611         &dev_attr_nstype.attr,
1612         &dev_attr_size.attr,
1613         &dev_attr_mode.attr,
1614         &dev_attr_uuid.attr,
1615         &dev_attr_holder.attr,
1616         &dev_attr_resource.attr,
1617         &dev_attr_alt_name.attr,
1618         &dev_attr_force_raw.attr,
1619         &dev_attr_sector_size.attr,
1620         &dev_attr_dpa_extents.attr,
1621         &dev_attr_holder_class.attr,
1622         NULL,
1623 };
1624
1625 static umode_t namespace_visible(struct kobject *kobj,
1626                 struct attribute *a, int n)
1627 {
1628         struct device *dev = container_of(kobj, struct device, kobj);
1629
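             /*
              * A blk namespace may span multiple discontiguous dpa extents,
              * so a single 'resource' range is not meaningful for it.
              */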
1630         if (a == &dev_attr_resource.attr && is_namespace_blk(dev))
1631                 return 0;
1632
1633         if (is_namespace_pmem(dev) || is_namespace_blk(dev)) {
1634                 if (a == &dev_attr_size.attr)
1635                         return 0644;
1636
1637                 return a->mode;
1638         }
1639
1640         /* base is_namespace_io() attributes */
1641         if (a == &dev_attr_nstype.attr || a == &dev_attr_size.attr ||
1642             a == &dev_attr_holder.attr || a == &dev_attr_holder_class.attr ||
1643             a == &dev_attr_force_raw.attr || a == &dev_attr_mode.attr ||
1644             a == &dev_attr_resource.attr)
1645                 return a->mode;
1646
1647         return 0;
1648 }
1649
1650 static struct attribute_group nd_namespace_attribute_group = {
1651         .attrs = nd_namespace_attributes,
1652         .is_visible = namespace_visible,
1653 };
1654
1655 static const struct attribute_group *nd_namespace_attribute_groups[] = {
1656         &nd_device_attribute_group,
1657         &nd_namespace_attribute_group,
1658         &nd_numa_attribute_group,
1659         NULL,
1660 };
1661
1662 static const struct device_type namespace_io_device_type = {
1663         .name = "nd_namespace_io",
1664         .release = namespace_io_release,
1665         .groups = nd_namespace_attribute_groups,
1666 };
1667
1668 static const struct device_type namespace_pmem_device_type = {
1669         .name = "nd_namespace_pmem",
1670         .release = namespace_pmem_release,
1671         .groups = nd_namespace_attribute_groups,
1672 };
1673
1674 static const struct device_type namespace_blk_device_type = {
1675         .name = "nd_namespace_blk",
1676         .release = namespace_blk_release,
1677         .groups = nd_namespace_attribute_groups,
1678 };
1679
1680 static bool is_namespace_pmem(const struct device *dev)
1681 {
1682         return dev ? dev->type == &namespace_pmem_device_type : false;
1683 }
1684
1685 static bool is_namespace_blk(const struct device *dev)
1686 {
1687         return dev ? dev->type == &namespace_blk_device_type : false;
1688 }
1689
1690 static bool is_namespace_io(const struct device *dev)
1691 {
1692         return dev ? dev->type == &namespace_io_device_type : false;
1693 }
1694
1695 struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
1696 {
1697         struct nd_btt *nd_btt = is_nd_btt(dev) ? to_nd_btt(dev) : NULL;
1698         struct nd_pfn *nd_pfn = is_nd_pfn(dev) ? to_nd_pfn(dev) : NULL;
1699         struct nd_dax *nd_dax = is_nd_dax(dev) ? to_nd_dax(dev) : NULL;
1700         struct nd_namespace_common *ndns = NULL;
1701         resource_size_t size;
1702
1703         if (nd_btt || nd_pfn || nd_dax) {
1704                 if (nd_btt)
1705                         ndns = nd_btt->ndns;
1706                 else if (nd_pfn)
1707                         ndns = nd_pfn->ndns;
1708                 else if (nd_dax)
1709                         ndns = nd_dax->nd_pfn.ndns;
1710
1711                 if (!ndns)
1712                         return ERR_PTR(-ENODEV);
1713
1714                 /*
1715                  * Flush any in-progress probes / removals in the driver
1716                  * for the raw personality of this namespace.
1717                  */
1718                 nd_device_lock(&ndns->dev);
1719                 nd_device_unlock(&ndns->dev);
1720                 if (ndns->dev.driver) {
1721                         dev_dbg(&ndns->dev, "is active, can't bind %s\n",
1722                                         dev_name(dev));
1723                         return ERR_PTR(-EBUSY);
1724                 }
1725                 if (dev_WARN_ONCE(&ndns->dev, ndns->claim != dev,
1726                                         "host (%s) vs claim (%s) mismatch\n",
1727                                         dev_name(dev),
1728                                         dev_name(ndns->claim)))
1729                         return ERR_PTR(-ENXIO);
1730         } else {
1731                 ndns = to_ndns(dev);
1732                 if (ndns->claim) {
1733                         dev_dbg(dev, "claimed by %s, failing probe\n",
1734                                 dev_name(ndns->claim));
1735
1736                         return ERR_PTR(-ENXIO);
1737                 }
1738         }
1739
1740         if (nvdimm_namespace_locked(ndns))
1741                 return ERR_PTR(-EACCES);
1742
1743         size = nvdimm_namespace_capacity(ndns);
1744         if (size < ND_MIN_NAMESPACE_SIZE) {
1745                 dev_dbg(&ndns->dev, "%pa is too small, must be at least %#x\n",
1746                                 &size, ND_MIN_NAMESPACE_SIZE);
1747                 return ERR_PTR(-ENODEV);
1748         }
1749
1750         /*
1751          * Note, alignment validation for fsdax and devdax mode
1752          * namespaces happens in nd_pfn_validate() where infoblock
1753          * padding parameters can be applied.
1754          */
1755         if (pmem_should_map_pages(dev)) {
1756                 struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
1757                 struct resource *res = &nsio->res;
1758
1759                 if (!IS_ALIGNED(res->start | (res->end + 1),
1760                                         memremap_compat_align())) {
1761                         dev_err(&ndns->dev, "%pr misaligned, unable to map\n", res);
1762                         return ERR_PTR(-EOPNOTSUPP);
1763                 }
1764         }
1765
1766         if (is_namespace_pmem(&ndns->dev)) {
1767                 struct nd_namespace_pmem *nspm;
1768
1769                 nspm = to_nd_namespace_pmem(&ndns->dev);
1770                 if (uuid_not_set(nspm->uuid, &ndns->dev, __func__))
1771                         return ERR_PTR(-ENODEV);
1772         } else if (is_namespace_blk(&ndns->dev)) {
1773                 struct nd_namespace_blk *nsblk;
1774
1775                 nsblk = to_nd_namespace_blk(&ndns->dev);
1776                 if (uuid_not_set(nsblk->uuid, &ndns->dev, __func__))
1777                         return ERR_PTR(-ENODEV);
1778                 if (!nsblk->lbasize) {
1779                         dev_dbg(&ndns->dev, "sector size not set\n");
1780                         return ERR_PTR(-ENODEV);
1781                 }
1782                 if (!nd_namespace_blk_validate(nsblk))
1783                         return ERR_PTR(-ENODEV);
1784         }
1785
1786         return ndns;
1787 }
1788 EXPORT_SYMBOL(nvdimm_namespace_common_probe);
1789
1790 int devm_namespace_enable(struct device *dev, struct nd_namespace_common *ndns,
1791                 resource_size_t size)
1792 {
1793         if (is_namespace_blk(&ndns->dev))
1794                 return 0;
1795         return devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev), size);
1796 }
1797 EXPORT_SYMBOL_GPL(devm_namespace_enable);
1798
1799 void devm_namespace_disable(struct device *dev, struct nd_namespace_common *ndns)
1800 {
1801         if (is_namespace_blk(&ndns->dev))
1802                 return;
1803         devm_nsio_disable(dev, to_nd_namespace_io(&ndns->dev));
1804 }
1805 EXPORT_SYMBOL_GPL(devm_namespace_disable);
1806
1807 static struct device **create_namespace_io(struct nd_region *nd_region)
1808 {
1809         struct nd_namespace_io *nsio;
1810         struct device *dev, **devs;
1811         struct resource *res;
1812
1813         nsio = kzalloc(sizeof(*nsio), GFP_KERNEL);
1814         if (!nsio)
1815                 return NULL;
1816
1817         devs = kcalloc(2, sizeof(struct device *), GFP_KERNEL);
1818         if (!devs) {
1819                 kfree(nsio);
1820                 return NULL;
1821         }
1822
1823         dev = &nsio->common.dev;
1824         dev->type = &namespace_io_device_type;
1825         dev->parent = &nd_region->dev;
1826         res = &nsio->res;
1827         res->name = dev_name(&nd_region->dev);
1828         res->flags = IORESOURCE_MEM;
1829         res->start = nd_region->ndr_start;
1830         res->end = res->start + nd_region->ndr_size - 1;
1831
1832         devs[0] = dev;
1833         return devs;
1834 }
1835
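     /*
      * Return true if some mapping in the region carries a label for @uuid
      * at interleave position @pos that validates against @cookie; a dimm
      * with duplicate entries for @uuid disqualifies the set.
      */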
1836 static bool has_uuid_at_pos(struct nd_region *nd_region, const uuid_t *uuid,
1837                             u64 cookie, u16 pos)
1838 {
1839         struct nd_namespace_label *found = NULL;
1840         int i;
1841
1842         for (i = 0; i < nd_region->ndr_mappings; i++) {
1843                 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1844                 struct nd_interleave_set *nd_set = nd_region->nd_set;
1845                 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1846                 struct nd_label_ent *label_ent;
1847                 bool found_uuid = false;
1848
1849                 list_for_each_entry(label_ent, &nd_mapping->labels, list) {
1850                         struct nd_namespace_label *nd_label = label_ent->label;
1851                         u16 position;
1852
1853                         if (!nd_label)
1854                                 continue;
1855                         position = nsl_get_position(ndd, nd_label);
1856
1857                         if (!nsl_validate_isetcookie(ndd, nd_label, cookie))
1858                                 continue;
1859
1860                         if (!nsl_uuid_equal(ndd, nd_label, uuid))
1861                                 continue;
1862
1863                         if (!nsl_validate_type_guid(ndd, nd_label,
1864                                                     &nd_set->type_guid))
1865                                 continue;
1866
1867                         if (found_uuid) {
1868                                 dev_dbg(ndd->dev, "duplicate entry for uuid\n");
1869                                 return false;
1870                         }
1871                         found_uuid = true;
1872                         if (!nsl_validate_nlabel(nd_region, ndd, nd_label))
1873                                 continue;
1874                         if (position != pos)
1875                                 continue;
1876                         found = nd_label;
1877                         break;
1878                 }
1879                 if (found)
1880                         break;
1881         }
1882         return found != NULL;
1883 }
1884
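     /*
      * Verify that every mapping carries a label for @pmem_id that sits
      * within the mapping's published dpa range, and move that label to
      * the front of the mapping's label list.
      */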
1885 static int select_pmem_id(struct nd_region *nd_region, const uuid_t *pmem_id)
1886 {
1887         int i;
1888
1889         if (!pmem_id)
1890                 return -ENODEV;
1891
1892         for (i = 0; i < nd_region->ndr_mappings; i++) {
1893                 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1894                 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1895                 struct nd_namespace_label *nd_label = NULL;
1896                 u64 hw_start, hw_end, pmem_start, pmem_end;
1897                 struct nd_label_ent *label_ent;
1898
1899                 lockdep_assert_held(&nd_mapping->lock);
1900                 list_for_each_entry(label_ent, &nd_mapping->labels, list) {
1901                         nd_label = label_ent->label;
1902                         if (!nd_label)
1903                                 continue;
1904                         if (nsl_uuid_equal(ndd, nd_label, pmem_id))
1905                                 break;
1906                         nd_label = NULL;
1907                 }
1908
1909                 if (!nd_label) {
1910                         WARN_ON(1);
1911                         return -EINVAL;
1912                 }
1913
1914                 /*
1915                  * Check that this label is compliant with the dpa
1916                  * range published in NFIT
1917                  */
1918                 hw_start = nd_mapping->start;
1919                 hw_end = hw_start + nd_mapping->size;
1920                 pmem_start = nsl_get_dpa(ndd, nd_label);
1921                 pmem_end = pmem_start + nsl_get_rawsize(ndd, nd_label);
1922                 if (pmem_start >= hw_start && pmem_start < hw_end
1923                                 && pmem_end <= hw_end && pmem_end > hw_start)
1924                         /* pass */;
1925                 else {
1926                         dev_dbg(&nd_region->dev, "%s invalid label for %pUb\n",
1927                                 dev_name(ndd->dev),
1928                                 nsl_uuid_raw(ndd, nd_label));
1929                         return -EINVAL;
1930                 }
1931
1932                 /* move recently validated label to the front of the list */
1933                 list_move(&label_ent->list, &nd_mapping->labels);
1934         }
1935         return 0;
1936 }
1937
1938 /**
1939  * create_namespace_pmem - validate interleave set labelling, retrieve label0
1940  * @nd_region: region with mappings to validate
1941  * @nd_mapping: container of dpa-resource-root + labels
1942  * @nd_label: target pmem namespace label to evaluate
1943  */
1944 static struct device *create_namespace_pmem(struct nd_region *nd_region,
1945                                             struct nd_mapping *nd_mapping,
1946                                             struct nd_namespace_label *nd_label)
1947 {
1948         struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1949         struct nd_namespace_index *nsindex =
1950                 to_namespace_index(ndd, ndd->ns_current);
1951         u64 cookie = nd_region_interleave_set_cookie(nd_region, nsindex);
1952         u64 altcookie = nd_region_interleave_set_altcookie(nd_region);
1953         struct nd_label_ent *label_ent;
1954         struct nd_namespace_pmem *nspm;
1955         resource_size_t size = 0;
1956         struct resource *res;
1957         struct device *dev;
1958         uuid_t uuid;
1959         int rc = 0;
1960         u16 i;
1961
1962         if (cookie == 0) {
1963                 dev_dbg(&nd_region->dev, "invalid interleave-set-cookie\n");
1964                 return ERR_PTR(-ENXIO);
1965         }
1966
1967         if (!nsl_validate_isetcookie(ndd, nd_label, cookie)) {
1968                 dev_dbg(&nd_region->dev, "invalid cookie in label: %pUb\n",
1969                         nsl_uuid_raw(ndd, nd_label));
1970                 if (!nsl_validate_isetcookie(ndd, nd_label, altcookie))
1971                         return ERR_PTR(-EAGAIN);
1972
1973                 dev_dbg(&nd_region->dev, "valid altcookie in label: %pUb\n",
1974                         nsl_uuid_raw(ndd, nd_label));
1975         }
1976
1977         nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
1978         if (!nspm)
1979                 return ERR_PTR(-ENOMEM);
1980
1981         nspm->id = -1;
1982         dev = &nspm->nsio.common.dev;
1983         dev->type = &namespace_pmem_device_type;
1984         dev->parent = &nd_region->dev;
1985         res = &nspm->nsio.res;
1986         res->name = dev_name(&nd_region->dev);
1987         res->flags = IORESOURCE_MEM;
1988
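             /*
              * Verify the label's uuid is present at every interleave
              * position, accepting either the current or the alternate
              * interleave-set cookie.
              */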
1989         for (i = 0; i < nd_region->ndr_mappings; i++) {
1990                 uuid_t uuid;
1991
1992                 nsl_get_uuid(ndd, nd_label, &uuid);
1993                 if (has_uuid_at_pos(nd_region, &uuid, cookie, i))
1994                         continue;
1995                 if (has_uuid_at_pos(nd_region, &uuid, altcookie, i))
1996                         continue;
1997                 break;
1998         }
1999
2000         if (i < nd_region->ndr_mappings) {
2001                 struct nvdimm *nvdimm = nd_region->mapping[i].nvdimm;
2002
2003                 /*
2004                  * Give up if we don't find an instance of a uuid at each
2005                  * position (from 0 to nd_region->ndr_mappings - 1), or if we
2006                  * find a dimm with two instances of the same uuid.
2007                  */
2008                 dev_err(&nd_region->dev, "%s missing label for %pUb\n",
2009                         nvdimm_name(nvdimm), nsl_uuid_raw(ndd, nd_label));
2010                 rc = -EINVAL;
2011                 goto err;
2012         }
2013
2014         /*
2015          * Fix up each mapping's 'labels' to have the validated pmem label for
2016          * that position at labels[0], and NULL at labels[1].  In the process,
2017          * check that the namespace aligns with the interleave set.  We know
2018          * that it does not overlap with any blk namespaces by virtue of
2019          * the dimm being enabled (i.e. nd_label_reserve_dpa()
2020          * succeeded).
2021          */
2022         nsl_get_uuid(ndd, nd_label, &uuid);
2023         rc = select_pmem_id(nd_region, &uuid);
2024         if (rc)
2025                 goto err;
2026
2027         /* Calculate total size and populate namespace properties from label0 */
2028         for (i = 0; i < nd_region->ndr_mappings; i++) {
2029                 struct nd_namespace_label *label0;
2030                 struct nvdimm_drvdata *ndd;
2031
2032                 nd_mapping = &nd_region->mapping[i];
2033                 label_ent = list_first_entry_or_null(&nd_mapping->labels,
2034                                 typeof(*label_ent), list);
2035                 label0 = label_ent ? label_ent->label : NULL;
2036
2037                 if (!label0) {
2038                         WARN_ON(1);
2039                         continue;
2040                 }
2041
2042                 ndd = to_ndd(nd_mapping);
2043                 size += nsl_get_rawsize(ndd, label0);
2044                 if (nsl_get_position(ndd, label0) != 0)
2045                         continue;
2046                 WARN_ON(nspm->alt_name || nspm->uuid);
2047                 nspm->alt_name = kmemdup(nsl_ref_name(ndd, label0),
2048                                          NSLABEL_NAME_LEN, GFP_KERNEL);
2049                 nsl_get_uuid(ndd, label0, &uuid);
2050                 nspm->uuid = kmemdup(&uuid, sizeof(uuid_t), GFP_KERNEL);
2051                 nspm->lbasize = nsl_get_lbasize(ndd, label0);
2052                 nspm->nsio.common.claim_class =
2053                         nsl_get_claim_class(ndd, label0);
2054         }
2055
2056         if (!nspm->alt_name || !nspm->uuid) {
2057                 rc = -ENOMEM;
2058                 goto err;
2059         }
2060
2061         nd_namespace_pmem_set_resource(nd_region, nspm, size);
2062
2063         return dev;
2064  err:
2065         namespace_pmem_release(dev);
2066         switch (rc) {
2067         case -EINVAL:
2068                 dev_dbg(&nd_region->dev, "invalid label(s)\n");
2069                 break;
2070         case -ENODEV:
2071                 dev_dbg(&nd_region->dev, "label not found\n");
2072                 break;
2073         default:
2074                 dev_dbg(&nd_region->dev, "unexpected err: %d\n", rc);
2075                 break;
2076         }
2077         return ERR_PTR(rc);
2078 }
2079
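     /*
      * Grow nsblk's resource array by one slot and point the new slot at
      * the dimm dpa resource that matches this namespace's label_id at
      * @start.
      */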
2080 struct resource *nsblk_add_resource(struct nd_region *nd_region,
2081                 struct nvdimm_drvdata *ndd, struct nd_namespace_blk *nsblk,
2082                 resource_size_t start)
2083 {
2084         struct nd_label_id label_id;
2085         struct resource *res;
2086
2087         nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
2088         res = krealloc(nsblk->res,
2089                         sizeof(void *) * (nsblk->num_resources + 1),
2090                         GFP_KERNEL);
2091         if (!res)
2092                 return NULL;
2093         nsblk->res = (struct resource **) res;
2094         for_each_dpa_resource(ndd, res)
2095                 if (strcmp(res->name, label_id.id) == 0
2096                                 && res->start == start) {
2097                         nsblk->res[nsblk->num_resources++] = res;
2098                         return res;
2099                 }
2100         return NULL;
2101 }
2102
2103 static struct device *nd_namespace_blk_create(struct nd_region *nd_region)
2104 {
2105         struct nd_namespace_blk *nsblk;
2106         struct device *dev;
2107
2108         if (!is_nd_blk(&nd_region->dev))
2109                 return NULL;
2110
2111         nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
2112         if (!nsblk)
2113                 return NULL;
2114
2115         dev = &nsblk->common.dev;
2116         dev->type = &namespace_blk_device_type;
2117         nsblk->id = ida_simple_get(&nd_region->ns_ida, 0, 0, GFP_KERNEL);
2118         if (nsblk->id < 0) {
2119                 kfree(nsblk);
2120                 return NULL;
2121         }
2122         dev_set_name(dev, "namespace%d.%d", nd_region->id, nsblk->id);
2123         dev->parent = &nd_region->dev;
2124
2125         return &nsblk->common.dev;
2126 }
2127
2128 static struct device *nd_namespace_pmem_create(struct nd_region *nd_region)
2129 {
2130         struct nd_namespace_pmem *nspm;
2131         struct resource *res;
2132         struct device *dev;
2133
2134         if (!is_memory(&nd_region->dev))
2135                 return NULL;
2136
2137         nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
2138         if (!nspm)
2139                 return NULL;
2140
2141         dev = &nspm->nsio.common.dev;
2142         dev->type = &namespace_pmem_device_type;
2143         dev->parent = &nd_region->dev;
2144         res = &nspm->nsio.res;
2145         res->name = dev_name(&nd_region->dev);
2146         res->flags = IORESOURCE_MEM;
2147
2148         nspm->id = ida_simple_get(&nd_region->ns_ida, 0, 0, GFP_KERNEL);
2149         if (nspm->id < 0) {
2150                 kfree(nspm);
2151                 return NULL;
2152         }
2153         dev_set_name(dev, "namespace%d.%d", nd_region->id, nspm->id);
2154         nd_namespace_pmem_set_resource(nd_region, nspm, 0);
2155
2156         return dev;
2157 }
2158
2159 void nd_region_create_ns_seed(struct nd_region *nd_region)
2160 {
2161         WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
2162
2163         if (nd_region_to_nstype(nd_region) == ND_DEVICE_NAMESPACE_IO)
2164                 return;
2165
2166         if (is_nd_blk(&nd_region->dev))
2167                 nd_region->ns_seed = nd_namespace_blk_create(nd_region);
2168         else
2169                 nd_region->ns_seed = nd_namespace_pmem_create(nd_region);
2170
2171         /*
2172          * Seed creation failures are not fatal; provisioning is simply
2173          * disabled until memory becomes available
2174          */
2175         if (!nd_region->ns_seed)
2176                 dev_err(&nd_region->dev, "failed to create %s namespace\n",
2177                                 is_nd_blk(&nd_region->dev) ? "blk" : "pmem");
2178         else
2179                 nd_device_register(nd_region->ns_seed);
2180 }
2181
2182 void nd_region_create_dax_seed(struct nd_region *nd_region)
2183 {
2184         WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
2185         nd_region->dax_seed = nd_dax_create(nd_region);
2186         /*
2187          * Seed creation failures are not fatal; provisioning is simply
2188          * disabled until memory becomes available
2189          */
2190         if (!nd_region->dax_seed)
2191                 dev_err(&nd_region->dev, "failed to create dax namespace\n");
2192 }
2193
2194 void nd_region_create_pfn_seed(struct nd_region *nd_region)
2195 {
2196         WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
2197         nd_region->pfn_seed = nd_pfn_create(nd_region);
2198         /*
2199          * Seed creation failures are not fatal; provisioning is simply
2200          * disabled until memory becomes available
2201          */
2202         if (!nd_region->pfn_seed)
2203                 dev_err(&nd_region->dev, "failed to create pfn namespace\n");
2204 }
2205
2206 void nd_region_create_btt_seed(struct nd_region *nd_region)
2207 {
2208         WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
2209         nd_region->btt_seed = nd_btt_create(nd_region);
2210         /*
2211          * Seed creation failures are not fatal; provisioning is simply
2212          * disabled until memory becomes available
2213          */
2214         if (!nd_region->btt_seed)
2215                 dev_err(&nd_region->dev, "failed to create btt namespace\n");
2216 }
2217
2218 static int add_namespace_resource(struct nd_region *nd_region,
2219                 struct nd_namespace_label *nd_label, struct device **devs,
2220                 int count)
2221 {
2222         struct nd_mapping *nd_mapping = &nd_region->mapping[0];
2223         struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
2224         int i;
2225
2226         for (i = 0; i < count; i++) {
2227                 uuid_t *uuid = namespace_to_uuid(devs[i]);
2228                 struct resource *res;
2229
2230                 if (IS_ERR(uuid)) {
2231                         WARN_ON(1);
2232                         continue;
2233                 }
2234
2235                 if (!nsl_uuid_equal(ndd, nd_label, uuid))
2236                         continue;
2237                 if (is_namespace_blk(devs[i])) {
2238                         res = nsblk_add_resource(nd_region, ndd,
2239                                         to_nd_namespace_blk(devs[i]),
2240                                         nsl_get_dpa(ndd, nd_label));
2241                         if (!res)
2242                                 return -ENXIO;
2243                         nd_dbg_dpa(nd_region, ndd, res, "%d assign\n", count);
2244                 } else {
2245                         dev_err(&nd_region->dev,
2246                                 "error: conflicting extents for uuid: %pUb\n",
2247                                 uuid);
2248                         return -ENXIO;
2249                 }
2250                 break;
2251         }
2252
2253         return i;
2254 }
2255
2256 static struct device *create_namespace_blk(struct nd_region *nd_region,
2257                 struct nd_namespace_label *nd_label, int count)
2258 {
2259
2260         struct nd_mapping *nd_mapping = &nd_region->mapping[0];
2261         struct nd_interleave_set *nd_set = nd_region->nd_set;
2262         struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
2263         struct nd_namespace_blk *nsblk;
2264         char name[NSLABEL_NAME_LEN];
2265         struct device *dev = NULL;
2266         struct resource *res;
2267         uuid_t uuid;
2268
2269         if (!nsl_validate_type_guid(ndd, nd_label, &nd_set->type_guid))
2270                 return ERR_PTR(-EAGAIN);
2271         if (!nsl_validate_blk_isetcookie(ndd, nd_label, nd_set->cookie2))
2272                 return ERR_PTR(-EAGAIN);
2273
2274         nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
2275         if (!nsblk)
2276                 return ERR_PTR(-ENOMEM);
2277         dev = &nsblk->common.dev;
2278         dev->type = &namespace_blk_device_type;
2279         dev->parent = &nd_region->dev;
2280         nsblk->id = -1;
2281         nsblk->lbasize = nsl_get_lbasize(ndd, nd_label);
2282         nsl_get_uuid(ndd, nd_label, &uuid);
2283         nsblk->uuid = kmemdup(&uuid, sizeof(uuid_t), GFP_KERNEL);
2284         nsblk->common.claim_class = nsl_get_claim_class(ndd, nd_label);
2285         if (!nsblk->uuid)
2286                 goto blk_err;
2287         nsl_get_name(ndd, nd_label, name);
2288         if (name[0]) {
2289                 nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN, GFP_KERNEL);
2290                 if (!nsblk->alt_name)
2291                         goto blk_err;
2292         }
2293         res = nsblk_add_resource(nd_region, ndd, nsblk,
2294                         nsl_get_dpa(ndd, nd_label));
2295         if (!res)
2296                 goto blk_err;
2297         nd_dbg_dpa(nd_region, ndd, res, "%d: assign\n", count);
2298         return dev;
2299  blk_err:
2300         namespace_blk_release(dev);
2301         return ERR_PTR(-ENXIO);
2302 }
2303
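     /*
      * Sort namespaces of the same type by their starting dpa so that
      * device ids are handed out in address order.
      */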
2304 static int cmp_dpa(const void *a, const void *b)
2305 {
2306         const struct device *dev_a = *(const struct device **) a;
2307         const struct device *dev_b = *(const struct device **) b;
2308         struct nd_namespace_blk *nsblk_a, *nsblk_b;
2309         struct nd_namespace_pmem *nspm_a, *nspm_b;
2310
2311         if (is_namespace_io(dev_a))
2312                 return 0;
2313
2314         if (is_namespace_blk(dev_a)) {
2315                 nsblk_a = to_nd_namespace_blk(dev_a);
2316                 nsblk_b = to_nd_namespace_blk(dev_b);
2317
2318                 return memcmp(&nsblk_a->res[0]->start, &nsblk_b->res[0]->start,
2319                                 sizeof(resource_size_t));
2320         }
2321
2322         nspm_a = to_nd_namespace_pmem(dev_a);
2323         nspm_b = to_nd_namespace_pmem(dev_b);
2324
2325         return memcmp(&nspm_a->nsio.res.start, &nspm_b->nsio.res.start,
2326                         sizeof(resource_size_t));
2327 }
2328
2329 static struct device **scan_labels(struct nd_region *nd_region)
2330 {
2331         int i, count = 0;
2332         struct device *dev, **devs = NULL;
2333         struct nd_label_ent *label_ent, *e;
2334         struct nd_mapping *nd_mapping = &nd_region->mapping[0];
2335         struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
2336         resource_size_t map_end = nd_mapping->start + nd_mapping->size - 1;
2337
2338         /* "safe" because create_namespace_pmem() might list_move() label_ent */
2339         list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
2340                 struct nd_namespace_label *nd_label = label_ent->label;
2341                 struct device **__devs;
2342                 u32 flags;
2343
2344                 if (!nd_label)
2345                         continue;
2346                 flags = nsl_get_flags(ndd, nd_label);
2347                 if (is_nd_blk(&nd_region->dev)
2348                                 == !!(flags & NSLABEL_FLAG_LOCAL))
2349                         /* pass, region matches label type */;
2350                 else
2351                         continue;
2352
2353                 /* skip labels that describe extents outside of the region */
2354                 if (nsl_get_dpa(ndd, nd_label) < nd_mapping->start ||
2355                     nsl_get_dpa(ndd, nd_label) > map_end)
2356                         continue;
2357
2358                 i = add_namespace_resource(nd_region, nd_label, devs, count);
2359                 if (i < 0)
2360                         goto err;
2361                 if (i < count)
2362                         continue;
2363                 __devs = kcalloc(count + 2, sizeof(dev), GFP_KERNEL);
2364                 if (!__devs)
2365                         goto err;
2366                 memcpy(__devs, devs, sizeof(dev) * count);
2367                 kfree(devs);
2368                 devs = __devs;
2369
2370                 if (is_nd_blk(&nd_region->dev))
2371                         dev = create_namespace_blk(nd_region, nd_label, count);
2372                 else
2373                         dev = create_namespace_pmem(nd_region, nd_mapping,
2374                                                     nd_label);
2375
2376                 if (IS_ERR(dev)) {
2377                         switch (PTR_ERR(dev)) {
2378                         case -EAGAIN:
2379                                 /* skip invalid labels */
2380                                 continue;
2381                         case -ENODEV:
2382                                 /* fallthrough to seed creation */
2383                                 break;
2384                         default:
2385                                 goto err;
2386                         }
2387                 } else
2388                         devs[count++] = dev;
2389
2390         }
2391
2392         dev_dbg(&nd_region->dev, "discovered %d %s namespace%s\n",
2393                         count, is_nd_blk(&nd_region->dev)
2394                         ? "blk" : "pmem", count == 1 ? "" : "s");
2395
2396         if (count == 0) {
2397                 /* Publish a zero-sized namespace for userspace to configure. */
2398                 nd_mapping_free_labels(nd_mapping);
2399
2400                 devs = kcalloc(2, sizeof(dev), GFP_KERNEL);
2401                 if (!devs)
2402                         goto err;
2403                 if (is_nd_blk(&nd_region->dev)) {
2404                         struct nd_namespace_blk *nsblk;
2405
2406                         nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
2407                         if (!nsblk)
2408                                 goto err;
2409                         dev = &nsblk->common.dev;
2410                         dev->type = &namespace_blk_device_type;
2411                 } else {
2412                         struct nd_namespace_pmem *nspm;
2413
2414                         nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
2415                         if (!nspm)
2416                                 goto err;
2417                         dev = &nspm->nsio.common.dev;
2418                         dev->type = &namespace_pmem_device_type;
2419                         nd_namespace_pmem_set_resource(nd_region, nspm, 0);
2420                 }
2421                 dev->parent = &nd_region->dev;
2422                 devs[count++] = dev;
2423         } else if (is_memory(&nd_region->dev)) {
2424                 /* clean unselected labels */
2425                 for (i = 0; i < nd_region->ndr_mappings; i++) {
2426                         struct list_head *l, *e;
2427                         LIST_HEAD(list);
2428                         int j;
2429
2430                         nd_mapping = &nd_region->mapping[i];
2431                         if (list_empty(&nd_mapping->labels)) {
2432                                 WARN_ON(1);
2433                                 continue;
2434                         }
2435
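                             /*
                              * select_pmem_id() moved the validated labels
                              * to the front of each list; keep the first
                              * 'count' entries and free the remainder.
                              */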
2436                         j = count;
2437                         list_for_each_safe(l, e, &nd_mapping->labels) {
2438                                 if (!j--)
2439                                         break;
2440                                 list_move_tail(l, &list);
2441                         }
2442                         nd_mapping_free_labels(nd_mapping);
2443                         list_splice_init(&list, &nd_mapping->labels);
2444                 }
2445         }
2446
2447         if (count > 1)
2448                 sort(devs, count, sizeof(struct device *), cmp_dpa, NULL);
2449
2450         return devs;
2451
2452  err:
2453         if (devs) {
2454                 for (i = 0; devs[i]; i++)
2455                         if (is_nd_blk(&nd_region->dev))
2456                                 namespace_blk_release(devs[i]);
2457                         else
2458                                 namespace_pmem_release(devs[i]);
2459                 kfree(devs);
2460         }
2461         return NULL;
2462 }
2463
2464 static struct device **create_namespaces(struct nd_region *nd_region)
2465 {
2466         struct nd_mapping *nd_mapping;
2467         struct device **devs;
2468         int i;
2469
2470         if (nd_region->ndr_mappings == 0)
2471                 return NULL;
2472
2473         /* lock down all mappings while we scan labels */
2474         for (i = 0; i < nd_region->ndr_mappings; i++) {
2475                 nd_mapping = &nd_region->mapping[i];
2476                 mutex_lock_nested(&nd_mapping->lock, i);
2477         }
2478
2479         devs = scan_labels(nd_region);
2480
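             /* unlock in the reverse order of acquisition */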
2481         for (i = 0; i < nd_region->ndr_mappings; i++) {
2482                 int reverse = nd_region->ndr_mappings - 1 - i;
2483
2484                 nd_mapping = &nd_region->mapping[reverse];
2485                 mutex_unlock(&nd_mapping->lock);
2486         }
2487
2488         return devs;
2489 }
2490
2491 static void deactivate_labels(void *region)
2492 {
2493         struct nd_region *nd_region = region;
2494         int i;
2495
2496         for (i = 0; i < nd_region->ndr_mappings; i++) {
2497                 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
2498                 struct nvdimm_drvdata *ndd = nd_mapping->ndd;
2499                 struct nvdimm *nvdimm = nd_mapping->nvdimm;
2500
2501                 mutex_lock(&nd_mapping->lock);
2502                 nd_mapping_free_labels(nd_mapping);
2503                 mutex_unlock(&nd_mapping->lock);
2504
2505                 put_ndd(ndd);
2506                 nd_mapping->ndd = NULL;
2507                 if (ndd)
2508                         atomic_dec(&nvdimm->busy);
2509         }
2510 }
2511
2512 static int init_active_labels(struct nd_region *nd_region)
2513 {
2514         int i, rc = 0;
2515
2516         for (i = 0; i < nd_region->ndr_mappings; i++) {
2517                 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
2518                 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
2519                 struct nvdimm *nvdimm = nd_mapping->nvdimm;
2520                 struct nd_label_ent *label_ent;
2521                 int count, j;
2522
2523                 /*
2524                  * If the dimm is disabled, then we may need to prevent
2525                  * the region from being activated.
2526                  */
2527                 if (!ndd) {
2528                         if (test_bit(NDD_LOCKED, &nvdimm->flags))
2529                                 /* fail, label data may be unreadable */;
2530                         else if (test_bit(NDD_LABELING, &nvdimm->flags))
2531                                 /* fail, labels needed to disambiguate dpa */;
2532                         else
2533                                 continue;
2534
2535                         dev_err(&nd_region->dev, "%s: is %s, failing probe\n",
2536                                         dev_name(&nd_mapping->nvdimm->dev),
2537                                         test_bit(NDD_LOCKED, &nvdimm->flags)
2538                                         ? "locked" : "disabled");
2539                         rc = -ENXIO;
2540                         goto out;
2541                 }
2542                 nd_mapping->ndd = ndd;
2543                 atomic_inc(&nvdimm->busy);
2544                 get_ndd(ndd);
2545
2546                 count = nd_label_active_count(ndd);
2547                 dev_dbg(ndd->dev, "count: %d\n", count);
2548                 if (!count)
2549                         continue;
2550                 for (j = 0; j < count; j++) {
2551                         struct nd_namespace_label *label;
2552
2553                         label_ent = kzalloc(sizeof(*label_ent), GFP_KERNEL);
2554                         if (!label_ent)
2555                                 break;
2556                         label = nd_label_active(ndd, j);
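                             /*
                              * This dimm cannot host blk namespaces, so
                              * clear the LOCAL flag and treat the label
                              * as pmem.
                              */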
2557                         if (test_bit(NDD_NOBLK, &nvdimm->flags)) {
2558                                 u32 flags = nsl_get_flags(ndd, label);
2559
2560                                 flags &= ~NSLABEL_FLAG_LOCAL;
2561                                 nsl_set_flags(ndd, label, flags);
2562                         }
2563                         label_ent->label = label;
2564
2565                         mutex_lock(&nd_mapping->lock);
2566                         list_add_tail(&label_ent->list, &nd_mapping->labels);
2567                         mutex_unlock(&nd_mapping->lock);
2568                 }
2569
2570                 if (j < count)
2571                         break;
2572         }
2573
2574         if (i < nd_region->ndr_mappings)
2575                 rc = -ENOMEM;
2576
2577 out:
2578         if (rc) {
2579                 deactivate_labels(nd_region);
2580                 return rc;
2581         }
2582
2583         return devm_add_action_or_reset(&nd_region->dev, deactivate_labels,
2584                                         nd_region);
2585 }
2586
2587 int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
2588 {
2589         struct device **devs = NULL;
2590         int i, rc = 0, type;
2591
2592         *err = 0;
2593         nvdimm_bus_lock(&nd_region->dev);
2594         rc = init_active_labels(nd_region);
2595         if (rc) {
2596                 nvdimm_bus_unlock(&nd_region->dev);
2597                 return rc;
2598         }
2599
2600         type = nd_region_to_nstype(nd_region);
2601         switch (type) {
2602         case ND_DEVICE_NAMESPACE_IO:
2603                 devs = create_namespace_io(nd_region);
2604                 break;
2605         case ND_DEVICE_NAMESPACE_PMEM:
2606         case ND_DEVICE_NAMESPACE_BLK:
2607                 devs = create_namespaces(nd_region);
2608                 break;
2609         default:
2610                 break;
2611         }
2612         nvdimm_bus_unlock(&nd_region->dev);
2613
2614         if (!devs)
2615                 return -ENODEV;
2616
2617         for (i = 0; devs[i]; i++) {
2618                 struct device *dev = devs[i];
2619                 int id;
2620
2621                 if (type == ND_DEVICE_NAMESPACE_BLK) {
2622                         struct nd_namespace_blk *nsblk;
2623
2624                         nsblk = to_nd_namespace_blk(dev);
2625                         id = ida_simple_get(&nd_region->ns_ida, 0, 0,
2626                                         GFP_KERNEL);
2627                         nsblk->id = id;
2628                 } else if (type == ND_DEVICE_NAMESPACE_PMEM) {
2629                         struct nd_namespace_pmem *nspm;
2630
2631                         nspm = to_nd_namespace_pmem(dev);
2632                         id = ida_simple_get(&nd_region->ns_ida, 0, 0,
2633                                         GFP_KERNEL);
2634                         nspm->id = id;
2635                 } else
2636                         id = i;
2637
2638                 if (id < 0)
2639                         break;
2640                 dev_set_name(dev, "namespace%d.%d", nd_region->id, id);
2641                 nd_device_register(dev);
2642         }
2643         if (i)
2644                 nd_region->ns_seed = devs[0];
2645
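             /*
              * A leftover devs[i] means the naming loop above broke early
              * on an id allocation failure; initialize and drop the
              * never-registered devices so they are freed.
              */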
2646         if (devs[i]) {
2647                 int j;
2648
2649                 for (j = i; devs[j]; j++) {
2650                         struct device *dev = devs[j];
2651
2652                         device_initialize(dev);
2653                         put_device(dev);
2654                 }
2655                 *err = j - i;
2656                 /*
2657                  * All of the namespaces we tried to register failed, so
2658                  * fail region activation.
2659                  */
2660                 if (*err == 0)
2661                         rc = -ENODEV;
2662         }
2663         kfree(devs);
2664
2665         if (rc == -ENODEV)
2666                 return rc;
2667
2668         return i;
2669 }