// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/module.h>
#include <linux/device.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "pmem.h"
#include "nd.h"
static void namespace_io_release(struct device *dev)
{
        struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

        kfree(nsio);
}
static void namespace_pmem_release(struct device *dev)
{
        struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
        struct nd_region *nd_region = to_nd_region(dev->parent);

        if (nspm->id >= 0)
                ida_simple_remove(&nd_region->ns_ida, nspm->id);
        kfree(nspm->alt_name);
        kfree(nspm->uuid);
        kfree(nspm);
}
static bool is_namespace_pmem(const struct device *dev);
static bool is_namespace_io(const struct device *dev);
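
/*
 * Helper for nd_is_uuid_unique(): returns -EBUSY if @dev (a pmem
 * namespace, btt, or pfn device) already owns the uuid passed in @data.
 */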
static int is_uuid_busy(struct device *dev, void *data)
{
        uuid_t *uuid1 = data, *uuid2 = NULL;

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                uuid2 = nspm->uuid;
        } else if (is_nd_btt(dev)) {
                struct nd_btt *nd_btt = to_nd_btt(dev);

                uuid2 = nd_btt->uuid;
        } else if (is_nd_pfn(dev)) {
                struct nd_pfn *nd_pfn = to_nd_pfn(dev);

                uuid2 = nd_pfn->uuid;
        }

        if (uuid2 && uuid_equal(uuid1, uuid2))
                return -EBUSY;

        return 0;
}
static int is_namespace_uuid_busy(struct device *dev, void *data)
{
        if (is_nd_region(dev))
                return device_for_each_child(dev, data, is_uuid_busy);
        return 0;
}
/**
 * nd_is_uuid_unique - verify that no other namespace has @uuid
 * @dev: any device on a nvdimm_bus
 * @uuid: uuid to check
 */
bool nd_is_uuid_unique(struct device *dev, uuid_t *uuid)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

        if (!nvdimm_bus)
                return false;
        WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm_bus->dev));
        if (device_for_each_child(&nvdimm_bus->dev, uuid,
                                is_namespace_uuid_busy) != 0)
                return false;
        return true;
}
bool pmem_should_map_pages(struct device *dev)
{
        struct nd_region *nd_region = to_nd_region(dev->parent);
        struct nd_namespace_common *ndns = to_ndns(dev);
        struct nd_namespace_io *nsio;

        if (!IS_ENABLED(CONFIG_ZONE_DEVICE))
                return false;

        if (!test_bit(ND_REGION_PAGEMAP, &nd_region->flags))
                return false;

        if (is_nd_pfn(dev) || is_nd_btt(dev))
                return false;

        if (ndns->force_raw)
                return false;

        nsio = to_nd_namespace_io(dev);
        if (region_intersects(nsio->res.start, resource_size(&nsio->res),
                                IORESOURCE_SYSTEM_RAM,
                                IORES_DESC_NONE) == REGION_MIXED)
                return false;

        return ARCH_MEMREMAP_PMEM == MEMREMAP_WB;
}
EXPORT_SYMBOL(pmem_should_map_pages);
unsigned int pmem_sector_size(struct nd_namespace_common *ndns)
{
        if (is_namespace_pmem(&ndns->dev)) {
                struct nd_namespace_pmem *nspm;

                nspm = to_nd_namespace_pmem(&ndns->dev);
                if (nspm->lbasize == 0 || nspm->lbasize == 512)
                        /* default */;
                else if (nspm->lbasize == 4096)
                        return 4096;
                else
                        dev_WARN(&ndns->dev, "unsupported sector size: %ld\n",
                                        nspm->lbasize);
        }

        /*
         * There is no namespace label (is_namespace_io()), or the label
         * indicates the default sector size.
         */
        return 512;
}
EXPORT_SYMBOL(pmem_sector_size);
const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
                char *name)
{
        struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
        const char *suffix = NULL;

        if (ndns->claim && is_nd_btt(ndns->claim))
                suffix = "s";

        if (is_namespace_pmem(&ndns->dev) || is_namespace_io(&ndns->dev)) {
                int nsidx = 0;

                if (is_namespace_pmem(&ndns->dev)) {
                        struct nd_namespace_pmem *nspm;

                        nspm = to_nd_namespace_pmem(&ndns->dev);
                        nsidx = nspm->id;
                }

                if (nsidx)
                        sprintf(name, "pmem%d.%d%s", nd_region->id, nsidx,
                                        suffix ? suffix : "");
                else
                        sprintf(name, "pmem%d%s", nd_region->id,
                                        suffix ? suffix : "");
        } else {
                return NULL;
        }

        return name;
}
EXPORT_SYMBOL(nvdimm_namespace_disk_name);
const uuid_t *nd_dev_to_uuid(struct device *dev)
{
        if (dev && is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                return nspm->uuid;
        }
        return &uuid_null;
}
EXPORT_SYMBOL(nd_dev_to_uuid);
static ssize_t nstype_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev->parent);

        return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);
static ssize_t __alt_name_store(struct device *dev, const char *buf,
                const size_t len)
{
        char *input, *pos, *alt_name, **ns_altname;
        ssize_t rc;

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                ns_altname = &nspm->alt_name;
        } else
                return -ENXIO;

        if (dev->driver || to_ndns(dev)->claim)
                return -EBUSY;

        input = kstrndup(buf, len, GFP_KERNEL);
        if (!input)
                return -ENOMEM;

        pos = strim(input);
        if (strlen(pos) + 1 > NSLABEL_NAME_LEN) {
                rc = -EINVAL;
                goto out;
        }

        alt_name = kzalloc(NSLABEL_NAME_LEN, GFP_KERNEL);
        if (!alt_name) {
                rc = -ENOMEM;
                goto out;
        }
        kfree(*ns_altname);
        *ns_altname = alt_name;
        sprintf(*ns_altname, "%s", pos);
        rc = len;

out:
        kfree(input);
        return rc;
}
static int nd_namespace_label_update(struct nd_region *nd_region,
                struct device *dev)
{
        dev_WARN_ONCE(dev, dev->driver || to_ndns(dev)->claim,
                        "namespace must be idle during label update\n");
        if (dev->driver || to_ndns(dev)->claim)
                return 0;

        /*
         * Only allow label writes that will result in a valid namespace
         * or deletion of an existing namespace.
         */
        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
                resource_size_t size = resource_size(&nspm->nsio.res);

                if (size == 0 && nspm->uuid)
                        /* delete allocation */;
                else if (!nspm->uuid)
                        return 0;

                return nd_pmem_namespace_label_update(nd_region, nspm, size);
        }
        return -ENXIO;
}
static ssize_t alt_name_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct nd_region *nd_region = to_nd_region(dev->parent);
        ssize_t rc;

        device_lock(dev);
        nvdimm_bus_lock(dev);
        wait_nvdimm_bus_probe_idle(dev);
        rc = __alt_name_store(dev, buf, len);
        if (rc >= 0)
                rc = nd_namespace_label_update(nd_region, dev);
        dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc);
        nvdimm_bus_unlock(dev);
        device_unlock(dev);

        return rc < 0 ? rc : len;
}
static ssize_t alt_name_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        char *ns_altname;

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                ns_altname = nspm->alt_name;
        } else
                return -ENXIO;

        return sprintf(buf, "%s\n", ns_altname ? ns_altname : "");
}
static DEVICE_ATTR_RW(alt_name);
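
/*
 * Free @n bytes from the tail of the @label_id allocation on one dimm:
 * whole trailing extents are deleted, and a final partial extent is
 * shrunk in place.
 */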
static int scan_free(struct nd_region *nd_region,
                struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
                resource_size_t n)
{
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        int rc = 0;

        while (n) {
                struct resource *res, *last;

                last = NULL;
                for_each_dpa_resource(ndd, res)
                        if (strcmp(res->name, label_id->id) == 0)
                                last = res;
                res = last;
                if (!res)
                        return 0;

                if (n >= resource_size(res)) {
                        n -= resource_size(res);
                        nd_dbg_dpa(nd_region, ndd, res, "delete %d\n", rc);
                        nvdimm_free_dpa(ndd, res);
                        /* retry with last resource deleted */
                        continue;
                }

                rc = adjust_resource(res, res->start, resource_size(res) - n);
                if (rc == 0)
                        res->flags |= DPA_RESOURCE_ADJUSTED;
                nd_dbg_dpa(nd_region, ndd, res, "shrink %d\n", rc);
                break;
        }

        return rc;
}
/**
 * shrink_dpa_allocation - for each dimm in region free n bytes for label_id
 * @nd_region: the set of dimms to reclaim @n bytes from
 * @label_id: unique identifier for the namespace consuming this dpa range
 * @n: number of bytes per-dimm to release
 *
 * Assumes resources are ordered.  Starting from the end try to
 * adjust_resource() the allocation to @n, but if @n is larger than the
 * allocation delete it and find the 'new' last allocation in the label
 * set.
 */
static int shrink_dpa_allocation(struct nd_region *nd_region,
                struct nd_label_id *label_id, resource_size_t n)
{
        int i;

        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                int rc;

                rc = scan_free(nd_region, nd_mapping, label_id, n);
                if (rc)
                        return rc;
        }

        return 0;
}
static resource_size_t init_dpa_allocation(struct nd_label_id *label_id,
                struct nd_region *nd_region, struct nd_mapping *nd_mapping,
                resource_size_t n)
{
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct resource *res;
        int rc = 0;

        /* first resource allocation for this label-id or dimm */
        res = nvdimm_allocate_dpa(ndd, label_id, nd_mapping->start, n);
        if (!res)
                rc = -EBUSY;

        nd_dbg_dpa(nd_region, ndd, res, "init %d\n", rc);
        return rc ? n : 0;
}
/**
 * space_valid() - validate free dpa space against constraints
 * @nd_region: hosting region of the free space
 * @ndd: dimm device data for debug
 * @label_id: namespace id to allocate space
 * @prev: potential allocation that precedes free space
 * @next: allocation that follows the given free space range
 * @exist: first allocation with same id in the mapping
 * @n: range that must be satisfied for pmem allocations
 * @valid: free space range to validate
 *
 * BLK-space is valid as long as it does not precede a PMEM
 * allocation in a given region. PMEM-space must be contiguous
 * and adjacent to an existing allocation (if one
 * exists).  If reserving PMEM any space is valid.
 */
static void space_valid(struct nd_region *nd_region, struct nvdimm_drvdata *ndd,
                struct nd_label_id *label_id, struct resource *prev,
                struct resource *next, struct resource *exist,
                resource_size_t n, struct resource *valid)
{
        bool is_reserve = strcmp(label_id->id, "pmem-reserve") == 0;
        unsigned long align;

        align = nd_region->align / nd_region->ndr_mappings;
        valid->start = ALIGN(valid->start, align);
        valid->end = ALIGN_DOWN(valid->end + 1, align) - 1;

        if (valid->start >= valid->end)
                goto invalid;

        if (is_reserve)
                return;

        /* allocation needs to be contiguous, so this is all or nothing */
        if (resource_size(valid) < n)
                goto invalid;

        /* we've got all the space we need and no existing allocation */
        if (!exist)
                return;

        /* allocation needs to be contiguous with the existing namespace */
        if (valid->start == exist->end + 1
                        || valid->end == exist->start - 1)
                return;

 invalid:
        /* truncate @valid size to 0 */
        valid->end = valid->start - 1;
}
enum alloc_loc {
        ALLOC_ERR = 0, ALLOC_BEFORE, ALLOC_MID, ALLOC_AFTER,
};
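
/*
 * Grow the @label_id allocation on one dimm by up to @n bytes,
 * preferring to extend an existing extent before claiming fresh free
 * space; returns the number of bytes that could not be allocated.
 */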
static resource_size_t scan_allocate(struct nd_region *nd_region,
                struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
                resource_size_t n)
{
        resource_size_t mapping_end = nd_mapping->start + nd_mapping->size - 1;
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct resource *res, *exist = NULL, valid;
        const resource_size_t to_allocate = n;
        int first;

        for_each_dpa_resource(ndd, res)
                if (strcmp(label_id->id, res->name) == 0)
                        exist = res;

        valid.start = nd_mapping->start;
        valid.end = mapping_end;
        valid.name = "free space";
 retry:
        first = 0;
        for_each_dpa_resource(ndd, res) {
                struct resource *next = res->sibling, *new_res = NULL;
                resource_size_t allocate, available = 0;
                enum alloc_loc loc = ALLOC_ERR;
                const char *action;
                int rc = 0;

                /* ignore resources outside this nd_mapping */
                if (res->start > mapping_end)
                        continue;
                if (res->end < nd_mapping->start)
                        continue;

                /* space at the beginning of the mapping */
                if (!first++ && res->start > nd_mapping->start) {
                        valid.start = nd_mapping->start;
                        valid.end = res->start - 1;
                        space_valid(nd_region, ndd, label_id, NULL, next, exist,
                                        to_allocate, &valid);
                        available = resource_size(&valid);
                        if (available)
                                loc = ALLOC_BEFORE;
                }

                /* space between allocations */
                if (!loc && next) {
                        valid.start = res->start + resource_size(res);
                        valid.end = min(mapping_end, next->start - 1);
                        space_valid(nd_region, ndd, label_id, res, next, exist,
                                        to_allocate, &valid);
                        available = resource_size(&valid);
                        if (available)
                                loc = ALLOC_MID;
                }

                /* space at the end of the mapping */
                if (!loc && !next) {
                        valid.start = res->start + resource_size(res);
                        valid.end = mapping_end;
                        space_valid(nd_region, ndd, label_id, res, next, exist,
                                        to_allocate, &valid);
                        available = resource_size(&valid);
                        if (available)
                                loc = ALLOC_AFTER;
                }

                if (!loc || !available)
                        continue;
                allocate = min(available, n);
                switch (loc) {
                case ALLOC_BEFORE:
                        if (strcmp(res->name, label_id->id) == 0) {
                                /* adjust current resource up */
                                rc = adjust_resource(res, res->start - allocate,
                                                resource_size(res) + allocate);
                                action = "cur grow up";
                        } else
                                action = "allocate";
                        break;
                case ALLOC_MID:
                        if (strcmp(next->name, label_id->id) == 0) {
                                /* adjust next resource up */
                                rc = adjust_resource(next, next->start
                                                - allocate, resource_size(next)
                                                + allocate);
                                new_res = next;
                                action = "next grow up";
                        } else if (strcmp(res->name, label_id->id) == 0) {
                                action = "grow down";
                        } else
                                action = "allocate";
                        break;
                case ALLOC_AFTER:
                        if (strcmp(res->name, label_id->id) == 0)
                                action = "grow down";
                        else
                                action = "allocate";
                        break;
                default:
                        return n;
                }

                if (strcmp(action, "allocate") == 0) {
                        new_res = nvdimm_allocate_dpa(ndd, label_id,
                                        valid.start, allocate);
                        if (!new_res)
                                rc = -EBUSY;
                } else if (strcmp(action, "grow down") == 0) {
                        /* adjust current resource down */
                        rc = adjust_resource(res, res->start, resource_size(res)
                                        + allocate);
                        if (rc == 0)
                                res->flags |= DPA_RESOURCE_ADJUSTED;
                }

                if (!new_res)
                        new_res = res;

                nd_dbg_dpa(nd_region, ndd, new_res, "%s(%d) %d\n",
                                action, loc, rc);

                if (rc)
                        return n;

                n -= allocate;
                if (n) {
                        /*
                         * Retry scan with newly inserted resources.
                         * For example, if we did an ALLOC_BEFORE
                         * insertion there may also have been space
                         * available for an ALLOC_AFTER insertion, so we
                         * need to check this same resource again
                         */
                        goto retry;
                } else
                        return 0;
        }

        if (n == to_allocate)
                return init_dpa_allocation(label_id, nd_region, nd_mapping, n);
        return n;
}
static int merge_dpa(struct nd_region *nd_region,
                struct nd_mapping *nd_mapping, struct nd_label_id *label_id)
{
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct resource *res;

        if (strncmp("pmem", label_id->id, 4) == 0)
                return 0;
 retry:
        for_each_dpa_resource(ndd, res) {
                int rc;
                struct resource *next = res->sibling;
                resource_size_t end = res->start + resource_size(res);

                if (!next || strcmp(res->name, label_id->id) != 0
                                || strcmp(next->name, label_id->id) != 0
                                || end != next->start)
                        continue;
                end += resource_size(next);
                nvdimm_free_dpa(ndd, next);
                rc = adjust_resource(res, res->start, end - res->start);
                nd_dbg_dpa(nd_region, ndd, res, "merge %d\n", rc);
                if (rc)
                        return rc;
                res->flags |= DPA_RESOURCE_ADJUSTED;
                goto retry;
        }

        return 0;
}
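
/*
 * Claim all remaining free pmem capacity on the nvdimm passed in @data
 * under the special "pmem-reserve" label id; release_free_pmem() drops
 * these placeholder extents again.
 */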
int __reserve_free_pmem(struct device *dev, void *data)
{
        struct nvdimm *nvdimm = data;
        struct nd_region *nd_region;
        struct nd_label_id label_id;
        int i;

        if (!is_memory(dev))
                return 0;

        nd_region = to_nd_region(dev);
        if (nd_region->ndr_mappings == 0)
                return 0;

        memset(&label_id, 0, sizeof(label_id));
        strcat(label_id.id, "pmem-reserve");
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                resource_size_t n, rem = 0;

                if (nd_mapping->nvdimm != nvdimm)
                        continue;

                n = nd_pmem_available_dpa(nd_region, nd_mapping);
                if (n == 0)
                        return 0;
                rem = scan_allocate(nd_region, nd_mapping, &label_id, n);
                dev_WARN_ONCE(&nd_region->dev, rem,
                                "pmem reserve underrun: %#llx of %#llx bytes\n",
                                (unsigned long long) n - rem,
                                (unsigned long long) n);
                return rem ? -ENXIO : 0;
        }

        return 0;
}
void release_free_pmem(struct nvdimm_bus *nvdimm_bus,
                struct nd_mapping *nd_mapping)
{
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct resource *res, *_res;

        for_each_dpa_resource_safe(ndd, res, _res)
                if (strcmp(res->name, "pmem-reserve") == 0)
                        nvdimm_free_dpa(ndd, res);
}
/**
 * grow_dpa_allocation - for each dimm allocate n bytes for @label_id
 * @nd_region: the set of dimms to allocate @n more bytes from
 * @label_id: unique identifier for the namespace consuming this dpa range
 * @n: number of bytes per-dimm to add to the existing allocation
 *
 * Assumes resources are ordered.  For BLK regions, first consume
 * BLK-only available DPA free space, then consume PMEM-aliased DPA
 * space starting at the highest DPA.  For PMEM regions start
 * allocations from the start of an interleave set and end at the first
 * BLK allocation or the end of the interleave set, whichever comes
 * first.
 */
static int grow_dpa_allocation(struct nd_region *nd_region,
                struct nd_label_id *label_id, resource_size_t n)
{
        int i;

        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                resource_size_t rem = n;
                int rc;

                rem = scan_allocate(nd_region, nd_mapping, label_id, rem);
                dev_WARN_ONCE(&nd_region->dev, rem,
                                "allocation underrun: %#llx of %#llx bytes\n",
                                (unsigned long long) n - rem,
                                (unsigned long long) n);
                if (rem)
                        return -ENXIO;

                rc = merge_dpa(nd_region, nd_mapping, label_id);
                if (rc)
                        return rc;
        }

        return 0;
}
static void nd_namespace_pmem_set_resource(struct nd_region *nd_region,
                struct nd_namespace_pmem *nspm, resource_size_t size)
{
        struct resource *res = &nspm->nsio.res;
        resource_size_t offset = 0;

        if (size && !nspm->uuid) {
                WARN_ON_ONCE(1);
                size = 0;
        }

        if (size && nspm->uuid) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[0];
                struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
                struct nd_label_id label_id;
                struct resource *res;

                if (!ndd) {
                        size = 0;
                        goto out;
                }

                nd_label_gen_id(&label_id, nspm->uuid, 0);

                /* calculate a spa offset from the dpa allocation offset */
                for_each_dpa_resource(ndd, res)
                        if (strcmp(res->name, label_id.id) == 0) {
                                offset = (res->start - nd_mapping->start)
                                        * nd_region->ndr_mappings;
                                goto out;
                        }

                WARN_ON_ONCE(1);
                size = 0;
        }

 out:
        res->start = nd_region->ndr_start + offset;
        res->end = res->start + size - 1;
}
static bool uuid_not_set(const uuid_t *uuid, struct device *dev,
                const char *where)
{
        if (!uuid) {
                dev_dbg(dev, "%s: uuid not set\n", where);
                return true;
        }
        return false;
}
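
/*
 * Resize the namespace to @val bytes: the requested capacity is split
 * evenly across the interleave-set mappings and each dimm allocation is
 * then grown or shrunk to match.  Expects the nvdimm_bus lock to be
 * held.
 */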
static ssize_t __size_store(struct device *dev, unsigned long long val)
{
        resource_size_t allocated = 0, available = 0;
        struct nd_region *nd_region = to_nd_region(dev->parent);
        struct nd_namespace_common *ndns = to_ndns(dev);
        struct nd_mapping *nd_mapping;
        struct nvdimm_drvdata *ndd;
        struct nd_label_id label_id;
        u32 flags = 0, remainder;
        int rc, i, id = -1;
        uuid_t *uuid = NULL;

        if (dev->driver || ndns->claim)
                return -EBUSY;

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                uuid = nspm->uuid;
                id = nspm->id;
        }

        /*
         * We need a uuid for the allocation-label and dimm(s) on which
         * to store the label.
         */
        if (uuid_not_set(uuid, dev, __func__))
                return -ENXIO;
        if (nd_region->ndr_mappings == 0) {
                dev_dbg(dev, "not associated with dimm(s)\n");
                return -ENXIO;
        }

        div_u64_rem(val, nd_region->align, &remainder);
        if (remainder) {
                dev_dbg(dev, "%llu is not %ldK aligned\n", val,
                                nd_region->align / SZ_1K);
                return -EINVAL;
        }

        nd_label_gen_id(&label_id, uuid, flags);
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                nd_mapping = &nd_region->mapping[i];
                ndd = to_ndd(nd_mapping);

                /*
                 * All dimms in an interleave set need to be enabled
                 * for the size to be changed.
                 */
                if (!ndd)
                        return -ENXIO;

                allocated += nvdimm_allocated_dpa(ndd, &label_id);
        }
        available = nd_region_allocatable_dpa(nd_region);

        if (val > available + allocated)
                return -ENOSPC;

        if (val == allocated)
                return 0;

        val = div_u64(val, nd_region->ndr_mappings);
        allocated = div_u64(allocated, nd_region->ndr_mappings);
        if (val < allocated)
                rc = shrink_dpa_allocation(nd_region, &label_id,
                                allocated - val);
        else
                rc = grow_dpa_allocation(nd_region, &label_id, val - allocated);

        if (rc)
                return rc;

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                nd_namespace_pmem_set_resource(nd_region, nspm,
                                val * nd_region->ndr_mappings);
        }

        /*
         * Try to delete the namespace if we deleted all of its
         * allocation, this is not the seed or 0th device for the
         * region, and it is not actively claimed by a btt, pfn, or dax
         * instance.
         */
        if (val == 0 && id != 0 && nd_region->ns_seed != dev && !ndns->claim)
                nd_device_unregister(dev, ND_ASYNC);

        return rc;
}
static ssize_t size_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct nd_region *nd_region = to_nd_region(dev->parent);
        unsigned long long val;
        int rc;

        rc = kstrtoull(buf, 0, &val);
        if (rc)
                return rc;

        device_lock(dev);
        nvdimm_bus_lock(dev);
        wait_nvdimm_bus_probe_idle(dev);
        rc = __size_store(dev, val);
        if (rc >= 0)
                rc = nd_namespace_label_update(nd_region, dev);

        /* setting size zero == 'delete namespace' */
        if (rc == 0 && val == 0 && is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                kfree(nspm->uuid);
                nspm->uuid = NULL;
        }

        dev_dbg(dev, "%llx %s (%d)\n", val, rc < 0 ? "fail" : "success", rc);

        nvdimm_bus_unlock(dev);
        device_unlock(dev);

        return rc < 0 ? rc : len;
}
resource_size_t __nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
{
        struct device *dev = &ndns->dev;

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                return resource_size(&nspm->nsio.res);
        } else if (is_namespace_io(dev)) {
                struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

                return resource_size(&nsio->res);
        } else
                WARN_ONCE(1, "unknown namespace type\n");
        return 0;
}

resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
{
        resource_size_t size;

        nvdimm_bus_lock(&ndns->dev);
        size = __nvdimm_namespace_capacity(ndns);
        nvdimm_bus_unlock(&ndns->dev);

        return size;
}
EXPORT_SYMBOL(nvdimm_namespace_capacity);
bool nvdimm_namespace_locked(struct nd_namespace_common *ndns)
{
        int i;
        bool locked = false;
        struct device *dev = &ndns->dev;
        struct nd_region *nd_region = to_nd_region(dev->parent);

        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm *nvdimm = nd_mapping->nvdimm;

                if (test_bit(NDD_LOCKED, &nvdimm->flags)) {
                        dev_dbg(dev, "%s locked\n", nvdimm_name(nvdimm));
                        locked = true;
                }
        }
        return locked;
}
EXPORT_SYMBOL(nvdimm_namespace_locked);
static ssize_t size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%llu\n", (unsigned long long)
                        nvdimm_namespace_capacity(to_ndns(dev)));
}
static DEVICE_ATTR(size, 0444, size_show, size_store);
static uuid_t *namespace_to_uuid(struct device *dev)
{
        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                return nspm->uuid;
        }
        return ERR_PTR(-ENXIO);
}
static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
                char *buf)
{
        uuid_t *uuid = namespace_to_uuid(dev);

        if (IS_ERR(uuid))
                return PTR_ERR(uuid);
        if (uuid)
                return sprintf(buf, "%pUb\n", uuid);
        return sprintf(buf, "\n");
}
/**
 * namespace_update_uuid - check for a unique uuid and whether we're "renaming"
 * @nd_region: parent region so we can update all dimms in the set
 * @dev: namespace type for generating label_id
 * @new_uuid: incoming uuid
 * @old_uuid: reference to the uuid storage location in the namespace object
 */
static int namespace_update_uuid(struct nd_region *nd_region,
                struct device *dev, uuid_t *new_uuid,
                uuid_t **old_uuid)
{
        struct nd_label_id old_label_id;
        struct nd_label_id new_label_id;
        int i;

        if (!nd_is_uuid_unique(dev, new_uuid))
                return -EINVAL;

        if (*old_uuid == NULL)
                goto out;

        /*
         * If we've already written a label with this uuid, then it's
         * too late to rename because we can't reliably update the uuid
         * without losing the old namespace.  Userspace must delete this
         * namespace to abandon the old uuid.
         */
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];

                /*
                 * This check by itself is sufficient because old_uuid
                 * would be NULL above if this uuid did not exist in the
                 * currently written set.
                 *
                 * FIXME: can we delete uuid with zero dpa allocated?
                 */
                if (list_empty(&nd_mapping->labels))
                        return -EBUSY;
        }

        nd_label_gen_id(&old_label_id, *old_uuid, 0);
        nd_label_gen_id(&new_label_id, new_uuid, 0);
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
                struct nd_label_ent *label_ent;
                struct resource *res;

                for_each_dpa_resource(ndd, res)
                        if (strcmp(res->name, old_label_id.id) == 0)
                                sprintf((void *) res->name, "%s",
                                                new_label_id.id);

                mutex_lock(&nd_mapping->lock);
                list_for_each_entry(label_ent, &nd_mapping->labels, list) {
                        struct nd_namespace_label *nd_label = label_ent->label;
                        struct nd_label_id label_id;
                        uuid_t uuid;

                        if (!nd_label)
                                continue;
                        nsl_get_uuid(ndd, nd_label, &uuid);
                        nd_label_gen_id(&label_id, &uuid,
                                        nsl_get_flags(ndd, nd_label));
                        if (strcmp(old_label_id.id, label_id.id) == 0)
                                set_bit(ND_LABEL_REAP, &label_ent->flags);
                }
                mutex_unlock(&nd_mapping->lock);
        }
        kfree(*old_uuid);
 out:
        *old_uuid = new_uuid;
        return 0;
}
static ssize_t uuid_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct nd_region *nd_region = to_nd_region(dev->parent);
        uuid_t *uuid = NULL;
        uuid_t **ns_uuid;
        ssize_t rc = 0;

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                ns_uuid = &nspm->uuid;
        } else
                return -ENXIO;

        device_lock(dev);
        nvdimm_bus_lock(dev);
        wait_nvdimm_bus_probe_idle(dev);
        if (to_ndns(dev)->claim)
                rc = -EBUSY;
        if (rc >= 0)
                rc = nd_uuid_store(dev, &uuid, buf, len);
        if (rc >= 0)
                rc = namespace_update_uuid(nd_region, dev, uuid, ns_uuid);
        if (rc >= 0)
                rc = nd_namespace_label_update(nd_region, dev);
        else
                kfree(uuid);
        dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
                        buf[len - 1] == '\n' ? "" : "\n");
        nvdimm_bus_unlock(dev);
        device_unlock(dev);

        return rc < 0 ? rc : len;
}
static DEVICE_ATTR_RW(uuid);
static ssize_t resource_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct resource *res;

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                res = &nspm->nsio.res;
        } else if (is_namespace_io(dev)) {
                struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

                res = &nsio->res;
        } else
                return -ENXIO;

        /* no address to convey if the namespace has no allocation */
        if (resource_size(res) == 0)
                return -ENXIO;
        return sprintf(buf, "%#llx\n", (unsigned long long) res->start);
}
static DEVICE_ATTR_ADMIN_RO(resource);
static const unsigned long pmem_lbasize_supported[] = { 512, 4096, 0 };

static ssize_t sector_size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                return nd_size_select_show(nspm->lbasize,
                                pmem_lbasize_supported, buf);
        }
        return -ENXIO;
}
static ssize_t sector_size_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct nd_region *nd_region = to_nd_region(dev->parent);
        const unsigned long *supported;
        unsigned long *lbasize;
        ssize_t rc = 0;

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                lbasize = &nspm->lbasize;
                supported = pmem_lbasize_supported;
        } else
                return -ENXIO;

        device_lock(dev);
        nvdimm_bus_lock(dev);
        if (to_ndns(dev)->claim)
                rc = -EBUSY;
        if (rc >= 0)
                rc = nd_size_select_store(dev, buf, lbasize, supported);
        if (rc >= 0)
                rc = nd_namespace_label_update(nd_region, dev);
        dev_dbg(dev, "result: %zd %s: %s%s", rc, rc < 0 ? "tried" : "wrote",
                        buf, buf[len - 1] == '\n' ? "" : "\n");
        nvdimm_bus_unlock(dev);
        device_unlock(dev);

        return rc ? rc : len;
}
static DEVICE_ATTR_RW(sector_size);
static ssize_t dpa_extents_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev->parent);
        struct nd_label_id label_id;
        uuid_t *uuid = NULL;
        int count = 0, i;
        u32 flags = 0;

        nvdimm_bus_lock(dev);
        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                uuid = nspm->uuid;
        }

        if (!uuid)
                goto out;

        nd_label_gen_id(&label_id, uuid, flags);
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
                struct resource *res;

                for_each_dpa_resource(ndd, res)
                        if (strcmp(res->name, label_id.id) == 0)
                                count++;
        }
 out:
        nvdimm_bus_unlock(dev);

        return sprintf(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(dpa_extents);
static int btt_claim_class(struct device *dev)
{
        struct nd_region *nd_region = to_nd_region(dev->parent);
        int i, loop_bitmask = 0;

        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
                struct nd_namespace_index *nsindex;

                /*
                 * If any of the DIMMs do not support labels the only
                 * possible BTT format is v1.
                 */
                if (!ndd) {
                        loop_bitmask = 0;
                        break;
                }

                nsindex = to_namespace_index(ndd, ndd->ns_current);
                if (nsindex == NULL)
                        loop_bitmask |= 1;
                else {
                        /* check whether existing labels are v1.1 or v1.2 */
                        if (__le16_to_cpu(nsindex->major) == 1
                                        && __le16_to_cpu(nsindex->minor) == 1)
                                loop_bitmask |= 2;
                        else
                                loop_bitmask |= 4;
                }
        }
        /*
         * If nsindex is null loop_bitmask's bit 0 will be set, and if an index
         * block is found, a v1.1 label for any mapping will set bit 1, and a
         * v1.2 label will set bit 2.
         *
         * At the end of the loop, at most one of the three bits must be set.
         * If multiple bits were set, it means the different mappings disagree
         * about their labels, and this must be cleaned up first.
         *
         * If all the label index blocks are found to agree, nsindex of NULL
         * implies labels haven't been initialized yet, and when they will,
         * they will be of the 1.2 format, so we can assume BTT2.0
         *
         * If 1.1 labels are found, we enforce BTT1.1, and if 1.2 labels are
         * found, we enforce BTT2.0
         *
         * If the loop was never entered, default to BTT1.1 (legacy namespaces)
         */
        switch (loop_bitmask) {
        case 0:
        case 2:
                return NVDIMM_CCLASS_BTT;
        case 1:
        case 4:
                return NVDIMM_CCLASS_BTT2;
        default:
                return -ENXIO;
        }
}
static ssize_t holder_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_namespace_common *ndns = to_ndns(dev);
        ssize_t rc;

        device_lock(dev);
        rc = sprintf(buf, "%s\n", ndns->claim ? dev_name(ndns->claim) : "");
        device_unlock(dev);

        return rc;
}
static DEVICE_ATTR_RO(holder);
static int __holder_class_store(struct device *dev, const char *buf)
{
        struct nd_namespace_common *ndns = to_ndns(dev);

        if (dev->driver || ndns->claim)
                return -EBUSY;

        if (sysfs_streq(buf, "btt")) {
                int rc = btt_claim_class(dev);

                if (rc < NVDIMM_CCLASS_NONE)
                        return rc;
                ndns->claim_class = rc;
        } else if (sysfs_streq(buf, "pfn"))
                ndns->claim_class = NVDIMM_CCLASS_PFN;
        else if (sysfs_streq(buf, "dax"))
                ndns->claim_class = NVDIMM_CCLASS_DAX;
        else if (sysfs_streq(buf, ""))
                ndns->claim_class = NVDIMM_CCLASS_NONE;
        else
                return -EINVAL;

        return 0;
}
static ssize_t holder_class_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct nd_region *nd_region = to_nd_region(dev->parent);
        int rc;

        device_lock(dev);
        nvdimm_bus_lock(dev);
        wait_nvdimm_bus_probe_idle(dev);
        rc = __holder_class_store(dev, buf);
        if (rc == 0)
                rc = nd_namespace_label_update(nd_region, dev);
        dev_dbg(dev, "%s(%d)\n", rc < 0 ? "fail " : "", rc);
        nvdimm_bus_unlock(dev);
        device_unlock(dev);

        return rc < 0 ? rc : len;
}
static ssize_t holder_class_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_namespace_common *ndns = to_ndns(dev);
        ssize_t rc;

        device_lock(dev);
        if (ndns->claim_class == NVDIMM_CCLASS_NONE)
                rc = sprintf(buf, "\n");
        else if ((ndns->claim_class == NVDIMM_CCLASS_BTT) ||
                        (ndns->claim_class == NVDIMM_CCLASS_BTT2))
                rc = sprintf(buf, "btt\n");
        else if (ndns->claim_class == NVDIMM_CCLASS_PFN)
                rc = sprintf(buf, "pfn\n");
        else if (ndns->claim_class == NVDIMM_CCLASS_DAX)
                rc = sprintf(buf, "dax\n");
        else
                rc = sprintf(buf, "<unknown>\n");
        device_unlock(dev);

        return rc;
}
static DEVICE_ATTR_RW(holder_class);
static ssize_t mode_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_namespace_common *ndns = to_ndns(dev);
        struct device *claim;
        char *mode;
        ssize_t rc;

        device_lock(dev);
        claim = ndns->claim;
        if (claim && is_nd_btt(claim))
                mode = "safe";
        else if (claim && is_nd_pfn(claim))
                mode = "memory";
        else if (claim && is_nd_dax(claim))
                mode = "dax";
        else if (!claim && pmem_should_map_pages(dev))
                mode = "memory";
        else
                mode = "raw";
        rc = sprintf(buf, "%s\n", mode);
        device_unlock(dev);

        return rc;
}
static DEVICE_ATTR_RO(mode);
static ssize_t force_raw_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        bool force_raw;
        int rc = strtobool(buf, &force_raw);

        if (rc)
                return rc;

        to_ndns(dev)->force_raw = force_raw;
        return len;
}

static ssize_t force_raw_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", to_ndns(dev)->force_raw);
}
static DEVICE_ATTR_RW(force_raw);
static struct attribute *nd_namespace_attributes[] = {
        &dev_attr_nstype.attr,
        &dev_attr_size.attr,
        &dev_attr_mode.attr,
        &dev_attr_uuid.attr,
        &dev_attr_holder.attr,
        &dev_attr_resource.attr,
        &dev_attr_alt_name.attr,
        &dev_attr_force_raw.attr,
        &dev_attr_sector_size.attr,
        &dev_attr_dpa_extents.attr,
        &dev_attr_holder_class.attr,
        NULL,
};
static umode_t namespace_visible(struct kobject *kobj,
                struct attribute *a, int n)
{
        struct device *dev = container_of(kobj, struct device, kobj);

        if (is_namespace_pmem(dev)) {
                if (a == &dev_attr_size.attr)
                        return 0644;

                return a->mode;
        }

        /* base is_namespace_io() attributes */
        if (a == &dev_attr_nstype.attr || a == &dev_attr_size.attr ||
            a == &dev_attr_holder.attr || a == &dev_attr_holder_class.attr ||
            a == &dev_attr_force_raw.attr || a == &dev_attr_mode.attr ||
            a == &dev_attr_resource.attr)
                return a->mode;

        return 0;
}
static struct attribute_group nd_namespace_attribute_group = {
        .attrs = nd_namespace_attributes,
        .is_visible = namespace_visible,
};

static const struct attribute_group *nd_namespace_attribute_groups[] = {
        &nd_device_attribute_group,
        &nd_namespace_attribute_group,
        &nd_numa_attribute_group,
        NULL,
};
static const struct device_type namespace_io_device_type = {
        .name = "nd_namespace_io",
        .release = namespace_io_release,
        .groups = nd_namespace_attribute_groups,
};

static const struct device_type namespace_pmem_device_type = {
        .name = "nd_namespace_pmem",
        .release = namespace_pmem_release,
        .groups = nd_namespace_attribute_groups,
};

static bool is_namespace_pmem(const struct device *dev)
{
        return dev ? dev->type == &namespace_pmem_device_type : false;
}

static bool is_namespace_io(const struct device *dev)
{
        return dev ? dev->type == &namespace_io_device_type : false;
}
struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
{
        struct nd_btt *nd_btt = is_nd_btt(dev) ? to_nd_btt(dev) : NULL;
        struct nd_pfn *nd_pfn = is_nd_pfn(dev) ? to_nd_pfn(dev) : NULL;
        struct nd_dax *nd_dax = is_nd_dax(dev) ? to_nd_dax(dev) : NULL;
        struct nd_namespace_common *ndns = NULL;
        resource_size_t size;

        if (nd_btt || nd_pfn || nd_dax) {
                if (nd_btt)
                        ndns = nd_btt->ndns;
                else if (nd_pfn)
                        ndns = nd_pfn->ndns;
                else if (nd_dax)
                        ndns = nd_dax->nd_pfn.ndns;

                if (!ndns)
                        return ERR_PTR(-ENODEV);

                /*
                 * Flush any in-progress probes / removals in the driver
                 * for the raw personality of this namespace.
                 */
                device_lock(&ndns->dev);
                device_unlock(&ndns->dev);
                if (ndns->dev.driver) {
                        dev_dbg(&ndns->dev, "is active, can't bind %s\n",
                                        dev_name(dev));
                        return ERR_PTR(-EBUSY);
                }
                if (dev_WARN_ONCE(&ndns->dev, ndns->claim != dev,
                                        "host (%s) vs claim (%s) mismatch\n",
                                        dev_name(dev),
                                        dev_name(ndns->claim)))
                        return ERR_PTR(-ENXIO);
        } else {
                ndns = to_ndns(dev);
                if (ndns->claim) {
                        dev_dbg(dev, "claimed by %s, failing probe\n",
                                dev_name(ndns->claim));

                        return ERR_PTR(-ENXIO);
                }
        }

        if (nvdimm_namespace_locked(ndns))
                return ERR_PTR(-EACCES);

        size = nvdimm_namespace_capacity(ndns);
        if (size < ND_MIN_NAMESPACE_SIZE) {
                dev_dbg(&ndns->dev, "%pa, too small must be at least %#x\n",
                                &size, ND_MIN_NAMESPACE_SIZE);
                return ERR_PTR(-ENODEV);
        }

        /*
         * Note, alignment validation for fsdax and devdax mode
         * namespaces happens in nd_pfn_validate() where infoblock
         * padding parameters can be applied.
         */
        if (pmem_should_map_pages(dev)) {
                struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
                struct resource *res = &nsio->res;

                if (!IS_ALIGNED(res->start | (res->end + 1),
                                        memremap_compat_align())) {
                        dev_err(&ndns->dev, "%pr misaligned, unable to map\n", res);
                        return ERR_PTR(-EOPNOTSUPP);
                }
        }

        if (is_namespace_pmem(&ndns->dev)) {
                struct nd_namespace_pmem *nspm;

                nspm = to_nd_namespace_pmem(&ndns->dev);
                if (uuid_not_set(nspm->uuid, &ndns->dev, __func__))
                        return ERR_PTR(-ENODEV);
        }

        return ndns;
}
EXPORT_SYMBOL(nvdimm_namespace_common_probe);
int devm_namespace_enable(struct device *dev, struct nd_namespace_common *ndns,
                resource_size_t size)
{
        return devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev), size);
}
EXPORT_SYMBOL_GPL(devm_namespace_enable);

void devm_namespace_disable(struct device *dev, struct nd_namespace_common *ndns)
{
        devm_nsio_disable(dev, to_nd_namespace_io(&ndns->dev));
}
EXPORT_SYMBOL_GPL(devm_namespace_disable);
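
/*
 * For label-less regions publish a single namespace spanning the whole
 * region; the returned devs[] array is NULL-terminated for the caller.
 */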
static struct device **create_namespace_io(struct nd_region *nd_region)
{
        struct nd_namespace_io *nsio;
        struct device *dev, **devs;
        struct resource *res;

        nsio = kzalloc(sizeof(*nsio), GFP_KERNEL);
        if (!nsio)
                return NULL;

        devs = kcalloc(2, sizeof(struct device *), GFP_KERNEL);
        if (!devs) {
                kfree(nsio);
                return NULL;
        }

        dev = &nsio->common.dev;
        dev->type = &namespace_io_device_type;
        dev->parent = &nd_region->dev;
        res = &nsio->res;
        res->name = dev_name(&nd_region->dev);
        res->flags = IORESOURCE_MEM;
        res->start = nd_region->ndr_start;
        res->end = res->start + nd_region->ndr_size - 1;

        devs[0] = dev;
        return devs;
}
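
/*
 * Check every mapping in the region for a label that matches @uuid and
 * @cookie at interleave-set position @pos; a duplicate uuid entry on
 * any single dimm disqualifies the set.
 */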
static bool has_uuid_at_pos(struct nd_region *nd_region, const uuid_t *uuid,
                u64 cookie, u16 pos)
{
        struct nd_namespace_label *found = NULL;
        int i;

        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nd_interleave_set *nd_set = nd_region->nd_set;
                struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
                struct nd_label_ent *label_ent;
                bool found_uuid = false;

                list_for_each_entry(label_ent, &nd_mapping->labels, list) {
                        struct nd_namespace_label *nd_label = label_ent->label;
                        u16 position;

                        if (!nd_label)
                                continue;
                        position = nsl_get_position(ndd, nd_label);

                        if (!nsl_validate_isetcookie(ndd, nd_label, cookie))
                                continue;

                        if (!nsl_uuid_equal(ndd, nd_label, uuid))
                                continue;

                        if (!nsl_validate_type_guid(ndd, nd_label,
                                                &nd_set->type_guid))
                                continue;

                        if (found_uuid) {
                                dev_dbg(ndd->dev, "duplicate entry for uuid\n");
                                return false;
                        }
                        found_uuid = true;
                        if (!nsl_validate_nlabel(nd_region, ndd, nd_label))
                                continue;
                        if (position != pos)
                                continue;
                        found = nd_label;
                        break;
                }
                if (found)
                        break;
        }
        return found != NULL;
}
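
/*
 * Promote the label matching @pmem_id to the front of each mapping's
 * label list, validating that its dpa range falls within the range
 * published in the NFIT.
 */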
static int select_pmem_id(struct nd_region *nd_region, const uuid_t *pmem_id)
{
        int i;

        if (!pmem_id)
                return -ENODEV;

        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
                struct nd_namespace_label *nd_label = NULL;
                u64 hw_start, hw_end, pmem_start, pmem_end;
                struct nd_label_ent *label_ent;

                lockdep_assert_held(&nd_mapping->lock);
                list_for_each_entry(label_ent, &nd_mapping->labels, list) {
                        nd_label = label_ent->label;
                        if (!nd_label)
                                continue;
                        if (nsl_uuid_equal(ndd, nd_label, pmem_id))
                                break;
                        nd_label = NULL;
                }

                if (!nd_label) {
                        WARN_ON(1);
                        return -EINVAL;
                }

                /*
                 * Check that this label is compliant with the dpa
                 * range published in NFIT
                 */
                hw_start = nd_mapping->start;
                hw_end = hw_start + nd_mapping->size;
                pmem_start = nsl_get_dpa(ndd, nd_label);
                pmem_end = pmem_start + nsl_get_rawsize(ndd, nd_label);
                if (pmem_start >= hw_start && pmem_start < hw_end
                                && pmem_end <= hw_end && pmem_end > hw_start)
                        /* pass */;
                else {
                        dev_dbg(&nd_region->dev, "%s invalid label for %pUb\n",
                                        dev_name(ndd->dev),
                                        nsl_uuid_raw(ndd, nd_label));
                        return -EINVAL;
                }

                /* move recently validated label to the front of the list */
                list_move(&label_ent->list, &nd_mapping->labels);
        }
        return 0;
}
/**
 * create_namespace_pmem - validate interleave set labelling, retrieve label0
 * @nd_region: region with mappings to validate
 * @nd_mapping: container of dpa-resource-root + labels
 * @nd_label: target pmem namespace label to evaluate
 */
static struct device *create_namespace_pmem(struct nd_region *nd_region,
                struct nd_mapping *nd_mapping,
                struct nd_namespace_label *nd_label)
{
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct nd_namespace_index *nsindex =
                to_namespace_index(ndd, ndd->ns_current);
        u64 cookie = nd_region_interleave_set_cookie(nd_region, nsindex);
        u64 altcookie = nd_region_interleave_set_altcookie(nd_region);
        struct nd_label_ent *label_ent;
        struct nd_namespace_pmem *nspm;
        resource_size_t size = 0;
        struct resource *res;
        struct device *dev;
        uuid_t uuid;
        int rc = 0;
        u16 i;

        if (cookie == 0) {
                dev_dbg(&nd_region->dev, "invalid interleave-set-cookie\n");
                return ERR_PTR(-ENXIO);
        }

        if (!nsl_validate_isetcookie(ndd, nd_label, cookie)) {
                dev_dbg(&nd_region->dev, "invalid cookie in label: %pUb\n",
                                nsl_uuid_raw(ndd, nd_label));
                if (!nsl_validate_isetcookie(ndd, nd_label, altcookie))
                        return ERR_PTR(-EAGAIN);

                dev_dbg(&nd_region->dev, "valid altcookie in label: %pUb\n",
                                nsl_uuid_raw(ndd, nd_label));
        }

        nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
        if (!nspm)
                return ERR_PTR(-ENOMEM);

        nspm->id = -1;
        dev = &nspm->nsio.common.dev;
        dev->type = &namespace_pmem_device_type;
        dev->parent = &nd_region->dev;
        res = &nspm->nsio.res;
        res->name = dev_name(&nd_region->dev);
        res->flags = IORESOURCE_MEM;

        for (i = 0; i < nd_region->ndr_mappings; i++) {
                nsl_get_uuid(ndd, nd_label, &uuid);
                if (has_uuid_at_pos(nd_region, &uuid, cookie, i))
                        continue;
                if (has_uuid_at_pos(nd_region, &uuid, altcookie, i))
                        continue;
                break;
        }

        if (i < nd_region->ndr_mappings) {
                struct nvdimm *nvdimm = nd_region->mapping[i].nvdimm;

                /*
                 * Give up if we don't find an instance of a uuid at each
                 * position (from 0 to nd_region->ndr_mappings - 1), or if we
                 * find a dimm with two instances of the same uuid.
                 */
                dev_err(&nd_region->dev, "%s missing label for %pUb\n",
                                nvdimm_name(nvdimm), nsl_uuid_raw(ndd, nd_label));
                rc = -EINVAL;
                goto err;
        }

        /*
         * Fix up each mapping's 'labels' to have the validated pmem label for
         * that position at labels[0], and NULL at labels[1].  In the process,
         * check that the namespace aligns with interleave-set.
         */
        nsl_get_uuid(ndd, nd_label, &uuid);
        rc = select_pmem_id(nd_region, &uuid);
        if (rc)
                goto err;

        /* Calculate total size and populate namespace properties from label0 */
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_namespace_label *label0;
                struct nvdimm_drvdata *ndd;

                nd_mapping = &nd_region->mapping[i];
                label_ent = list_first_entry_or_null(&nd_mapping->labels,
                                typeof(*label_ent), list);
                label0 = label_ent ? label_ent->label : NULL;

                if (!label0) {
                        WARN_ON(1);
                        continue;
                }

                ndd = to_ndd(nd_mapping);
                size += nsl_get_rawsize(ndd, label0);
                if (nsl_get_position(ndd, label0) != 0)
                        continue;
                WARN_ON(nspm->alt_name || nspm->uuid);
                nspm->alt_name = kmemdup(nsl_ref_name(ndd, label0),
                                NSLABEL_NAME_LEN, GFP_KERNEL);
                nsl_get_uuid(ndd, label0, &uuid);
                nspm->uuid = kmemdup(&uuid, sizeof(uuid_t), GFP_KERNEL);
                nspm->lbasize = nsl_get_lbasize(ndd, label0);
                nspm->nsio.common.claim_class =
                        nsl_get_claim_class(ndd, label0);
        }

        if (!nspm->alt_name || !nspm->uuid) {
                rc = -ENOMEM;
                goto err;
        }

        nd_namespace_pmem_set_resource(nd_region, nspm, size);

        return dev;
 err:
        namespace_pmem_release(dev);
        switch (rc) {
        case -EINVAL:
                dev_dbg(&nd_region->dev, "invalid label(s)\n");
                break;
        case -ENODEV:
                dev_dbg(&nd_region->dev, "label not found\n");
                break;
        default:
                dev_dbg(&nd_region->dev, "unexpected err: %d\n", rc);
                break;
        }
        return ERR_PTR(rc);
}
static struct device *nd_namespace_pmem_create(struct nd_region *nd_region)
{
        struct nd_namespace_pmem *nspm;
        struct resource *res;
        struct device *dev;

        if (!is_memory(&nd_region->dev))
                return NULL;

        nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
        if (!nspm)
                return NULL;

        dev = &nspm->nsio.common.dev;
        dev->type = &namespace_pmem_device_type;
        dev->parent = &nd_region->dev;
        res = &nspm->nsio.res;
        res->name = dev_name(&nd_region->dev);
        res->flags = IORESOURCE_MEM;

        nspm->id = ida_simple_get(&nd_region->ns_ida, 0, 0, GFP_KERNEL);
        if (nspm->id < 0) {
                kfree(nspm);
                return NULL;
        }
        dev_set_name(dev, "namespace%d.%d", nd_region->id, nspm->id);
        nd_namespace_pmem_set_resource(nd_region, nspm, 0);

        return dev;
}
static struct lock_class_key nvdimm_namespace_key;

void nd_region_create_ns_seed(struct nd_region *nd_region)
{
        WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));

        if (nd_region_to_nstype(nd_region) == ND_DEVICE_NAMESPACE_IO)
                return;

        nd_region->ns_seed = nd_namespace_pmem_create(nd_region);

        /*
         * Seed creation failures are not fatal, provisioning is simply
         * disabled until memory becomes available
         */
        if (!nd_region->ns_seed)
                dev_err(&nd_region->dev, "failed to create namespace\n");
        else {
                device_initialize(nd_region->ns_seed);
                lockdep_set_class(&nd_region->ns_seed->mutex,
                                &nvdimm_namespace_key);
                nd_device_register(nd_region->ns_seed);
        }
}
void nd_region_create_dax_seed(struct nd_region *nd_region)
{
        WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
        nd_region->dax_seed = nd_dax_create(nd_region);
        /*
         * Seed creation failures are not fatal, provisioning is simply
         * disabled until memory becomes available
         */
        if (!nd_region->dax_seed)
                dev_err(&nd_region->dev, "failed to create dax namespace\n");
}

void nd_region_create_pfn_seed(struct nd_region *nd_region)
{
        WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
        nd_region->pfn_seed = nd_pfn_create(nd_region);
        /*
         * Seed creation failures are not fatal, provisioning is simply
         * disabled until memory becomes available
         */
        if (!nd_region->pfn_seed)
                dev_err(&nd_region->dev, "failed to create pfn namespace\n");
}

void nd_region_create_btt_seed(struct nd_region *nd_region)
{
        WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
        nd_region->btt_seed = nd_btt_create(nd_region);
        /*
         * Seed creation failures are not fatal, provisioning is simply
         * disabled until memory becomes available
         */
        if (!nd_region->btt_seed)
                dev_err(&nd_region->dev, "failed to create btt namespace\n");
}
static int add_namespace_resource(struct nd_region *nd_region,
                struct nd_namespace_label *nd_label, struct device **devs,
                int count)
{
        struct nd_mapping *nd_mapping = &nd_region->mapping[0];
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        int i;

        for (i = 0; i < count; i++) {
                uuid_t *uuid = namespace_to_uuid(devs[i]);

                if (IS_ERR(uuid)) {
                        WARN_ON(1);
                        continue;
                }

                if (!nsl_uuid_equal(ndd, nd_label, uuid))
                        continue;
                dev_err(&nd_region->dev,
                        "error: conflicting extents for uuid: %pUb\n", uuid);
                return -ENXIO;
        }

        return i;
}
static int cmp_dpa(const void *a, const void *b)
{
        const struct device *dev_a = *(const struct device **) a;
        const struct device *dev_b = *(const struct device **) b;
        struct nd_namespace_pmem *nspm_a, *nspm_b;

        if (is_namespace_io(dev_a))
                return 0;

        nspm_a = to_nd_namespace_pmem(dev_a);
        nspm_b = to_nd_namespace_pmem(dev_b);

        return memcmp(&nspm_a->nsio.res.start, &nspm_b->nsio.res.start,
                        sizeof(resource_size_t));
}
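
/*
 * Derive namespace devices from the labels of the region's first
 * mapping; if no labels are found, publish a zero-sized seed namespace
 * for userspace to configure.
 */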
static struct device **scan_labels(struct nd_region *nd_region)
{
        int i, count = 0;
        struct device *dev, **devs = NULL;
        struct nd_label_ent *label_ent, *e;
        struct nd_mapping *nd_mapping = &nd_region->mapping[0];
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        resource_size_t map_end = nd_mapping->start + nd_mapping->size - 1;

        /* "safe" because create_namespace_pmem() might list_move() label_ent */
        list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
                struct nd_namespace_label *nd_label = label_ent->label;
                struct device **__devs;

                if (!nd_label)
                        continue;

                /* skip labels that describe extents outside of the region */
                if (nsl_get_dpa(ndd, nd_label) < nd_mapping->start ||
                    nsl_get_dpa(ndd, nd_label) > map_end)
                        continue;

                i = add_namespace_resource(nd_region, nd_label, devs, count);
                if (i < 0)
                        goto err;
                if (i < count)
                        continue;
                __devs = kcalloc(count + 2, sizeof(dev), GFP_KERNEL);
                if (!__devs)
                        goto err;
                memcpy(__devs, devs, sizeof(dev) * count);
                kfree(devs);
                devs = __devs;

                dev = create_namespace_pmem(nd_region, nd_mapping, nd_label);
                if (IS_ERR(dev)) {
                        switch (PTR_ERR(dev)) {
                        case -EAGAIN:
                                /* skip invalid labels */
                                continue;
                        case -ENODEV:
                                /* fallthrough to seed creation */
                                break;
                        default:
                                goto err;
                        }
                } else
                        devs[count++] = dev;
        }

        dev_dbg(&nd_region->dev, "discovered %d namespace%s\n", count,
                count == 1 ? "" : "s");

        if (count == 0) {
                struct nd_namespace_pmem *nspm;

                /* Publish a zero-sized namespace for userspace to configure. */
                nd_mapping_free_labels(nd_mapping);
                devs = kcalloc(2, sizeof(dev), GFP_KERNEL);
                if (!devs)
                        goto err;
                nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
                if (!nspm)
                        goto err;
                dev = &nspm->nsio.common.dev;
                dev->type = &namespace_pmem_device_type;
                nd_namespace_pmem_set_resource(nd_region, nspm, 0);
                dev->parent = &nd_region->dev;
                devs[count++] = dev;
        } else if (is_memory(&nd_region->dev)) {
                /* clean unselected labels */
                for (i = 0; i < nd_region->ndr_mappings; i++) {
                        struct list_head *l, *e;
                        LIST_HEAD(list);
                        int j;

                        nd_mapping = &nd_region->mapping[i];
                        if (list_empty(&nd_mapping->labels)) {
                                WARN_ON(1);
                                continue;
                        }

                        j = count;
                        list_for_each_safe(l, e, &nd_mapping->labels) {
                                if (!j--)
                                        break;
                                list_move_tail(l, &list);
                        }
                        nd_mapping_free_labels(nd_mapping);
                        list_splice_init(&list, &nd_mapping->labels);
                }
        }

        sort(devs, count, sizeof(struct device *), cmp_dpa, NULL);

        return devs;

 err:
        if (devs) {
                for (i = 0; devs[i]; i++)
                        namespace_pmem_release(devs[i]);
                kfree(devs);
        }
        return NULL;
}
static struct device **create_namespaces(struct nd_region *nd_region)
{
        struct nd_mapping *nd_mapping;
        struct device **devs;
        int i;

        if (nd_region->ndr_mappings == 0)
                return NULL;

        /* lock down all mappings while we scan labels */
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                nd_mapping = &nd_region->mapping[i];
                mutex_lock_nested(&nd_mapping->lock, i);
        }

        devs = scan_labels(nd_region);

        for (i = 0; i < nd_region->ndr_mappings; i++) {
                int reverse = nd_region->ndr_mappings - 1 - i;

                nd_mapping = &nd_region->mapping[reverse];
                mutex_unlock(&nd_mapping->lock);
        }

        return devs;
}
static void deactivate_labels(void *region)
{
        struct nd_region *nd_region = region;
        int i;

        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm_drvdata *ndd = nd_mapping->ndd;
                struct nvdimm *nvdimm = nd_mapping->nvdimm;

                mutex_lock(&nd_mapping->lock);
                nd_mapping_free_labels(nd_mapping);
                mutex_unlock(&nd_mapping->lock);

                put_ndd(ndd);
                nd_mapping->ndd = NULL;
                if (ndd)
                        atomic_dec(&nvdimm->busy);
        }
}
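
/*
 * Pin each enabled dimm and populate the per-mapping label lists; a
 * locked dimm, or a disabled dimm that requires labels, fails region
 * activation.
 */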
static int init_active_labels(struct nd_region *nd_region)
{
        int i, rc = 0;

        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
                struct nvdimm *nvdimm = nd_mapping->nvdimm;
                struct nd_label_ent *label_ent;
                int count, j;

                /*
                 * If the dimm is disabled then we may need to prevent
                 * the region from being activated.
                 */
                if (!ndd) {
                        if (test_bit(NDD_LOCKED, &nvdimm->flags))
                                /* fail, label data may be unreadable */;
                        else if (test_bit(NDD_LABELING, &nvdimm->flags))
                                /* fail, labels needed to disambiguate dpa */;
                        else
                                continue;

                        dev_err(&nd_region->dev, "%s: is %s, failing probe\n",
                                dev_name(&nd_mapping->nvdimm->dev),
                                test_bit(NDD_LOCKED, &nvdimm->flags)
                                ? "locked" : "disabled");
                        rc = -ENXIO;
                        goto out;
                }
                nd_mapping->ndd = ndd;
                atomic_inc(&nvdimm->busy);
                get_ndd(ndd);

                count = nd_label_active_count(ndd);
                dev_dbg(ndd->dev, "count: %d\n", count);
                if (!count)
                        continue;
                for (j = 0; j < count; j++) {
                        struct nd_namespace_label *label;

                        label_ent = kzalloc(sizeof(*label_ent), GFP_KERNEL);
                        if (!label_ent)
                                break;
                        label = nd_label_active(ndd, j);
                        label_ent->label = label;

                        mutex_lock(&nd_mapping->lock);
                        list_add_tail(&label_ent->list, &nd_mapping->labels);
                        mutex_unlock(&nd_mapping->lock);
                }

                if (j < count)
                        break;
        }

        if (i < nd_region->ndr_mappings)
                rc = -ENOMEM;

out:
        if (rc) {
                deactivate_labels(nd_region);
                return rc;
        }

        return devm_add_action_or_reset(&nd_region->dev, deactivate_labels,
                                        nd_region);
}
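
/*
 * Scan the region for namespaces, then name and register each device
 * found; returns the count of registered namespaces and reports the
 * number of failed registrations via @err.
 */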
int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
{
        struct device **devs = NULL;
        int i, rc = 0, type;

        *err = 0;
        nvdimm_bus_lock(&nd_region->dev);
        rc = init_active_labels(nd_region);
        if (rc) {
                nvdimm_bus_unlock(&nd_region->dev);
                return rc;
        }

        type = nd_region_to_nstype(nd_region);
        switch (type) {
        case ND_DEVICE_NAMESPACE_IO:
                devs = create_namespace_io(nd_region);
                break;
        case ND_DEVICE_NAMESPACE_PMEM:
                devs = create_namespaces(nd_region);
                break;
        default:
                break;
        }
        nvdimm_bus_unlock(&nd_region->dev);

        if (!devs)
                return -ENODEV;

        for (i = 0; devs[i]; i++) {
                struct device *dev = devs[i];
                int id;

                if (type == ND_DEVICE_NAMESPACE_PMEM) {
                        struct nd_namespace_pmem *nspm;

                        nspm = to_nd_namespace_pmem(dev);
                        id = ida_simple_get(&nd_region->ns_ida, 0, 0,
                                        GFP_KERNEL);
                        nspm->id = id;
                } else
                        id = i;

                if (id < 0)
                        break;
                dev_set_name(dev, "namespace%d.%d", nd_region->id, id);
                device_initialize(dev);
                lockdep_set_class(&dev->mutex, &nvdimm_namespace_key);
                nd_device_register(dev);
        }
        if (i)
                nd_region->ns_seed = devs[0];

        if (devs[i]) {
                int j;

                for (j = i; devs[j]; j++) {
                        struct device *dev = devs[j];

                        device_initialize(dev);
                        put_device(dev);
                }
                *err = j - i;
                /*
                 * All of the namespaces we tried to register failed, so
                 * fail region activation.
                 */
                if (*err == 0)
                        rc = -ENODEV;
        }
        kfree(devs);

        if (rc == -ENODEV)
                return rc;

        return i;
}