// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/uuid.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "label.h"
#include "nd.h"

static guid_t nvdimm_btt_guid;
static guid_t nvdimm_btt2_guid;
static guid_t nvdimm_pfn_guid;
static guid_t nvdimm_dax_guid;

static const char NSINDEX_SIGNATURE[] = "NAMESPACE_INDEX\0";

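/*
 * Pick the newer of two 2-bit sequence numbers, where valid values
 * cycle 1 -> 2 -> 3 -> 1 via nd_inc_seq() and 0 is invalid. When b is
 * the direct successor of a, b wins; ties and invalid values fall out
 * of the comparisons below.
 */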
static u32 best_seq(u32 a, u32 b)
{
	a &= NSINDEX_SEQ_MASK;
	b &= NSINDEX_SEQ_MASK;

	if (a == 0 || a == b)
		return b;
	else if (b == 0)
		return a;
	else if (nd_inc_seq(a) == b)
		return b;
	else
		return a;
}

unsigned sizeof_namespace_label(struct nvdimm_drvdata *ndd)
{
	return ndd->nslabel_size;
}

static size_t __sizeof_namespace_index(u32 nslot)
{
	return ALIGN(sizeof(struct nd_namespace_index) + DIV_ROUND_UP(nslot, 8),
			NSINDEX_ALIGN);
}

static int __nvdimm_num_label_slots(struct nvdimm_drvdata *ndd,
		size_t index_size)
{
	return (ndd->nsarea.config_size - index_size * 2) /
			sizeof_namespace_label(ndd);
}

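/*
 * Estimate the number of label slots: take the worst-case slot count
 * (as if the whole config area held labels), size the index block for
 * that count, then recompute how many labels fit alongside two such
 * index blocks.
 */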
int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd)
{
	u32 tmp_nslot, n;

	tmp_nslot = ndd->nsarea.config_size / sizeof_namespace_label(ndd);
	n = __sizeof_namespace_index(tmp_nslot) / NSINDEX_ALIGN;

	return __nvdimm_num_label_slots(ndd, NSINDEX_ALIGN * n);
}

size_t sizeof_namespace_index(struct nvdimm_drvdata *ndd)
{
	u32 nslot, space, size;

	/*
	 * Per UEFI 2.7, the minimum size of the Label Storage Area is large
	 * enough to hold 2 index blocks and 2 labels.  The minimum index
	 * block size is 256 bytes. The label size is 128 for namespaces
	 * prior to version 1.2 and at minimum 256 for version 1.2 and later.
	 */
	nslot = nvdimm_num_label_slots(ndd);
	space = ndd->nsarea.config_size - nslot * sizeof_namespace_label(ndd);
	size = __sizeof_namespace_index(nslot) * 2;
	if (size <= space && nslot >= 2)
		return size / 2;

	dev_err(ndd->dev, "label area (%d) too small to host (%d byte) labels\n",
			ndd->nsarea.config_size, sizeof_namespace_label(ndd));
	return 0;
}

static int __nd_label_validate(struct nvdimm_drvdata *ndd)
{
	/*
	 * On media label format consists of two index blocks followed
	 * by an array of labels.  None of these structures are ever
	 * updated in place.  A sequence number tracks the current
	 * active index and the next one to write, while labels are
	 * written to free slots.
	 *
	 *     +------------+
	 *     |            |
	 *     |  nsindex0  |
	 *     |            |
	 *     +------------+
	 *     |            |
	 *     |  nsindex1  |
	 *     |            |
	 *     +------------+
	 *     |   label0   |
	 *     +------------+
	 *     |   label1   |
	 *     +------------+
	 *     |            |
	 *      ....nslot...
	 *     |            |
	 *     +------------+
	 *     |   labelN   |
	 *     +------------+
	 */
	struct nd_namespace_index *nsindex[] = {
		to_namespace_index(ndd, 0),
		to_namespace_index(ndd, 1),
	};
	const int num_index = ARRAY_SIZE(nsindex);
	struct device *dev = ndd->dev;
	bool valid[2] = { 0 };
	int i, num_valid = 0;
	u32 seq;

	for (i = 0; i < num_index; i++) {
		u32 nslot;
		u8 sig[NSINDEX_SIG_LEN];
		u64 sum_save, sum, size;
		unsigned int version, labelsize;

		memcpy(sig, nsindex[i]->sig, NSINDEX_SIG_LEN);
		if (memcmp(sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN) != 0) {
			dev_dbg(dev, "nsindex%d signature invalid\n", i);
			continue;
		}

		/* label sizes larger than 128 arrived with v1.2 */
		version = __le16_to_cpu(nsindex[i]->major) * 100
			+ __le16_to_cpu(nsindex[i]->minor);
		if (version >= 102)
			labelsize = 1 << (7 + nsindex[i]->labelsize);
		else
			labelsize = 128;

		if (labelsize != sizeof_namespace_label(ndd)) {
			dev_dbg(dev, "nsindex%d labelsize %d invalid\n",
					i, nsindex[i]->labelsize);
			continue;
		}

		sum_save = __le64_to_cpu(nsindex[i]->checksum);
		nsindex[i]->checksum = __cpu_to_le64(0);
		sum = nd_fletcher64(nsindex[i], sizeof_namespace_index(ndd), 1);
		nsindex[i]->checksum = __cpu_to_le64(sum_save);
		if (sum != sum_save) {
			dev_dbg(dev, "nsindex%d checksum invalid\n", i);
			continue;
		}

		seq = __le32_to_cpu(nsindex[i]->seq);
		if ((seq & NSINDEX_SEQ_MASK) == 0) {
			dev_dbg(dev, "nsindex%d sequence: %#x invalid\n", i, seq);
			continue;
		}

		/* sanity check the index against expected values */
		if (__le64_to_cpu(nsindex[i]->myoff)
				!= i * sizeof_namespace_index(ndd)) {
			dev_dbg(dev, "nsindex%d myoff: %#llx invalid\n",
					i, (unsigned long long)
					__le64_to_cpu(nsindex[i]->myoff));
			continue;
		}
		if (__le64_to_cpu(nsindex[i]->otheroff)
				!= (!i) * sizeof_namespace_index(ndd)) {
			dev_dbg(dev, "nsindex%d otheroff: %#llx invalid\n",
					i, (unsigned long long)
					__le64_to_cpu(nsindex[i]->otheroff));
			continue;
		}
		if (__le64_to_cpu(nsindex[i]->labeloff)
				!= 2 * sizeof_namespace_index(ndd)) {
			dev_dbg(dev, "nsindex%d labeloff: %#llx invalid\n",
					i, (unsigned long long)
					__le64_to_cpu(nsindex[i]->labeloff));
			continue;
		}

		size = __le64_to_cpu(nsindex[i]->mysize);
		if (size > sizeof_namespace_index(ndd)
				|| size < sizeof(struct nd_namespace_index)) {
			dev_dbg(dev, "nsindex%d mysize: %#llx invalid\n", i, size);
			continue;
		}

		nslot = __le32_to_cpu(nsindex[i]->nslot);
		if (nslot * sizeof_namespace_label(ndd)
				+ 2 * sizeof_namespace_index(ndd)
				> ndd->nsarea.config_size) {
			dev_dbg(dev, "nsindex%d nslot: %u invalid, config_size: %#x\n",
					i, nslot, ndd->nsarea.config_size);
			continue;
		}
		valid[i] = true;
		num_valid++;
	}

	switch (num_valid) {
	case 0:
		break;
	case 1:
		for (i = 0; i < num_index; i++)
			if (valid[i])
				return i;
		/* can't have num_valid > 0 but valid[] = { false, false } */
		WARN_ON(1);
		break;
	default:
		/* pick the best index... */
		seq = best_seq(__le32_to_cpu(nsindex[0]->seq),
				__le32_to_cpu(nsindex[1]->seq));
		if (seq == (__le32_to_cpu(nsindex[1]->seq) & NSINDEX_SEQ_MASK))
			return 1;
		else
			return 0;
		break;
	}

	return -1;
}

static int nd_label_validate(struct nvdimm_drvdata *ndd)
{
	/*
	 * In order to probe for and validate namespace index blocks we
	 * need to know the size of the labels, and we can't trust the
	 * size of the labels until we validate the index blocks.
	 * Resolve this dependency loop by probing for known label
	 * sizes, but default to v1.2 256-byte namespace labels if
	 * discovery fails.
	 */
	int label_size[] = { 128, 256 };
	int i, rc;

	for (i = 0; i < ARRAY_SIZE(label_size); i++) {
		ndd->nslabel_size = label_size[i];
		rc = __nd_label_validate(ndd);
		if (rc >= 0)
			return rc;
	}

	return -1;
}

static void nd_label_copy(struct nvdimm_drvdata *ndd,
			  struct nd_namespace_index *dst,
			  struct nd_namespace_index *src)
{
	/* just exit if either destination or source is NULL */
	if (!dst || !src)
		return;

	memcpy(dst, src, sizeof_namespace_index(ndd));
}

static struct nd_namespace_label *nd_label_base(struct nvdimm_drvdata *ndd)
{
	void *base = to_namespace_index(ndd, 0);

	return base + 2 * sizeof_namespace_index(ndd);
}

static int to_slot(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label)
{
	unsigned long label, base;

	label = (unsigned long) nd_label;
	base = (unsigned long) nd_label_base(ndd);

	return (label - base) / sizeof_namespace_label(ndd);
}

static struct nd_namespace_label *to_label(struct nvdimm_drvdata *ndd, int slot)
{
	unsigned long label, base;

	base = (unsigned long) nd_label_base(ndd);
	label = base + sizeof_namespace_label(ndd) * slot;

	return (struct nd_namespace_label *) label;
}

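/*
 * A set bit in the on-media free list means the slot is free; walking
 * the clear bits therefore visits every allocated (potentially active)
 * label slot.
 */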
#define for_each_clear_bit_le(bit, addr, size) \
	for ((bit) = find_next_zero_bit_le((addr), (size), 0);  \
	     (bit) < (size);                                    \
	     (bit) = find_next_zero_bit_le((addr), (size), (bit) + 1))

/**
 * preamble_index - common variable initialization for nd_label_* routines
 * @ndd: dimm container for the relevant label set
 * @idx: namespace_index index
 * @nsindex_out: on return set to the currently active namespace index
 * @free: on return set to the free label bitmap in the index
 * @nslot: on return set to the number of slots in the label space
 */
static bool preamble_index(struct nvdimm_drvdata *ndd, int idx,
		struct nd_namespace_index **nsindex_out,
		unsigned long **free, u32 *nslot)
{
	struct nd_namespace_index *nsindex;

	nsindex = to_namespace_index(ndd, idx);
	if (nsindex == NULL)
		return false;

	*free = (unsigned long *) nsindex->free;
	*nslot = __le32_to_cpu(nsindex->nslot);
	*nsindex_out = nsindex;

	return true;
}

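/*
 * Format the "<type>-<uuid>" identifier that names the DPA resource
 * backing a namespace, e.g. "pmem-<uuid>" or "blk-<uuid>".
 */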
char *nd_label_gen_id(struct nd_label_id *label_id, u8 *uuid, u32 flags)
{
	if (!label_id || !uuid)
		return NULL;
	snprintf(label_id->id, ND_LABEL_ID_SIZE, "%s-%pUb",
			flags & NSLABEL_FLAG_LOCAL ? "blk" : "pmem", uuid);
	return label_id->id;
}

static bool preamble_current(struct nvdimm_drvdata *ndd,
		struct nd_namespace_index **nsindex,
		unsigned long **free, u32 *nslot)
{
	return preamble_index(ndd, ndd->ns_current, nsindex,
			free, nslot);
}

static bool preamble_next(struct nvdimm_drvdata *ndd,
		struct nd_namespace_index **nsindex,
		unsigned long **free, u32 *nslot)
{
	return preamble_index(ndd, ndd->ns_next, nsindex,
			free, nslot);
}

static bool nsl_validate_checksum(struct nvdimm_drvdata *ndd,
				  struct nd_namespace_label *nd_label)
{
	u64 sum, sum_save;

	if (!namespace_label_has(ndd, checksum))
		return true;

	sum_save = nsl_get_checksum(ndd, nd_label);
	nsl_set_checksum(ndd, nd_label, 0);
	sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
	nsl_set_checksum(ndd, nd_label, sum_save);
	return sum == sum_save;
}

static void nsl_calculate_checksum(struct nvdimm_drvdata *ndd,
				   struct nd_namespace_label *nd_label)
{
	u64 sum;

	if (!namespace_label_has(ndd, checksum))
		return;
	nsl_set_checksum(ndd, nd_label, 0);
	sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
	nsl_set_checksum(ndd, nd_label, sum);
}

static bool slot_valid(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label, u32 slot)
{
	bool valid;

	/* check that we are written where we expect to be written */
	if (slot != nsl_get_slot(ndd, nd_label))
		return false;
	valid = nsl_validate_checksum(ndd, nd_label);
	if (!valid)
		dev_dbg(ndd->dev, "fail checksum. slot: %d\n", slot);
	return valid;
}

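/*
 * Scan all active labels in the current index and reserve their device
 * physical address ranges so subsequent allocations cannot collide
 * with existing namespaces.
 */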
int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return 0; /* no label, nothing to reserve */

	for_each_clear_bit_le(slot, free, nslot) {
		struct nvdimm *nvdimm = to_nvdimm(ndd->dev);
		struct nd_namespace_label *nd_label;
		struct nd_region *nd_region = NULL;
		u8 label_uuid[NSLABEL_UUID_LEN];
		struct nd_label_id label_id;
		struct resource *res;
		u32 flags;

		nd_label = to_label(ndd, slot);

		if (!slot_valid(ndd, nd_label, slot))
			continue;

		memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN);
		flags = nsl_get_flags(ndd, nd_label);
		if (test_bit(NDD_NOBLK, &nvdimm->flags))
			flags &= ~NSLABEL_FLAG_LOCAL;
		nd_label_gen_id(&label_id, label_uuid, flags);
		res = nvdimm_allocate_dpa(ndd, &label_id,
				nsl_get_dpa(ndd, nd_label),
				nsl_get_rawsize(ndd, nd_label));
		nd_dbg_dpa(nd_region, ndd, res, "reserve\n");
		if (!res)
			return -EBUSY;
	}

	return 0;
}

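/*
 * Read the label storage area from the dimm: fetch and validate the
 * index blocks first, then pull in only the label slots that the free
 * list marks as active, batching reads by the dimm's max transfer size.
 */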
int nd_label_data_init(struct nvdimm_drvdata *ndd)
{
	size_t config_size, read_size, max_xfer, offset;
	struct nd_namespace_index *nsindex;
	unsigned int i;
	int rc = 0;
	u32 nslot;

	if (ndd->data)
		return 0;

	if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0) {
		dev_dbg(ndd->dev, "failed to init config data area: (%u:%u)\n",
			ndd->nsarea.max_xfer, ndd->nsarea.config_size);
		return -ENXIO;
	}

	/*
	 * We need to determine the maximum index area as this is the section
	 * we must read and validate before we can start processing labels.
	 *
	 * If the area is too small to contain the two indexes and 2 labels
	 * then we abort.
	 *
	 * Start at a label size of 128 as this should result in the largest
	 * possible namespace index size.
	 */
	ndd->nslabel_size = 128;
	read_size = sizeof_namespace_index(ndd) * 2;
	if (!read_size)
		return -ENXIO;

	/* Allocate config data */
	config_size = ndd->nsarea.config_size;
	ndd->data = kvzalloc(config_size, GFP_KERNEL);
	if (!ndd->data)
		return -ENOMEM;

	/*
	 * We want to guarantee as few reads as possible while conserving
	 * memory. To do that we figure out how much unused space will be left
	 * in the last read, divide that by the total number of reads it is
	 * going to take given our maximum transfer size, and then reduce our
	 * maximum transfer size based on that result.
	 */
	max_xfer = min_t(size_t, ndd->nsarea.max_xfer, config_size);
	if (read_size < max_xfer) {
		/* trim waste of space at the end */
		max_xfer -= ((max_xfer - 1) - (config_size - 1) % max_xfer) /
			    DIV_ROUND_UP(config_size, max_xfer);
		/* make certain we read indexes in exactly 1 read */
		if (max_xfer < read_size)
			max_xfer = read_size;
	}

	/* Make our initial read size a multiple of max_xfer size */
	read_size = min(DIV_ROUND_UP(read_size, max_xfer) * max_xfer,
			config_size);

	/* Read the index data */
	rc = nvdimm_get_config_data(ndd, ndd->data, 0, read_size);
	if (rc)
		goto out_err;

	/* Validate index data, if not valid assume all labels are invalid */
	ndd->ns_current = nd_label_validate(ndd);
	if (ndd->ns_current < 0)
		return 0;

	/* Record our index values */
	ndd->ns_next = nd_label_next_nsindex(ndd->ns_current);

	/* Copy "current" index on top of the "next" index */
	nsindex = to_current_namespace_index(ndd);
	nd_label_copy(ndd, to_next_namespace_index(ndd), nsindex);

	/* Determine starting offset for label data */
	offset = __le64_to_cpu(nsindex->labeloff);
	nslot = __le32_to_cpu(nsindex->nslot);

	/* Loop through the free list pulling in any active labels */
	for (i = 0; i < nslot; i++, offset += ndd->nslabel_size) {
		size_t label_read_size;

		/* zero out the unused labels */
		if (test_bit_le(i, nsindex->free)) {
			memset(ndd->data + offset, 0, ndd->nslabel_size);
			continue;
		}

		/* if we already read past here then just continue */
		if (offset + ndd->nslabel_size <= read_size)
			continue;

		/* if we haven't read in a while reset our read_size offset */
		if (read_size < offset)
			read_size = offset;

		/* determine how much more will be read after this next call. */
		label_read_size = offset + ndd->nslabel_size - read_size;
		label_read_size = DIV_ROUND_UP(label_read_size, max_xfer) *
				  max_xfer;

		/* truncate last read if needed */
		if (read_size + label_read_size > config_size)
			label_read_size = config_size - read_size;

		/* Read the label data */
		rc = nvdimm_get_config_data(ndd, ndd->data + read_size,
					    read_size, label_read_size);
		if (rc)
			goto out_err;

		/* push read_size to next read offset */
		read_size += label_read_size;
	}

	dev_dbg(ndd->dev, "len: %zu rc: %d\n", offset, rc);
out_err:
	return rc;
}

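/* Count the allocated slots whose labels pass slot and checksum validation. */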
int nd_label_active_count(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;
	int count = 0;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return 0;

	for_each_clear_bit_le(slot, free, nslot) {
		struct nd_namespace_label *nd_label;

		nd_label = to_label(ndd, slot);

		if (!slot_valid(ndd, nd_label, slot)) {
			u32 label_slot = nsl_get_slot(ndd, nd_label);
			u64 size = nsl_get_rawsize(ndd, nd_label);
			u64 dpa = nsl_get_dpa(ndd, nd_label);

			dev_dbg(ndd->dev,
				"slot%d invalid slot: %d dpa: %llx size: %llx\n",
				slot, label_slot, dpa, size);
			continue;
		}
		count++;
	}
	return count;
}

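/*
 * Return the label for the n'th (zero-based) valid slot in the current
 * index, or NULL if fewer than n + 1 active labels exist.
 */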
struct nd_namespace_label *nd_label_active(struct nvdimm_drvdata *ndd, int n)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return NULL;

	for_each_clear_bit_le(slot, free, nslot) {
		struct nd_namespace_label *nd_label;

		nd_label = to_label(ndd, slot);
		if (!slot_valid(ndd, nd_label, slot))
			continue;

		if (n-- == 0)
			return to_label(ndd, slot);
	}

	return NULL;
}

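/*
 * Claim the first free slot in the 'next' index. The updated free list
 * only reaches the media when nd_label_write_index() publishes that
 * index, so an aborted update can repair it by re-copying 'current'.
 */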
u32 nd_label_alloc_slot(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return UINT_MAX;

	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

	slot = find_next_bit_le(free, nslot, 0);
	if (slot == nslot)
		return UINT_MAX;

	clear_bit_le(slot, free);

	return slot;
}

bool nd_label_free_slot(struct nvdimm_drvdata *ndd, u32 slot)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return false;

	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

	if (slot < nslot)
		return !test_and_set_bit_le(slot, free);
	return false;
}

u32 nd_label_nfree(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot;

	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return nvdimm_num_label_slots(ndd);

	return bitmap_weight(free, nslot);
}

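/*
 * Publish an index block: stamp the signature, layout offsets, and
 * sequence number, fill the free list on first init, checksum the
 * block, write it to media, and then rotate the in-memory notion of
 * 'current' and 'next'.
 */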
static int nd_label_write_index(struct nvdimm_drvdata *ndd, int index, u32 seq,
		unsigned long flags)
{
	struct nd_namespace_index *nsindex;
	unsigned long offset;
	u64 checksum;
	u32 nslot;
	int rc;

	nsindex = to_namespace_index(ndd, index);
	if (flags & ND_NSINDEX_INIT)
		nslot = nvdimm_num_label_slots(ndd);
	else
		nslot = __le32_to_cpu(nsindex->nslot);

	memcpy(nsindex->sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN);
	memset(&nsindex->flags, 0, 3);
	nsindex->labelsize = sizeof_namespace_label(ndd) >> 8;
	nsindex->seq = __cpu_to_le32(seq);
	offset = (unsigned long) nsindex
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->myoff = __cpu_to_le64(offset);
	nsindex->mysize = __cpu_to_le64(sizeof_namespace_index(ndd));
	offset = (unsigned long) to_namespace_index(ndd,
			nd_label_next_nsindex(index))
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->otheroff = __cpu_to_le64(offset);
	offset = (unsigned long) nd_label_base(ndd)
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->labeloff = __cpu_to_le64(offset);
	nsindex->nslot = __cpu_to_le32(nslot);
	nsindex->major = __cpu_to_le16(1);
	if (sizeof_namespace_label(ndd) < 256)
		nsindex->minor = __cpu_to_le16(1);
	else
		nsindex->minor = __cpu_to_le16(2);
	nsindex->checksum = __cpu_to_le64(0);
	if (flags & ND_NSINDEX_INIT) {
		unsigned long *free = (unsigned long *) nsindex->free;
		u32 nfree = ALIGN(nslot, BITS_PER_LONG);
		int last_bits, i;

		memset(nsindex->free, 0xff, nfree / 8);
		for (i = 0, last_bits = nfree - nslot; i < last_bits; i++)
			clear_bit_le(nslot + i, free);
	}
	checksum = nd_fletcher64(nsindex, sizeof_namespace_index(ndd), 1);
	nsindex->checksum = __cpu_to_le64(checksum);
	rc = nvdimm_set_config_data(ndd, __le64_to_cpu(nsindex->myoff),
			nsindex, sizeof_namespace_index(ndd));
	if (rc < 0)
		return rc;

	if (flags & ND_NSINDEX_INIT)
		return 0;

	/* copy the index we just wrote to the new 'next' */
	WARN_ON(index != ndd->ns_next);
	nd_label_copy(ndd, to_current_namespace_index(ndd), nsindex);
	ndd->ns_current = nd_label_next_nsindex(ndd->ns_current);
	ndd->ns_next = nd_label_next_nsindex(ndd->ns_next);
	WARN_ON(ndd->ns_current == ndd->ns_next);

	return 0;
}

static unsigned long nd_label_offset(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label)
{
	return (unsigned long) nd_label
		- (unsigned long) to_namespace_index(ndd, 0);
}

static enum nvdimm_claim_class to_nvdimm_cclass(guid_t *guid)
{
	if (guid_equal(guid, &nvdimm_btt_guid))
		return NVDIMM_CCLASS_BTT;
	else if (guid_equal(guid, &nvdimm_btt2_guid))
		return NVDIMM_CCLASS_BTT2;
	else if (guid_equal(guid, &nvdimm_pfn_guid))
		return NVDIMM_CCLASS_PFN;
	else if (guid_equal(guid, &nvdimm_dax_guid))
		return NVDIMM_CCLASS_DAX;
	else if (guid_equal(guid, &guid_null))
		return NVDIMM_CCLASS_NONE;

	return NVDIMM_CCLASS_UNKNOWN;
}

static const guid_t *to_abstraction_guid(enum nvdimm_claim_class claim_class,
	guid_t *target)
{
	if (claim_class == NVDIMM_CCLASS_BTT)
		return &nvdimm_btt_guid;
	else if (claim_class == NVDIMM_CCLASS_BTT2)
		return &nvdimm_btt2_guid;
	else if (claim_class == NVDIMM_CCLASS_PFN)
		return &nvdimm_pfn_guid;
	else if (claim_class == NVDIMM_CCLASS_DAX)
		return &nvdimm_dax_guid;
	else if (claim_class == NVDIMM_CCLASS_UNKNOWN) {
		/*
		 * If we're modifying a namespace for which we don't
		 * know the claim_class, don't touch the existing guid.
		 */
		return target;
	} else
		return &guid_null;
}

static void reap_victim(struct nd_mapping *nd_mapping,
		struct nd_label_ent *victim)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	u32 slot = to_slot(ndd, victim->label);

	dev_dbg(ndd->dev, "free: %d\n", slot);
	nd_label_free_slot(ndd, slot);
	victim->label = NULL;
}

static void nsl_set_type_guid(struct nvdimm_drvdata *ndd,
			      struct nd_namespace_label *nd_label, guid_t *guid)
{
	if (namespace_label_has(ndd, type_guid))
		guid_copy(&nd_label->type_guid, guid);
}

bool nsl_validate_type_guid(struct nvdimm_drvdata *ndd,
			    struct nd_namespace_label *nd_label, guid_t *guid)
{
	if (!namespace_label_has(ndd, type_guid))
		return true;
	if (!guid_equal(&nd_label->type_guid, guid)) {
		dev_dbg(ndd->dev, "expect type_guid %pUb got %pUb\n", guid,
			&nd_label->type_guid);
		return false;
	}
	return true;
}

static void nsl_set_claim_class(struct nvdimm_drvdata *ndd,
				struct nd_namespace_label *nd_label,
				enum nvdimm_claim_class claim_class)
{
	if (!namespace_label_has(ndd, abstraction_guid))
		return;
	guid_copy(&nd_label->abstraction_guid,
		  to_abstraction_guid(claim_class,
				      &nd_label->abstraction_guid));
}

enum nvdimm_claim_class nsl_get_claim_class(struct nvdimm_drvdata *ndd,
					    struct nd_namespace_label *nd_label)
{
	if (!namespace_label_has(ndd, abstraction_guid))
		return NVDIMM_CCLASS_NONE;
	return to_nvdimm_cclass(&nd_label->abstraction_guid);
}

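/*
 * Write an updated label for a pmem namespace into a freshly allocated
 * slot, garbage collect the label(s) it supersedes, and commit the
 * transaction by writing the 'next' index with an incremented sequence
 * number.
 */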
static int __pmem_label_update(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm,
		int pos, unsigned long flags)
{
	struct nd_namespace_common *ndns = &nspm->nsio.common;
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_namespace_label *nd_label;
	struct nd_namespace_index *nsindex;
	struct nd_label_ent *label_ent;
	struct nd_label_id label_id;
	struct resource *res;
	unsigned long *free;
	u32 nslot, slot;
	size_t offset;
	u64 cookie;
	int rc;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return -ENXIO;

	cookie = nd_region_interleave_set_cookie(nd_region, nsindex);
	nd_label_gen_id(&label_id, nspm->uuid, 0);
	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id.id) == 0)
			break;

	if (!res) {
		WARN_ON_ONCE(1);
		return -ENXIO;
	}

	/* allocate and write the label to the staging (next) index */
	slot = nd_label_alloc_slot(ndd);
	if (slot == UINT_MAX)
		return -ENXIO;
	dev_dbg(ndd->dev, "allocated: %d\n", slot);

	nd_label = to_label(ndd, slot);
	memset(nd_label, 0, sizeof_namespace_label(ndd));
	memcpy(nd_label->uuid, nspm->uuid, NSLABEL_UUID_LEN);
	nsl_set_name(ndd, nd_label, nspm->alt_name);
	nsl_set_flags(ndd, nd_label, flags);
	nsl_set_nlabel(ndd, nd_label, nd_region->ndr_mappings);
	nsl_set_position(ndd, nd_label, pos);
	nsl_set_isetcookie(ndd, nd_label, cookie);
	nsl_set_rawsize(ndd, nd_label, resource_size(res));
	nsl_set_lbasize(ndd, nd_label, nspm->lbasize);
	nsl_set_dpa(ndd, nd_label, res->start);
	nsl_set_slot(ndd, nd_label, slot);
	nsl_set_type_guid(ndd, nd_label, &nd_set->type_guid);
	nsl_set_claim_class(ndd, nd_label, ndns->claim_class);
	nsl_calculate_checksum(ndd, nd_label);
	nd_dbg_dpa(nd_region, ndd, res, "\n");

	/* update label */
	offset = nd_label_offset(ndd, nd_label);
	rc = nvdimm_set_config_data(ndd, offset, nd_label,
			sizeof_namespace_label(ndd));
	if (rc < 0)
		return rc;

	/* Garbage collect the previous label */
	mutex_lock(&nd_mapping->lock);
	list_for_each_entry(label_ent, &nd_mapping->labels, list) {
		if (!label_ent->label)
			continue;
		if (test_and_clear_bit(ND_LABEL_REAP, &label_ent->flags)
				|| memcmp(nspm->uuid, label_ent->label->uuid,
					NSLABEL_UUID_LEN) == 0)
			reap_victim(nd_mapping, label_ent);
	}

	/* update index */
	rc = nd_label_write_index(ndd, ndd->ns_next,
			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
	if (rc == 0) {
		list_for_each_entry(label_ent, &nd_mapping->labels, list)
			if (!label_ent->label) {
				label_ent->label = nd_label;
				nd_label = NULL;
				break;
			}
		dev_WARN_ONCE(&nspm->nsio.common.dev, nd_label,
				"failed to track label: %d\n",
				to_slot(ndd, nd_label));
		if (nd_label)
			rc = -ENXIO;
	}
	mutex_unlock(&nd_mapping->lock);

	return rc;
}

static bool is_old_resource(struct resource *res, struct resource **list, int n)
{
	int i;

	if (res->flags & DPA_RESOURCE_ADJUSTED)
		return false;
	for (i = 0; i < n; i++)
		if (res == list[i])
			return true;
	return false;
}

static struct resource *to_resource(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label)
{
	struct resource *res;

	for_each_dpa_resource(ndd, res) {
		if (res->start != nsl_get_dpa(ndd, nd_label))
			continue;
		if (resource_size(res) != nsl_get_rawsize(ndd, nd_label))
			continue;
		return res;
	}

	return NULL;
}

/*
 * Use the presence of the type_guid as a flag to determine isetcookie
 * usage and nlabel + position policy for blk-aperture namespaces.
 */
static void nsl_set_blk_isetcookie(struct nvdimm_drvdata *ndd,
				   struct nd_namespace_label *nd_label,
				   u64 isetcookie)
{
	if (namespace_label_has(ndd, type_guid)) {
		nsl_set_isetcookie(ndd, nd_label, isetcookie);
		return;
	}
	nsl_set_isetcookie(ndd, nd_label, 0); /* N/A */
}

bool nsl_validate_blk_isetcookie(struct nvdimm_drvdata *ndd,
				 struct nd_namespace_label *nd_label,
				 u64 isetcookie)
{
	if (!namespace_label_has(ndd, type_guid))
		return true;

	if (nsl_get_isetcookie(ndd, nd_label) != isetcookie) {
		dev_dbg(ndd->dev, "expect cookie %#llx got %#llx\n", isetcookie,
			nsl_get_isetcookie(ndd, nd_label));
		return false;
	}

	return true;
}

static void nsl_set_blk_nlabel(struct nvdimm_drvdata *ndd,
			       struct nd_namespace_label *nd_label, int nlabel,
			       bool first)
{
	if (!namespace_label_has(ndd, type_guid)) {
		nsl_set_nlabel(ndd, nd_label, 0); /* N/A */
		return;
	}
	nsl_set_nlabel(ndd, nd_label, first ? nlabel : 0xffff);
}

static void nsl_set_blk_position(struct nvdimm_drvdata *ndd,
				 struct nd_namespace_label *nd_label,
				 bool first)
{
	if (!namespace_label_has(ndd, type_guid)) {
		nsl_set_position(ndd, nd_label, 0);
		return;
	}
	nsl_set_position(ndd, nd_label, first ? 0 : 0xffff);
}

/*
 * 1/ Account all the labels that can be freed after this update
 * 2/ Allocate and write the label to the staging (next) index
 * 3/ Record the resources in the namespace device
 */
static int __blk_label_update(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_namespace_blk *nsblk,
		int num_labels)
{
	int i, alloc, victims, nfree, old_num_resources, nlabel, rc = -ENXIO;
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	struct nd_namespace_common *ndns = &nsblk->common;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_namespace_label *nd_label;
	struct nd_label_ent *label_ent, *e;
	struct nd_namespace_index *nsindex;
	unsigned long *free, *victim_map = NULL;
	struct resource *res, **old_res_list;
	struct nd_label_id label_id;
	u8 uuid[NSLABEL_UUID_LEN];
	int min_dpa_idx = 0;
	LIST_HEAD(list);
	u32 nslot, slot;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return -ENXIO;

	old_res_list = nsblk->res;
	nfree = nd_label_nfree(ndd);
	old_num_resources = nsblk->num_resources;
	nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);

	/*
	 * We need to loop over the old resources a few times, which seems a
	 * bit inefficient, but we need to know that we have the label
	 * space before we start mutating the tracking structures.
	 * Otherwise the recovery method of last resort for userspace is
	 * disable and re-enable the parent region.
	 */
	alloc = 0;
	for_each_dpa_resource(ndd, res) {
		if (strcmp(res->name, label_id.id) != 0)
			continue;
		if (!is_old_resource(res, old_res_list, old_num_resources))
			alloc++;
	}

	victims = 0;
	if (old_num_resources) {
		/* convert old local-label-map to dimm-slot victim-map */
		victim_map = bitmap_zalloc(nslot, GFP_KERNEL);
		if (!victim_map)
			return -ENOMEM;

		/* mark unused labels for garbage collection */
		for_each_clear_bit_le(slot, free, nslot) {
			nd_label = to_label(ndd, slot);
			memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
			if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
				continue;
			res = to_resource(ndd, nd_label);
			if (res && is_old_resource(res, old_res_list,
						old_num_resources))
				continue;
			slot = to_slot(ndd, nd_label);
			set_bit(slot, victim_map);
			victims++;
		}
	}

	/* don't allow updates that consume the last label */
	if (nfree - alloc < 0 || nfree - alloc + victims < 1) {
		dev_info(&nsblk->common.dev, "insufficient label space\n");
		bitmap_free(victim_map);
		return -ENOSPC;
	}
	/* from here on we need to abort on error */

	/* assign all resources to the namespace before writing the labels */
	nsblk->res = NULL;
	nsblk->num_resources = 0;
	for_each_dpa_resource(ndd, res) {
		if (strcmp(res->name, label_id.id) != 0)
			continue;
		if (!nsblk_add_resource(nd_region, ndd, nsblk, res->start)) {
			rc = -ENOMEM;
			goto abort;
		}
	}

	/* release slots associated with any invalidated UUIDs */
	mutex_lock(&nd_mapping->lock);
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list)
		if (test_and_clear_bit(ND_LABEL_REAP, &label_ent->flags)) {
			reap_victim(nd_mapping, label_ent);
			list_move(&label_ent->list, &list);
		}
	mutex_unlock(&nd_mapping->lock);

	/*
	 * Find the resource associated with the first label in the set
	 * per the v1.2 namespace specification.
	 */
	for (i = 0; i < nsblk->num_resources; i++) {
		struct resource *min = nsblk->res[min_dpa_idx];

		res = nsblk->res[i];
		if (res->start < min->start)
			min_dpa_idx = i;
	}

	for (i = 0; i < nsblk->num_resources; i++) {
		size_t offset;

		res = nsblk->res[i];
		if (is_old_resource(res, old_res_list, old_num_resources))
			continue; /* carry-over */
		slot = nd_label_alloc_slot(ndd);
		if (slot == UINT_MAX) {
			rc = -ENXIO;
			goto abort;
		}
		dev_dbg(ndd->dev, "allocated: %d\n", slot);

		nd_label = to_label(ndd, slot);
		memset(nd_label, 0, sizeof_namespace_label(ndd));
		memcpy(nd_label->uuid, nsblk->uuid, NSLABEL_UUID_LEN);
		nsl_set_name(ndd, nd_label, nsblk->alt_name);
		nsl_set_flags(ndd, nd_label, NSLABEL_FLAG_LOCAL);

		nsl_set_blk_nlabel(ndd, nd_label, nsblk->num_resources,
				   i == min_dpa_idx);
		nsl_set_blk_position(ndd, nd_label, i == min_dpa_idx);
		nsl_set_blk_isetcookie(ndd, nd_label, nd_set->cookie2);

		nsl_set_dpa(ndd, nd_label, res->start);
		nsl_set_rawsize(ndd, nd_label, resource_size(res));
		nsl_set_lbasize(ndd, nd_label, nsblk->lbasize);
		nsl_set_slot(ndd, nd_label, slot);
		nsl_set_type_guid(ndd, nd_label, &nd_set->type_guid);
		nsl_set_claim_class(ndd, nd_label, ndns->claim_class);
		nsl_calculate_checksum(ndd, nd_label);

		/* update label */
		offset = nd_label_offset(ndd, nd_label);
		rc = nvdimm_set_config_data(ndd, offset, nd_label,
				sizeof_namespace_label(ndd));
		if (rc < 0)
			goto abort;
	}

	/* free up now unused slots in the new index */
	for_each_set_bit(slot, victim_map, victim_map ? nslot : 0) {
		dev_dbg(ndd->dev, "free: %d\n", slot);
		nd_label_free_slot(ndd, slot);
	}

	/* update index */
	rc = nd_label_write_index(ndd, ndd->ns_next,
			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
	if (rc)
		goto abort;

	/*
	 * Now that the on-dimm labels are up to date, fix up the tracking
	 * entries in nd_mapping->labels
	 */
	nlabel = 0;
	mutex_lock(&nd_mapping->lock);
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		nd_label = label_ent->label;
		if (!nd_label)
			continue;
		nlabel++;
		memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
		if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
			continue;
		nlabel--;
		list_move(&label_ent->list, &list);
		label_ent->label = NULL;
	}
	list_splice_tail_init(&list, &nd_mapping->labels);
	mutex_unlock(&nd_mapping->lock);

	if (nlabel + nsblk->num_resources > num_labels) {
		/*
		 * Bug, we can't end up with more resources than
		 * available labels
		 */
		WARN_ON_ONCE(1);
		rc = -ENXIO;
		goto out;
	}

	mutex_lock(&nd_mapping->lock);
	label_ent = list_first_entry_or_null(&nd_mapping->labels,
			typeof(*label_ent), list);
	if (!label_ent) {
		WARN_ON(1);
		mutex_unlock(&nd_mapping->lock);
		rc = -ENXIO;
		goto out;
	}
	for_each_clear_bit_le(slot, free, nslot) {
		nd_label = to_label(ndd, slot);
		memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
		if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
			continue;
		res = to_resource(ndd, nd_label);
		res->flags &= ~DPA_RESOURCE_ADJUSTED;
		dev_vdbg(&nsblk->common.dev, "assign label slot: %d\n", slot);
		list_for_each_entry_from(label_ent, &nd_mapping->labels, list) {
			if (label_ent->label)
				continue;
			label_ent->label = nd_label;
			nd_label = NULL;
			break;
		}
		if (nd_label)
			dev_WARN(&nsblk->common.dev,
					"failed to track label slot%d\n", slot);
	}
	mutex_unlock(&nd_mapping->lock);

out:
	kfree(old_res_list);
	bitmap_free(victim_map);
	return rc;

abort:
	/*
	 * 1/ repair the allocated label bitmap in the index
	 * 2/ restore the resource list
	 */
	nd_label_copy(ndd, nsindex, to_current_namespace_index(ndd));
	kfree(nsblk->res);
	nsblk->res = old_res_list;
	nsblk->num_resources = old_num_resources;
	old_res_list = NULL;
	goto out;
}

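/*
 * Ensure @nd_mapping has at least @num_labels tracking entries
 * allocated, and write an initial pair of index blocks if the dimm
 * does not yet have a valid label area.
 */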
static int init_labels(struct nd_mapping *nd_mapping, int num_labels)
{
	int i, old_num_labels = 0;
	struct nd_label_ent *label_ent;
	struct nd_namespace_index *nsindex;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

	mutex_lock(&nd_mapping->lock);
	list_for_each_entry(label_ent, &nd_mapping->labels, list)
		old_num_labels++;
	mutex_unlock(&nd_mapping->lock);

	/*
	 * We need to preserve all the old labels for the mapping so
	 * they can be garbage collected after writing the new labels.
	 */
	for (i = old_num_labels; i < num_labels; i++) {
		label_ent = kzalloc(sizeof(*label_ent), GFP_KERNEL);
		if (!label_ent)
			return -ENOMEM;
		mutex_lock(&nd_mapping->lock);
		list_add_tail(&label_ent->list, &nd_mapping->labels);
		mutex_unlock(&nd_mapping->lock);
	}

	if (ndd->ns_current == -1 || ndd->ns_next == -1)
		/* pass */;
	else
		return max(num_labels, old_num_labels);

	nsindex = to_namespace_index(ndd, 0);
	memset(nsindex, 0, ndd->nsarea.config_size);
	for (i = 0; i < 2; i++) {
		int rc = nd_label_write_index(ndd, i, 3 - i, ND_NSINDEX_INIT);

		if (rc)
			return rc;
	}
	ndd->ns_next = 1;
	ndd->ns_current = 0;

	return max(num_labels, old_num_labels);
}

static int del_labels(struct nd_mapping *nd_mapping, u8 *uuid)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_label_ent *label_ent, *e;
	struct nd_namespace_index *nsindex;
	u8 label_uuid[NSLABEL_UUID_LEN];
	unsigned long *free;
	LIST_HEAD(list);
	u32 nslot, slot;
	int active = 0;

	if (!uuid)
		return 0;

	/* no index || no labels == nothing to delete */
	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return 0;

	mutex_lock(&nd_mapping->lock);
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		struct nd_namespace_label *nd_label = label_ent->label;

		if (!nd_label)
			continue;
		active++;
		memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN);
		if (memcmp(label_uuid, uuid, NSLABEL_UUID_LEN) != 0)
			continue;
		active--;
		slot = to_slot(ndd, nd_label);
		nd_label_free_slot(ndd, slot);
		dev_dbg(ndd->dev, "free: %d\n", slot);
		list_move_tail(&label_ent->list, &list);
		label_ent->label = NULL;
	}
	list_splice_tail_init(&list, &nd_mapping->labels);

	if (active == 0) {
		nd_mapping_free_labels(nd_mapping);
		dev_dbg(ndd->dev, "no more active labels\n");
	}
	mutex_unlock(&nd_mapping->lock);

	return nd_label_write_index(ndd, ndd->ns_next,
			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
}

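/*
 * Write, or delete when @size is zero, the set of pmem namespace
 * labels, one per dimm mapping in the region. Labels are first written
 * with NSLABEL_FLAG_UPDATING set and rewritten with the flag cleared
 * once every mapping has a valid label, per UEFI 2.7 expectations.
 */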
int nd_pmem_namespace_label_update(struct nd_region *nd_region,
		struct nd_namespace_pmem *nspm, resource_size_t size)
{
	int i, rc;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct resource *res;
		int count = 0;

		if (size == 0) {
			rc = del_labels(nd_mapping, nspm->uuid);
			if (rc)
				return rc;
			continue;
		}

		for_each_dpa_resource(ndd, res)
			if (strncmp(res->name, "pmem", 4) == 0)
				count++;
		WARN_ON_ONCE(!count);

		rc = init_labels(nd_mapping, count);
		if (rc < 0)
			return rc;

		rc = __pmem_label_update(nd_region, nd_mapping, nspm, i,
				NSLABEL_FLAG_UPDATING);
		if (rc)
			return rc;
	}

	if (size == 0)
		return 0;

	/* Clear the UPDATING flag per UEFI 2.7 expectations */
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];

		rc = __pmem_label_update(nd_region, nd_mapping, nspm, i, 0);
		if (rc)
			return rc;
	}

	return 0;
}

int nd_blk_namespace_label_update(struct nd_region *nd_region,
		struct nd_namespace_blk *nsblk, resource_size_t size)
{
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct resource *res;
	int count = 0;

	if (size == 0)
		return del_labels(nd_mapping, nsblk->uuid);

	for_each_dpa_resource(to_ndd(nd_mapping), res)
		count++;

	count = init_labels(nd_mapping, count);
	if (count < 0)
		return count;

	return __blk_label_update(nd_region, nd_mapping, nsblk, count);
}

int __init nd_label_init(void)
{
	WARN_ON(guid_parse(NVDIMM_BTT_GUID, &nvdimm_btt_guid));
	WARN_ON(guid_parse(NVDIMM_BTT2_GUID, &nvdimm_btt2_guid));
	WARN_ON(guid_parse(NVDIMM_PFN_GUID, &nvdimm_pfn_guid));
	WARN_ON(guid_parse(NVDIMM_DAX_GUID, &nvdimm_dax_guid));

	return 0;
}