// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2016 Intel Corporation. All rights reserved.
 */
#include <linux/memremap.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "nd-core.h"
#include "pfn.h"
#include "nd.h"
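/*
 * KMSAN and similar memory-debug options grow 'struct page' beyond the
 * 64-byte production-kernel assumption baked into the info-block format.
 * CONFIG_NVDIMM_KMSAN opts in to sizing the pmem-backed page map for the
 * inflated 'struct page'; see the MAX_STRUCT_PAGE_SIZE handling in
 * nd_pfn_init() below.
 */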
static const bool page_struct_override = IS_ENABLED(CONFIG_NVDIMM_KMSAN);
static void nd_pfn_release(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_pfn *nd_pfn = to_nd_pfn(dev);

	dev_dbg(dev, "trace\n");
	nd_detach_ndns(&nd_pfn->dev, &nd_pfn->ndns);
	ida_simple_remove(&nd_region->pfn_ida, nd_pfn->id);
	kfree(nd_pfn->uuid);
	kfree(nd_pfn);
}
struct nd_pfn *to_nd_pfn(struct device *dev)
{
	struct nd_pfn *nd_pfn = container_of(dev, struct nd_pfn, dev);

	WARN_ON(!is_nd_pfn(dev));
	return nd_pfn;
}
EXPORT_SYMBOL(to_nd_pfn);
static ssize_t mode_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);

	switch (nd_pfn->mode) {
	case PFN_MODE_RAM:
		return sprintf(buf, "ram\n");
	case PFN_MODE_PMEM:
		return sprintf(buf, "pmem\n");
	default:
		return sprintf(buf, "none\n");
	}
}
static ssize_t mode_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
	ssize_t rc = 0;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	if (dev->driver)
		rc = -EBUSY;
	else {
		size_t n = len - 1;

		if (strncmp(buf, "pmem\n", n) == 0
				|| strncmp(buf, "pmem", n) == 0) {
			nd_pfn->mode = PFN_MODE_PMEM;
		} else if (strncmp(buf, "ram\n", n) == 0
				|| strncmp(buf, "ram", n) == 0)
			nd_pfn->mode = PFN_MODE_RAM;
		else if (strncmp(buf, "none\n", n) == 0
				|| strncmp(buf, "none", n) == 0)
			nd_pfn->mode = PFN_MODE_NONE;
		else
			rc = -EINVAL;
	}
	dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
			buf[len - 1] == '\n' ? "" : "\n");
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc ? rc : len;
}
static DEVICE_ATTR_RW(mode);
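/*
 * Example usage (device name is illustrative):
 *
 *   # echo pmem > /sys/bus/nd/devices/pfn0.0/mode
 *
 * Writes fail with -EBUSY while a driver is attached to the pfn device.
 */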
static ssize_t align_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);

	return sprintf(buf, "%ld\n", nd_pfn->align);
}
static unsigned long *nd_pfn_supported_alignments(unsigned long *alignments)
{
	alignments[0] = PAGE_SIZE;

	if (has_transparent_hugepage()) {
		alignments[1] = HPAGE_PMD_SIZE;
		if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD))
			alignments[2] = HPAGE_PUD_SIZE;
	}

	return alignments;
}
/*
 * Use pmd mapping if supported as default alignment
 */
static unsigned long nd_pfn_default_alignment(void)
{
	if (has_transparent_hugepage())
		return HPAGE_PMD_SIZE;
	return PAGE_SIZE;
}
static ssize_t align_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
	unsigned long aligns[MAX_NVDIMM_ALIGN] = { [0] = 0, };
	ssize_t rc;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	rc = nd_size_select_store(dev, buf, &nd_pfn->align,
			nd_pfn_supported_alignments(aligns));
	dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
			buf[len - 1] == '\n' ? "" : "\n");
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc ? rc : len;
}
static DEVICE_ATTR_RW(align);
static ssize_t uuid_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);

	if (nd_pfn->uuid)
		return sprintf(buf, "%pUb\n", nd_pfn->uuid);
	return sprintf(buf, "\n");
}
static ssize_t uuid_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
	ssize_t rc;

	device_lock(dev);
	rc = nd_uuid_store(dev, &nd_pfn->uuid, buf, len);
	dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
			buf[len - 1] == '\n' ? "" : "\n");
	device_unlock(dev);

	return rc ? rc : len;
}
static DEVICE_ATTR_RW(uuid);
static ssize_t namespace_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	rc = sprintf(buf, "%s\n", nd_pfn->ndns
			? dev_name(&nd_pfn->ndns->dev) : "");
	nvdimm_bus_unlock(dev);

	return rc;
}
static ssize_t namespace_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
	ssize_t rc;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	rc = nd_namespace_store(dev, &nd_pfn->ndns, buf, len);
	dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
			buf[len - 1] == '\n' ? "" : "\n");
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RW(namespace);
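/*
 * Example usage (device names are illustrative): a pfn instance claims
 * its backing namespace by name before it can be probed:
 *
 *   # echo namespace0.0 > /sys/bus/nd/devices/pfn0.0/namespace
 */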
static ssize_t resource_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
	ssize_t rc;

	device_lock(dev);
	if (dev->driver) {
		struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
		u64 offset = __le64_to_cpu(pfn_sb->dataoff);
		struct nd_namespace_common *ndns = nd_pfn->ndns;
		u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
		struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);

		rc = sprintf(buf, "%#llx\n", (unsigned long long) nsio->res.start
				+ start_pad + offset);
	} else {
		/* no address to convey if the pfn instance is disabled */
		rc = -ENXIO;
	}
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_ADMIN_RO(resource);
static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
	ssize_t rc;

	device_lock(dev);
	if (dev->driver) {
		struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
		u64 offset = __le64_to_cpu(pfn_sb->dataoff);
		struct nd_namespace_common *ndns = nd_pfn->ndns;
		u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
		u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
		struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);

		rc = sprintf(buf, "%llu\n", (unsigned long long)
				resource_size(&nsio->res) - start_pad
				- end_trunc - offset);
	} else {
		/* no size to convey if the pfn instance is disabled */
		rc = -ENXIO;
	}
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(size);
static ssize_t supported_alignments_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	unsigned long aligns[MAX_NVDIMM_ALIGN] = { [0] = 0, };

	return nd_size_select_show(0,
			nd_pfn_supported_alignments(aligns), buf);
}
static DEVICE_ATTR_RO(supported_alignments);
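/*
 * Example (illustrative output; the exact values depend on the
 * architecture and transparent-hugepage support):
 *
 *   # cat /sys/bus/nd/devices/pfn0.0/supported_alignments
 *   4096 2097152
 *   # echo 2097152 > /sys/bus/nd/devices/pfn0.0/align
 */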
static struct attribute *nd_pfn_attributes[] = {
	&dev_attr_mode.attr,
	&dev_attr_namespace.attr,
	&dev_attr_uuid.attr,
	&dev_attr_align.attr,
	&dev_attr_resource.attr,
	&dev_attr_size.attr,
	&dev_attr_supported_alignments.attr,
	NULL,
};

static struct attribute_group nd_pfn_attribute_group = {
	.attrs = nd_pfn_attributes,
};

const struct attribute_group *nd_pfn_attribute_groups[] = {
	&nd_pfn_attribute_group,
	&nd_device_attribute_group,
	&nd_numa_attribute_group,
	NULL,
};
static const struct device_type nd_pfn_device_type = {
	.name = "nd_pfn",
	.release = nd_pfn_release,
	.groups = nd_pfn_attribute_groups,
};

bool is_nd_pfn(struct device *dev)
{
	return dev ? dev->type == &nd_pfn_device_type : false;
}
EXPORT_SYMBOL(is_nd_pfn);
static struct lock_class_key nvdimm_pfn_key;
struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn,
		struct nd_namespace_common *ndns)
{
	struct device *dev;

	if (!nd_pfn)
		return NULL;
	nd_pfn->mode = PFN_MODE_NONE;
	nd_pfn->align = nd_pfn_default_alignment();
	dev = &nd_pfn->dev;
	device_initialize(&nd_pfn->dev);
	lockdep_set_class(&nd_pfn->dev.mutex, &nvdimm_pfn_key);
	if (ndns && !__nd_attach_ndns(&nd_pfn->dev, ndns, &nd_pfn->ndns)) {
		dev_dbg(&ndns->dev, "failed, already claimed by %s\n",
				dev_name(ndns->claim));
		put_device(dev);
		return NULL;
	}
	return dev;
}
static struct nd_pfn *nd_pfn_alloc(struct nd_region *nd_region)
{
	struct nd_pfn *nd_pfn;
	struct device *dev;

	nd_pfn = kzalloc(sizeof(*nd_pfn), GFP_KERNEL);
	if (!nd_pfn)
		return NULL;

	nd_pfn->id = ida_simple_get(&nd_region->pfn_ida, 0, 0, GFP_KERNEL);
	if (nd_pfn->id < 0) {
		kfree(nd_pfn);
		return NULL;
	}

	dev = &nd_pfn->dev;
	dev_set_name(dev, "pfn%d.%d", nd_region->id, nd_pfn->id);
	dev->type = &nd_pfn_device_type;
	dev->parent = &nd_region->dev;

	return nd_pfn;
}
struct device *nd_pfn_create(struct nd_region *nd_region)
{
	struct nd_pfn *nd_pfn;
	struct device *dev;

	if (!is_memory(&nd_region->dev))
		return NULL;

	nd_pfn = nd_pfn_alloc(nd_region);
	dev = nd_pfn_devinit(nd_pfn, NULL);

	nd_device_register(dev);

	return dev;
}
/*
 * nd_pfn_clear_memmap_errors() clears any errors in the volatile memmap
 * space associated with the namespace. If the memmap is set to DRAM, then
 * this is a no-op. Since the memmap area is freshly initialized during
 * probe, we have an opportunity to clear any badblocks in this area.
 */
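/*
 * Illustrative arithmetic: the info block sits at a 4K offset into the
 * namespace, so the metadata to scrub starts at sector
 * (SZ_4K + sizeof(*pfn_sb)) >> 9 and extends to the start of data at
 * pfn_sb->dataoff, i.e. meta_num = (dataoff >> 9) - meta_start sectors.
 */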
static int nd_pfn_clear_memmap_errors(struct nd_pfn *nd_pfn)
{
	struct nd_region *nd_region = to_nd_region(nd_pfn->dev.parent);
	struct nd_namespace_common *ndns = nd_pfn->ndns;
	void *zero_page = page_address(ZERO_PAGE(0));
	struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
	int num_bad, meta_num, rc, bb_present;
	sector_t first_bad, meta_start;
	struct nd_namespace_io *nsio;

	if (nd_pfn->mode != PFN_MODE_PMEM)
		return 0;

	nsio = to_nd_namespace_io(&ndns->dev);
	meta_start = (SZ_4K + sizeof(*pfn_sb)) >> 9;
	meta_num = (le64_to_cpu(pfn_sb->dataoff) >> 9) - meta_start;

	/*
	 * re-enable the namespace with correct size so that we can access
	 * the device memmap area.
	 */
	devm_namespace_disable(&nd_pfn->dev, ndns);
	rc = devm_namespace_enable(&nd_pfn->dev, ndns, le64_to_cpu(pfn_sb->dataoff));
	if (rc)
		return rc;

	do {
		unsigned long zero_len;
		u64 nsoff;

		bb_present = badblocks_check(&nd_region->bb, meta_start,
				meta_num, &first_bad, &num_bad);
		if (bb_present) {
			dev_dbg(&nd_pfn->dev, "meta: %x badblocks at %llx\n",
					num_bad, first_bad);
			nsoff = ALIGN_DOWN((nd_region->ndr_start
					+ (first_bad << 9)) - nsio->res.start,
					PAGE_SIZE);
			zero_len = ALIGN(num_bad << 9, PAGE_SIZE);
			while (zero_len) {
				unsigned long chunk = min(zero_len, PAGE_SIZE);

				rc = nvdimm_write_bytes(ndns, nsoff, zero_page,
							chunk, 0);
				if (rc)
					break;

				zero_len -= chunk;
				nsoff += chunk;
			}
			if (rc) {
				dev_err(&nd_pfn->dev,
					"error clearing %x badblocks at %llx\n",
					num_bad, first_bad);
				return rc;
			}
		}
	} while (bb_present);

	return 0;
}
static bool nd_supported_alignment(unsigned long align)
{
	int i;
	unsigned long supported[MAX_NVDIMM_ALIGN] = { [0] = 0, };

	if (align == 0)
		return false;

	nd_pfn_supported_alignments(supported);
	for (i = 0; supported[i]; i++)
		if (align == supported[i])
			return true;

	return false;
}
/**
 * nd_pfn_validate - read and validate info-block
 * @nd_pfn: fsdax namespace runtime state / properties
 * @sig: 'devdax' or 'fsdax' signature
 *
 * Upon return the info-block buffer contents (->pfn_sb) are
 * indeterminate when validation fails, and a coherent info-block
 * otherwise.
 */
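/*
 * Validation summary (illustrative): the info block is read from a fixed
 * SZ_4K offset in the namespace, the checksum is verified by zeroing the
 * checksum field and recomputing nd_sb_checksum(), pre-v1.4 info blocks
 * are fixed up with defaults, and only then are mode, alignment, and
 * offset sanity checked against the live device settings.
 */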
int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
{
	u64 checksum, offset;
	struct resource *res;
	enum nd_pfn_mode mode;
	struct nd_namespace_io *nsio;
	unsigned long align, start_pad;
	struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
	struct nd_namespace_common *ndns = nd_pfn->ndns;
	const uuid_t *parent_uuid = nd_dev_to_uuid(&ndns->dev);

	if (!pfn_sb || !ndns)
		return -ENODEV;

	if (!is_memory(nd_pfn->dev.parent))
		return -ENODEV;

	if (nvdimm_read_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb), 0))
		return -ENXIO;

	if (memcmp(pfn_sb->signature, sig, PFN_SIG_LEN) != 0)
		return -ENODEV;

	checksum = le64_to_cpu(pfn_sb->checksum);
	pfn_sb->checksum = 0;
	if (checksum != nd_sb_checksum((struct nd_gen_sb *) pfn_sb))
		return -ENODEV;
	pfn_sb->checksum = cpu_to_le64(checksum);

	if (memcmp(pfn_sb->parent_uuid, parent_uuid, 16) != 0)
		return -ENODEV;

	if (__le16_to_cpu(pfn_sb->version_minor) < 1) {
		pfn_sb->start_pad = 0;
		pfn_sb->end_trunc = 0;
	}

	if (__le16_to_cpu(pfn_sb->version_minor) < 2)
		pfn_sb->align = 0;

	if (__le16_to_cpu(pfn_sb->version_minor) < 4) {
		pfn_sb->page_struct_size = cpu_to_le16(64);
		pfn_sb->page_size = cpu_to_le32(PAGE_SIZE);
	}

	switch (le32_to_cpu(pfn_sb->mode)) {
	case PFN_MODE_RAM:
	case PFN_MODE_PMEM:
		break;
	default:
		return -ENXIO;
	}

	align = le32_to_cpu(pfn_sb->align);
	offset = le64_to_cpu(pfn_sb->dataoff);
	start_pad = le32_to_cpu(pfn_sb->start_pad);
	if (align == 0)
		align = 1UL << ilog2(offset);
	mode = le32_to_cpu(pfn_sb->mode);

	if ((le32_to_cpu(pfn_sb->page_size) > PAGE_SIZE) &&
			(mode == PFN_MODE_PMEM)) {
		dev_err(&nd_pfn->dev,
				"init failed, page size mismatch %d\n",
				le32_to_cpu(pfn_sb->page_size));
		return -EOPNOTSUPP;
	}

	if ((le16_to_cpu(pfn_sb->page_struct_size) < sizeof(struct page)) &&
			(mode == PFN_MODE_PMEM)) {
		dev_err(&nd_pfn->dev,
				"init failed, struct page size mismatch %d\n",
				le16_to_cpu(pfn_sb->page_struct_size));
		return -EOPNOTSUPP;
	}

	/*
	 * Check whether we support the alignment. For Dax if the
	 * superblock alignment is not matching, we won't initialize
	 * the device.
	 */
	if (!nd_supported_alignment(align) &&
			!memcmp(pfn_sb->signature, DAX_SIG, PFN_SIG_LEN)) {
		dev_err(&nd_pfn->dev, "init failed, alignment mismatch: "
				"%ld:%ld\n", nd_pfn->align, align);
		return -EOPNOTSUPP;
	}

	if (!nd_pfn->uuid) {
		/*
		 * When probing a namespace via nd_pfn_probe() the uuid
		 * is NULL (see: nd_pfn_devinit()); init settings from
		 * the pfn_sb.
		 */
		nd_pfn->uuid = kmemdup(pfn_sb->uuid, 16, GFP_KERNEL);
		if (!nd_pfn->uuid)
			return -ENOMEM;
		nd_pfn->align = align;
		nd_pfn->mode = mode;
	} else {
		/*
		 * When probing a pfn / dax instance we validate the
		 * live settings against the pfn_sb.
		 */
		if (memcmp(nd_pfn->uuid, pfn_sb->uuid, 16) != 0)
			return -ENODEV;

		/*
		 * If the uuid validates, but other settings mismatch
		 * return EINVAL because userspace has managed to change
		 * the configuration without specifying new
		 * identification.
		 */
		if (nd_pfn->align != align || nd_pfn->mode != mode) {
			dev_err(&nd_pfn->dev,
					"init failed, settings mismatch\n");
			dev_dbg(&nd_pfn->dev, "align: %lx:%lx mode: %d:%d\n",
					nd_pfn->align, align, nd_pfn->mode,
					mode);
			return -EINVAL;
		}
	}

	if (align > nvdimm_namespace_capacity(ndns)) {
		dev_err(&nd_pfn->dev, "alignment: %lx exceeds capacity %llx\n",
				align, nvdimm_namespace_capacity(ndns));
		return -EINVAL;
	}

	/*
	 * These warnings are verbose because they can only trigger in
	 * the case where the physical address alignment of the
	 * namespace has changed since the pfn superblock was
	 * established.
	 */
	nsio = to_nd_namespace_io(&ndns->dev);
	res = &nsio->res;
	if (offset >= resource_size(res)) {
		dev_err(&nd_pfn->dev, "pfn array size exceeds capacity of %s\n",
				dev_name(&ndns->dev));
		return -EBUSY;
	}

	if ((align && !IS_ALIGNED(res->start + offset + start_pad, align))
			|| !IS_ALIGNED(offset, PAGE_SIZE)) {
		dev_err(&nd_pfn->dev,
				"bad offset: %#llx dax disabled align: %#lx\n",
				offset, align);
		return -ENXIO;
	}

	if (!IS_ALIGNED(res->start + le32_to_cpu(pfn_sb->start_pad),
				memremap_compat_align())) {
		dev_err(&nd_pfn->dev, "resource start misaligned\n");
		return -EOPNOTSUPP;
	}

	if (!IS_ALIGNED(res->end + 1 - le32_to_cpu(pfn_sb->end_trunc),
				memremap_compat_align())) {
		dev_err(&nd_pfn->dev, "resource end misaligned\n");
		return -EOPNOTSUPP;
	}

	return 0;
}
EXPORT_SYMBOL(nd_pfn_validate);
int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
{
	int rc;
	struct nd_pfn *nd_pfn;
	struct device *pfn_dev;
	struct nd_pfn_sb *pfn_sb;
	struct nd_region *nd_region = to_nd_region(ndns->dev.parent);

	if (ndns->force_raw)
		return -ENODEV;

	switch (ndns->claim_class) {
	case NVDIMM_CCLASS_NONE:
	case NVDIMM_CCLASS_PFN:
		break;
	default:
		return -ENODEV;
	}

	nvdimm_bus_lock(&ndns->dev);
	nd_pfn = nd_pfn_alloc(nd_region);
	pfn_dev = nd_pfn_devinit(nd_pfn, ndns);
	nvdimm_bus_unlock(&ndns->dev);
	if (!pfn_dev)
		return -ENOMEM;
	pfn_sb = devm_kmalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
	nd_pfn = to_nd_pfn(pfn_dev);
	nd_pfn->pfn_sb = pfn_sb;
	rc = nd_pfn_validate(nd_pfn, PFN_SIG);
	dev_dbg(dev, "pfn: %s\n", rc == 0 ? dev_name(pfn_dev) : "<none>");
	if (rc < 0) {
		nd_detach_ndns(pfn_dev, &nd_pfn->ndns);
		put_device(pfn_dev);
	} else
		nd_device_register(pfn_dev);

	return rc;
}
EXPORT_SYMBOL(nd_pfn_probe);
/*
 * We hotplug memory at sub-section granularity, pad the reserved area
 * from the previous section base to the namespace base address.
 */
static unsigned long init_altmap_base(resource_size_t base)
{
	unsigned long base_pfn = PHYS_PFN(base);

	return SUBSECTION_ALIGN_DOWN(base_pfn);
}
static unsigned long init_altmap_reserve(resource_size_t base)
{
	unsigned long reserve = nd_info_block_reserve() >> PAGE_SHIFT;
	unsigned long base_pfn = PHYS_PFN(base);

	reserve += base_pfn - SUBSECTION_ALIGN_DOWN(base_pfn);
	return reserve;
}
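/*
 * Worked example (illustrative numbers): with 4K pages and 2M
 * sub-sections, a namespace base of 0x100280000 has base_pfn 0x100280,
 * which SUBSECTION_ALIGN_DOWN() rounds to 0x100200. The reserve is then
 * the info-block reserve in pages plus the 0x80 pad pfns between the
 * sub-section base and the namespace base.
 */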
static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
{
	struct range *range = &pgmap->range;
	struct vmem_altmap *altmap = &pgmap->altmap;
	struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
	u64 offset = le64_to_cpu(pfn_sb->dataoff);
	u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
	u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
	u32 reserve = nd_info_block_reserve();
	struct nd_namespace_common *ndns = nd_pfn->ndns;
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	resource_size_t base = nsio->res.start + start_pad;
	resource_size_t end = nsio->res.end - end_trunc;
	struct vmem_altmap __altmap = {
		.base_pfn = init_altmap_base(base),
		.reserve = init_altmap_reserve(base),
		.end_pfn = PHYS_PFN(end),
	};

	*range = (struct range) {
		.start = nsio->res.start + start_pad,
		.end = nsio->res.end - end_trunc,
	};
	pgmap->nr_range = 1;
	if (nd_pfn->mode == PFN_MODE_RAM) {
		if (offset < reserve)
			return -EINVAL;
		nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
	} else if (nd_pfn->mode == PFN_MODE_PMEM) {
		nd_pfn->npfns = PHYS_PFN((range_len(range) - offset));
		if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns)
			dev_info(&nd_pfn->dev,
					"number of pfns truncated from %lld to %ld\n",
					le64_to_cpu(nd_pfn->pfn_sb->npfns),
					nd_pfn->npfns);
		memcpy(altmap, &__altmap, sizeof(*altmap));
		altmap->free = PHYS_PFN(offset - reserve);
		altmap->alloc = 0;
		pgmap->flags |= PGMAP_ALTMAP_VALID;
	} else
		return -ENXIO;

	return 0;
}
static int nd_pfn_init(struct nd_pfn *nd_pfn)
{
	struct nd_namespace_common *ndns = nd_pfn->ndns;
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	resource_size_t start, size;
	struct nd_region *nd_region;
	unsigned long npfns, align;
	u32 end_trunc;
	struct nd_pfn_sb *pfn_sb;
	phys_addr_t offset;
	const char *sig;
	u64 checksum;
	int rc;

	pfn_sb = devm_kmalloc(&nd_pfn->dev, sizeof(*pfn_sb), GFP_KERNEL);
	if (!pfn_sb)
		return -ENOMEM;

	nd_pfn->pfn_sb = pfn_sb;
	if (is_nd_dax(&nd_pfn->dev))
		sig = DAX_SIG;
	else
		sig = PFN_SIG;

	rc = nd_pfn_validate(nd_pfn, sig);
	if (rc == 0)
		return nd_pfn_clear_memmap_errors(nd_pfn);
	if (rc != -ENODEV)
		return rc;
	/* no info block, do init */;
	memset(pfn_sb, 0, sizeof(*pfn_sb));

	nd_region = to_nd_region(nd_pfn->dev.parent);
	if (nd_region->ro) {
		dev_info(&nd_pfn->dev,
				"%s is read-only, unable to init metadata\n",
				dev_name(&nd_region->dev));
		return -ENXIO;
	}

	start = nsio->res.start;
	size = resource_size(&nsio->res);
	npfns = PHYS_PFN(size - SZ_8K);
	align = max(nd_pfn->align, memremap_compat_align());
	/*
	 * When @start is misaligned fail namespace creation. See
	 * the 'struct nd_pfn_sb' commentary on why ->start_pad is not
	 * an option.
	 */
	if (!IS_ALIGNED(start, memremap_compat_align())) {
		dev_err(&nd_pfn->dev, "%s: start %pa misaligned to %#lx\n",
				dev_name(&ndns->dev), &start,
				memremap_compat_align());
		return -EINVAL;
	}
	end_trunc = start + size - ALIGN_DOWN(start + size, align);
	if (nd_pfn->mode == PFN_MODE_PMEM) {
		unsigned long page_map_size = MAX_STRUCT_PAGE_SIZE * npfns;

		/*
		 * The altmap should be padded out to the block size used
		 * when populating the vmemmap. This *should* be equal to
		 * PMD_SIZE for most architectures.
		 *
		 * Also make sure size of struct page is less than
		 * MAX_STRUCT_PAGE_SIZE. The goal here is compatibility in the
		 * face of production kernel configurations that reduce the
		 * 'struct page' size below MAX_STRUCT_PAGE_SIZE. For debug
		 * kernel configurations that increase the 'struct page' size
		 * above MAX_STRUCT_PAGE_SIZE, the page_struct_override allows
		 * for continuing with the capacity that will be wasted when
		 * reverting to a production kernel configuration. Otherwise,
		 * those configurations are blocked by default.
		 */
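		/*
		 * Illustrative sizing: for a 16G namespace with 4K pages,
		 * npfns is ~4M, so reserving MAX_STRUCT_PAGE_SIZE (64)
		 * bytes per page sets aside ~256M of pmem for the page map
		 * before the data offset.
		 */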
		if (sizeof(struct page) > MAX_STRUCT_PAGE_SIZE) {
			if (page_struct_override)
				page_map_size = sizeof(struct page) * npfns;
			else {
				dev_err(&nd_pfn->dev,
					"Memory debug options prevent using pmem for the page map\n");
				return -EINVAL;
			}
		}
		offset = ALIGN(start + SZ_8K + page_map_size, align) - start;
	} else if (nd_pfn->mode == PFN_MODE_RAM)
		offset = ALIGN(start + SZ_8K, align) - start;
	else
		return -ENXIO;
	if (offset >= size) {
		dev_err(&nd_pfn->dev, "%s unable to satisfy requested alignment\n",
				dev_name(&ndns->dev));
		return -ENXIO;
	}
	npfns = PHYS_PFN(size - offset - end_trunc);
	pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
	pfn_sb->dataoff = cpu_to_le64(offset);
	pfn_sb->npfns = cpu_to_le64(npfns);
	memcpy(pfn_sb->signature, sig, PFN_SIG_LEN);
	memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
	memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
	pfn_sb->version_major = cpu_to_le16(1);
	pfn_sb->version_minor = cpu_to_le16(4);
	pfn_sb->end_trunc = cpu_to_le32(end_trunc);
	pfn_sb->align = cpu_to_le32(nd_pfn->align);
	if (sizeof(struct page) > MAX_STRUCT_PAGE_SIZE && page_struct_override)
		pfn_sb->page_struct_size = cpu_to_le16(sizeof(struct page));
	else
		pfn_sb->page_struct_size = cpu_to_le16(MAX_STRUCT_PAGE_SIZE);
	pfn_sb->page_size = cpu_to_le32(PAGE_SIZE);
	checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
	pfn_sb->checksum = cpu_to_le64(checksum);
	rc = nd_pfn_clear_memmap_errors(nd_pfn);
	if (rc)
		return rc;

	return nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb), 0);
}
/*
 * Determine the effective resource range and vmem_altmap from an nd_pfn
 * instance.
 */
int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
{
	int rc;

	if (!nd_pfn->uuid || !nd_pfn->ndns)
		return -ENODEV;

	rc = nd_pfn_init(nd_pfn);
	if (rc)
		return rc;

	/* we need a valid pfn_sb before we can init a dev_pagemap */
	return __nvdimm_setup_pfn(nd_pfn, pgmap);
}
EXPORT_SYMBOL_GPL(nvdimm_setup_pfn);