drivers/nvdimm/pfn_devs.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2016 Intel Corporation. All rights reserved.
 */
#include <linux/memremap.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "nd-core.h"
#include "pfn.h"
#include "nd.h"

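/*
 * Debug kernel configurations (e.g. KMSAN) inflate 'struct page'; this
 * opt-in allows the page map to exceed MAX_STRUCT_PAGE_SIZE (see the
 * commentary in nd_pfn_init()).
 */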
static const bool page_struct_override = IS_ENABLED(CONFIG_NVDIMM_KMSAN);

static void nd_pfn_release(struct device *dev)
{
        struct nd_region *nd_region = to_nd_region(dev->parent);
        struct nd_pfn *nd_pfn = to_nd_pfn(dev);

        dev_dbg(dev, "trace\n");
        nd_detach_ndns(&nd_pfn->dev, &nd_pfn->ndns);
        ida_simple_remove(&nd_region->pfn_ida, nd_pfn->id);
        kfree(nd_pfn->uuid);
        kfree(nd_pfn);
}

struct nd_pfn *to_nd_pfn(struct device *dev)
{
        struct nd_pfn *nd_pfn = container_of(dev, struct nd_pfn, dev);

        WARN_ON(!is_nd_pfn(dev));
        return nd_pfn;
}
EXPORT_SYMBOL(to_nd_pfn);

static ssize_t mode_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);

        switch (nd_pfn->mode) {
        case PFN_MODE_RAM:
                return sprintf(buf, "ram\n");
        case PFN_MODE_PMEM:
                return sprintf(buf, "pmem\n");
        default:
                return sprintf(buf, "none\n");
        }
}

static ssize_t mode_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
        ssize_t rc = 0;

        device_lock(dev);
        nvdimm_bus_lock(dev);
        if (dev->driver)
                rc = -EBUSY;
        else {
                size_t n = len - 1;

                if (strncmp(buf, "pmem\n", n) == 0
                                || strncmp(buf, "pmem", n) == 0) {
                        nd_pfn->mode = PFN_MODE_PMEM;
                } else if (strncmp(buf, "ram\n", n) == 0
                                || strncmp(buf, "ram", n) == 0)
                        nd_pfn->mode = PFN_MODE_RAM;
                else if (strncmp(buf, "none\n", n) == 0
                                || strncmp(buf, "none", n) == 0)
                        nd_pfn->mode = PFN_MODE_NONE;
                else
                        rc = -EINVAL;
        }
        dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
                        buf[len - 1] == '\n' ? "" : "\n");
        nvdimm_bus_unlock(dev);
        device_unlock(dev);

        return rc ? rc : len;
}
static DEVICE_ATTR_RW(mode);
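/*
 * Usage note: "mode" accepts "ram", "pmem", or "none", with or without
 * a trailing newline, e.g. (hypothetical device name):
 *
 *      echo pmem > /sys/bus/nd/devices/pfn0.0/mode
 *
 * Writes fail with -EBUSY while the pfn device is bound to a driver.
 */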

static ssize_t align_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);

        return sprintf(buf, "%ld\n", nd_pfn->align);
}

static unsigned long *nd_pfn_supported_alignments(unsigned long *alignments)
{

        alignments[0] = PAGE_SIZE;

        if (has_transparent_hugepage()) {
                alignments[1] = HPAGE_PMD_SIZE;
                if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD))
                        alignments[2] = HPAGE_PUD_SIZE;
        }

        return alignments;
}
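/*
 * With 4K pages on x86_64, for example, the supported alignments are
 * typically 4K (PAGE_SIZE), 2M (HPAGE_PMD_SIZE), and, when PUD-sized
 * transparent hugepages are available, 1G (HPAGE_PUD_SIZE).
 */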

/*
 * Use PMD mapping as the default alignment if transparent hugepages
 * are supported.
 */
static unsigned long nd_pfn_default_alignment(void)
{

        if (has_transparent_hugepage())
                return HPAGE_PMD_SIZE;
        return PAGE_SIZE;
}

static ssize_t align_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
        unsigned long aligns[MAX_NVDIMM_ALIGN] = { [0] = 0, };
        ssize_t rc;

        device_lock(dev);
        nvdimm_bus_lock(dev);
        rc = nd_size_select_store(dev, buf, &nd_pfn->align,
                        nd_pfn_supported_alignments(aligns));
        dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
                        buf[len - 1] == '\n' ? "" : "\n");
        nvdimm_bus_unlock(dev);
        device_unlock(dev);

        return rc ? rc : len;
}
static DEVICE_ATTR_RW(align);

static ssize_t uuid_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);

        if (nd_pfn->uuid)
                return sprintf(buf, "%pUb\n", nd_pfn->uuid);
        return sprintf(buf, "\n");
}

static ssize_t uuid_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
        ssize_t rc;

        device_lock(dev);
        rc = nd_uuid_store(dev, &nd_pfn->uuid, buf, len);
        dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
                        buf[len - 1] == '\n' ? "" : "\n");
        device_unlock(dev);

        return rc ? rc : len;
}
static DEVICE_ATTR_RW(uuid);

static ssize_t namespace_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
        ssize_t rc;

        nvdimm_bus_lock(dev);
        rc = sprintf(buf, "%s\n", nd_pfn->ndns
                        ? dev_name(&nd_pfn->ndns->dev) : "");
        nvdimm_bus_unlock(dev);
        return rc;
}

static ssize_t namespace_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
        ssize_t rc;

        device_lock(dev);
        nvdimm_bus_lock(dev);
        rc = nd_namespace_store(dev, &nd_pfn->ndns, buf, len);
        dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
                        buf[len - 1] == '\n' ? "" : "\n");
        nvdimm_bus_unlock(dev);
        device_unlock(dev);

        return rc;
}
static DEVICE_ATTR_RW(namespace);

static ssize_t resource_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
        ssize_t rc;

        device_lock(dev);
        if (dev->driver) {
                struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
                u64 offset = __le64_to_cpu(pfn_sb->dataoff);
                struct nd_namespace_common *ndns = nd_pfn->ndns;
                u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
                struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);

                rc = sprintf(buf, "%#llx\n", (unsigned long long) nsio->res.start
                                + start_pad + offset);
        } else {
                /* no address to convey if the pfn instance is disabled */
                rc = -ENXIO;
        }
        device_unlock(dev);

        return rc;
}
static DEVICE_ATTR_ADMIN_RO(resource);

static ssize_t size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
        ssize_t rc;

        device_lock(dev);
        if (dev->driver) {
                struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
                u64 offset = __le64_to_cpu(pfn_sb->dataoff);
                struct nd_namespace_common *ndns = nd_pfn->ndns;
                u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
                u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
                struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);

                rc = sprintf(buf, "%llu\n", (unsigned long long)
                                resource_size(&nsio->res) - start_pad
                                - end_trunc - offset);
        } else {
                /* no size to convey if the pfn instance is disabled */
                rc = -ENXIO;
        }
        device_unlock(dev);

        return rc;
}
static DEVICE_ATTR_RO(size);

static ssize_t supported_alignments_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        unsigned long aligns[MAX_NVDIMM_ALIGN] = { [0] = 0, };

        return nd_size_select_show(0,
                        nd_pfn_supported_alignments(aligns), buf);
}
static DEVICE_ATTR_RO(supported_alignments);

static struct attribute *nd_pfn_attributes[] = {
        &dev_attr_mode.attr,
        &dev_attr_namespace.attr,
        &dev_attr_uuid.attr,
        &dev_attr_align.attr,
        &dev_attr_resource.attr,
        &dev_attr_size.attr,
        &dev_attr_supported_alignments.attr,
        NULL,
};

static struct attribute_group nd_pfn_attribute_group = {
        .attrs = nd_pfn_attributes,
};

const struct attribute_group *nd_pfn_attribute_groups[] = {
        &nd_pfn_attribute_group,
        &nd_device_attribute_group,
        &nd_numa_attribute_group,
        NULL,
};

static const struct device_type nd_pfn_device_type = {
        .name = "nd_pfn",
        .release = nd_pfn_release,
        .groups = nd_pfn_attribute_groups,
};

bool is_nd_pfn(struct device *dev)
{
        return dev ? dev->type == &nd_pfn_device_type : false;
}
EXPORT_SYMBOL(is_nd_pfn);

static struct lock_class_key nvdimm_pfn_key;

struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn,
                struct nd_namespace_common *ndns)
{
        struct device *dev;

        if (!nd_pfn)
                return NULL;

        nd_pfn->mode = PFN_MODE_NONE;
        nd_pfn->align = nd_pfn_default_alignment();
        dev = &nd_pfn->dev;
        device_initialize(&nd_pfn->dev);
        lockdep_set_class(&nd_pfn->dev.mutex, &nvdimm_pfn_key);
        if (ndns && !__nd_attach_ndns(&nd_pfn->dev, ndns, &nd_pfn->ndns)) {
                dev_dbg(&ndns->dev, "failed, already claimed by %s\n",
                                dev_name(ndns->claim));
                put_device(dev);
                return NULL;
        }
        return dev;
}

static struct nd_pfn *nd_pfn_alloc(struct nd_region *nd_region)
{
        struct nd_pfn *nd_pfn;
        struct device *dev;

        nd_pfn = kzalloc(sizeof(*nd_pfn), GFP_KERNEL);
        if (!nd_pfn)
                return NULL;

        nd_pfn->id = ida_simple_get(&nd_region->pfn_ida, 0, 0, GFP_KERNEL);
        if (nd_pfn->id < 0) {
                kfree(nd_pfn);
                return NULL;
        }

        dev = &nd_pfn->dev;
        dev_set_name(dev, "pfn%d.%d", nd_region->id, nd_pfn->id);
        dev->type = &nd_pfn_device_type;
        dev->parent = &nd_region->dev;

        return nd_pfn;
}

struct device *nd_pfn_create(struct nd_region *nd_region)
{
        struct nd_pfn *nd_pfn;
        struct device *dev;

        if (!is_memory(&nd_region->dev))
                return NULL;

        nd_pfn = nd_pfn_alloc(nd_region);
        dev = nd_pfn_devinit(nd_pfn, NULL);

        nd_device_register(dev);
        return dev;
}

/*
 * nd_pfn_clear_memmap_errors() clears any errors in the volatile memmap
 * space associated with the namespace. If the memmap is set to DRAM, then
 * this is a no-op. Since the memmap area is freshly initialized during
 * probe, we have an opportunity to clear any badblocks in this area.
 */
static int nd_pfn_clear_memmap_errors(struct nd_pfn *nd_pfn)
{
        struct nd_region *nd_region = to_nd_region(nd_pfn->dev.parent);
        struct nd_namespace_common *ndns = nd_pfn->ndns;
        void *zero_page = page_address(ZERO_PAGE(0));
        struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
        int num_bad, meta_num, rc, bb_present;
        sector_t first_bad, meta_start;
        struct nd_namespace_io *nsio;

        if (nd_pfn->mode != PFN_MODE_PMEM)
                return 0;

        nsio = to_nd_namespace_io(&ndns->dev);
        meta_start = (SZ_4K + sizeof(*pfn_sb)) >> 9;
        meta_num = (le64_to_cpu(pfn_sb->dataoff) >> 9) - meta_start;
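        /*
         * Note: badblocks are tracked in 512-byte sectors, hence the
         * ">> 9" conversions; the metadata span runs from just past the
         * info block (written at byte offset SZ_4K) up to ->dataoff.
         */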

        /*
         * Re-enable the namespace with the correct size so that we can
         * access the device memmap area.
         */
        devm_namespace_disable(&nd_pfn->dev, ndns);
        rc = devm_namespace_enable(&nd_pfn->dev, ndns, le64_to_cpu(pfn_sb->dataoff));
        if (rc)
                return rc;

        do {
                unsigned long zero_len;
                u64 nsoff;

                bb_present = badblocks_check(&nd_region->bb, meta_start,
                                meta_num, &first_bad, &num_bad);
                if (bb_present) {
                        dev_dbg(&nd_pfn->dev, "meta: %x badblocks at %llx\n",
                                        num_bad, first_bad);
                        nsoff = ALIGN_DOWN((nd_region->ndr_start
                                        + (first_bad << 9)) - nsio->res.start,
                                        PAGE_SIZE);
                        zero_len = ALIGN(num_bad << 9, PAGE_SIZE);
                        while (zero_len) {
                                unsigned long chunk = min(zero_len, PAGE_SIZE);

                                rc = nvdimm_write_bytes(ndns, nsoff, zero_page,
                                                        chunk, 0);
                                if (rc)
                                        break;

                                zero_len -= chunk;
                                nsoff += chunk;
                        }
                        if (rc) {
                                dev_err(&nd_pfn->dev,
                                        "error clearing %x badblocks at %llx\n",
                                        num_bad, first_bad);
                                return rc;
                        }
                }
        } while (bb_present);

        return 0;
}

static bool nd_supported_alignment(unsigned long align)
{
        int i;
        unsigned long supported[MAX_NVDIMM_ALIGN] = { [0] = 0, };

        if (align == 0)
                return false;

        nd_pfn_supported_alignments(supported);
        for (i = 0; supported[i]; i++)
                if (align == supported[i])
                        return true;
        return false;
}

/**
 * nd_pfn_validate - read and validate info-block
 * @nd_pfn: fsdax namespace runtime state / properties
 * @sig: 'devdax' or 'fsdax' signature
 *
 * Upon return the info-block buffer contents (->pfn_sb) are
 * indeterminate when validation fails, and a coherent info-block
 * otherwise.
 */
int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
{
        u64 checksum, offset;
        struct resource *res;
        enum nd_pfn_mode mode;
        struct nd_namespace_io *nsio;
        unsigned long align, start_pad;
        struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
        struct nd_namespace_common *ndns = nd_pfn->ndns;
        const uuid_t *parent_uuid = nd_dev_to_uuid(&ndns->dev);

        if (!pfn_sb || !ndns)
                return -ENODEV;

        if (!is_memory(nd_pfn->dev.parent))
                return -ENODEV;

        if (nvdimm_read_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb), 0))
                return -ENXIO;

        if (memcmp(pfn_sb->signature, sig, PFN_SIG_LEN) != 0)
                return -ENODEV;

        checksum = le64_to_cpu(pfn_sb->checksum);
        pfn_sb->checksum = 0;
        if (checksum != nd_sb_checksum((struct nd_gen_sb *) pfn_sb))
                return -ENODEV;
        pfn_sb->checksum = cpu_to_le64(checksum);

        if (memcmp(pfn_sb->parent_uuid, parent_uuid, 16) != 0)
                return -ENODEV;

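        /*
         * Backfill fields that did not exist in older info-block minor
         * versions so the checks below can treat all on-media layouts
         * uniformly.
         */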
        if (__le16_to_cpu(pfn_sb->version_minor) < 1) {
                pfn_sb->start_pad = 0;
                pfn_sb->end_trunc = 0;
        }

        if (__le16_to_cpu(pfn_sb->version_minor) < 2)
                pfn_sb->align = 0;

        if (__le16_to_cpu(pfn_sb->version_minor) < 4) {
                pfn_sb->page_struct_size = cpu_to_le16(64);
                pfn_sb->page_size = cpu_to_le32(PAGE_SIZE);
        }

        switch (le32_to_cpu(pfn_sb->mode)) {
        case PFN_MODE_RAM:
        case PFN_MODE_PMEM:
                break;
        default:
                return -ENXIO;
        }

        align = le32_to_cpu(pfn_sb->align);
        offset = le64_to_cpu(pfn_sb->dataoff);
        start_pad = le32_to_cpu(pfn_sb->start_pad);
        if (align == 0)
                align = 1UL << ilog2(offset);
        mode = le32_to_cpu(pfn_sb->mode);

        if ((le32_to_cpu(pfn_sb->page_size) > PAGE_SIZE) &&
                        (mode == PFN_MODE_PMEM)) {
                dev_err(&nd_pfn->dev,
                                "init failed, page size mismatch %d\n",
                                le32_to_cpu(pfn_sb->page_size));
                return -EOPNOTSUPP;
        }

        if ((le16_to_cpu(pfn_sb->page_struct_size) < sizeof(struct page)) &&
                        (mode == PFN_MODE_PMEM)) {
                dev_err(&nd_pfn->dev,
                                "init failed, struct page size mismatch %d\n",
                                le16_to_cpu(pfn_sb->page_struct_size));
                return -EOPNOTSUPP;
        }

        /*
         * Check whether we support the alignment. For DAX, if the
         * superblock alignment does not match a supported value we
         * won't initialize the device.
         */
        if (!nd_supported_alignment(align) &&
                        !memcmp(pfn_sb->signature, DAX_SIG, PFN_SIG_LEN)) {
                dev_err(&nd_pfn->dev, "init failed, alignment mismatch: "
                                "%ld:%ld\n", nd_pfn->align, align);
                return -EOPNOTSUPP;
        }

        if (!nd_pfn->uuid) {
                /*
                 * When probing a namespace via nd_pfn_probe() the uuid
                 * is NULL (see: nd_pfn_devinit()), so initialize the
                 * settings from pfn_sb.
                 */
                nd_pfn->uuid = kmemdup(pfn_sb->uuid, 16, GFP_KERNEL);
                if (!nd_pfn->uuid)
                        return -ENOMEM;
                nd_pfn->align = align;
                nd_pfn->mode = mode;
        } else {
                /*
                 * When probing a pfn / dax instance we validate the
                 * live settings against the pfn_sb
                 */
                if (memcmp(nd_pfn->uuid, pfn_sb->uuid, 16) != 0)
                        return -ENODEV;

                /*
                 * If the uuid validates, but other settings mismatch,
                 * return -EOPNOTSUPP because userspace has managed to
                 * change the configuration without specifying new
                 * identification.
                 */
                if (nd_pfn->align != align || nd_pfn->mode != mode) {
                        dev_err(&nd_pfn->dev,
                                        "init failed, settings mismatch\n");
                        dev_dbg(&nd_pfn->dev, "align: %lx:%lx mode: %d:%d\n",
                                        nd_pfn->align, align, nd_pfn->mode,
                                        mode);
                        return -EOPNOTSUPP;
                }
        }

        if (align > nvdimm_namespace_capacity(ndns)) {
                dev_err(&nd_pfn->dev, "alignment: %lx exceeds capacity %llx\n",
                                align, nvdimm_namespace_capacity(ndns));
                return -EOPNOTSUPP;
        }

        /*
         * These warnings are verbose because they can only trigger in
         * the case where the physical address alignment of the
         * namespace has changed since the pfn superblock was
         * established.
         */
        nsio = to_nd_namespace_io(&ndns->dev);
        res = &nsio->res;
        if (offset >= resource_size(res)) {
                dev_err(&nd_pfn->dev, "pfn array size exceeds capacity of %s\n",
                                dev_name(&ndns->dev));
                return -EOPNOTSUPP;
        }

        if ((align && !IS_ALIGNED(res->start + offset + start_pad, align))
                        || !IS_ALIGNED(offset, PAGE_SIZE)) {
                dev_err(&nd_pfn->dev,
                                "bad offset: %#llx dax disabled align: %#lx\n",
                                offset, align);
                return -EOPNOTSUPP;
        }

        if (!IS_ALIGNED(res->start + le32_to_cpu(pfn_sb->start_pad),
                                memremap_compat_align())) {
                dev_err(&nd_pfn->dev, "resource start misaligned\n");
                return -EOPNOTSUPP;
        }

        if (!IS_ALIGNED(res->end + 1 - le32_to_cpu(pfn_sb->end_trunc),
                                memremap_compat_align())) {
                dev_err(&nd_pfn->dev, "resource end misaligned\n");
                return -EOPNOTSUPP;
        }

        return 0;
}
EXPORT_SYMBOL(nd_pfn_validate);

int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
{
        int rc;
        struct nd_pfn *nd_pfn;
        struct device *pfn_dev;
        struct nd_pfn_sb *pfn_sb;
        struct nd_region *nd_region = to_nd_region(ndns->dev.parent);

        if (ndns->force_raw)
                return -ENODEV;

        switch (ndns->claim_class) {
        case NVDIMM_CCLASS_NONE:
        case NVDIMM_CCLASS_PFN:
                break;
        default:
                return -ENODEV;
        }

        nvdimm_bus_lock(&ndns->dev);
        nd_pfn = nd_pfn_alloc(nd_region);
        pfn_dev = nd_pfn_devinit(nd_pfn, ndns);
        nvdimm_bus_unlock(&ndns->dev);
        if (!pfn_dev)
                return -ENOMEM;
        pfn_sb = devm_kmalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
        nd_pfn = to_nd_pfn(pfn_dev);
        nd_pfn->pfn_sb = pfn_sb;
        rc = nd_pfn_validate(nd_pfn, PFN_SIG);
        dev_dbg(dev, "pfn: %s\n", rc == 0 ? dev_name(pfn_dev) : "<none>");
        if (rc < 0) {
                nd_detach_ndns(pfn_dev, &nd_pfn->ndns);
                put_device(pfn_dev);
        } else
                nd_device_register(pfn_dev);

        return rc;
}
EXPORT_SYMBOL(nd_pfn_probe);

/*
 * We hotplug memory at sub-section granularity, so pad the reserved
 * area from the preceding sub-section boundary to the namespace base
 * address.
 */
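/*
 * Illustrative example (assuming 4K pages and 2M sub-sections, as on
 * x86_64): a namespace base 64K past a sub-section boundary yields an
 * altmap base_pfn 16 pages below the namespace start, and those 16
 * pages are added to the reserve by init_altmap_reserve().
 */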
static unsigned long init_altmap_base(resource_size_t base)
{
        unsigned long base_pfn = PHYS_PFN(base);

        return SUBSECTION_ALIGN_DOWN(base_pfn);
}

static unsigned long init_altmap_reserve(resource_size_t base)
{
        unsigned long reserve = nd_info_block_reserve() >> PAGE_SHIFT;
        unsigned long base_pfn = PHYS_PFN(base);

        reserve += base_pfn - SUBSECTION_ALIGN_DOWN(base_pfn);
        return reserve;
}

static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
{
        struct range *range = &pgmap->range;
        struct vmem_altmap *altmap = &pgmap->altmap;
        struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
        u64 offset = le64_to_cpu(pfn_sb->dataoff);
        u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
        u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
        u32 reserve = nd_info_block_reserve();
        struct nd_namespace_common *ndns = nd_pfn->ndns;
        struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
        resource_size_t base = nsio->res.start + start_pad;
        resource_size_t end = nsio->res.end - end_trunc;
        struct vmem_altmap __altmap = {
                .base_pfn = init_altmap_base(base),
                .reserve = init_altmap_reserve(base),
                .end_pfn = PHYS_PFN(end),
        };

        *range = (struct range) {
                .start = nsio->res.start + start_pad,
                .end = nsio->res.end - end_trunc,
        };
        pgmap->nr_range = 1;
        if (nd_pfn->mode == PFN_MODE_RAM) {
                if (offset < reserve)
                        return -EINVAL;
                nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
        } else if (nd_pfn->mode == PFN_MODE_PMEM) {
                nd_pfn->npfns = PHYS_PFN((range_len(range) - offset));
                if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns)
                        dev_info(&nd_pfn->dev,
                                        "number of pfns truncated from %lld to %ld\n",
                                        le64_to_cpu(nd_pfn->pfn_sb->npfns),
                                        nd_pfn->npfns);
                memcpy(altmap, &__altmap, sizeof(*altmap));
                altmap->free = PHYS_PFN(offset - reserve);
                altmap->alloc = 0;
                pgmap->flags |= PGMAP_ALTMAP_VALID;
        } else
                return -ENXIO;

        return 0;
}

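/*
 * On-media layout established by nd_pfn_init() (sketch, offsets from
 * the namespace start): the info block is written at SZ_4K, the first
 * SZ_8K is reserved, an optional 'struct page' array follows in
 * PFN_MODE_PMEM, and user data begins at ->dataoff, which is aligned
 * to ->align.
 */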
static int nd_pfn_init(struct nd_pfn *nd_pfn)
{
        struct nd_namespace_common *ndns = nd_pfn->ndns;
        struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
        resource_size_t start, size;
        struct nd_region *nd_region;
        unsigned long npfns, align;
        u32 end_trunc;
        struct nd_pfn_sb *pfn_sb;
        phys_addr_t offset;
        const char *sig;
        u64 checksum;
        int rc;

        pfn_sb = devm_kmalloc(&nd_pfn->dev, sizeof(*pfn_sb), GFP_KERNEL);
        if (!pfn_sb)
                return -ENOMEM;

        nd_pfn->pfn_sb = pfn_sb;
        if (is_nd_dax(&nd_pfn->dev))
                sig = DAX_SIG;
        else
                sig = PFN_SIG;

        rc = nd_pfn_validate(nd_pfn, sig);
        if (rc == 0)
                return nd_pfn_clear_memmap_errors(nd_pfn);
        if (rc != -ENODEV)
                return rc;

        /* no info block, do init */
        memset(pfn_sb, 0, sizeof(*pfn_sb));

        nd_region = to_nd_region(nd_pfn->dev.parent);
        if (nd_region->ro) {
                dev_info(&nd_pfn->dev,
                                "%s is read-only, unable to init metadata\n",
                                dev_name(&nd_region->dev));
                return -ENXIO;
        }

        start = nsio->res.start;
        size = resource_size(&nsio->res);
        npfns = PHYS_PFN(size - SZ_8K);
        align = max(nd_pfn->align, memremap_compat_align());

        /*
         * When @start is misaligned fail namespace creation. See
         * the 'struct nd_pfn_sb' commentary on why ->start_pad is not
         * an option.
         */
        if (!IS_ALIGNED(start, memremap_compat_align())) {
                dev_err(&nd_pfn->dev, "%s: start %pa misaligned to %#lx\n",
                                dev_name(&ndns->dev), &start,
                                memremap_compat_align());
                return -EINVAL;
        }
        end_trunc = start + size - ALIGN_DOWN(start + size, align);
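        /*
         * Sizing example (assuming 4K pages and a 64-byte
         * MAX_STRUCT_PAGE_SIZE): a 16 GiB namespace spans ~4M pages, so
         * the PFN_MODE_PMEM page map below consumes ~256 MiB and
         * ->dataoff lands at the first @align boundary at or above
         * start + SZ_8K + page_map_size.
         */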
        if (nd_pfn->mode == PFN_MODE_PMEM) {
                unsigned long page_map_size = MAX_STRUCT_PAGE_SIZE * npfns;

                /*
                 * The altmap should be padded out to the block size used
                 * when populating the vmemmap. This *should* be equal to
                 * PMD_SIZE for most architectures.
                 *
                 * Also make sure size of struct page is less than
                 * MAX_STRUCT_PAGE_SIZE. The goal here is compatibility in the
                 * face of production kernel configurations that reduce the
                 * 'struct page' size below MAX_STRUCT_PAGE_SIZE. For debug
                 * kernel configurations that increase the 'struct page' size
                 * above MAX_STRUCT_PAGE_SIZE, the page_struct_override allows
                 * for continuing with the capacity that will be wasted when
                 * reverting to a production kernel configuration. Otherwise,
                 * those configurations are blocked by default.
                 */
                if (sizeof(struct page) > MAX_STRUCT_PAGE_SIZE) {
                        if (page_struct_override)
                                page_map_size = sizeof(struct page) * npfns;
                        else {
                                dev_err(&nd_pfn->dev,
                                        "Memory debug options prevent using pmem for the page map\n");
                                return -EINVAL;
                        }
                }
                offset = ALIGN(start + SZ_8K + page_map_size, align) - start;
        } else if (nd_pfn->mode == PFN_MODE_RAM)
                offset = ALIGN(start + SZ_8K, align) - start;
        else
                return -ENXIO;

        if (offset >= size) {
                dev_err(&nd_pfn->dev, "%s unable to satisfy requested alignment\n",
                                dev_name(&ndns->dev));
                return -ENXIO;
        }

        npfns = PHYS_PFN(size - offset - end_trunc);
        pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
        pfn_sb->dataoff = cpu_to_le64(offset);
        pfn_sb->npfns = cpu_to_le64(npfns);
        memcpy(pfn_sb->signature, sig, PFN_SIG_LEN);
        memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
        memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
        pfn_sb->version_major = cpu_to_le16(1);
        pfn_sb->version_minor = cpu_to_le16(4);
        pfn_sb->end_trunc = cpu_to_le32(end_trunc);
        pfn_sb->align = cpu_to_le32(nd_pfn->align);
        if (sizeof(struct page) > MAX_STRUCT_PAGE_SIZE && page_struct_override)
                pfn_sb->page_struct_size = cpu_to_le16(sizeof(struct page));
        else
                pfn_sb->page_struct_size = cpu_to_le16(MAX_STRUCT_PAGE_SIZE);
        pfn_sb->page_size = cpu_to_le32(PAGE_SIZE);
        checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
        pfn_sb->checksum = cpu_to_le64(checksum);

        rc = nd_pfn_clear_memmap_errors(nd_pfn);
        if (rc)
                return rc;

        return nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb), 0);
}

/*
 * Determine the effective resource range and vmem_altmap from an nd_pfn
 * instance.
 */
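/*
 * Typical callers (e.g. the pmem driver) hand the populated @pgmap to
 * devm_memremap_pages() to map the namespace.
 */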
int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
{
        int rc;

        if (!nd_pfn->uuid || !nd_pfn->ndns)
                return -ENODEV;

        rc = nd_pfn_init(nd_pfn);
        if (rc)
                return rc;

        /* we need a valid pfn_sb before we can init a dev_pagemap */
        return __nvdimm_setup_pfn(nd_pfn, pgmap);
}
EXPORT_SYMBOL_GPL(nvdimm_setup_pfn);