libnvdimm, region: Use struct_size() in kzalloc()
drivers/nvdimm/region_devs.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>

static DEFINE_IDA(region_ida);
static DEFINE_PER_CPU(int, flush_idx);

static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
                struct nd_region_data *ndrd)
{
        int i, j;

        dev_dbg(dev, "%s: map %d flush address%s\n", nvdimm_name(nvdimm),
                        nvdimm->num_flush, nvdimm->num_flush == 1 ? "" : "es");
        for (i = 0; i < (1 << ndrd->hints_shift); i++) {
                struct resource *res = &nvdimm->flush_wpq[i];
                unsigned long pfn = PHYS_PFN(res->start);
                void __iomem *flush_page;

                /* check if flush hints share a page */
                for (j = 0; j < i; j++) {
                        struct resource *res_j = &nvdimm->flush_wpq[j];
                        unsigned long pfn_j = PHYS_PFN(res_j->start);

                        if (pfn == pfn_j)
                                break;
                }

                if (j < i)
                        flush_page = (void __iomem *) ((unsigned long)
                                        ndrd_get_flush_wpq(ndrd, dimm, j)
                                        & PAGE_MASK);
                else
                        flush_page = devm_nvdimm_ioremap(dev,
                                        PFN_PHYS(pfn), PAGE_SIZE);
                if (!flush_page)
                        return -ENXIO;
                ndrd_set_flush_wpq(ndrd, dimm, i, flush_page
                                + (res->start & ~PAGE_MASK));
        }

        return 0;
}
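
/*
 * Illustrative sketch of the page-sharing logic above (hypothetical
 * addresses): if a dimm publishes two flush hints at 0x1000 and 0x1040,
 * both fall within the same 4K page, so only one devm_nvdimm_ioremap()
 * is issued and the second slot is derived from the first mapping:
 *
 *	hint0 = map(0x1000 & PAGE_MASK) + 0x000;
 *	hint1 = ((unsigned long) hint0 & PAGE_MASK) + 0x040;
 */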

int nd_region_activate(struct nd_region *nd_region)
{
        int i, j, num_flush = 0;
        struct nd_region_data *ndrd;
        struct device *dev = &nd_region->dev;
        size_t flush_data_size = sizeof(void *);

        nvdimm_bus_lock(&nd_region->dev);
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm *nvdimm = nd_mapping->nvdimm;

                if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
                        nvdimm_bus_unlock(&nd_region->dev);
                        return -EBUSY;
                }

                /* at least one null hint slot per-dimm for the "no-hint" case */
                flush_data_size += sizeof(void *);
                num_flush = min_not_zero(num_flush, nvdimm->num_flush);
                if (!nvdimm->num_flush)
                        continue;
                flush_data_size += nvdimm->num_flush * sizeof(void *);
        }
        nvdimm_bus_unlock(&nd_region->dev);

        ndrd = devm_kzalloc(dev, sizeof(*ndrd) + flush_data_size, GFP_KERNEL);
        if (!ndrd)
                return -ENOMEM;
        dev_set_drvdata(dev, ndrd);

        if (!num_flush)
                return 0;

        ndrd->hints_shift = ilog2(num_flush);
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm *nvdimm = nd_mapping->nvdimm;
                int rc = nvdimm_map_flush(&nd_region->dev, nvdimm, i, ndrd);

                if (rc)
                        return rc;
        }

        /*
         * Clear out duplicate entries so that a region flush does not
         * hit the same hint page twice.
         */
        for (i = 0; i < nd_region->ndr_mappings - 1; i++) {
                /* ignore if NULL already */
                if (!ndrd_get_flush_wpq(ndrd, i, 0))
                        continue;

                for (j = i + 1; j < nd_region->ndr_mappings; j++)
                        if (ndrd_get_flush_wpq(ndrd, i, 0) ==
                            ndrd_get_flush_wpq(ndrd, j, 0))
                                ndrd_set_flush_wpq(ndrd, j, 0, NULL);
        }

        return 0;
}
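
/*
 * Sketch of the per-dimm flush hint table built above, assuming the
 * ndrd_{get,set}_flush_wpq() helpers in nd-core.h flatten it roughly as:
 *
 *	unsigned int num = 1 << ndrd->hints_shift;
 *
 *	return ndrd->flush_wpq[dimm * num + (hint & (num - 1))];
 *
 * i.e. each dimm gets a power-of-2 run of slots, and the oversized
 * hashed index used by generic_nvdimm_flush() below wraps within a
 * dimm's run via the mask.
 */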

static void nd_region_release(struct device *dev)
{
        struct nd_region *nd_region = to_nd_region(dev);
        u16 i;

        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm *nvdimm = nd_mapping->nvdimm;

                put_device(&nvdimm->dev);
        }
        free_percpu(nd_region->lane);
        ida_simple_remove(&region_ida, nd_region->id);
        if (is_nd_blk(dev))
                kfree(to_nd_blk_region(dev));
        else
                kfree(nd_region);
}

static struct device_type nd_blk_device_type = {
        .name = "nd_blk",
        .release = nd_region_release,
};

static struct device_type nd_pmem_device_type = {
        .name = "nd_pmem",
        .release = nd_region_release,
};

static struct device_type nd_volatile_device_type = {
        .name = "nd_volatile",
        .release = nd_region_release,
};

bool is_nd_pmem(struct device *dev)
{
        return dev ? dev->type == &nd_pmem_device_type : false;
}

bool is_nd_blk(struct device *dev)
{
        return dev ? dev->type == &nd_blk_device_type : false;
}

bool is_nd_volatile(struct device *dev)
{
        return dev ? dev->type == &nd_volatile_device_type : false;
}

struct nd_region *to_nd_region(struct device *dev)
{
        struct nd_region *nd_region = container_of(dev, struct nd_region, dev);

        WARN_ON(dev->type->release != nd_region_release);
        return nd_region;
}
EXPORT_SYMBOL_GPL(to_nd_region);

struct device *nd_region_dev(struct nd_region *nd_region)
{
        if (!nd_region)
                return NULL;
        return &nd_region->dev;
}
EXPORT_SYMBOL_GPL(nd_region_dev);

struct nd_blk_region *to_nd_blk_region(struct device *dev)
{
        struct nd_region *nd_region = to_nd_region(dev);

        WARN_ON(!is_nd_blk(dev));
        return container_of(nd_region, struct nd_blk_region, nd_region);
}
EXPORT_SYMBOL_GPL(to_nd_blk_region);

void *nd_region_provider_data(struct nd_region *nd_region)
{
        return nd_region->provider_data;
}
EXPORT_SYMBOL_GPL(nd_region_provider_data);

void *nd_blk_region_provider_data(struct nd_blk_region *ndbr)
{
        return ndbr->blk_provider_data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_provider_data);

void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data)
{
        ndbr->blk_provider_data = data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_set_provider_data);

/**
 * nd_region_to_nstype() - map a region to an integer namespace type
 * @nd_region: region-device to interrogate
 *
 * This value is the region's 'nstype' attribute, an input to the
 * MODALIAS for namespace devices, and the bit number for an nvdimm_bus
 * to match namespace devices with namespace drivers.
 */
int nd_region_to_nstype(struct nd_region *nd_region)
{
        if (is_memory(&nd_region->dev)) {
                u16 i, alias;

                for (i = 0, alias = 0; i < nd_region->ndr_mappings; i++) {
                        struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                        struct nvdimm *nvdimm = nd_mapping->nvdimm;

                        if (test_bit(NDD_ALIASING, &nvdimm->flags))
                                alias++;
                }
                if (alias)
                        return ND_DEVICE_NAMESPACE_PMEM;
                else
                        return ND_DEVICE_NAMESPACE_IO;
        } else if (is_nd_blk(&nd_region->dev)) {
                return ND_DEVICE_NAMESPACE_BLK;
        }

        return 0;
}
EXPORT_SYMBOL(nd_region_to_nstype);

static ssize_t size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        unsigned long long size = 0;

        if (is_memory(dev)) {
                size = nd_region->ndr_size;
        } else if (nd_region->ndr_mappings == 1) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[0];

                size = nd_mapping->size;
        }

        return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(size);

static ssize_t deep_flush_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);

        /*
         * NOTE: in the nvdimm_has_flush() error case this attribute is
         * not visible.
         */
        return sprintf(buf, "%d\n", nvdimm_has_flush(nd_region));
}

static ssize_t deep_flush_store(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t len)
{
        bool flush;
        int rc = strtobool(buf, &flush);
        struct nd_region *nd_region = to_nd_region(dev);

        if (rc)
                return rc;
        if (!flush)
                return -EINVAL;
        rc = nvdimm_flush(nd_region, NULL);
        if (rc)
                return rc;

        return len;
}
static DEVICE_ATTR_RW(deep_flush);

static ssize_t mappings_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);

        return sprintf(buf, "%d\n", nd_region->ndr_mappings);
}
static DEVICE_ATTR_RO(mappings);

static ssize_t nstype_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);

        return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);

static ssize_t set_cookie_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        struct nd_interleave_set *nd_set = nd_region->nd_set;
        ssize_t rc = 0;

        if (is_memory(dev) && nd_set)
                /* pass, should be precluded by region_visible */;
        else
                return -ENXIO;

        /*
         * The cookie to show depends on which specification of the
         * labels we are using. If there are no labels then default to
         * the v1.1 namespace label cookie definition. To read all this
         * data we need to wait for probing to settle.
         */
        nd_device_lock(dev);
        nvdimm_bus_lock(dev);
        wait_nvdimm_bus_probe_idle(dev);
        if (nd_region->ndr_mappings) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[0];
                struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

                if (ndd) {
                        struct nd_namespace_index *nsindex;

                        nsindex = to_namespace_index(ndd, ndd->ns_current);
                        rc = sprintf(buf, "%#llx\n",
                                        nd_region_interleave_set_cookie(nd_region,
                                                nsindex));
                }
        }
        nvdimm_bus_unlock(dev);
        nd_device_unlock(dev);

        if (rc)
                return rc;
        return sprintf(buf, "%#llx\n", nd_set->cookie1);
}
static DEVICE_ATTR_RO(set_cookie);

resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
{
        resource_size_t blk_max_overlap = 0, available, overlap;
        int i;

        WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));

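        /*
         * Sketch of the accounting below: BLK capacity may alias the
         * tail of a dimm's pmem capacity, and the interleave-set math
         * must deduct the same (largest) overlap from every mapping.
         * If a mapping reports a larger overlap than currently assumed,
         * restart the sum with the new maximum.
         */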
 retry:
        available = 0;
        overlap = blk_max_overlap;
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

                /* if a dimm is disabled the available capacity is zero */
                if (!ndd)
                        return 0;

                if (is_memory(&nd_region->dev)) {
                        available += nd_pmem_available_dpa(nd_region,
                                        nd_mapping, &overlap);
                        if (overlap > blk_max_overlap) {
                                blk_max_overlap = overlap;
                                goto retry;
                        }
                } else if (is_nd_blk(&nd_region->dev))
                        available += nd_blk_available_dpa(nd_region);
        }

        return available;
}

resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region)
{
        resource_size_t available = 0;
        int i;

        if (is_memory(&nd_region->dev))
                available = PHYS_ADDR_MAX;

        WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];

                if (is_memory(&nd_region->dev))
                        available = min(available,
                                        nd_pmem_max_contiguous_dpa(nd_region,
                                                                   nd_mapping));
                else if (is_nd_blk(&nd_region->dev))
                        available += nd_blk_available_dpa(nd_region);
        }
        if (is_memory(&nd_region->dev))
                return available * nd_region->ndr_mappings;
        return available;
}

static ssize_t available_size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        unsigned long long available = 0;

        /*
         * Flush in-flight updates and grab a snapshot of the available
         * size.  Of course, this value is potentially invalidated the
         * moment nvdimm_bus_lock() is dropped, but that's userspace's
         * problem to not race itself.
         */
        nd_device_lock(dev);
        nvdimm_bus_lock(dev);
        wait_nvdimm_bus_probe_idle(dev);
        available = nd_region_available_dpa(nd_region);
        nvdimm_bus_unlock(dev);
        nd_device_unlock(dev);

        return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(available_size);

static ssize_t max_available_extent_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        unsigned long long available = 0;

        nd_device_lock(dev);
        nvdimm_bus_lock(dev);
        wait_nvdimm_bus_probe_idle(dev);
        available = nd_region_allocatable_dpa(nd_region);
        nvdimm_bus_unlock(dev);
        nd_device_unlock(dev);

        return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(max_available_extent);

static ssize_t init_namespaces_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region_data *ndrd = dev_get_drvdata(dev);
        ssize_t rc;

        nvdimm_bus_lock(dev);
        if (ndrd)
                rc = sprintf(buf, "%d/%d\n", ndrd->ns_active, ndrd->ns_count);
        else
                rc = -ENXIO;
        nvdimm_bus_unlock(dev);

        return rc;
}
static DEVICE_ATTR_RO(init_namespaces);

static ssize_t namespace_seed_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        ssize_t rc;

        nvdimm_bus_lock(dev);
        if (nd_region->ns_seed)
                rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed));
        else
                rc = sprintf(buf, "\n");
        nvdimm_bus_unlock(dev);
        return rc;
}
static DEVICE_ATTR_RO(namespace_seed);

static ssize_t btt_seed_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        ssize_t rc;

        nvdimm_bus_lock(dev);
        if (nd_region->btt_seed)
                rc = sprintf(buf, "%s\n", dev_name(nd_region->btt_seed));
        else
                rc = sprintf(buf, "\n");
        nvdimm_bus_unlock(dev);

        return rc;
}
static DEVICE_ATTR_RO(btt_seed);

static ssize_t pfn_seed_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        ssize_t rc;

        nvdimm_bus_lock(dev);
        if (nd_region->pfn_seed)
                rc = sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed));
        else
                rc = sprintf(buf, "\n");
        nvdimm_bus_unlock(dev);

        return rc;
}
static DEVICE_ATTR_RO(pfn_seed);

static ssize_t dax_seed_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        ssize_t rc;

        nvdimm_bus_lock(dev);
        if (nd_region->dax_seed)
                rc = sprintf(buf, "%s\n", dev_name(nd_region->dax_seed));
        else
                rc = sprintf(buf, "\n");
        nvdimm_bus_unlock(dev);

        return rc;
}
static DEVICE_ATTR_RO(dax_seed);

static ssize_t read_only_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);

        return sprintf(buf, "%d\n", nd_region->ro);
}

static ssize_t read_only_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        bool ro;
        int rc = strtobool(buf, &ro);
        struct nd_region *nd_region = to_nd_region(dev);

        if (rc)
                return rc;

        nd_region->ro = ro;
        return len;
}
static DEVICE_ATTR_RW(read_only);

static ssize_t region_badblocks_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        ssize_t rc;

        nd_device_lock(dev);
        if (dev->driver)
                rc = badblocks_show(&nd_region->bb, buf, 0);
        else
                rc = -ENXIO;
        nd_device_unlock(dev);

        return rc;
}
static DEVICE_ATTR(badblocks, 0444, region_badblocks_show, NULL);

static ssize_t resource_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);

        return sprintf(buf, "%#llx\n", nd_region->ndr_start);
}
static DEVICE_ATTR_RO(resource);

static ssize_t persistence_domain_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);

        if (test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags))
                return sprintf(buf, "cpu_cache\n");
        else if (test_bit(ND_REGION_PERSIST_MEMCTRL, &nd_region->flags))
                return sprintf(buf, "memory_controller\n");
        else
                return sprintf(buf, "\n");
}
static DEVICE_ATTR_RO(persistence_domain);

static struct attribute *nd_region_attributes[] = {
        &dev_attr_size.attr,
        &dev_attr_nstype.attr,
        &dev_attr_mappings.attr,
        &dev_attr_btt_seed.attr,
        &dev_attr_pfn_seed.attr,
        &dev_attr_dax_seed.attr,
        &dev_attr_deep_flush.attr,
        &dev_attr_read_only.attr,
        &dev_attr_set_cookie.attr,
        &dev_attr_available_size.attr,
        &dev_attr_max_available_extent.attr,
        &dev_attr_namespace_seed.attr,
        &dev_attr_init_namespaces.attr,
        &dev_attr_badblocks.attr,
        &dev_attr_resource.attr,
        &dev_attr_persistence_domain.attr,
        NULL,
};

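/*
 * Summary of the visibility rules implemented below: pfn_seed and
 * dax_seed only appear on memory (pmem/volatile) regions, badblocks
 * only on pmem regions, resource is root-readable (0400) on pmem and
 * hidden otherwise, deep_flush is writable only when flush hints exist
 * (read-only when flushing is unnecessary, hidden when capability is
 * unknown), and set_cookie / available_size depend on the region type
 * and the presence of interleave-set data.
 */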
static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
{
        struct device *dev = container_of(kobj, typeof(*dev), kobj);
        struct nd_region *nd_region = to_nd_region(dev);
        struct nd_interleave_set *nd_set = nd_region->nd_set;
        int type = nd_region_to_nstype(nd_region);

        if (!is_memory(dev) && a == &dev_attr_pfn_seed.attr)
                return 0;

        if (!is_memory(dev) && a == &dev_attr_dax_seed.attr)
                return 0;

        if (!is_nd_pmem(dev) && a == &dev_attr_badblocks.attr)
                return 0;

        if (a == &dev_attr_resource.attr) {
                if (is_nd_pmem(dev))
                        return 0400;
                else
                        return 0;
        }

        if (a == &dev_attr_deep_flush.attr) {
                int has_flush = nvdimm_has_flush(nd_region);

                if (has_flush == 1)
                        return a->mode;
                else if (has_flush == 0)
                        return 0444;
                else
                        return 0;
        }

        if (a == &dev_attr_persistence_domain.attr) {
                if ((nd_region->flags & (BIT(ND_REGION_PERSIST_CACHE)
                                        | BIT(ND_REGION_PERSIST_MEMCTRL))) == 0)
                        return 0;
                return a->mode;
        }

        if (a != &dev_attr_set_cookie.attr
                        && a != &dev_attr_available_size.attr)
                return a->mode;

        if ((type == ND_DEVICE_NAMESPACE_PMEM
                                || type == ND_DEVICE_NAMESPACE_BLK)
                        && a == &dev_attr_available_size.attr)
                return a->mode;
        else if (is_memory(dev) && nd_set)
                return a->mode;

        return 0;
}

struct attribute_group nd_region_attribute_group = {
        .attrs = nd_region_attributes,
        .is_visible = region_visible,
};
EXPORT_SYMBOL_GPL(nd_region_attribute_group);

u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
                struct nd_namespace_index *nsindex)
{
        struct nd_interleave_set *nd_set = nd_region->nd_set;

        if (!nd_set)
                return 0;

        if (nsindex && __le16_to_cpu(nsindex->major) == 1
                        && __le16_to_cpu(nsindex->minor) == 1)
                return nd_set->cookie1;
        return nd_set->cookie2;
}

u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region)
{
        struct nd_interleave_set *nd_set = nd_region->nd_set;

        if (nd_set)
                return nd_set->altcookie;
        return 0;
}

void nd_mapping_free_labels(struct nd_mapping *nd_mapping)
{
        struct nd_label_ent *label_ent, *e;

        lockdep_assert_held(&nd_mapping->lock);
        list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
                list_del(&label_ent->list);
                kfree(label_ent);
        }
}

/*
 * Upon successful probe/remove, take/release a reference on the
 * associated interleave set (if present), and plant new btt + namespace
 * seeds.  Also, on the removal of a BLK region, notify the provider to
 * disable the region.
 */
static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
                struct device *dev, bool probe)
{
        struct nd_region *nd_region;

        if (!probe && is_nd_region(dev)) {
                int i;

                nd_region = to_nd_region(dev);
                for (i = 0; i < nd_region->ndr_mappings; i++) {
                        struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                        struct nvdimm_drvdata *ndd = nd_mapping->ndd;
                        struct nvdimm *nvdimm = nd_mapping->nvdimm;

                        mutex_lock(&nd_mapping->lock);
                        nd_mapping_free_labels(nd_mapping);
                        mutex_unlock(&nd_mapping->lock);

                        put_ndd(ndd);
                        nd_mapping->ndd = NULL;
                        if (ndd)
                                atomic_dec(&nvdimm->busy);
                }
        }
        if (dev->parent && is_nd_region(dev->parent) && probe) {
                nd_region = to_nd_region(dev->parent);
                nvdimm_bus_lock(dev);
                if (nd_region->ns_seed == dev)
                        nd_region_create_ns_seed(nd_region);
                nvdimm_bus_unlock(dev);
        }
        if (is_nd_btt(dev) && probe) {
                struct nd_btt *nd_btt = to_nd_btt(dev);

                nd_region = to_nd_region(dev->parent);
                nvdimm_bus_lock(dev);
                if (nd_region->btt_seed == dev)
                        nd_region_create_btt_seed(nd_region);
                if (nd_region->ns_seed == &nd_btt->ndns->dev)
                        nd_region_create_ns_seed(nd_region);
                nvdimm_bus_unlock(dev);
        }
        if (is_nd_pfn(dev) && probe) {
                struct nd_pfn *nd_pfn = to_nd_pfn(dev);

                nd_region = to_nd_region(dev->parent);
                nvdimm_bus_lock(dev);
                if (nd_region->pfn_seed == dev)
                        nd_region_create_pfn_seed(nd_region);
                if (nd_region->ns_seed == &nd_pfn->ndns->dev)
                        nd_region_create_ns_seed(nd_region);
                nvdimm_bus_unlock(dev);
        }
        if (is_nd_dax(dev) && probe) {
                struct nd_dax *nd_dax = to_nd_dax(dev);

                nd_region = to_nd_region(dev->parent);
                nvdimm_bus_lock(dev);
                if (nd_region->dax_seed == dev)
                        nd_region_create_dax_seed(nd_region);
                if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev)
                        nd_region_create_ns_seed(nd_region);
                nvdimm_bus_unlock(dev);
        }
}

void nd_region_probe_success(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
        nd_region_notify_driver_action(nvdimm_bus, dev, true);
}

void nd_region_disable(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
        nd_region_notify_driver_action(nvdimm_bus, dev, false);
}

static ssize_t mappingN(struct device *dev, char *buf, int n)
{
        struct nd_region *nd_region = to_nd_region(dev);
        struct nd_mapping *nd_mapping;
        struct nvdimm *nvdimm;

        if (n >= nd_region->ndr_mappings)
                return -ENXIO;
        nd_mapping = &nd_region->mapping[n];
        nvdimm = nd_mapping->nvdimm;

        return sprintf(buf, "%s,%llu,%llu,%d\n", dev_name(&nvdimm->dev),
                        nd_mapping->start, nd_mapping->size,
                        nd_mapping->position);
}

#define REGION_MAPPING(idx) \
static ssize_t mapping##idx##_show(struct device *dev,          \
                struct device_attribute *attr, char *buf)       \
{                                                               \
        return mappingN(dev, buf, idx);                         \
}                                                               \
static DEVICE_ATTR_RO(mapping##idx)

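/*
 * For reference, REGION_MAPPING(0) expands to:
 *
 *	static ssize_t mapping0_show(struct device *dev,
 *			struct device_attribute *attr, char *buf)
 *	{
 *		return mappingN(dev, buf, 0);
 *	}
 *	static DEVICE_ATTR_RO(mapping0)
 *
 * i.e. one read-only dev_attr_mapping0 per possible mapping index.
 */
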
/*
 * 32 should be enough for a while, even in the presence of socket
 * interleave a 32-way interleave set is a degenerate case.
 */
REGION_MAPPING(0);
REGION_MAPPING(1);
REGION_MAPPING(2);
REGION_MAPPING(3);
REGION_MAPPING(4);
REGION_MAPPING(5);
REGION_MAPPING(6);
REGION_MAPPING(7);
REGION_MAPPING(8);
REGION_MAPPING(9);
REGION_MAPPING(10);
REGION_MAPPING(11);
REGION_MAPPING(12);
REGION_MAPPING(13);
REGION_MAPPING(14);
REGION_MAPPING(15);
REGION_MAPPING(16);
REGION_MAPPING(17);
REGION_MAPPING(18);
REGION_MAPPING(19);
REGION_MAPPING(20);
REGION_MAPPING(21);
REGION_MAPPING(22);
REGION_MAPPING(23);
REGION_MAPPING(24);
REGION_MAPPING(25);
REGION_MAPPING(26);
REGION_MAPPING(27);
REGION_MAPPING(28);
REGION_MAPPING(29);
REGION_MAPPING(30);
REGION_MAPPING(31);

static umode_t mapping_visible(struct kobject *kobj, struct attribute *a, int n)
{
        struct device *dev = container_of(kobj, struct device, kobj);
        struct nd_region *nd_region = to_nd_region(dev);

        if (n < nd_region->ndr_mappings)
                return a->mode;
        return 0;
}

static struct attribute *mapping_attributes[] = {
        &dev_attr_mapping0.attr,
        &dev_attr_mapping1.attr,
        &dev_attr_mapping2.attr,
        &dev_attr_mapping3.attr,
        &dev_attr_mapping4.attr,
        &dev_attr_mapping5.attr,
        &dev_attr_mapping6.attr,
        &dev_attr_mapping7.attr,
        &dev_attr_mapping8.attr,
        &dev_attr_mapping9.attr,
        &dev_attr_mapping10.attr,
        &dev_attr_mapping11.attr,
        &dev_attr_mapping12.attr,
        &dev_attr_mapping13.attr,
        &dev_attr_mapping14.attr,
        &dev_attr_mapping15.attr,
        &dev_attr_mapping16.attr,
        &dev_attr_mapping17.attr,
        &dev_attr_mapping18.attr,
        &dev_attr_mapping19.attr,
        &dev_attr_mapping20.attr,
        &dev_attr_mapping21.attr,
        &dev_attr_mapping22.attr,
        &dev_attr_mapping23.attr,
        &dev_attr_mapping24.attr,
        &dev_attr_mapping25.attr,
        &dev_attr_mapping26.attr,
        &dev_attr_mapping27.attr,
        &dev_attr_mapping28.attr,
        &dev_attr_mapping29.attr,
        &dev_attr_mapping30.attr,
        &dev_attr_mapping31.attr,
        NULL,
};

struct attribute_group nd_mapping_attribute_group = {
        .is_visible = mapping_visible,
        .attrs = mapping_attributes,
};
EXPORT_SYMBOL_GPL(nd_mapping_attribute_group);

int nd_blk_region_init(struct nd_region *nd_region)
{
        struct device *dev = &nd_region->dev;
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

        if (!is_nd_blk(dev))
                return 0;

        if (nd_region->ndr_mappings < 1) {
                dev_dbg(dev, "invalid BLK region\n");
                return -ENXIO;
        }

        return to_nd_blk_region(dev)->enable(nvdimm_bus, dev);
}

/**
 * nd_region_acquire_lane - allocate and lock a lane
 * @nd_region: region id and number of lanes possible
 *
 * A lane correlates to a BLK-data-window and/or a log slot in the BTT.
 * We optimize for the common case where there are 256 lanes, one
 * per-cpu.  For larger systems we need to lock to share lanes.  For now
 * this implementation assumes the cost of maintaining an allocator for
 * free lanes is on the order of the lock hold time, so it implements a
 * static lane = cpu % num_lanes mapping.
 *
 * In the case of a BTT instance on top of a BLK namespace a lane may be
 * acquired recursively.  We lock on the first instance.
 *
 * In the case of a BTT instance on top of PMEM, we only acquire a lane
 * for the BTT metadata updates.
 */
unsigned int nd_region_acquire_lane(struct nd_region *nd_region)
{
        unsigned int cpu, lane;

        cpu = get_cpu();
        if (nd_region->num_lanes < nr_cpu_ids) {
                struct nd_percpu_lane *ndl_lock, *ndl_count;

                lane = cpu % nd_region->num_lanes;
                ndl_count = per_cpu_ptr(nd_region->lane, cpu);
                ndl_lock = per_cpu_ptr(nd_region->lane, lane);
                if (ndl_count->count++ == 0)
                        spin_lock(&ndl_lock->lock);
        } else
                lane = cpu;

        return lane;
}
EXPORT_SYMBOL(nd_region_acquire_lane);
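
/*
 * Typical caller pattern (an illustrative sketch, not a copy of any
 * in-tree caller):
 *
 *	unsigned int lane = nd_region_acquire_lane(nd_region);
 *
 *	... issue I/O through the BLK data window / BTT log for 'lane' ...
 *
 *	nd_region_release_lane(nd_region, lane);
 *
 * Preemption stays disabled (via get_cpu()) for the duration of the
 * hold; the matching put_cpu() happens in nd_region_release_lane().
 */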

void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
{
        if (nd_region->num_lanes < nr_cpu_ids) {
                unsigned int cpu = get_cpu();
                struct nd_percpu_lane *ndl_lock, *ndl_count;

                ndl_count = per_cpu_ptr(nd_region->lane, cpu);
                ndl_lock = per_cpu_ptr(nd_region->lane, lane);
                if (--ndl_count->count == 0)
                        spin_unlock(&ndl_lock->lock);
                put_cpu();
        }
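        /* balance the get_cpu() in nd_region_acquire_lane() */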
        put_cpu();
}
EXPORT_SYMBOL(nd_region_release_lane);

static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
                struct nd_region_desc *ndr_desc, struct device_type *dev_type,
                const char *caller)
{
        struct nd_region *nd_region;
        struct device *dev;
        void *region_buf;
        unsigned int i;
        int ro = 0;

        for (i = 0; i < ndr_desc->num_mappings; i++) {
                struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
                struct nvdimm *nvdimm = mapping->nvdimm;

                if ((mapping->start | mapping->size) % SZ_4K) {
                        dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not 4K aligned\n",
                                        caller, dev_name(&nvdimm->dev), i);

                        return NULL;
                }

                if (test_bit(NDD_UNARMED, &nvdimm->flags))
                        ro = 1;

                if (test_bit(NDD_NOBLK, &nvdimm->flags)
                                && dev_type == &nd_blk_device_type) {
                        dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not BLK capable\n",
                                        caller, dev_name(&nvdimm->dev), i);
                        return NULL;
                }
        }

        if (dev_type == &nd_blk_device_type) {
                struct nd_blk_region_desc *ndbr_desc;
                struct nd_blk_region *ndbr;

                ndbr_desc = to_blk_region_desc(ndr_desc);
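                /*
                 * The mapping[] flexible array lives at the tail of the
                 * embedded nd_region rather than of nd_blk_region
                 * itself, so struct_size() cannot be applied directly
                 * here and the size stays open-coded.
                 */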
                ndbr = kzalloc(sizeof(*ndbr) + sizeof(struct nd_mapping)
                                * ndr_desc->num_mappings,
                                GFP_KERNEL);
                if (ndbr) {
                        nd_region = &ndbr->nd_region;
                        ndbr->enable = ndbr_desc->enable;
                        ndbr->do_io = ndbr_desc->do_io;
                }
                region_buf = ndbr;
        } else {
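                /*
                 * struct_size(nd_region, mapping, n) evaluates to
                 * sizeof(*nd_region) + n * sizeof(nd_region->mapping[0])
                 * with overflow checking: on overflow it saturates to
                 * SIZE_MAX so the allocation fails rather than being
                 * undersized.
                 */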
                nd_region = kzalloc(struct_size(nd_region, mapping,
                                                ndr_desc->num_mappings),
                                    GFP_KERNEL);
                region_buf = nd_region;
        }

        if (!region_buf)
                return NULL;
        nd_region->id = ida_simple_get(&region_ida, 0, 0, GFP_KERNEL);
        if (nd_region->id < 0)
                goto err_id;

        nd_region->lane = alloc_percpu(struct nd_percpu_lane);
        if (!nd_region->lane)
                goto err_percpu;

        for (i = 0; i < nr_cpu_ids; i++) {
                struct nd_percpu_lane *ndl;

                ndl = per_cpu_ptr(nd_region->lane, i);
                spin_lock_init(&ndl->lock);
                ndl->count = 0;
        }

        for (i = 0; i < ndr_desc->num_mappings; i++) {
                struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
                struct nvdimm *nvdimm = mapping->nvdimm;

                nd_region->mapping[i].nvdimm = nvdimm;
                nd_region->mapping[i].start = mapping->start;
                nd_region->mapping[i].size = mapping->size;
                nd_region->mapping[i].position = mapping->position;
                INIT_LIST_HEAD(&nd_region->mapping[i].labels);
                mutex_init(&nd_region->mapping[i].lock);

                get_device(&nvdimm->dev);
        }
        nd_region->ndr_mappings = ndr_desc->num_mappings;
        nd_region->provider_data = ndr_desc->provider_data;
        nd_region->nd_set = ndr_desc->nd_set;
        nd_region->num_lanes = ndr_desc->num_lanes;
        nd_region->flags = ndr_desc->flags;
        nd_region->ro = ro;
        nd_region->numa_node = ndr_desc->numa_node;
        nd_region->target_node = ndr_desc->target_node;
        ida_init(&nd_region->ns_ida);
        ida_init(&nd_region->btt_ida);
        ida_init(&nd_region->pfn_ida);
        ida_init(&nd_region->dax_ida);
        dev = &nd_region->dev;
        dev_set_name(dev, "region%d", nd_region->id);
        dev->parent = &nvdimm_bus->dev;
        dev->type = dev_type;
        dev->groups = ndr_desc->attr_groups;
        dev->of_node = ndr_desc->of_node;
        nd_region->ndr_size = resource_size(ndr_desc->res);
        nd_region->ndr_start = ndr_desc->res->start;
        if (ndr_desc->flush)
                nd_region->flush = ndr_desc->flush;
        else
                nd_region->flush = NULL;

        nd_device_register(dev);

        return nd_region;

 err_percpu:
        ida_simple_remove(&region_ida, nd_region->id);
 err_id:
        kfree(region_buf);
        return NULL;
}

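/*
 * Illustrative provider-side usage of the constructors below (a sketch;
 * the descriptor fields follow struct nd_region_desc, while the
 * resource and attribute-group names here are hypothetical):
 *
 *	struct nd_region_desc ndr_desc = { };
 *
 *	ndr_desc.res = &pmem_res;
 *	ndr_desc.mapping = &mapping_desc;
 *	ndr_desc.num_mappings = 1;
 *	ndr_desc.attr_groups = provider_region_attribute_groups;
 *	ndr_desc.numa_node = NUMA_NO_NODE;
 *	region = nvdimm_pmem_region_create(nvdimm_bus, &ndr_desc);
 */
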
struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
                struct nd_region_desc *ndr_desc)
{
        ndr_desc->num_lanes = ND_MAX_LANES;
        return nd_region_create(nvdimm_bus, ndr_desc, &nd_pmem_device_type,
                        __func__);
}
EXPORT_SYMBOL_GPL(nvdimm_pmem_region_create);

struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
                struct nd_region_desc *ndr_desc)
{
        if (ndr_desc->num_mappings > 1)
                return NULL;
        ndr_desc->num_lanes = min(ndr_desc->num_lanes, ND_MAX_LANES);
        return nd_region_create(nvdimm_bus, ndr_desc, &nd_blk_device_type,
                        __func__);
}
EXPORT_SYMBOL_GPL(nvdimm_blk_region_create);

struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
                struct nd_region_desc *ndr_desc)
{
        ndr_desc->num_lanes = ND_MAX_LANES;
        return nd_region_create(nvdimm_bus, ndr_desc, &nd_volatile_device_type,
                        __func__);
}
EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);

int nvdimm_flush(struct nd_region *nd_region, struct bio *bio)
{
        int rc = 0;

        if (!nd_region->flush)
                rc = generic_nvdimm_flush(nd_region);
        else {
                if (nd_region->flush(nd_region, bio))
                        rc = -EIO;
        }

        return rc;
}
/**
 * generic_nvdimm_flush - flush any posted write queues between the cpu and pmem media
 * @nd_region: blk or interleaved pmem region
 */
int generic_nvdimm_flush(struct nd_region *nd_region)
{
        struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
        int i, idx;

        /*
         * Try to encourage some diversity in flush hint addresses
         * across cpus assuming a limited number of flush hints.
         */
        idx = this_cpu_read(flush_idx);
        idx = this_cpu_add_return(flush_idx, hash_32(current->pid + idx, 8));

        /*
         * The first wmb() is needed to 'sfence' all previous writes
         * such that they are architecturally visible for the platform
         * buffer flush.  Note that we've already arranged for pmem
         * writes to avoid the cache via memcpy_flushcache().  The final
         * wmb() ensures ordering for the NVDIMM flush write.
         */
        wmb();
        for (i = 0; i < nd_region->ndr_mappings; i++)
                if (ndrd_get_flush_wpq(ndrd, i, 0))
                        writeq(1, ndrd_get_flush_wpq(ndrd, i, idx));
        wmb();

        return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_flush);

/**
 * nvdimm_has_flush - determine write flushing requirements
 * @nd_region: blk or interleaved pmem region
 *
 * Returns 1 if writes require flushing
 * Returns 0 if writes do not require flushing
 * Returns -ENXIO if flushing capability can not be determined
 */
int nvdimm_has_flush(struct nd_region *nd_region)
{
        int i;

        /* no nvdimm or pmem api == flushing capability unknown */
        if (nd_region->ndr_mappings == 0
                        || !IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API))
                return -ENXIO;

        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm *nvdimm = nd_mapping->nvdimm;

                /* flush hints present / available */
                if (nvdimm->num_flush)
                        return 1;
        }

        /*
         * The platform defines dimm devices without hints; assume a
         * platform persistence mechanism like ADR.
         */
        return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_has_flush);

int nvdimm_has_cache(struct nd_region *nd_region)
{
        return is_nd_pmem(&nd_region->dev) &&
                !test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags);
}
EXPORT_SYMBOL_GPL(nvdimm_has_cache);

bool is_nvdimm_sync(struct nd_region *nd_region)
{
        return is_nd_pmem(&nd_region->dev) &&
                !test_bit(ND_REGION_ASYNC, &nd_region->flags);
}
EXPORT_SYMBOL_GPL(is_nvdimm_sync);

struct conflict_context {
        struct nd_region *nd_region;
        resource_size_t start, size;
};

static int region_conflict(struct device *dev, void *data)
{
        struct nd_region *nd_region;
        struct conflict_context *ctx = data;
        resource_size_t res_end, region_end, region_start;

        if (!is_memory(dev))
                return 0;

        nd_region = to_nd_region(dev);
        if (nd_region == ctx->nd_region)
                return 0;

        res_end = ctx->start + ctx->size;
        region_start = nd_region->ndr_start;
        region_end = region_start + nd_region->ndr_size;
        if (ctx->start >= region_start && ctx->start < region_end)
                return -EBUSY;
        if (res_end > region_start && res_end <= region_end)
                return -EBUSY;
        return 0;
}

int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
                resource_size_t size)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
        struct conflict_context ctx = {
                .nd_region = nd_region,
                .start = start,
                .size = size,
        };

        return device_for_each_child(&nvdimm_bus->dev, &ctx, region_conflict);
}

void __exit nd_region_devs_exit(void)
{
        ida_destroy(&region_ida);
}