/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/pmem.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>
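
/*
 * Illustrative sketch, not part of the original file: on 32-bit builds
 * the hi-lo header approximates a 64-bit MMIO store with two 32-bit
 * stores, upper half first, roughly as below. For the flush hint
 * writes in this file only the arrival of a store at the hint address
 * matters, so either ordering works.
 */
static inline void example_hi_lo_writeq(u64 val, volatile void __iomem *addr)
{
	writel(upper_32_bits(val), addr + 4);	/* high 32 bits first */
	writel(lower_32_bits(val), addr);	/* then the low 32 bits */
}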

static DEFINE_IDA(region_ida);
static DEFINE_PER_CPU(int, flush_idx);

static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
		struct nd_region_data *ndrd)
{
	int i, j;

	dev_dbg(dev, "%s: map %d flush address%s\n", nvdimm_name(nvdimm),
			nvdimm->num_flush, nvdimm->num_flush == 1 ? "" : "es");
	for (i = 0; i < (1 << ndrd->hints_shift); i++) {
		struct resource *res = &nvdimm->flush_wpq[i];
		unsigned long pfn = PHYS_PFN(res->start);
		void __iomem *flush_page;

		/* check if flush hints share a page */
		for (j = 0; j < i; j++) {
			struct resource *res_j = &nvdimm->flush_wpq[j];
			unsigned long pfn_j = PHYS_PFN(res_j->start);

			if (pfn == pfn_j)
				break;
		}

		if (j < i)
			flush_page = (void __iomem *) ((unsigned long)
					ndrd_get_flush_wpq(ndrd, dimm, j)
					& PAGE_MASK);
		else
			flush_page = devm_nvdimm_ioremap(dev,
					PFN_PHYS(pfn), PAGE_SIZE);
		if (!flush_page)
			return -ENXIO;
		ndrd_set_flush_wpq(ndrd, dimm, i, flush_page
				+ (res->start & ~PAGE_MASK));
	}

	return 0;
}

int nd_region_activate(struct nd_region *nd_region)
{
	int i, j, num_flush = 0;
	struct nd_region_data *ndrd;
	struct device *dev = &nd_region->dev;
	size_t flush_data_size = sizeof(void *);

	nvdimm_bus_lock(&nd_region->dev);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		/* at least one null hint slot per-dimm for the "no-hint" case */
		flush_data_size += sizeof(void *);
		num_flush = min_not_zero(num_flush, nvdimm->num_flush);
		if (!nvdimm->num_flush)
			continue;
		flush_data_size += nvdimm->num_flush * sizeof(void *);
	}
	nvdimm_bus_unlock(&nd_region->dev);

	ndrd = devm_kzalloc(dev, sizeof(*ndrd) + flush_data_size, GFP_KERNEL);
	if (!ndrd)
		return -ENOMEM;
	dev_set_drvdata(dev, ndrd);

	if (!num_flush)
		return 0;

	ndrd->hints_shift = ilog2(num_flush);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		int rc = nvdimm_map_flush(&nd_region->dev, nvdimm, i, ndrd);

		if (rc)
			return rc;
	}

	/*
	 * Clear out entries that are duplicates. This should prevent the
	 * unnecessary flushing of the same flush hint address more than once.
	 */
	for (i = 0; i < nd_region->ndr_mappings - 1; i++) {
		/* ignore if NULL already */
		if (!ndrd_get_flush_wpq(ndrd, i, 0))
			continue;

		for (j = i + 1; j < nd_region->ndr_mappings; j++)
			if (ndrd_get_flush_wpq(ndrd, i, 0) ==
			    ndrd_get_flush_wpq(ndrd, j, 0))
				ndrd_set_flush_wpq(ndrd, j, 0, NULL);
	}

	return 0;
}

static void nd_region_release(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);
	u16 i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		put_device(&nvdimm->dev);
	}
	free_percpu(nd_region->lane);
	ida_simple_remove(&region_ida, nd_region->id);
	if (is_nd_blk(dev))
		kfree(to_nd_blk_region(dev));
	else
		kfree(nd_region);
}

static struct device_type nd_blk_device_type = {
	.name = "nd_blk",
	.release = nd_region_release,
};

static struct device_type nd_pmem_device_type = {
	.name = "nd_pmem",
	.release = nd_region_release,
};

static struct device_type nd_volatile_device_type = {
	.name = "nd_volatile",
	.release = nd_region_release,
};

bool is_nd_pmem(struct device *dev)
{
	return dev ? dev->type == &nd_pmem_device_type : false;
}

bool is_nd_blk(struct device *dev)
{
	return dev ? dev->type == &nd_blk_device_type : false;
}

struct nd_region *to_nd_region(struct device *dev)
{
	struct nd_region *nd_region = container_of(dev, struct nd_region, dev);

	WARN_ON(dev->type->release != nd_region_release);
	return nd_region;
}
EXPORT_SYMBOL_GPL(to_nd_region);

struct nd_blk_region *to_nd_blk_region(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);

	WARN_ON(!is_nd_blk(dev));
	return container_of(nd_region, struct nd_blk_region, nd_region);
}
EXPORT_SYMBOL_GPL(to_nd_blk_region);

void *nd_region_provider_data(struct nd_region *nd_region)
{
	return nd_region->provider_data;
}
EXPORT_SYMBOL_GPL(nd_region_provider_data);

void *nd_blk_region_provider_data(struct nd_blk_region *ndbr)
{
	return ndbr->blk_provider_data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_provider_data);

void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data)
{
	ndbr->blk_provider_data = data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_set_provider_data);

/**
 * nd_region_to_nstype() - region to an integer namespace type
 * @nd_region: region-device to interrogate
 *
 * This is the 'nstype' attribute of a region as well, an input to the
 * MODALIAS for namespace devices, and bit number for an nvdimm_bus to
 * match namespace devices with namespace drivers.
 */
int nd_region_to_nstype(struct nd_region *nd_region)
{
	if (is_nd_pmem(&nd_region->dev)) {
		u16 i, alias;

		for (i = 0, alias = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			if (nvdimm->flags & NDD_ALIASING)
				alias++;
		}
		if (alias)
			return ND_DEVICE_NAMESPACE_PMEM;
		else
			return ND_DEVICE_NAMESPACE_IO;
	} else if (is_nd_blk(&nd_region->dev)) {
		return ND_DEVICE_NAMESPACE_BLK;
	}

	return 0;
}
EXPORT_SYMBOL(nd_region_to_nstype);

static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long size = 0;

	if (is_nd_pmem(dev)) {
		size = nd_region->ndr_size;
	} else if (nd_region->ndr_mappings == 1) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];

		size = nd_mapping->size;
	}

	return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(size);

static ssize_t mappings_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ndr_mappings);
}
static DEVICE_ATTR_RO(mappings);

static ssize_t nstype_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);

static ssize_t set_cookie_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (is_nd_pmem(dev) && nd_set)
		/* pass, should be precluded by region_visible */;
	else
		return -ENXIO;

	return sprintf(buf, "%#llx\n", nd_set->cookie);
}
static DEVICE_ATTR_RO(set_cookie);

resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
{
	resource_size_t blk_max_overlap = 0, available, overlap;
	int i;

	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));

 retry:
	available = 0;
	overlap = blk_max_overlap;
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

		/* if a dimm is disabled the available capacity is zero */
		if (!ndd)
			return 0;

		if (is_nd_pmem(&nd_region->dev)) {
			available += nd_pmem_available_dpa(nd_region,
					nd_mapping, &overlap);
			if (overlap > blk_max_overlap) {
				blk_max_overlap = overlap;
				goto retry;
			}
		} else if (is_nd_blk(&nd_region->dev))
			available += nd_blk_available_dpa(nd_region);
	}

	return available;
}

static ssize_t available_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long available = 0;

	/*
	 * Flush in-flight updates and grab a snapshot of the available
	 * size. Of course, this value is potentially invalidated the
	 * moment the nvdimm_bus_lock() is dropped, but that's userspace's
	 * problem to not race itself.
	 */
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	available = nd_region_available_dpa(nd_region);
	nvdimm_bus_unlock(dev);

	return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(available_size);

static ssize_t init_namespaces_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region_data *ndrd = dev_get_drvdata(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (ndrd)
		rc = sprintf(buf, "%d/%d\n", ndrd->ns_active, ndrd->ns_count);
	else
		rc = -ENXIO;
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(init_namespaces);

static ssize_t namespace_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->ns_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);
	return rc;
}
static DEVICE_ATTR_RO(namespace_seed);

static ssize_t btt_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->btt_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->btt_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(btt_seed);

static ssize_t pfn_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->pfn_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(pfn_seed);

static ssize_t dax_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->dax_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->dax_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(dax_seed);

static ssize_t read_only_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ro);
}

static ssize_t read_only_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool ro;
	int rc = strtobool(buf, &ro);
	struct nd_region *nd_region = to_nd_region(dev);

	if (rc)
		return rc;

	nd_region->ro = ro;
	return len;
}
static DEVICE_ATTR_RW(read_only);

static ssize_t nd_badblocks_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return badblocks_show(&nd_region->bb, buf, 0);
}
static struct device_attribute dev_attr_nd_badblocks = {
	.attr = {
		.name = "badblocks",
		.mode = S_IRUGO
	},
	.show = nd_badblocks_show,
};

static ssize_t resource_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%#llx\n", nd_region->ndr_start);
}
static DEVICE_ATTR_RO(resource);

static struct attribute *nd_region_attributes[] = {
	&dev_attr_size.attr,
	&dev_attr_nstype.attr,
	&dev_attr_mappings.attr,
	&dev_attr_btt_seed.attr,
	&dev_attr_pfn_seed.attr,
	&dev_attr_dax_seed.attr,
	&dev_attr_read_only.attr,
	&dev_attr_set_cookie.attr,
	&dev_attr_available_size.attr,
	&dev_attr_namespace_seed.attr,
	&dev_attr_init_namespaces.attr,
	&dev_attr_nd_badblocks.attr,
	&dev_attr_resource.attr,
	NULL,
};

static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	int type = nd_region_to_nstype(nd_region);

	if (!is_nd_pmem(dev) && a == &dev_attr_pfn_seed.attr)
		return 0;

	if (!is_nd_pmem(dev) && a == &dev_attr_dax_seed.attr)
		return 0;

	if (!is_nd_pmem(dev) && a == &dev_attr_nd_badblocks.attr)
		return 0;

	if (!is_nd_pmem(dev) && a == &dev_attr_resource.attr)
		return 0;

	if (a != &dev_attr_set_cookie.attr
			&& a != &dev_attr_available_size.attr)
		return a->mode;

	if ((type == ND_DEVICE_NAMESPACE_PMEM
				|| type == ND_DEVICE_NAMESPACE_BLK)
			&& a == &dev_attr_available_size.attr)
		return a->mode;
	else if (is_nd_pmem(dev) && nd_set)
		return a->mode;

	return 0;
}

struct attribute_group nd_region_attribute_group = {
	.attrs = nd_region_attributes,
	.is_visible = region_visible,
};
EXPORT_SYMBOL_GPL(nd_region_attribute_group);

u64 nd_region_interleave_set_cookie(struct nd_region *nd_region)
{
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (nd_set)
		return nd_set->cookie;
	return 0;
}

u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region)
{
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (nd_set)
		return nd_set->altcookie;
	return 0;
}

void nd_mapping_free_labels(struct nd_mapping *nd_mapping)
{
	struct nd_label_ent *label_ent, *e;

	lockdep_assert_held(&nd_mapping->lock);
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		list_del(&label_ent->list);
		kfree(label_ent);
	}
}

/*
 * Upon successful probe/remove, take/release a reference on the
 * associated interleave set (if present), and plant new btt + namespace
 * seeds. Also, on the removal of a BLK region, notify the provider to
 * disable the region.
 */
static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
		struct device *dev, bool probe)
{
	struct nd_region *nd_region;

	if (!probe && (is_nd_pmem(dev) || is_nd_blk(dev))) {
		int i;

		nd_region = to_nd_region(dev);
		for (i = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm_drvdata *ndd = nd_mapping->ndd;
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			mutex_lock(&nd_mapping->lock);
			nd_mapping_free_labels(nd_mapping);
			mutex_unlock(&nd_mapping->lock);

			put_ndd(ndd);
			nd_mapping->ndd = NULL;
			if (ndd)
				atomic_dec(&nvdimm->busy);
		}
	}
	if (dev->parent && (is_nd_blk(dev->parent) || is_nd_pmem(dev->parent))
			&& probe) {
		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->ns_seed == dev)
			nd_region_create_ns_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_btt(dev) && probe) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->btt_seed == dev)
			nd_region_create_btt_seed(nd_region);
		if (nd_region->ns_seed == &nd_btt->ndns->dev)
			nd_region_create_ns_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_pfn(dev) && probe) {
		struct nd_pfn *nd_pfn = to_nd_pfn(dev);

		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->pfn_seed == dev)
			nd_region_create_pfn_seed(nd_region);
		if (nd_region->ns_seed == &nd_pfn->ndns->dev)
			nd_region_create_ns_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_dax(dev) && probe) {
		struct nd_dax *nd_dax = to_nd_dax(dev);

		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->dax_seed == dev)
			nd_region_create_dax_seed(nd_region);
		if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev)
			nd_region_create_ns_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
}

void nd_region_probe_success(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
	nd_region_notify_driver_action(nvdimm_bus, dev, true);
}

void nd_region_disable(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
	nd_region_notify_driver_action(nvdimm_bus, dev, false);
}

static ssize_t mappingN(struct device *dev, char *buf, int n)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_mapping *nd_mapping;
	struct nvdimm *nvdimm;

	if (n >= nd_region->ndr_mappings)
		return -ENXIO;
	nd_mapping = &nd_region->mapping[n];
	nvdimm = nd_mapping->nvdimm;

	return sprintf(buf, "%s,%llu,%llu\n", dev_name(&nvdimm->dev),
			nd_mapping->start, nd_mapping->size);
}

#define REGION_MAPPING(idx) \
static ssize_t mapping##idx##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	return mappingN(dev, buf, idx);				\
}								\
static DEVICE_ATTR_RO(mapping##idx)

/*
 * 32 should be enough for a while, even in the presence of socket
 * interleave a 32-way interleave set is a degenerate case.
 */
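/*
 * Reconstructed from the mapping_attributes[] table below, which
 * references dev_attr_mapping0 through dev_attr_mapping31: the
 * REGION_MAPPING() helper is instantiated once per supported index.
 */
REGION_MAPPING(0);
REGION_MAPPING(1);
REGION_MAPPING(2);
REGION_MAPPING(3);
REGION_MAPPING(4);
REGION_MAPPING(5);
REGION_MAPPING(6);
REGION_MAPPING(7);
REGION_MAPPING(8);
REGION_MAPPING(9);
REGION_MAPPING(10);
REGION_MAPPING(11);
REGION_MAPPING(12);
REGION_MAPPING(13);
REGION_MAPPING(14);
REGION_MAPPING(15);
REGION_MAPPING(16);
REGION_MAPPING(17);
REGION_MAPPING(18);
REGION_MAPPING(19);
REGION_MAPPING(20);
REGION_MAPPING(21);
REGION_MAPPING(22);
REGION_MAPPING(23);
REGION_MAPPING(24);
REGION_MAPPING(25);
REGION_MAPPING(26);
REGION_MAPPING(27);
REGION_MAPPING(28);
REGION_MAPPING(29);
REGION_MAPPING(30);
REGION_MAPPING(31);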

static umode_t mapping_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nd_region *nd_region = to_nd_region(dev);

	if (n < nd_region->ndr_mappings)
		return a->mode;

	return 0;
}

static struct attribute *mapping_attributes[] = {
	&dev_attr_mapping0.attr,
	&dev_attr_mapping1.attr,
	&dev_attr_mapping2.attr,
	&dev_attr_mapping3.attr,
	&dev_attr_mapping4.attr,
	&dev_attr_mapping5.attr,
	&dev_attr_mapping6.attr,
	&dev_attr_mapping7.attr,
	&dev_attr_mapping8.attr,
	&dev_attr_mapping9.attr,
	&dev_attr_mapping10.attr,
	&dev_attr_mapping11.attr,
	&dev_attr_mapping12.attr,
	&dev_attr_mapping13.attr,
	&dev_attr_mapping14.attr,
	&dev_attr_mapping15.attr,
	&dev_attr_mapping16.attr,
	&dev_attr_mapping17.attr,
	&dev_attr_mapping18.attr,
	&dev_attr_mapping19.attr,
	&dev_attr_mapping20.attr,
	&dev_attr_mapping21.attr,
	&dev_attr_mapping22.attr,
	&dev_attr_mapping23.attr,
	&dev_attr_mapping24.attr,
	&dev_attr_mapping25.attr,
	&dev_attr_mapping26.attr,
	&dev_attr_mapping27.attr,
	&dev_attr_mapping28.attr,
	&dev_attr_mapping29.attr,
	&dev_attr_mapping30.attr,
	&dev_attr_mapping31.attr,
	NULL,
};

struct attribute_group nd_mapping_attribute_group = {
	.is_visible = mapping_visible,
	.attrs = mapping_attributes,
};
EXPORT_SYMBOL_GPL(nd_mapping_attribute_group);

int nd_blk_region_init(struct nd_region *nd_region)
{
	struct device *dev = &nd_region->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (nd_region->ndr_mappings < 1) {
		dev_err(dev, "invalid BLK region\n");
		return -ENXIO;
	}

	return to_nd_blk_region(dev)->enable(nvdimm_bus, dev);
}

/**
 * nd_region_acquire_lane - allocate and lock a lane
 * @nd_region: region id and number of lanes possible
 *
 * A lane correlates to a BLK-data-window and/or a log slot in the BTT.
 * We optimize for the common case where there are 256 lanes, one
 * per-cpu. For larger systems we need to lock to share lanes. For now
 * this implementation assumes the cost of maintaining an allocator for
 * free lanes is on the order of the lock hold time, so it implements a
 * static lane = cpu % num_lanes mapping.
 *
 * In the case of a BTT instance on top of a BLK namespace a lane may be
 * acquired recursively. We lock on the first instance.
 *
 * In the case of a BTT instance on top of PMEM, we only acquire a lane
 * for the BTT metadata updates.
 */
unsigned int nd_region_acquire_lane(struct nd_region *nd_region)
{
	unsigned int cpu, lane;

	cpu = get_cpu();
	if (nd_region->num_lanes < nr_cpu_ids) {
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		lane = cpu % nd_region->num_lanes;
		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (ndl_count->count++ == 0)
			spin_lock(&ndl_lock->lock);
	} else
		lane = cpu;

	return lane;
}
EXPORT_SYMBOL(nd_region_acquire_lane);

void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
{
	if (nd_region->num_lanes < nr_cpu_ids) {
		unsigned int cpu = get_cpu();
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (--ndl_count->count == 0)
			spin_unlock(&ndl_lock->lock);
		put_cpu();
	}
	put_cpu();
}
EXPORT_SYMBOL(nd_region_release_lane);
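
/*
 * Illustrative usage sketch (hypothetical helper, not part of the
 * original file): a BLK or BTT I/O path brackets its per-lane work
 * with acquire/release, mirroring how callers of this API are expected
 * to use it per the comment block above.
 */
static int example_do_io_on_lane(struct nd_region *nd_region)
{
	unsigned int lane;
	int rc = 0;

	lane = nd_region_acquire_lane(nd_region);
	/* ... perform BLK data-window or BTT log I/O bound to 'lane' ... */
	nd_region_release_lane(nd_region, lane);

	return rc;
}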

static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc, struct device_type *dev_type,
		const char *caller)
{
	struct nd_region *nd_region;
	struct device *dev;
	void *region_buf;
	unsigned int i;
	int ro = 0;

	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
		struct nvdimm *nvdimm = mapping->nvdimm;

		if ((mapping->start | mapping->size) % SZ_4K) {
			dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not 4K aligned\n",
					caller, dev_name(&nvdimm->dev), i);
			return NULL;
		}

		if (nvdimm->flags & NDD_UNARMED)
			ro = 1;
	}

	if (dev_type == &nd_blk_device_type) {
		struct nd_blk_region_desc *ndbr_desc;
		struct nd_blk_region *ndbr;

		ndbr_desc = to_blk_region_desc(ndr_desc);
		ndbr = kzalloc(sizeof(*ndbr) + sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		if (ndbr) {
			nd_region = &ndbr->nd_region;
			ndbr->enable = ndbr_desc->enable;
			ndbr->do_io = ndbr_desc->do_io;
		}
		region_buf = ndbr;
	} else {
		nd_region = kzalloc(sizeof(struct nd_region)
				+ sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		region_buf = nd_region;
	}

	if (!region_buf)
		return NULL;
	nd_region->id = ida_simple_get(&region_ida, 0, 0, GFP_KERNEL);
	if (nd_region->id < 0)
		goto err_id;

	nd_region->lane = alloc_percpu(struct nd_percpu_lane);
	if (!nd_region->lane)
		goto err_percpu;

	for (i = 0; i < nr_cpu_ids; i++) {
		struct nd_percpu_lane *ndl;

		ndl = per_cpu_ptr(nd_region->lane, i);
		spin_lock_init(&ndl->lock);
		ndl->count = 0;
	}

	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
		struct nvdimm *nvdimm = mapping->nvdimm;

		nd_region->mapping[i].nvdimm = nvdimm;
		nd_region->mapping[i].start = mapping->start;
		nd_region->mapping[i].size = mapping->size;
		INIT_LIST_HEAD(&nd_region->mapping[i].labels);
		mutex_init(&nd_region->mapping[i].lock);

		get_device(&nvdimm->dev);
	}
	nd_region->ndr_mappings = ndr_desc->num_mappings;
	nd_region->provider_data = ndr_desc->provider_data;
	nd_region->nd_set = ndr_desc->nd_set;
	nd_region->num_lanes = ndr_desc->num_lanes;
	nd_region->flags = ndr_desc->flags;
	nd_region->ro = ro;
	nd_region->numa_node = ndr_desc->numa_node;
	ida_init(&nd_region->ns_ida);
	ida_init(&nd_region->btt_ida);
	ida_init(&nd_region->pfn_ida);
	ida_init(&nd_region->dax_ida);
	dev = &nd_region->dev;
	dev_set_name(dev, "region%d", nd_region->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = dev_type;
	dev->groups = ndr_desc->attr_groups;
	nd_region->ndr_size = resource_size(ndr_desc->res);
	nd_region->ndr_start = ndr_desc->res->start;
	nd_device_register(dev);

	return nd_region;

 err_percpu:
	ida_simple_remove(&region_ida, nd_region->id);
 err_id:
	kfree(region_buf);
	return NULL;
}

struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_pmem_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_pmem_region_create);

struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	if (ndr_desc->num_mappings > 1)
		return NULL;
	ndr_desc->num_lanes = min(ndr_desc->num_lanes, ND_MAX_LANES);
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_blk_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_blk_region_create);

struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_volatile_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);

/**
 * nvdimm_flush - flush any posted write queues between the cpu and pmem media
 * @nd_region: blk or interleaved pmem region
 */
void nvdimm_flush(struct nd_region *nd_region)
{
	struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
	int i, idx;

	/*
	 * Try to encourage some diversity in flush hint addresses
	 * across cpus assuming a limited number of flush hints.
	 */
	idx = this_cpu_read(flush_idx);
	idx = this_cpu_add_return(flush_idx, hash_32(current->pid + idx, 8));

	/*
	 * The first wmb() is needed to 'sfence' all previous writes
	 * such that they are architecturally visible for the platform
	 * buffer flush. Note that we've already arranged for pmem
	 * writes to avoid the cache via arch_memcpy_to_pmem(). The
	 * final wmb() ensures ordering for the NVDIMM flush write.
	 */
	wmb();
	for (i = 0; i < nd_region->ndr_mappings; i++)
		if (ndrd_get_flush_wpq(ndrd, i, 0))
			writeq(1, ndrd_get_flush_wpq(ndrd, i, idx));
	wmb();
}
EXPORT_SYMBOL_GPL(nvdimm_flush);
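
/*
 * Illustrative usage sketch (hypothetical helper, not part of the
 * original file): a pmem-style consumer writes data with the
 * cache-bypassing pmem copy helpers and then calls nvdimm_flush() when
 * it needs a durability point, e.g. while servicing a flush request.
 */
static void example_make_durable(struct nd_region *nd_region)
{
	/* data was previously copied with the arch pmem copy helpers */
	nvdimm_flush(nd_region);	/* drain posted writes to media */
}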

/**
 * nvdimm_has_flush - determine write flushing requirements
 * @nd_region: blk or interleaved pmem region
 *
 * Returns 1 if writes require flushing
 * Returns 0 if writes do not require flushing
 * Returns -ENXIO if flushing capability can not be determined
 */
int nvdimm_has_flush(struct nd_region *nd_region)
{
	struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
	int i;

	/* no nvdimm == flushing capability unknown */
	if (nd_region->ndr_mappings == 0)
		return -ENXIO;

	for (i = 0; i < nd_region->ndr_mappings; i++)
		/* flush hints present, flushing required */
		if (ndrd_get_flush_wpq(ndrd, i, 0))
			return 1;

	/*
	 * The platform defines dimm devices without hints, assume
	 * platform persistence mechanism like ADR
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_has_flush);
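
/*
 * Illustrative usage sketch (hypothetical helper, not part of the
 * original file): a consumer can probe the flushing requirement once,
 * per the return values documented above, and only issue nvdimm_flush()
 * when the region actually exposes flush hints.
 */
static bool example_region_needs_flush(struct nd_region *nd_region)
{
	int rc = nvdimm_has_flush(nd_region);

	if (rc < 0)
		return true;	/* capability unknown, flush conservatively */
	return rc == 1;		/* 1: hints present, 0: rely on ADR-style persistence */
}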

void __exit nd_region_devs_exit(void)
{
	ida_destroy(&region_ida);
}