// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/memregion.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"
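
/*
 * nd_region driver: activates libnvdimm region devices, registers their
 * namespaces, seeds btt/pfn/dax devices, and maintains the region-level
 * 'badblocks' list exposed via sysfs.
 */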

static int nd_region_probe(struct device *dev)
{
        int err, rc;
        static unsigned long once;
        struct nd_region_data *ndrd;
        struct nd_region *nd_region = to_nd_region(dev);
        struct range range = {
                .start = nd_region->ndr_start,
                .end = nd_region->ndr_start + nd_region->ndr_size - 1,
        };
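
        /* One-time hint when lane count sits between online and possible CPUs */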
        if (nd_region->num_lanes > num_online_cpus()
                        && nd_region->num_lanes < num_possible_cpus()
                        && !test_and_set_bit(0, &once)) {
                dev_dbg(dev, "online cpus (%d) < concurrent i/o lanes (%d) < possible cpus (%d)\n",
                                num_online_cpus(), nd_region->num_lanes,
                                num_possible_cpus());
                dev_dbg(dev, "setting nr_cpus=%d may yield better libnvdimm device performance\n",
                                nd_region->num_lanes);
        }

        rc = nd_region_activate(nd_region);
        if (rc)
                return rc;
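
        /*
         * Set up media-error ("badblocks") tracking and take a reference on
         * the 'badblocks' sysfs dirent so updates can be signalled to
         * userspace pollers.
         */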
        if (devm_init_badblocks(dev, &nd_region->bb))
                return -ENODEV;
        nd_region->bb_state =
                sysfs_get_dirent(nd_region->dev.kobj.sd, "badblocks");
        if (!nd_region->bb_state)
                dev_warn(dev, "'badblocks' notification disabled\n");
        nvdimm_badblocks_populate(nd_region, &nd_region->bb, &range);
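
        /*
         * rc counts successfully registered namespaces, err counts failures;
         * together they back the <regionX>/namespaces
         * "<async-registered>/<total>" attribute.
         */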
        rc = nd_region_register_namespaces(nd_region, &err);
        if (rc < 0)
                return rc;

        ndrd = dev_get_drvdata(dev);
        ndrd->ns_active = rc;
        ndrd->ns_count = rc + err;

        if (rc && err && rc == err)
                return -ENODEV;
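
        /* Seed devices let userspace instantiate the next btt/pfn/dax child */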
        nd_region->btt_seed = nd_btt_create(nd_region);
        nd_region->pfn_seed = nd_pfn_create(nd_region);
        nd_region->dax_seed = nd_dax_create(nd_region);
        if (err == 0)
                return 0;

        /*
         * Given multiple namespaces per region, we do not want to
         * disable all the successfully registered peer namespaces upon
         * a single registration failure. If userspace is missing a
         * namespace that it expects it can disable/re-enable the region
         * to retry discovery after correcting the failure.
         * <regionX>/namespaces returns the current
         * "<async-registered>/<total>" namespace count.
         */
        dev_err(dev, "failed to register %d namespace%s, continuing...\n",
                        err, err == 1 ? "" : "s");
        return 0;
}

static int child_unregister(struct device *dev, void *data)
{
        nd_device_unregister(dev, ND_SYNC);
        return 0;
}
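
/*
 * Unregister children synchronously, clear the seed pointers under the
 * nvdimm bus lock, drop the 'badblocks' sysfs reference, and invalidate
 * CPU caches so stale dirty lines cannot land in a future instance of the
 * region.
 */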
static void nd_region_remove(struct device *dev)
{
        struct nd_region *nd_region = to_nd_region(dev);

        device_for_each_child(dev, NULL, child_unregister);

        /* flush attribute readers and disable */
        nvdimm_bus_lock(dev);
        nd_region->ns_seed = NULL;
        nd_region->btt_seed = NULL;
        nd_region->pfn_seed = NULL;
        nd_region->dax_seed = NULL;
        dev_set_drvdata(dev, NULL);
        nvdimm_bus_unlock(dev);

        /*
         * Note, this assumes device_lock() context to not race
         * nd_region_notify()
         */
        sysfs_put(nd_region->bb_state);
        nd_region->bb_state = NULL;

        /*
         * Try to flush caches here since a disabled region may be subject to
         * secure erase while disabled, and previous dirty data should not be
         * written back to a new instance of the region. This only matters on
         * bare metal where security commands are available, so silent failure
         * here is ok.
         */
        if (cpu_cache_has_invalidate_memregion())
                cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
}

static int child_notify(struct device *dev, void *data)
{
        nd_device_notify(dev, *(enum nvdimm_event *) data);
        return 0;
}
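
/*
 * On NVDIMM_REVALIDATE_POISON, refresh the badblocks list for memory
 * regions and notify sysfs watchers, then forward the event to child
 * devices.
 */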
static void nd_region_notify(struct device *dev, enum nvdimm_event event)
{
        if (event == NVDIMM_REVALIDATE_POISON) {
                struct nd_region *nd_region = to_nd_region(dev);

                if (is_memory(&nd_region->dev)) {
                        struct range range = {
                                .start = nd_region->ndr_start,
                                .end = nd_region->ndr_start +
                                        nd_region->ndr_size - 1,
                        };

                        nvdimm_badblocks_populate(nd_region,
                                        &nd_region->bb, &range);
                        if (nd_region->bb_state)
                                sysfs_notify_dirent(nd_region->bb_state);
                }
        }
        device_for_each_child(dev, &event, child_notify);
}

static struct nd_device_driver nd_region_driver = {
        .probe = nd_region_probe,
        .remove = nd_region_remove,
        .notify = nd_region_notify,
        .drv = {
                .name = "nd_region",
        },
        .type = ND_DRIVER_REGION_BLK | ND_DRIVER_REGION_PMEM,
};

int __init nd_region_init(void)
{
        return nd_driver_register(&nd_region_driver);
}

void nd_region_exit(void)
{
        driver_unregister(&nd_region_driver.drv);
}

MODULE_ALIAS_ND_DEVICE(ND_DEVICE_REGION_PMEM);