1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright(c) 2020 Intel Corporation. */
3 #include <linux/device.h>
4 #include <linux/slab.h>
/**
 * DOC: cxl pmem
 *
 * The core CXL PMEM infrastructure supports persistent memory
 * provisioning and serves as a bridge to the LIBNVDIMM subsystem. A CXL
 * 'bridge' device is added at the root of a CXL device topology if
 * platform firmware advertises at least one persistent memory capable
 * CXL window. That root-level bridge corresponds to a LIBNVDIMM 'bus'
 * device. Then for each cxl_memdev in the CXL device topology a bridge
 * device is added to host a LIBNVDIMM dimm object. When these bridges
 * are registered native LIBNVDIMM uapis are translated to CXL
 * operations, for example, namespace label access commands.
 */
/* ->release() for the bridge device: frees the embedding allocation */
static void cxl_nvdimm_bridge_release(struct device *dev)
{
	struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev);

	kfree(cxl_nvb);
}
30 static const struct attribute_group *cxl_nvdimm_bridge_attribute_groups[] = {
31 &cxl_base_attribute_group,
35 const struct device_type cxl_nvdimm_bridge_type = {
36 .name = "cxl_nvdimm_bridge",
37 .release = cxl_nvdimm_bridge_release,
38 .groups = cxl_nvdimm_bridge_attribute_groups,
41 struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev)
43 if (dev_WARN_ONCE(dev, dev->type != &cxl_nvdimm_bridge_type,
44 "not a cxl_nvdimm_bridge device\n"))
46 return container_of(dev, struct cxl_nvdimm_bridge, dev);
48 EXPORT_SYMBOL_GPL(to_cxl_nvdimm_bridge);
50 static struct cxl_nvdimm_bridge *
51 cxl_nvdimm_bridge_alloc(struct cxl_port *port)
53 struct cxl_nvdimm_bridge *cxl_nvb;
56 cxl_nvb = kzalloc(sizeof(*cxl_nvb), GFP_KERNEL);
58 return ERR_PTR(-ENOMEM);
62 cxl_nvb->state = CXL_NVB_NEW;
63 device_initialize(dev);
64 device_set_pm_not_required(dev);
65 dev->parent = &port->dev;
66 dev->bus = &cxl_bus_type;
67 dev->type = &cxl_nvdimm_bridge_type;
72 static void unregister_nvb(void *_cxl_nvb)
74 struct cxl_nvdimm_bridge *cxl_nvb = _cxl_nvb;
78 * If the bridge was ever activated then there might be in-flight state
79 * work to flush. Once the state has been changed to 'dead' then no new
80 * work can be queued by user-triggered bind.
82 device_lock(&cxl_nvb->dev);
83 flush = cxl_nvb->state != CXL_NVB_NEW;
84 cxl_nvb->state = CXL_NVB_DEAD;
85 device_unlock(&cxl_nvb->dev);
88 * Even though the device core will trigger device_release_driver()
89 * before the unregister, it does not know about the fact that
90 * cxl_nvdimm_bridge_driver defers ->remove() work. So, do the driver
91 * release not and flush it before tearing down the nvdimm device
94 device_release_driver(&cxl_nvb->dev);
96 flush_work(&cxl_nvb->state_work);
97 device_unregister(&cxl_nvb->dev);
101 * devm_cxl_add_nvdimm_bridge() - add the root of a LIBNVDIMM topology
102 * @host: platform firmware root device
103 * @port: CXL port at the root of a CXL topology
105 * Return: bridge device that can host cxl_nvdimm objects
107 struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
108 struct cxl_port *port)
110 struct cxl_nvdimm_bridge *cxl_nvb;
114 if (!IS_ENABLED(CONFIG_CXL_PMEM))
115 return ERR_PTR(-ENXIO);
117 cxl_nvb = cxl_nvdimm_bridge_alloc(port);
122 rc = dev_set_name(dev, "nvdimm-bridge");
126 rc = device_add(dev);
130 rc = devm_add_action_or_reset(host, unregister_nvb, cxl_nvb);
140 EXPORT_SYMBOL_GPL(devm_cxl_add_nvdimm_bridge);
/* ->release() for cxl_nvdimm devices: frees the embedding allocation */
static void cxl_nvdimm_release(struct device *dev)
{
	struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev);

	kfree(cxl_nvd);
}
149 static const struct attribute_group *cxl_nvdimm_attribute_groups[] = {
150 &cxl_base_attribute_group,
154 const struct device_type cxl_nvdimm_type = {
155 .name = "cxl_nvdimm",
156 .release = cxl_nvdimm_release,
157 .groups = cxl_nvdimm_attribute_groups,
160 bool is_cxl_nvdimm(struct device *dev)
162 return dev->type == &cxl_nvdimm_type;
164 EXPORT_SYMBOL_GPL(is_cxl_nvdimm);
166 struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev)
168 if (dev_WARN_ONCE(dev, !is_cxl_nvdimm(dev),
169 "not a cxl_nvdimm device\n"))
171 return container_of(dev, struct cxl_nvdimm, dev);
173 EXPORT_SYMBOL_GPL(to_cxl_nvdimm);
175 static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_memdev *cxlmd)
177 struct cxl_nvdimm *cxl_nvd;
180 cxl_nvd = kzalloc(sizeof(*cxl_nvd), GFP_KERNEL);
182 return ERR_PTR(-ENOMEM);
185 cxl_nvd->cxlmd = cxlmd;
186 device_initialize(dev);
187 device_set_pm_not_required(dev);
188 dev->parent = &cxlmd->dev;
189 dev->bus = &cxl_bus_type;
190 dev->type = &cxl_nvdimm_type;
196 * devm_cxl_add_nvdimm() - add a bridge between a cxl_memdev and an nvdimm
197 * @host: same host as @cxlmd
198 * @cxlmd: cxl_memdev instance that will perform LIBNVDIMM operations
200 * Return: 0 on success negative error code on failure.
202 int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd)
204 struct cxl_nvdimm *cxl_nvd;
208 cxl_nvd = cxl_nvdimm_alloc(cxlmd);
210 return PTR_ERR(cxl_nvd);
213 rc = dev_set_name(dev, "pmem%d", cxlmd->id);
217 rc = device_add(dev);
221 dev_dbg(host, "%s: register %s\n", dev_name(dev->parent),
224 return devm_add_action_or_reset(host, unregister_cxl_dev, dev);
230 EXPORT_SYMBOL_GPL(devm_cxl_add_nvdimm);