1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright(c) 2021 Intel Corporation. All rights reserved. */
3 #include <linux/libnvdimm.h>
4 #include <asm/unaligned.h>
5 #include <linux/device.h>
6 #include <linux/module.h>
7 #include <linux/ndctl.h>
8 #include <linux/async.h>
9 #include <linux/slab.h>
13 /*
14  * Ordered workqueue for cxl nvdimm device arrival and departure
15  * to coordinate bus rescans when a bridge arrives and trigger remove
16  * operations when the bridge is removed.
17  */
/* Single ordered workqueue that serializes bridge state transitions (see cxl_nvb_update_state()). */
18 static struct workqueue_struct *cxl_pmem_wq;

/* Mailbox commands reserved for kernel use while a cxl_nvdimm is bound; set in cxl_pmem_init(). */
20 static __read_mostly DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);
/* devm action: release the exclusive-command reservation taken in cxl_nvdimm_probe(). */
22 static void clear_exclusive(void *cxlm)
24 clear_exclusive_cxl_commands(cxlm, exclusive_cmds);
/* devm action: tear down the nvdimm created in cxl_nvdimm_probe(). */
27 static void unregister_nvdimm(void *nvdimm)
29 nvdimm_delete(nvdimm);
/*
 * Bind a cxl_nvdimm device: locate its nvdimm bridge, reserve the
 * label-area (LSA) mailbox commands for the kernel, and register an
 * nvdimm on the bridge's nvdimm_bus.
 *
 * NOTE(review): several error-handling/return lines are not visible in
 * this excerpt; the control flow below is incomplete as shown.
 */
32 static int cxl_nvdimm_probe(struct device *dev)
34 struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev);
35 struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
36 unsigned long flags = 0, cmd_mask = 0;
37 struct cxl_mem *cxlm = cxlmd->cxlm;
38 struct cxl_nvdimm_bridge *cxl_nvb;
39 struct nvdimm *nvdimm;
/* The bridge owns the nvdimm_bus this device will register on. */
42 cxl_nvb = cxl_find_nvdimm_bridge(cxl_nvd);
/* Hold the bridge device lock so ->nvdimm_bus cannot change underneath us. */
46 device_lock(&cxl_nvb->dev);
47 if (!cxl_nvb->nvdimm_bus) {
/* Make the LSA-mutating commands exclusive to the kernel while bound. */
52 set_exclusive_cxl_commands(cxlm, exclusive_cmds);
53 rc = devm_add_action_or_reset(dev, clear_exclusive, cxlm);
/* Advertise labeling plus the config-area commands serviced by cxl_pmem_nvdimm_ctl(). */
57 set_bit(NDD_LABELING, &flags);
58 set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
59 set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
60 set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);
61 nvdimm = nvdimm_create(cxl_nvb->nvdimm_bus, cxl_nvd, NULL, flags,
/* Arrange for the nvdimm to be deleted automatically on unbind. */
68 dev_set_drvdata(dev, nvdimm);
69 rc = devm_add_action_or_reset(dev, unregister_nvdimm, nvdimm);
/* Drop the bridge lock and reference (cxl_find_nvdimm_bridge() presumably took one — confirm). */
71 device_unlock(&cxl_nvb->dev);
72 put_device(&cxl_nvb->dev);
/* cxl bus driver matching CXL_DEVICE_NVDIMM devices. */
77 static struct cxl_driver cxl_nvdimm_driver = {
79 .probe = cxl_nvdimm_probe,
80 .id = CXL_DEVICE_NVDIMM,
/*
 * Service ND_CMD_GET_CONFIG_SIZE: report the label-storage-area size and
 * the maximum per-transfer size (bounded by the mailbox payload size).
 * Rejects buffers too small to hold the reply structure.
 */
83 static int cxl_pmem_get_config_size(struct cxl_mem *cxlm,
84 struct nd_cmd_get_config_size *cmd,
87 if (sizeof(*cmd) > buf_len)
90 *cmd = (struct nd_cmd_get_config_size) {
91 .config_size = cxlm->lsa_size,
92 .max_xfer = cxlm->payload_size,
/*
 * Service ND_CMD_GET_CONFIG_DATA by translating it into a
 * CXL_MBOX_OP_GET_LSA mailbox command; the device reply lands directly
 * in cmd->out_buf. Validates that the caller's buffer covers both the
 * header and the requested in_length of output.
 */
98 static int cxl_pmem_get_config_data(struct cxl_mem *cxlm,
99 struct nd_cmd_get_config_data_hdr *cmd,
100 unsigned int buf_len)
102 struct cxl_mbox_get_lsa get_lsa;
105 if (sizeof(*cmd) > buf_len)
107 if (struct_size(cmd, out_buf, cmd->in_length) > buf_len)
/* Map the nd_cmd offset/length onto the CXL GET_LSA input payload. */
110 get_lsa = (struct cxl_mbox_get_lsa) {
111 .offset = cmd->in_offset,
112 .length = cmd->in_length,
115 rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_LSA, &get_lsa,
116 sizeof(get_lsa), cmd->out_buf,
/*
 * Service ND_CMD_SET_CONFIG_DATA by translating it into a
 * CXL_MBOX_OP_SET_LSA mailbox command. A contiguous header+data payload
 * is built with kvzalloc() since the mailbox wants offset/data adjacent.
 *
 * NOTE(review): allocation-failure and rc checks are elided in this
 * excerpt — confirm against the full file.
 */
123 static int cxl_pmem_set_config_data(struct cxl_mem *cxlm,
124 struct nd_cmd_set_config_hdr *cmd,
125 unsigned int buf_len)
127 struct cxl_mbox_set_lsa *set_lsa;
130 if (sizeof(*cmd) > buf_len)
133 /* 4-byte status follows the input data in the payload */
134 if (struct_size(cmd, in_buf, cmd->in_length) + 4 > buf_len)
/* Header plus in_length bytes of trailing flexible-array data. */
138 kvzalloc(struct_size(set_lsa, data, cmd->in_length), GFP_KERNEL);
142 *set_lsa = (struct cxl_mbox_set_lsa) {
143 .offset = cmd->in_offset,
145 memcpy(set_lsa->data, cmd->in_buf, cmd->in_length);
147 rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_SET_LSA, set_lsa,
148 struct_size(set_lsa, data, cmd->in_length),
152 * Set "firmware" status (4-packed bytes at the end of the input
/* put_unaligned: the status slot after in_buf[in_length] may not be 4-byte aligned. */
155 put_unaligned(0, (u32 *) &cmd->in_buf[cmd->in_length]);
/*
 * Per-nvdimm ioctl dispatch: reject commands not advertised in the
 * nvdimm's cmd_mask, then route the three supported config-area
 * commands to their CXL mailbox translations.
 */
161 static int cxl_pmem_nvdimm_ctl(struct nvdimm *nvdimm, unsigned int cmd,
162 void *buf, unsigned int buf_len)
164 struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
165 unsigned long cmd_mask = nvdimm_cmd_mask(nvdimm);
166 struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
167 struct cxl_mem *cxlm = cxlmd->cxlm;
/* Only commands enabled in cxl_nvdimm_probe()'s cmd_mask are allowed. */
169 if (!test_bit(cmd, &cmd_mask))
173 case ND_CMD_GET_CONFIG_SIZE:
174 return cxl_pmem_get_config_size(cxlm, buf, buf_len);
175 case ND_CMD_GET_CONFIG_DATA:
176 return cxl_pmem_get_config_data(cxlm, buf, buf_len);
177 case ND_CMD_SET_CONFIG_DATA:
178 return cxl_pmem_set_config_data(cxlm, buf, buf_len);
/*
 * Bus-level ndctl entry point (nvdimm_bus_descriptor.ndctl); forwards
 * per-dimm commands to cxl_pmem_nvdimm_ctl(). cmd_rc is deliberately
 * not populated — see the comment below.
 */
184 static int cxl_pmem_ctl(struct nvdimm_bus_descriptor *nd_desc,
185 struct nvdimm *nvdimm, unsigned int cmd, void *buf,
186 unsigned int buf_len, int *cmd_rc)
189 * No firmware response to translate, let the transport error
190 * code take precedence.
196 return cxl_pmem_nvdimm_ctl(nvdimm, cmd, buf, buf_len);
/*
 * Register the bridge's nvdimm_bus if it does not already exist.
 * Returns true when a bus is available afterwards. Caller is expected
 * to hold the bridge device lock (see cxl_nvb_update_state()).
 */
199 static bool online_nvdimm_bus(struct cxl_nvdimm_bridge *cxl_nvb)
201 if (cxl_nvb->nvdimm_bus)
203 cxl_nvb->nvdimm_bus =
204 nvdimm_bus_register(&cxl_nvb->dev, &cxl_nvb->nd_desc);
205 return cxl_nvb->nvdimm_bus != NULL;
/* bus_for_each_dev() callback: unbind the driver from cxl_nvdimm devices only. */
208 static int cxl_nvdimm_release_driver(struct device *dev, void *data)
210 if (!is_cxl_nvdimm(dev))
212 device_release_driver(dev);
/*
 * Tear down an nvdimm_bus: first detach all cxl_nvdimm drivers so no one
 * is using nvdimm objects when nvdimm_bus_unregister() destroys them.
 */
216 static void offline_nvdimm_bus(struct nvdimm_bus *nvdimm_bus)
222 * Set the state of cxl_nvdimm devices to unbound / idle before
223 * nvdimm_bus_unregister() rips the nvdimm objects out from
226 bus_for_each_dev(&cxl_bus_type, NULL, NULL, cxl_nvdimm_release_driver);
227 nvdimm_bus_unregister(nvdimm_bus);
/*
 * Workqueue handler (runs on the ordered cxl_pmem_wq) that applies the
 * bridge's requested state: register the nvdimm_bus when going online,
 * or capture it for teardown when going offline. The bus is detached
 * under the device lock but destroyed outside it.
 *
 * NOTE(review): some case labels / assignments are elided in this
 * excerpt; confirm the release/rescan conditions against the full file.
 */
230 static void cxl_nvb_update_state(struct work_struct *work)
232 struct cxl_nvdimm_bridge *cxl_nvb =
233 container_of(work, typeof(*cxl_nvb), state_work);
234 struct nvdimm_bus *victim_bus = NULL;
235 bool release = false, rescan = false;
/* State reads/writes are serialized by the bridge device lock. */
237 device_lock(&cxl_nvb->dev);
238 switch (cxl_nvb->state) {
240 if (!online_nvdimm_bus(cxl_nvb)) {
241 dev_err(&cxl_nvb->dev,
242 "failed to establish nvdimm bus\n");
247 case CXL_NVB_OFFLINE:
/* Detach the bus under the lock; actual unregister happens below, unlocked. */
249 victim_bus = cxl_nvb->nvdimm_bus;
250 cxl_nvb->nvdimm_bus = NULL;
255 device_unlock(&cxl_nvb->dev);
258 device_release_driver(&cxl_nvb->dev);
/* A new bus came online: rescan so waiting cxl_nvdimm devices can bind. */
260 int rc = bus_rescan_devices(&cxl_bus_type);
262 dev_dbg(&cxl_nvb->dev, "rescan: %d\n", rc);
264 offline_nvdimm_bus(victim_bus);
/* Pairs with the get_device() taken when this work was queued. */
266 put_device(&cxl_nvb->dev);
/*
 * Bridge unbind: demote ONLINE to OFFLINE and kick the state worker to
 * perform teardown. The reference is only taken when queue_work()
 * actually queued the work (it returns false if already pending);
 * put_device() in cxl_nvb_update_state() balances it.
 */
269 static void cxl_nvdimm_bridge_remove(struct device *dev)
271 struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev);
273 if (cxl_nvb->state == CXL_NVB_ONLINE)
274 cxl_nvb->state = CXL_NVB_OFFLINE;
275 if (queue_work(cxl_pmem_wq, &cxl_nvb->state_work))
276 get_device(&cxl_nvb->dev);
/*
 * Bridge bind: refuse DEAD bridges, perform one-time initialization of
 * the nvdimm_bus descriptor and state work on first probe (NEW), then
 * mark the bridge ONLINE and queue the state worker to register the
 * nvdimm_bus. Reference handling mirrors cxl_nvdimm_bridge_remove().
 */
279 static int cxl_nvdimm_bridge_probe(struct device *dev)
281 struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev);
283 if (cxl_nvb->state == CXL_NVB_DEAD)
/* First-time setup: descriptor routed through cxl_pmem_ctl(). */
286 if (cxl_nvb->state == CXL_NVB_NEW) {
287 cxl_nvb->nd_desc = (struct nvdimm_bus_descriptor) {
288 .provider_name = "CXL",
289 .module = THIS_MODULE,
290 .ndctl = cxl_pmem_ctl,
293 INIT_WORK(&cxl_nvb->state_work, cxl_nvb_update_state);
296 cxl_nvb->state = CXL_NVB_ONLINE;
297 if (queue_work(cxl_pmem_wq, &cxl_nvb->state_work))
298 get_device(&cxl_nvb->dev);
/* cxl bus driver matching CXL_DEVICE_NVDIMM_BRIDGE devices. */
303 static struct cxl_driver cxl_nvdimm_bridge_driver = {
304 .name = "cxl_nvdimm_bridge",
305 .probe = cxl_nvdimm_bridge_probe,
306 .remove = cxl_nvdimm_bridge_remove,
307 .id = CXL_DEVICE_NVDIMM_BRIDGE,
/*
 * Module init: mark the label-mutating commands as kernel-exclusive,
 * create the ordered workqueue, then register the bridge driver before
 * the nvdimm driver (bridge provides the bus the nvdimm needs).
 *
 * NOTE(review): the error-check lines between these calls are elided in
 * this excerpt; the unwind labels below imply goto-based cleanup.
 */
310 static __init int cxl_pmem_init(void)
314 set_bit(CXL_MEM_COMMAND_ID_SET_PARTITION_INFO, exclusive_cmds);
315 set_bit(CXL_MEM_COMMAND_ID_SET_SHUTDOWN_STATE, exclusive_cmds);
316 set_bit(CXL_MEM_COMMAND_ID_SET_LSA, exclusive_cmds);
318 cxl_pmem_wq = alloc_ordered_workqueue("cxl_pmem", 0);
322 rc = cxl_driver_register(&cxl_nvdimm_bridge_driver);
326 rc = cxl_driver_register(&cxl_nvdimm_driver);
/* Unwind in reverse order on failure. */
333 cxl_driver_unregister(&cxl_nvdimm_bridge_driver);
335 destroy_workqueue(cxl_pmem_wq);
/* Module exit: unregister drivers in reverse of registration order, then drop the workqueue. */
339 static __exit void cxl_pmem_exit(void)
341 cxl_driver_unregister(&cxl_nvdimm_driver);
342 cxl_driver_unregister(&cxl_nvdimm_bridge_driver);
343 destroy_workqueue(cxl_pmem_wq);
/*
 * Module boilerplate: license, init/exit hooks, CXL symbol-namespace
 * import, and device aliases so the cxl core can autoload this module
 * for bridge and nvdimm devices.
 */
346 MODULE_LICENSE("GPL v2");
347 module_init(cxl_pmem_init);
348 module_exit(cxl_pmem_exit);
349 MODULE_IMPORT_NS(CXL);
350 MODULE_ALIAS_CXL(CXL_DEVICE_NVDIMM_BRIDGE);
351 MODULE_ALIAS_CXL(CXL_DEVICE_NVDIMM);