// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include "cxl.h"

/**
 * DOC: cxl core
 *
 * The CXL core provides a sysfs hierarchy for control devices and a rendezvous
 * point for cross-device interleave coordination through cxl ports.
 */

static DEFINE_IDA(cxl_port_ida);
static ssize_t devtype_show(struct device *dev, struct device_attribute *attr,
                            char *buf)
{
        return sysfs_emit(buf, "%s\n", dev->type->name);
}
static DEVICE_ATTR_RO(devtype);

static struct attribute *cxl_base_attributes[] = {
        &dev_attr_devtype.attr,
        NULL,
};

static struct attribute_group cxl_base_attribute_group = {
        .attrs = cxl_base_attributes,
};

static ssize_t start_show(struct device *dev, struct device_attribute *attr,
                          char *buf)
{
        struct cxl_decoder *cxld = to_cxl_decoder(dev);

        return sysfs_emit(buf, "%#llx\n", cxld->range.start);
}
static DEVICE_ATTR_RO(start);

static ssize_t size_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
{
        struct cxl_decoder *cxld = to_cxl_decoder(dev);

        return sysfs_emit(buf, "%#llx\n", range_len(&cxld->range));
}
static DEVICE_ATTR_RO(size);

#define CXL_DECODER_FLAG_ATTR(name, flag)                            \
static ssize_t name##_show(struct device *dev,                       \
                           struct device_attribute *attr, char *buf) \
{                                                                    \
        struct cxl_decoder *cxld = to_cxl_decoder(dev);              \
                                                                     \
        return sysfs_emit(buf, "%s\n",                               \
                          (cxld->flags & (flag)) ? "1" : "0");       \
}                                                                    \
static DEVICE_ATTR_RO(name)

CXL_DECODER_FLAG_ATTR(cap_pmem, CXL_DECODER_F_PMEM);
CXL_DECODER_FLAG_ATTR(cap_ram, CXL_DECODER_F_RAM);
CXL_DECODER_FLAG_ATTR(cap_type2, CXL_DECODER_F_TYPE2);
CXL_DECODER_FLAG_ATTR(cap_type3, CXL_DECODER_F_TYPE3);
CXL_DECODER_FLAG_ATTR(locked, CXL_DECODER_F_LOCK);
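
/*
 * For reference (explanatory note, not in the original source): each
 * CXL_DECODER_FLAG_ATTR() use above expands to a sysfs show function plus a
 * read-only attribute, e.g. CXL_DECODER_FLAG_ATTR(cap_pmem,
 * CXL_DECODER_F_PMEM) yields cap_pmem_show() and dev_attr_cap_pmem, which
 * emits "1" or "0" depending on whether CXL_DECODER_F_PMEM is set in
 * cxld->flags.
 */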

static ssize_t target_type_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        struct cxl_decoder *cxld = to_cxl_decoder(dev);

        switch (cxld->target_type) {
        case CXL_DECODER_ACCELERATOR:
                return sysfs_emit(buf, "accelerator\n");
        case CXL_DECODER_EXPANDER:
                return sysfs_emit(buf, "expander\n");
        }
        return -ENXIO;
}
static DEVICE_ATTR_RO(target_type);

static ssize_t target_list_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        struct cxl_decoder *cxld = to_cxl_decoder(dev);
        ssize_t offset = 0;
        int i, rc = 0;

        device_lock(dev);
        for (i = 0; i < cxld->interleave_ways; i++) {
                struct cxl_dport *dport = cxld->target[i];
                struct cxl_dport *next = NULL;

                if (!dport)
                        break;

                if (i + 1 < cxld->interleave_ways)
                        next = cxld->target[i + 1];
                rc = sysfs_emit_at(buf, offset, "%d%s", dport->port_id,
                                   next ? "," : "");
                if (rc < 0)
                        break;
                offset += rc;
        }
        device_unlock(dev);

        if (rc < 0)
                return rc;

        rc = sysfs_emit_at(buf, offset, "\n");
        if (rc < 0)
                return rc;

        return offset + rc;
}
static DEVICE_ATTR_RO(target_list);
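
/*
 * Illustrative note (not in the original source): for a 4-way interleaved
 * decoder targeting dports 0 through 3, target_list_show() emits "0,1,2,3\n",
 * i.e. a comma-separated list of dport ids in interleave order.
 */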

static struct attribute *cxl_decoder_base_attrs[] = {
        &dev_attr_start.attr,
        &dev_attr_size.attr,
        &dev_attr_locked.attr,
        &dev_attr_target_list.attr,
        NULL,
};

static struct attribute_group cxl_decoder_base_attribute_group = {
        .attrs = cxl_decoder_base_attrs,
};

static struct attribute *cxl_decoder_root_attrs[] = {
        &dev_attr_cap_pmem.attr,
        &dev_attr_cap_ram.attr,
        &dev_attr_cap_type2.attr,
        &dev_attr_cap_type3.attr,
        NULL,
};

static struct attribute_group cxl_decoder_root_attribute_group = {
        .attrs = cxl_decoder_root_attrs,
};

static const struct attribute_group *cxl_decoder_root_attribute_groups[] = {
        &cxl_decoder_root_attribute_group,
        &cxl_decoder_base_attribute_group,
        &cxl_base_attribute_group,
        NULL,
};

static struct attribute *cxl_decoder_switch_attrs[] = {
        &dev_attr_target_type.attr,
        NULL,
};

static struct attribute_group cxl_decoder_switch_attribute_group = {
        .attrs = cxl_decoder_switch_attrs,
};

static const struct attribute_group *cxl_decoder_switch_attribute_groups[] = {
        &cxl_decoder_switch_attribute_group,
        &cxl_decoder_base_attribute_group,
        &cxl_base_attribute_group,
        NULL,
};

static void cxl_decoder_release(struct device *dev)
{
        struct cxl_decoder *cxld = to_cxl_decoder(dev);
        struct cxl_port *port = to_cxl_port(dev->parent);

        ida_free(&port->decoder_ida, cxld->id);
        kfree(cxld);
}

static const struct device_type cxl_decoder_switch_type = {
        .name = "cxl_decoder_switch",
        .release = cxl_decoder_release,
        .groups = cxl_decoder_switch_attribute_groups,
};

static const struct device_type cxl_decoder_root_type = {
        .name = "cxl_decoder_root",
        .release = cxl_decoder_release,
        .groups = cxl_decoder_root_attribute_groups,
};

bool is_root_decoder(struct device *dev)
{
        return dev->type == &cxl_decoder_root_type;
}
EXPORT_SYMBOL_GPL(is_root_decoder);

struct cxl_decoder *to_cxl_decoder(struct device *dev)
{
        if (dev_WARN_ONCE(dev, dev->type->release != cxl_decoder_release,
                          "not a cxl_decoder device\n"))
                return NULL;
        return container_of(dev, struct cxl_decoder, dev);
}
EXPORT_SYMBOL_GPL(to_cxl_decoder);

static void cxl_dport_release(struct cxl_dport *dport)
{
        list_del(&dport->list);
        put_device(dport->dport);
        kfree(dport);
}

static void cxl_port_release(struct device *dev)
{
        struct cxl_port *port = to_cxl_port(dev);
        struct cxl_dport *dport, *_d;

        device_lock(dev);
        list_for_each_entry_safe(dport, _d, &port->dports, list)
                cxl_dport_release(dport);
        device_unlock(dev);
        ida_free(&cxl_port_ida, port->id);
        kfree(port);
}

static const struct attribute_group *cxl_port_attribute_groups[] = {
        &cxl_base_attribute_group,
        NULL,
};

static const struct device_type cxl_port_type = {
        .name = "cxl_port",
        .release = cxl_port_release,
        .groups = cxl_port_attribute_groups,
};

struct cxl_port *to_cxl_port(struct device *dev)
{
        if (dev_WARN_ONCE(dev, dev->type != &cxl_port_type,
                          "not a cxl_port device\n"))
                return NULL;
        return container_of(dev, struct cxl_port, dev);
}

static void unregister_port(void *_port)
{
        struct cxl_port *port = _port;
        struct cxl_dport *dport;

        device_lock(&port->dev);
        list_for_each_entry(dport, &port->dports, list) {
                char link_name[CXL_TARGET_STRLEN];

                if (snprintf(link_name, CXL_TARGET_STRLEN, "dport%d",
                             dport->port_id) >= CXL_TARGET_STRLEN)
                        continue;
                sysfs_remove_link(&port->dev.kobj, link_name);
        }
        device_unlock(&port->dev);
        device_unregister(&port->dev);
}

static void cxl_unlink_uport(void *_port)
{
        struct cxl_port *port = _port;

        sysfs_remove_link(&port->dev.kobj, "uport");
}

static int devm_cxl_link_uport(struct device *host, struct cxl_port *port)
{
        int rc;

        rc = sysfs_create_link(&port->dev.kobj, &port->uport->kobj, "uport");
        if (rc)
                return rc;
        return devm_add_action_or_reset(host, cxl_unlink_uport, port);
}

static struct cxl_port *cxl_port_alloc(struct device *uport,
                                       resource_size_t component_reg_phys,
                                       struct cxl_port *parent_port)
{
        struct cxl_port *port;
        struct device *dev;
        int rc;

        port = kzalloc(sizeof(*port), GFP_KERNEL);
        if (!port)
                return ERR_PTR(-ENOMEM);

        rc = ida_alloc(&cxl_port_ida, GFP_KERNEL);
        if (rc < 0)
                goto err;
        port->id = rc;

        /*
         * The top-level cxl_port "cxl_root" does not have a cxl_port as
         * its parent and it does not have any corresponding component
         * registers as its decode is described by a fixed platform
         * description.
         */
        dev = &port->dev;
        if (parent_port)
                dev->parent = &parent_port->dev;
        else
                dev->parent = uport;

        port->uport = uport;
        port->component_reg_phys = component_reg_phys;
        ida_init(&port->decoder_ida);
        INIT_LIST_HEAD(&port->dports);

        device_initialize(dev);
        device_set_pm_not_required(dev);
        dev->bus = &cxl_bus_type;
        dev->type = &cxl_port_type;

        return port;

err:
        kfree(port);
        return ERR_PTR(rc);
}

/**
 * devm_cxl_add_port - register a cxl_port in CXL memory decode hierarchy
 * @host: host device for devm operations
 * @uport: "physical" device implementing this upstream port
 * @component_reg_phys: (optional) for configurable cxl_port instances
 * @parent_port: next hop up in the CXL memory decode hierarchy
 */
struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport,
                                   resource_size_t component_reg_phys,
                                   struct cxl_port *parent_port)
{
        struct cxl_port *port;
        struct device *dev;
        int rc;

        port = cxl_port_alloc(uport, component_reg_phys, parent_port);
        if (IS_ERR(port))
                return port;

        dev = &port->dev;
        if (parent_port)
                rc = dev_set_name(dev, "port%d", port->id);
        else
                rc = dev_set_name(dev, "root%d", port->id);
        if (rc)
                goto err;

        rc = device_add(dev);
        if (rc)
                goto err;

        rc = devm_add_action_or_reset(host, unregister_port, port);
        if (rc)
                return ERR_PTR(rc);

        rc = devm_cxl_link_uport(host, port);
        if (rc)
                return ERR_PTR(rc);

        return port;

err:
        put_device(dev);
        return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(devm_cxl_add_port);
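
/*
 * Example usage (illustrative sketch of how a platform driver builds the
 * hierarchy; error handling abbreviated). A NULL @parent_port makes this the
 * "root0" anchor of the decode hierarchy; descendant ports pass their parent
 * cxl_port instead:
 *
 *      root_port = devm_cxl_add_port(host, host, CXL_RESOURCE_NONE, NULL);
 *      if (IS_ERR(root_port))
 *              return PTR_ERR(root_port);
 */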

static struct cxl_dport *find_dport(struct cxl_port *port, int id)
{
        struct cxl_dport *dport;

        device_lock_assert(&port->dev);
        list_for_each_entry(dport, &port->dports, list)
                if (dport->port_id == id)
                        return dport;
        return NULL;
}

static int add_dport(struct cxl_port *port, struct cxl_dport *new)
{
        struct cxl_dport *dup;

        device_lock(&port->dev);
        dup = find_dport(port, new->port_id);
        if (dup)
                dev_err(&port->dev,
                        "unable to add dport%d-%s non-unique port id (%s)\n",
                        new->port_id, dev_name(new->dport),
                        dev_name(dup->dport));
        else
                list_add_tail(&new->list, &port->dports);
        device_unlock(&port->dev);

        return dup ? -EEXIST : 0;
}

/**
 * cxl_add_dport - append downstream port data to a cxl_port
 * @port: the cxl_port that references this dport
 * @dport_dev: firmware or PCI device representing the dport
 * @port_id: identifier for this dport in a decoder's target list
 * @component_reg_phys: optional location of CXL component registers
 *
 * Note that all allocations and links are undone by cxl_port deletion
 * and release.
 */
int cxl_add_dport(struct cxl_port *port, struct device *dport_dev, int port_id,
                  resource_size_t component_reg_phys)
{
        char link_name[CXL_TARGET_STRLEN];
        struct cxl_dport *dport;
        int rc;

        if (snprintf(link_name, CXL_TARGET_STRLEN, "dport%d", port_id) >=
            CXL_TARGET_STRLEN)
                return -EINVAL;

        dport = kzalloc(sizeof(*dport), GFP_KERNEL);
        if (!dport)
                return -ENOMEM;

        INIT_LIST_HEAD(&dport->list);
        dport->dport = get_device(dport_dev);
        dport->port_id = port_id;
        dport->component_reg_phys = component_reg_phys;
        dport->port = port;

        rc = add_dport(port, dport);
        if (rc)
                goto err;

        rc = sysfs_create_link(&port->dev.kobj, &dport_dev->kobj, link_name);
        if (rc)
                goto err;

        return 0;
err:
        cxl_dport_release(dport);
        return rc;
}
EXPORT_SYMBOL_GPL(cxl_add_dport);
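
/*
 * Example usage (illustrative sketch): a host bridge enumerator might
 * register each downstream port with its component register base, where
 * @bridge and @chbcr are assumptions supplied by that caller:
 *
 *      rc = cxl_add_dport(root_port, bridge, port_id, chbcr);
 *      if (rc)
 *              return rc;
 */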

static struct cxl_decoder *
cxl_decoder_alloc(struct cxl_port *port, int nr_targets, resource_size_t base,
                  resource_size_t len, int interleave_ways,
                  int interleave_granularity, enum cxl_decoder_type type,
                  unsigned long flags)
{
        struct cxl_decoder *cxld;
        struct device *dev;
        int rc = 0;

        if (interleave_ways < 1)
                return ERR_PTR(-EINVAL);

        device_lock(&port->dev);
        if (list_empty(&port->dports))
                rc = -EINVAL;
        device_unlock(&port->dev);
        if (rc)
                return ERR_PTR(rc);

        cxld = kzalloc(struct_size(cxld, target, nr_targets), GFP_KERNEL);
        if (!cxld)
                return ERR_PTR(-ENOMEM);

        rc = ida_alloc(&port->decoder_ida, GFP_KERNEL);
        if (rc < 0)
                goto err;

        *cxld = (struct cxl_decoder) {
                .id = rc,
                .range = {
                        .start = base,
                        .end = base + len - 1,
                },
                .flags = flags,
                .interleave_ways = interleave_ways,
                .interleave_granularity = interleave_granularity,
                .target_type = type,
        };

        /* handle implied target_list */
        if (interleave_ways == 1)
                cxld->target[0] =
                        list_first_entry(&port->dports, struct cxl_dport, list);
        dev = &cxld->dev;
        device_initialize(dev);
        device_set_pm_not_required(dev);
        dev->parent = &port->dev;
        dev->bus = &cxl_bus_type;

        /* root ports do not have a cxl_port_type parent */
        if (port->dev.parent->type == &cxl_port_type)
                dev->type = &cxl_decoder_switch_type;
        else
                dev->type = &cxl_decoder_root_type;

        return cxld;
err:
        kfree(cxld);
        return ERR_PTR(rc);
}

static void unregister_dev(void *dev)
{
        device_unregister(dev);
}

struct cxl_decoder *
devm_cxl_add_decoder(struct device *host, struct cxl_port *port, int nr_targets,
                     resource_size_t base, resource_size_t len,
                     int interleave_ways, int interleave_granularity,
                     enum cxl_decoder_type type, unsigned long flags)
{
        struct cxl_decoder *cxld;
        struct device *dev;
        int rc;

        cxld = cxl_decoder_alloc(port, nr_targets, base, len, interleave_ways,
                                 interleave_granularity, type, flags);
        if (IS_ERR(cxld))
                return cxld;

        dev = &cxld->dev;
        rc = dev_set_name(dev, "decoder%d.%d", port->id, cxld->id);
        if (rc)
                goto err;

        rc = device_add(dev);
        if (rc)
                goto err;

        rc = devm_add_action_or_reset(host, unregister_dev, dev);
        if (rc)
                return ERR_PTR(rc);
        return cxld;

err:
        put_device(dev);
        return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(devm_cxl_add_decoder);
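
/*
 * Example usage (illustrative sketch): a platform driver publishing a fixed
 * memory window as a root decoder; @base, @size, @ways, and @gran are
 * assumptions supplied by the platform description:
 *
 *      cxld = devm_cxl_add_decoder(host, root_port, ways, base, size,
 *                                  ways, gran, CXL_DECODER_EXPANDER,
 *                                  CXL_DECODER_F_TYPE3);
 *      if (IS_ERR(cxld))
 *              return PTR_ERR(cxld);
 */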

/**
 * cxl_probe_component_regs() - Detect CXL Component register blocks
 * @dev: Host device of the @base mapping
 * @base: Mapping containing the HDM Decoder Capability Header
 * @map: Map object describing the register block information found
 *
 * See CXL 2.0 8.2.4 Component Register Layout and Definition
 * See CXL 2.0 8.2.5.5 CXL Device Register Interface
 *
 * Probe for component register information and return it in map object.
 */
void cxl_probe_component_regs(struct device *dev, void __iomem *base,
                              struct cxl_component_reg_map *map)
{
        int cap, cap_count;
        u64 cap_array;

        *map = (struct cxl_component_reg_map) { 0 };

        /*
         * CXL.cache and CXL.mem registers are at offset 0x1000 as defined in
         * CXL 2.0 8.2.4 Table 141.
         */
        base += CXL_CM_OFFSET;

        cap_array = readq(base + CXL_CM_CAP_HDR_OFFSET);

        if (FIELD_GET(CXL_CM_CAP_HDR_ID_MASK, cap_array) != CM_CAP_HDR_CAP_ID) {
                dev_err(dev,
                        "Couldn't locate the CXL.cache and CXL.mem capability array header.\n");
                return;
        }

        /* It's assumed that future versions will be backward compatible */
        cap_count = FIELD_GET(CXL_CM_CAP_HDR_ARRAY_SIZE_MASK, cap_array);

        for (cap = 1; cap <= cap_count; cap++) {
                void __iomem *register_block;
                u32 hdr;
                int decoder_cnt;
                u16 cap_id, offset;
                u32 length;

                hdr = readl(base + cap * 0x4);

                cap_id = FIELD_GET(CXL_CM_CAP_HDR_ID_MASK, hdr);
                offset = FIELD_GET(CXL_CM_CAP_PTR_MASK, hdr);
                register_block = base + offset;

                switch (cap_id) {
                case CXL_CM_CAP_CAP_ID_HDM:
                        dev_dbg(dev, "found HDM decoder capability (0x%x)\n",
                                offset);

                        hdr = readl(register_block);

                        decoder_cnt = cxl_hdm_decoder_count(hdr);
                        length = 0x20 * decoder_cnt + 0x10;

                        map->hdm_decoder.valid = true;
                        map->hdm_decoder.offset = CXL_CM_OFFSET + offset;
                        map->hdm_decoder.size = length;
                        break;
                default:
                        dev_dbg(dev, "Unknown CM cap ID: %d (0x%x)\n", cap_id,
                                offset);
                        break;
                }
        }
}
EXPORT_SYMBOL_GPL(cxl_probe_component_regs);
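
/*
 * Worked example (explanatory note, not in the original source): the HDM
 * decoder capability block is a 0x10-byte header followed by 0x20 bytes of
 * registers per decoder, hence length = 0x20 * decoder_cnt + 0x10 above;
 * e.g. an implementation with 10 decoders maps 0x20 * 10 + 0x10 = 0x150
 * bytes.
 */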

static void cxl_nvdimm_bridge_release(struct device *dev)
{
        struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev);

        kfree(cxl_nvb);
}

static const struct attribute_group *cxl_nvdimm_bridge_attribute_groups[] = {
        &cxl_base_attribute_group,
        NULL,
};

static const struct device_type cxl_nvdimm_bridge_type = {
        .name = "cxl_nvdimm_bridge",
        .release = cxl_nvdimm_bridge_release,
        .groups = cxl_nvdimm_bridge_attribute_groups,
};

struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev)
{
        if (dev_WARN_ONCE(dev, dev->type != &cxl_nvdimm_bridge_type,
                          "not a cxl_nvdimm_bridge device\n"))
                return NULL;
        return container_of(dev, struct cxl_nvdimm_bridge, dev);
}
EXPORT_SYMBOL_GPL(to_cxl_nvdimm_bridge);

static struct cxl_nvdimm_bridge *
cxl_nvdimm_bridge_alloc(struct cxl_port *port)
{
        struct cxl_nvdimm_bridge *cxl_nvb;
        struct device *dev;

        cxl_nvb = kzalloc(sizeof(*cxl_nvb), GFP_KERNEL);
        if (!cxl_nvb)
                return ERR_PTR(-ENOMEM);

        dev = &cxl_nvb->dev;
        cxl_nvb->port = port;
        cxl_nvb->state = CXL_NVB_NEW;
        device_initialize(dev);
        device_set_pm_not_required(dev);
        dev->parent = &port->dev;
        dev->bus = &cxl_bus_type;
        dev->type = &cxl_nvdimm_bridge_type;

        return cxl_nvb;
}

static void unregister_nvb(void *_cxl_nvb)
{
        struct cxl_nvdimm_bridge *cxl_nvb = _cxl_nvb;
        bool flush;

        /*
         * If the bridge was ever activated then there might be in-flight state
         * work to flush. Once the state has been changed to 'dead' then no new
         * work can be queued by user-triggered bind.
         */
        device_lock(&cxl_nvb->dev);
        flush = cxl_nvb->state != CXL_NVB_NEW;
        cxl_nvb->state = CXL_NVB_DEAD;
        device_unlock(&cxl_nvb->dev);

        /*
         * Even though the device core will trigger device_release_driver()
         * before the unregister, it does not know about the fact that
         * cxl_nvdimm_bridge_driver defers ->remove() work. So, do the driver
         * release now and flush it before tearing down the nvdimm device
         * hierarchy.
         */
        device_release_driver(&cxl_nvb->dev);
        if (flush)
                flush_work(&cxl_nvb->state_work);
        device_unregister(&cxl_nvb->dev);
}

struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
                                                     struct cxl_port *port)
{
        struct cxl_nvdimm_bridge *cxl_nvb;
        struct device *dev;
        int rc;

        if (!IS_ENABLED(CONFIG_CXL_PMEM))
                return ERR_PTR(-ENXIO);

        cxl_nvb = cxl_nvdimm_bridge_alloc(port);
        if (IS_ERR(cxl_nvb))
                return cxl_nvb;

        dev = &cxl_nvb->dev;
        rc = dev_set_name(dev, "nvdimm-bridge");
        if (rc)
                goto err;

        rc = device_add(dev);
        if (rc)
                goto err;

        rc = devm_add_action_or_reset(host, unregister_nvb, cxl_nvb);
        if (rc)
                return ERR_PTR(rc);

        return cxl_nvb;

err:
        put_device(dev);
        return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(devm_cxl_add_nvdimm_bridge);
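
/*
 * Example usage (illustrative sketch): the platform driver that owns the
 * root port is expected to instantiate the bridge once pmem support is
 * possible:
 *
 *      cxl_nvb = devm_cxl_add_nvdimm_bridge(host, root_port);
 *      if (IS_ERR(cxl_nvb))
 *              return PTR_ERR(cxl_nvb);
 */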

static void cxl_nvdimm_release(struct device *dev)
{
        struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev);

        kfree(cxl_nvd);
}

static const struct attribute_group *cxl_nvdimm_attribute_groups[] = {
        &cxl_base_attribute_group,
        NULL,
};

static const struct device_type cxl_nvdimm_type = {
        .name = "cxl_nvdimm",
        .release = cxl_nvdimm_release,
        .groups = cxl_nvdimm_attribute_groups,
};

bool is_cxl_nvdimm(struct device *dev)
{
        return dev->type == &cxl_nvdimm_type;
}
EXPORT_SYMBOL_GPL(is_cxl_nvdimm);

struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev)
{
        if (dev_WARN_ONCE(dev, !is_cxl_nvdimm(dev),
                          "not a cxl_nvdimm device\n"))
                return NULL;
        return container_of(dev, struct cxl_nvdimm, dev);
}
EXPORT_SYMBOL_GPL(to_cxl_nvdimm);

static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_memdev *cxlmd)
{
        struct cxl_nvdimm *cxl_nvd;
        struct device *dev;

        cxl_nvd = kzalloc(sizeof(*cxl_nvd), GFP_KERNEL);
        if (!cxl_nvd)
                return ERR_PTR(-ENOMEM);

        dev = &cxl_nvd->dev;
        cxl_nvd->cxlmd = cxlmd;
        device_initialize(dev);
        device_set_pm_not_required(dev);
        dev->parent = &cxlmd->dev;
        dev->bus = &cxl_bus_type;
        dev->type = &cxl_nvdimm_type;

        return cxl_nvd;
}

int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd)
{
        struct cxl_nvdimm *cxl_nvd;
        struct device *dev;
        int rc;

        cxl_nvd = cxl_nvdimm_alloc(cxlmd);
        if (IS_ERR(cxl_nvd))
                return PTR_ERR(cxl_nvd);

        dev = &cxl_nvd->dev;
        rc = dev_set_name(dev, "pmem%d", cxlmd->id);
        if (rc)
                goto err;

        rc = device_add(dev);
        if (rc)
                goto err;

        dev_dbg(host, "%s: register %s\n", dev_name(dev->parent),
                dev_name(dev));

        return devm_add_action_or_reset(host, unregister_dev, dev);

err:
        put_device(dev);
        return rc;
}
EXPORT_SYMBOL_GPL(devm_cxl_add_nvdimm);
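
/*
 * Example usage (illustrative sketch): a memdev-aware caller would connect a
 * persistent-memory-capable cxl_memdev to the nvdimm machinery with:
 *
 *      rc = devm_cxl_add_nvdimm(host, cxlmd);
 */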

/**
 * cxl_probe_device_regs() - Detect CXL Device register blocks
 * @dev: Host device of the @base mapping
 * @base: Mapping of CXL 2.0 8.2.8 CXL Device Register Interface
 * @map: Map object describing the register block information found
 *
 * Probe for device register information and return it in map object.
 */
void cxl_probe_device_regs(struct device *dev, void __iomem *base,
                           struct cxl_device_reg_map *map)
{
        int cap, cap_count;
        u64 cap_array;

        *map = (struct cxl_device_reg_map){ 0 };

        cap_array = readq(base + CXLDEV_CAP_ARRAY_OFFSET);
        if (FIELD_GET(CXLDEV_CAP_ARRAY_ID_MASK, cap_array) !=
            CXLDEV_CAP_ARRAY_CAP_ID)
                return;

        cap_count = FIELD_GET(CXLDEV_CAP_ARRAY_COUNT_MASK, cap_array);

        for (cap = 1; cap <= cap_count; cap++) {
                u32 offset, length;
                u16 cap_id;

                cap_id = FIELD_GET(CXLDEV_CAP_HDR_CAP_ID_MASK,
                                   readl(base + cap * 0x10));
                offset = readl(base + cap * 0x10 + 0x4);
                length = readl(base + cap * 0x10 + 0x8);

                switch (cap_id) {
                case CXLDEV_CAP_CAP_ID_DEVICE_STATUS:
                        dev_dbg(dev, "found Status capability (0x%x)\n", offset);

                        map->status.valid = true;
                        map->status.offset = offset;
                        map->status.size = length;
                        break;
                case CXLDEV_CAP_CAP_ID_PRIMARY_MAILBOX:
                        dev_dbg(dev, "found Mailbox capability (0x%x)\n", offset);
                        map->mbox.valid = true;
                        map->mbox.offset = offset;
                        map->mbox.size = length;
                        break;
                case CXLDEV_CAP_CAP_ID_SECONDARY_MAILBOX:
                        dev_dbg(dev, "found Secondary Mailbox capability (0x%x)\n", offset);
                        break;
                case CXLDEV_CAP_CAP_ID_MEMDEV:
                        dev_dbg(dev, "found Memory Device capability (0x%x)\n", offset);
                        map->memdev.valid = true;
                        map->memdev.offset = offset;
                        map->memdev.size = length;
                        break;
                default:
                        if (cap_id >= 0x8000)
                                dev_dbg(dev, "Vendor cap ID: %#x offset: %#x\n", cap_id, offset);
                        else
                                dev_dbg(dev, "Unknown cap ID: %#x offset: %#x\n", cap_id, offset);
                        break;
                }
        }
}
EXPORT_SYMBOL_GPL(cxl_probe_device_regs);
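
/*
 * Layout note (explanatory, not in the original source): the device
 * capability array uses a 0x10-byte stride, so capability N's header is at
 * base + N * 0x10, with the 32-bit offset and length fields at +0x4 and
 * +0x8 respectively, matching the readl() arithmetic above.
 */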

static void __iomem *devm_cxl_iomap_block(struct device *dev,
                                          resource_size_t addr,
                                          resource_size_t length)
{
        void __iomem *ret_val;
        struct resource *res;

        res = devm_request_mem_region(dev, addr, length, dev_name(dev));
        if (!res) {
                resource_size_t end = addr + length - 1;

                dev_err(dev, "Failed to request region %pa-%pa\n", &addr, &end);
                return NULL;
        }

        ret_val = devm_ioremap(dev, addr, length);
        if (!ret_val)
                dev_err(dev, "Failed to map region %pr\n", res);

        return ret_val;
}

int cxl_map_component_regs(struct pci_dev *pdev,
                           struct cxl_component_regs *regs,
                           struct cxl_register_map *map)
{
        struct device *dev = &pdev->dev;
        resource_size_t phys_addr;
        resource_size_t length;

        phys_addr = pci_resource_start(pdev, map->barno);
        phys_addr += map->block_offset;

        phys_addr += map->component_map.hdm_decoder.offset;
        length = map->component_map.hdm_decoder.size;
        regs->hdm_decoder = devm_cxl_iomap_block(dev, phys_addr, length);
        if (!regs->hdm_decoder)
                return -ENOMEM;

        return 0;
}
EXPORT_SYMBOL_GPL(cxl_map_component_regs);

int cxl_map_device_regs(struct pci_dev *pdev,
                        struct cxl_device_regs *regs,
                        struct cxl_register_map *map)
{
        struct device *dev = &pdev->dev;
        resource_size_t phys_addr;

        phys_addr = pci_resource_start(pdev, map->barno);
        phys_addr += map->block_offset;

        if (map->device_map.status.valid) {
                resource_size_t addr;
                resource_size_t length;

                addr = phys_addr + map->device_map.status.offset;
                length = map->device_map.status.size;
                regs->status = devm_cxl_iomap_block(dev, addr, length);
                if (!regs->status)
                        return -ENOMEM;
        }

        if (map->device_map.mbox.valid) {
                resource_size_t addr;
                resource_size_t length;

                addr = phys_addr + map->device_map.mbox.offset;
                length = map->device_map.mbox.size;
                regs->mbox = devm_cxl_iomap_block(dev, addr, length);
                if (!regs->mbox)
                        return -ENOMEM;
        }

        if (map->device_map.memdev.valid) {
                resource_size_t addr;
                resource_size_t length;

                addr = phys_addr + map->device_map.memdev.offset;
                length = map->device_map.memdev.size;
                regs->memdev = devm_cxl_iomap_block(dev, addr, length);
                if (!regs->memdev)
                        return -ENOMEM;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(cxl_map_device_regs);
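
/*
 * Example usage (illustrative sketch, member names assumed): a PCI driver
 * typically pairs cxl_probe_device_regs() on a temporary mapping with
 * cxl_map_device_regs() once the register map is populated:
 *
 *      cxl_probe_device_regs(&pdev->dev, base, &map->device_map);
 *      rc = cxl_map_device_regs(pdev, &regs->device_regs, map);
 */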

/**
 * __cxl_driver_register - register a driver for the cxl bus
 * @cxl_drv: cxl driver structure to attach
 * @owner: owning module/driver
 * @modname: KBUILD_MODNAME for parent driver
 */
int __cxl_driver_register(struct cxl_driver *cxl_drv, struct module *owner,
                          const char *modname)
{
        if (!cxl_drv->probe) {
                pr_debug("%s ->probe() must be specified\n", modname);
                return -EINVAL;
        }

        if (!cxl_drv->name) {
                pr_debug("%s ->name must be specified\n", modname);
                return -EINVAL;
        }

        if (!cxl_drv->id) {
                pr_debug("%s ->id must be specified\n", modname);
                return -EINVAL;
        }

        cxl_drv->drv.bus = &cxl_bus_type;
        cxl_drv->drv.owner = owner;
        cxl_drv->drv.mod_name = modname;
        cxl_drv->drv.name = cxl_drv->name;

        return driver_register(&cxl_drv->drv);
}
EXPORT_SYMBOL_GPL(__cxl_driver_register);

void cxl_driver_unregister(struct cxl_driver *cxl_drv)
{
        driver_unregister(&cxl_drv->drv);
}
EXPORT_SYMBOL_GPL(cxl_driver_unregister);
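
/*
 * Example usage (illustrative sketch): drivers normally go through the
 * cxl_driver_register() / module_cxl_driver() wrappers rather than calling
 * this function directly, e.g.:
 *
 *      static struct cxl_driver cxl_nvdimm_driver = {
 *              .name = "cxl_nvdimm",
 *              .probe = cxl_nvdimm_probe,
 *              .id = CXL_DEVICE_NVDIMM,
 *      };
 *      module_cxl_driver(cxl_nvdimm_driver);
 */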

static int cxl_device_id(struct device *dev)
{
        if (dev->type == &cxl_nvdimm_bridge_type)
                return CXL_DEVICE_NVDIMM_BRIDGE;
        if (dev->type == &cxl_nvdimm_type)
                return CXL_DEVICE_NVDIMM;
        return 0;
}

static int cxl_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
{
        return add_uevent_var(env, "MODALIAS=" CXL_MODALIAS_FMT,
                              cxl_device_id(dev));
}

static int cxl_bus_match(struct device *dev, struct device_driver *drv)
{
        return cxl_device_id(dev) == to_cxl_drv(drv)->id;
}

static int cxl_bus_probe(struct device *dev)
{
        return to_cxl_drv(dev->driver)->probe(dev);
}

static int cxl_bus_remove(struct device *dev)
{
        struct cxl_driver *cxl_drv = to_cxl_drv(dev->driver);

        if (cxl_drv->remove)
                cxl_drv->remove(dev);
        return 0;
}

struct bus_type cxl_bus_type = {
        .name = "cxl",
        .uevent = cxl_bus_uevent,
        .match = cxl_bus_match,
        .probe = cxl_bus_probe,
        .remove = cxl_bus_remove,
};
EXPORT_SYMBOL_GPL(cxl_bus_type);

static __init int cxl_core_init(void)
{
        return bus_register(&cxl_bus_type);
}

static void cxl_core_exit(void)
{
        bus_unregister(&cxl_bus_type);
}

module_init(cxl_core_init);
module_exit(cxl_core_exit);
MODULE_LICENSE("GPL v2");