1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /* Copyright(c) 2020 Intel Corporation. */
7 #include <linux/libnvdimm.h>
8 #include <linux/bitfield.h>
9 #include <linux/bitops.h>
15 * The CXL core objects like ports, decoders, and regions are shared
16 * between the subsystem drivers cxl_acpi, cxl_pci, and core drivers
17 * (port-driver, region-driver, nvdimm object-drivers... etc).
/* CXL 2.0 8.2.5 CXL.cache and CXL.mem Registers */
21 #define CXL_CM_OFFSET 0x1000
22 #define CXL_CM_CAP_HDR_OFFSET 0x0
23 #define CXL_CM_CAP_HDR_ID_MASK GENMASK(15, 0)
24 #define CM_CAP_HDR_CAP_ID 1
25 #define CXL_CM_CAP_HDR_VERSION_MASK GENMASK(19, 16)
26 #define CM_CAP_HDR_CAP_VERSION 1
27 #define CXL_CM_CAP_HDR_CACHE_MEM_VERSION_MASK GENMASK(23, 20)
28 #define CM_CAP_HDR_CACHE_MEM_VERSION 1
29 #define CXL_CM_CAP_HDR_ARRAY_SIZE_MASK GENMASK(31, 24)
30 #define CXL_CM_CAP_PTR_MASK GENMASK(31, 20)
32 #define CXL_CM_CAP_CAP_ID_HDM 0x5
33 #define CXL_CM_CAP_CAP_HDM_VERSION 1
35 /* HDM decoders CXL 2.0 8.2.5.12 CXL HDM Decoder Capability Structure */
36 #define CXL_HDM_DECODER_CAP_OFFSET 0x0
37 #define CXL_HDM_DECODER_COUNT_MASK GENMASK(3, 0)
38 #define CXL_HDM_DECODER_TARGET_COUNT_MASK GENMASK(7, 4)
39 #define CXL_HDM_DECODER0_BASE_LOW_OFFSET 0x10
40 #define CXL_HDM_DECODER0_BASE_HIGH_OFFSET 0x14
41 #define CXL_HDM_DECODER0_SIZE_LOW_OFFSET 0x18
42 #define CXL_HDM_DECODER0_SIZE_HIGH_OFFSET 0x1c
43 #define CXL_HDM_DECODER0_CTRL_OFFSET 0x20
45 static inline int cxl_hdm_decoder_count(u32 cap_hdr)
47 int val = FIELD_GET(CXL_HDM_DECODER_COUNT_MASK, cap_hdr);
49 return val ? val * 2 : 1;
52 /* CXL 2.0 8.2.8.1 Device Capabilities Array Register */
53 #define CXLDEV_CAP_ARRAY_OFFSET 0x0
54 #define CXLDEV_CAP_ARRAY_CAP_ID 0
55 #define CXLDEV_CAP_ARRAY_ID_MASK GENMASK_ULL(15, 0)
56 #define CXLDEV_CAP_ARRAY_COUNT_MASK GENMASK_ULL(47, 32)
57 /* CXL 2.0 8.2.8.2 CXL Device Capability Header Register */
58 #define CXLDEV_CAP_HDR_CAP_ID_MASK GENMASK(15, 0)
59 /* CXL 2.0 8.2.8.2.1 CXL Device Capabilities */
60 #define CXLDEV_CAP_CAP_ID_DEVICE_STATUS 0x1
61 #define CXLDEV_CAP_CAP_ID_PRIMARY_MAILBOX 0x2
62 #define CXLDEV_CAP_CAP_ID_SECONDARY_MAILBOX 0x3
63 #define CXLDEV_CAP_CAP_ID_MEMDEV 0x4000
65 /* CXL 2.0 8.2.8.4 Mailbox Registers */
66 #define CXLDEV_MBOX_CAPS_OFFSET 0x00
67 #define CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK GENMASK(4, 0)
68 #define CXLDEV_MBOX_CTRL_OFFSET 0x04
69 #define CXLDEV_MBOX_CTRL_DOORBELL BIT(0)
70 #define CXLDEV_MBOX_CMD_OFFSET 0x08
71 #define CXLDEV_MBOX_CMD_COMMAND_OPCODE_MASK GENMASK_ULL(15, 0)
72 #define CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK GENMASK_ULL(36, 16)
73 #define CXLDEV_MBOX_STATUS_OFFSET 0x10
74 #define CXLDEV_MBOX_STATUS_RET_CODE_MASK GENMASK_ULL(47, 32)
75 #define CXLDEV_MBOX_BG_CMD_STATUS_OFFSET 0x18
76 #define CXLDEV_MBOX_PAYLOAD_OFFSET 0x20
79 * Using struct_group() allows for per register-block-type helper routines,
80 * without requiring block-type agnostic code to include the prefix.
84 * Common set of CXL Component register block base pointers
85 * @hdm_decoder: CXL 2.0 8.2.5.12 CXL HDM Decoder Capability Structure
87 struct_group_tagged(cxl_component_regs, component,
88 void __iomem *hdm_decoder;
91 * Common set of CXL Device register block base pointers
92 * @status: CXL 2.0 8.2.8.3 Device Status Registers
93 * @mbox: CXL 2.0 8.2.8.4 Mailbox Registers
94 * @memdev: CXL 2.0 8.2.8.5 Memory Device Registers
96 struct_group_tagged(cxl_device_regs, device_regs,
97 void __iomem *status, *mbox, *memdev;
103 unsigned long offset;
107 struct cxl_component_reg_map {
108 struct cxl_reg_map hdm_decoder;
111 struct cxl_device_reg_map {
112 struct cxl_reg_map status;
113 struct cxl_reg_map mbox;
114 struct cxl_reg_map memdev;
117 struct cxl_register_map {
122 struct cxl_component_reg_map component_map;
123 struct cxl_device_reg_map device_map;
/*
 * Probe helpers walk an already-mapped register block and record the
 * offsets of each recognized capability into @map; the map helpers then
 * ioremap just the discovered sub-blocks into the given regs structure.
 */
void cxl_probe_component_regs(struct device *dev, void __iomem *base,
			      struct cxl_component_reg_map *map);
void cxl_probe_device_regs(struct device *dev, void __iomem *base,
			   struct cxl_device_reg_map *map);
/* Returns 0 on success, negative errno on ioremap failure */
int cxl_map_component_regs(struct pci_dev *pdev,
			   struct cxl_component_regs *regs,
			   struct cxl_register_map *map);
int cxl_map_device_regs(struct pci_dev *pdev,
			struct cxl_device_regs *regs,
			struct cxl_register_map *map);
138 #define CXL_RESOURCE_NONE ((resource_size_t) -1)
139 #define CXL_TARGET_STRLEN 20
142 * cxl_decoder flags that define the type of memory / devices this
143 * decoder supports as well as configuration lock status See "CXL 2.0
144 * 8.2.5.12.7 CXL HDM Decoder 0 Control Register" for details.
146 #define CXL_DECODER_F_RAM BIT(0)
147 #define CXL_DECODER_F_PMEM BIT(1)
148 #define CXL_DECODER_F_TYPE2 BIT(2)
149 #define CXL_DECODER_F_TYPE3 BIT(3)
150 #define CXL_DECODER_F_LOCK BIT(4)
151 #define CXL_DECODER_F_MASK GENMASK(4, 0)
153 enum cxl_decoder_type {
154 CXL_DECODER_ACCELERATOR = 2,
155 CXL_DECODER_EXPANDER = 3,
159 * struct cxl_decoder - CXL address range decode configuration
160 * @dev: this decoder's device
161 * @id: kernel device name id
162 * @range: address range considered by this decoder
163 * @interleave_ways: number of cxl_dports in this decode
164 * @interleave_granularity: data stride per dport
165 * @target_type: accelerator vs expander (type2 vs type3) selector
166 * @flags: memory type capabilities and locking
167 * @target: active ordered target list in current decoder configuration
174 int interleave_granularity;
175 enum cxl_decoder_type target_type;
177 struct cxl_dport *target[];
181 enum cxl_nvdimm_brige_state {
188 struct cxl_nvdimm_bridge {
190 struct cxl_port *port;
191 struct nvdimm_bus *nvdimm_bus;
192 struct nvdimm_bus_descriptor nd_desc;
193 struct work_struct state_work;
194 enum cxl_nvdimm_brige_state state;
199 struct cxl_memdev *cxlmd;
200 struct nvdimm *nvdimm;
204 * struct cxl_port - logical collection of upstream port devices and
205 * downstream port devices to construct a CXL memory
207 * @dev: this port's device
208 * @uport: PCI or platform device implementing the upstream port capability
209 * @id: id for port device-name
210 * @dports: cxl_dport instances referenced by decoders
211 * @decoder_ida: allocator for decoder ids
212 * @component_reg_phys: component register capability base address (optional)
216 struct device *uport;
218 struct list_head dports;
219 struct ida decoder_ida;
220 resource_size_t component_reg_phys;
224 * struct cxl_dport - CXL downstream port
225 * @dport: PCI bridge or firmware device representing the downstream link
226 * @port_id: unique hardware identifier for dport in decoder target list
227 * @component_reg_phys: downstream port component registers
228 * @port: reference to cxl_port that contains this downstream port
229 * @list: node for a cxl_port's list of cxl_dport instances
232 struct device *dport;
234 resource_size_t component_reg_phys;
235 struct cxl_port *port;
236 struct list_head list;
/* Cast a device known to be on the CXL bus back to its cxl_port */
struct cxl_port *to_cxl_port(struct device *dev);
/*
 * Register a new cxl_port devm-bound to @host; @parent_port is NULL for
 * the root port. Returns an ERR_PTR() on failure.
 */
struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport,
				   resource_size_t component_reg_phys,
				   struct cxl_port *parent_port);

/* Attach a downstream port description to @port; returns negative errno */
int cxl_add_dport(struct cxl_port *port, struct device *dport, int port_id,
		  resource_size_t component_reg_phys);
247 struct cxl_decoder *to_cxl_decoder(struct device *dev);
248 bool is_root_decoder(struct device *dev);
250 devm_cxl_add_decoder(struct device *host, struct cxl_port *port, int nr_targets,
251 resource_size_t base, resource_size_t len,
252 int interleave_ways, int interleave_granularity,
253 enum cxl_decoder_type type, unsigned long flags);
256 * Per the CXL specification (8.2.5.12 CXL HDM Decoder Capability Structure)
257 * single ported host-bridges need not publish a decoder capability when a
258 * passthrough decode can be assumed, i.e. all transactions that the uport sees
 * are claimed and passed to the single dport. Default the range to a 0-base,
260 * 0-length until the first CXL region is activated.
262 static inline struct cxl_decoder *
263 devm_cxl_add_passthrough_decoder(struct device *host, struct cxl_port *port)
265 return devm_cxl_add_decoder(host, port, 1, 0, 0, 1, PAGE_SIZE,
266 CXL_DECODER_EXPANDER, 0);
269 extern struct bus_type cxl_bus_type;
273 int (*probe)(struct device *dev);
274 void (*remove)(struct device *dev);
275 struct device_driver drv;
279 static inline struct cxl_driver *to_cxl_drv(struct device_driver *drv)
281 return container_of(drv, struct cxl_driver, drv);
/*
 * Register a driver on the CXL bus; use the cxl_driver_register() wrapper
 * so the owning module and modname are filled in automatically.
 * Returns negative errno on failure.
 */
int __cxl_driver_register(struct cxl_driver *cxl_drv, struct module *owner,
			  const char *modname);
#define cxl_driver_register(x) __cxl_driver_register(x, THIS_MODULE, KBUILD_MODNAME)
void cxl_driver_unregister(struct cxl_driver *cxl_drv);
289 #define CXL_DEVICE_NVDIMM_BRIDGE 1
290 #define CXL_DEVICE_NVDIMM 2
292 #define MODULE_ALIAS_CXL(type) MODULE_ALIAS("cxl:t" __stringify(type) "*")
293 #define CXL_MODALIAS_FMT "cxl:t%d"
295 struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev);
296 struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
297 struct cxl_port *port);
298 struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev);
299 bool is_cxl_nvdimm(struct device *dev);
300 int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd);
301 #endif /* __CXL_H__ */