/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/pci_regs.h>
#include <linux/pci_ids.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/pci.h>
#include <linux/of.h>
#include <linux/delay.h>
#include <asm/opal.h>
#include <asm/msi_bitmap.h>
#include <asm/pnv-pci.h>
#include <asm/io.h>
#include <asm/reg.h>

#include "cxl.h"
#include <misc/cxl.h>
#define CXL_PCI_VSEC_ID	0x1280
#define CXL_VSEC_MIN_SIZE 0x80

#define CXL_READ_VSEC_LENGTH(dev, vsec, dest)			\
	{							\
		pci_read_config_word(dev, vsec + 0x6, dest);	\
		*dest >>= 4;					\
	}
#define CXL_READ_VSEC_NAFUS(dev, vsec, dest) \
	pci_read_config_byte(dev, vsec + 0x8, dest)
#define CXL_READ_VSEC_STATUS(dev, vsec, dest) \
	pci_read_config_byte(dev, vsec + 0x9, dest)
#define CXL_STATUS_SECOND_PORT  0x80
#define CXL_STATUS_MSI_X_FULL   0x40
#define CXL_STATUS_MSI_X_SINGLE 0x20
#define CXL_STATUS_FLASH_RW     0x08
#define CXL_STATUS_FLASH_RO     0x04
#define CXL_STATUS_LOADABLE_AFU 0x02
#define CXL_STATUS_LOADABLE_PSL 0x01
/* If we see these features we won't try to use the card */
#define CXL_UNSUPPORTED_FEATURES \
	(CXL_STATUS_MSI_X_FULL | CXL_STATUS_MSI_X_SINGLE)
#define CXL_READ_VSEC_MODE_CONTROL(dev, vsec, dest) \
	pci_read_config_byte(dev, vsec + 0xa, dest)
#define CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val) \
	pci_write_config_byte(dev, vsec + 0xa, val)
#define CXL_WRITE_VSEC_MODE_CONTROL_BUS(bus, devfn, vsec, val) \
	pci_bus_write_config_byte(bus, devfn, vsec + 0xa, val)
#define CXL_VSEC_PROTOCOL_MASK   0xe0
#define CXL_VSEC_PROTOCOL_1024TB 0x80
#define CXL_VSEC_PROTOCOL_512TB  0x40
#define CXL_VSEC_PROTOCOL_256TB  0x20 /* Power 8 uses this */
#define CXL_VSEC_PROTOCOL_ENABLE 0x01
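/*
 * The protocol field is read-modify-written: setup_cxl_protocol_area() below
 * clears CXL_VSEC_PROTOCOL_MASK and ORs in CXL_VSEC_PROTOCOL_256TB (the only
 * range POWER8 uses) together with CXL_VSEC_PROTOCOL_ENABLE.
 */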
#define CXL_READ_VSEC_PSL_REVISION(dev, vsec, dest) \
	pci_read_config_word(dev, vsec + 0xc, dest)
#define CXL_READ_VSEC_CAIA_MINOR(dev, vsec, dest) \
	pci_read_config_byte(dev, vsec + 0xe, dest)
#define CXL_READ_VSEC_CAIA_MAJOR(dev, vsec, dest) \
	pci_read_config_byte(dev, vsec + 0xf, dest)
#define CXL_READ_VSEC_BASE_IMAGE(dev, vsec, dest) \
	pci_read_config_word(dev, vsec + 0x10, dest)

#define CXL_READ_VSEC_IMAGE_STATE(dev, vsec, dest) \
	pci_read_config_byte(dev, vsec + 0x13, dest)
#define CXL_WRITE_VSEC_IMAGE_STATE(dev, vsec, val) \
	pci_write_config_byte(dev, vsec + 0x13, val)
#define CXL_VSEC_USER_IMAGE_LOADED 0x80 /* RO */
#define CXL_VSEC_PERST_LOADS_IMAGE 0x20 /* RW */
#define CXL_VSEC_PERST_SELECT_USER 0x10 /* RW */

#define CXL_READ_VSEC_AFU_DESC_OFF(dev, vsec, dest) \
	pci_read_config_dword(dev, vsec + 0x20, dest)
#define CXL_READ_VSEC_AFU_DESC_SIZE(dev, vsec, dest) \
	pci_read_config_dword(dev, vsec + 0x24, dest)
#define CXL_READ_VSEC_PS_OFF(dev, vsec, dest) \
	pci_read_config_dword(dev, vsec + 0x28, dest)
#define CXL_READ_VSEC_PS_SIZE(dev, vsec, dest) \
	pci_read_config_dword(dev, vsec + 0x2c, dest)
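/*
 * Summary of the VSEC layout implied by the accessors above (offsets relative
 * to the start of the VSEC): 0x06 length (upper 12 bits of the word), 0x08
 * number of AFUs, 0x09 status, 0x0a mode control, 0x0c PSL revision,
 * 0x0e/0x0f CAIA minor/major, 0x10 base image revision, 0x13 image state,
 * 0x20/0x24 AFU descriptor offset/size, 0x28/0x2c problem state offset/size.
 */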
/* This works a little differently from the p1/p2 register accesses, to make
 * it easier to pull out individual fields */
#define AFUD_READ(afu, off)		in_be64(afu->native->afu_desc_mmio + off)
#define AFUD_READ_LE(afu, off)		in_le64(afu->native->afu_desc_mmio + off)
#define EXTRACT_PPC_BIT(val, bit)	(!!(val & PPC_BIT(bit)))
#define EXTRACT_PPC_BITS(val, bs, be)	((val & PPC_BITMASK(bs, be)) >> PPC_BITLSHIFT(be))
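/*
 * These use IBM (big-endian) bit numbering, where bit 0 is the MSB:
 * PPC_BIT(0) == 1ULL << 63. For example, EXTRACT_PPC_BITS(val, 16, 31)
 * masks bits 16..31 and shifts right by PPC_BITLSHIFT(31) == 32, yielding
 * the 16-bit field as an ordinary integer.
 */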
#define AFUD_READ_INFO(afu)		AFUD_READ(afu, 0x0)
#define   AFUD_NUM_INTS_PER_PROC(val)	EXTRACT_PPC_BITS(val, 0, 15)
#define   AFUD_NUM_PROCS(val)		EXTRACT_PPC_BITS(val, 16, 31)
#define   AFUD_NUM_CRS(val)		EXTRACT_PPC_BITS(val, 32, 47)
#define   AFUD_MULTIMODE(val)		EXTRACT_PPC_BIT(val, 48)
#define   AFUD_PUSH_BLOCK_TRANSFER(val)	EXTRACT_PPC_BIT(val, 55)
#define   AFUD_DEDICATED_PROCESS(val)	EXTRACT_PPC_BIT(val, 59)
#define   AFUD_AFU_DIRECTED(val)	EXTRACT_PPC_BIT(val, 61)
#define   AFUD_TIME_SLICED(val)		EXTRACT_PPC_BIT(val, 63)
#define AFUD_READ_CR(afu)		AFUD_READ(afu, 0x20)
#define   AFUD_CR_LEN(val)		EXTRACT_PPC_BITS(val, 8, 63)
#define AFUD_READ_CR_OFF(afu)		AFUD_READ(afu, 0x28)
#define AFUD_READ_PPPSA(afu)		AFUD_READ(afu, 0x30)
#define   AFUD_PPPSA_PP(val)		EXTRACT_PPC_BIT(val, 6)
#define   AFUD_PPPSA_PSA(val)		EXTRACT_PPC_BIT(val, 7)
#define   AFUD_PPPSA_LEN(val)		EXTRACT_PPC_BITS(val, 8, 63)
#define AFUD_READ_PPPSA_OFF(afu)	AFUD_READ(afu, 0x38)
#define AFUD_READ_EB(afu)		AFUD_READ(afu, 0x40)
#define   AFUD_EB_LEN(val)		EXTRACT_PPC_BITS(val, 8, 63)
#define AFUD_READ_EB_OFF(afu)		AFUD_READ(afu, 0x48)
static const struct pci_device_id cxl_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0477), },
	{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x044b), },
	{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x04cf), },
	{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0601), },
	{ PCI_DEVICE_CLASS(0x120000, ~0), },
	{ }
};
MODULE_DEVICE_TABLE(pci, cxl_pci_tbl);
/*
 * Mostly using these wrappers to avoid confusion:
 * priv 1 is BAR2, while priv 2 is BAR0
 */
static inline resource_size_t p1_base(struct pci_dev *dev)
{
	return pci_resource_start(dev, 2);
}

static inline resource_size_t p1_size(struct pci_dev *dev)
{
	return pci_resource_len(dev, 2);
}

static inline resource_size_t p2_base(struct pci_dev *dev)
{
	return pci_resource_start(dev, 0);
}

static inline resource_size_t p2_size(struct pci_dev *dev)
{
	return pci_resource_len(dev, 0);
}
static int find_cxl_vsec(struct pci_dev *dev)
{
	int vsec = 0;
	u16 val;

	while ((vsec = pci_find_next_ext_capability(dev, vsec, PCI_EXT_CAP_ID_VNDR))) {
		pci_read_config_word(dev, vsec + 0x4, &val);
		if (val == CXL_PCI_VSEC_ID)
			return vsec;
	}
	return 0;
}
static void dump_cxl_config_space(struct pci_dev *dev)
{
	int vsec;
	u32 val;

	dev_info(&dev->dev, "dump_cxl_config_space\n");

	pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &val);
	dev_info(&dev->dev, "BAR0: %#.8x\n", val);
	pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, &val);
	dev_info(&dev->dev, "BAR1: %#.8x\n", val);
	pci_read_config_dword(dev, PCI_BASE_ADDRESS_2, &val);
	dev_info(&dev->dev, "BAR2: %#.8x\n", val);
	pci_read_config_dword(dev, PCI_BASE_ADDRESS_3, &val);
	dev_info(&dev->dev, "BAR3: %#.8x\n", val);
	pci_read_config_dword(dev, PCI_BASE_ADDRESS_4, &val);
	dev_info(&dev->dev, "BAR4: %#.8x\n", val);
	pci_read_config_dword(dev, PCI_BASE_ADDRESS_5, &val);
	dev_info(&dev->dev, "BAR5: %#.8x\n", val);

	dev_info(&dev->dev, "p1 regs: %#llx, len: %#llx\n",
		p1_base(dev), p1_size(dev));
	dev_info(&dev->dev, "p2 regs: %#llx, len: %#llx\n",
		p2_base(dev), p2_size(dev));
	dev_info(&dev->dev, "BAR 4/5: %#llx, len: %#llx\n",
		pci_resource_start(dev, 4), pci_resource_len(dev, 4));

	if (!(vsec = find_cxl_vsec(dev)))
		return;
#define show_reg(name, what) \
	dev_info(&dev->dev, "cxl vsec: %30s: %#x\n", name, what)

	pci_read_config_dword(dev, vsec + 0x0, &val);
	show_reg("Cap ID", (val >> 0) & 0xffff);
	show_reg("Cap Ver", (val >> 16) & 0xf);
	show_reg("Next Cap Ptr", (val >> 20) & 0xfff);
	pci_read_config_dword(dev, vsec + 0x4, &val);
	show_reg("VSEC ID", (val >> 0) & 0xffff);
	show_reg("VSEC Rev", (val >> 16) & 0xf);
	show_reg("VSEC Length", (val >> 20) & 0xfff);
	pci_read_config_dword(dev, vsec + 0x8, &val);
	show_reg("Num AFUs", (val >> 0) & 0xff);
	show_reg("Status", (val >> 8) & 0xff);
	show_reg("Mode Control", (val >> 16) & 0xff);
	show_reg("Reserved", (val >> 24) & 0xff);
	pci_read_config_dword(dev, vsec + 0xc, &val);
	show_reg("PSL Rev", (val >> 0) & 0xffff);
	show_reg("CAIA Ver", (val >> 16) & 0xffff);
	pci_read_config_dword(dev, vsec + 0x10, &val);
	show_reg("Base Image Rev", (val >> 0) & 0xffff);
	show_reg("Reserved", (val >> 16) & 0x0fff);
	show_reg("Image Control", (val >> 28) & 0x3);
	show_reg("Reserved", (val >> 30) & 0x1);
	show_reg("Image Loaded", (val >> 31) & 0x1);

	pci_read_config_dword(dev, vsec + 0x14, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x18, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x1c, &val);
	show_reg("Reserved", val);

	pci_read_config_dword(dev, vsec + 0x20, &val);
	show_reg("AFU Descriptor Offset", val);
	pci_read_config_dword(dev, vsec + 0x24, &val);
	show_reg("AFU Descriptor Size", val);
	pci_read_config_dword(dev, vsec + 0x28, &val);
	show_reg("Problem State Offset", val);
	pci_read_config_dword(dev, vsec + 0x2c, &val);
	show_reg("Problem State Size", val);

	pci_read_config_dword(dev, vsec + 0x30, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x34, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x38, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x3c, &val);
	show_reg("Reserved", val);

	pci_read_config_dword(dev, vsec + 0x40, &val);
	show_reg("PSL Programming Port", val);
	pci_read_config_dword(dev, vsec + 0x44, &val);
	show_reg("PSL Programming Control", val);

	pci_read_config_dword(dev, vsec + 0x48, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x4c, &val);
	show_reg("Reserved", val);

	pci_read_config_dword(dev, vsec + 0x50, &val);
	show_reg("Flash Address Register", val);
	pci_read_config_dword(dev, vsec + 0x54, &val);
	show_reg("Flash Size Register", val);
	pci_read_config_dword(dev, vsec + 0x58, &val);
	show_reg("Flash Status/Control Register", val);
	/* The data port follows status/control at 0x5c; the original code
	 * re-read 0x58 here by mistake */
	pci_read_config_dword(dev, vsec + 0x5c, &val);
	show_reg("Flash Data Port", val);

#undef show_reg
}
static void dump_afu_descriptor(struct cxl_afu *afu)
{
	u64 val, afu_cr_num, afu_cr_off, afu_cr_len;
	int i;

#define show_reg(name, what) \
	dev_info(&afu->dev, "afu desc: %30s: %#llx\n", name, what)

	val = AFUD_READ_INFO(afu);
	show_reg("num_ints_per_process", AFUD_NUM_INTS_PER_PROC(val));
	show_reg("num_of_processes", AFUD_NUM_PROCS(val));
	show_reg("num_of_afu_CRs", AFUD_NUM_CRS(val));
	show_reg("req_prog_mode", val & 0xffffULL);
	afu_cr_num = AFUD_NUM_CRS(val);

	val = AFUD_READ(afu, 0x8);
	show_reg("Reserved", val);
	val = AFUD_READ(afu, 0x10);
	show_reg("Reserved", val);
	val = AFUD_READ(afu, 0x18);
	show_reg("Reserved", val);

	val = AFUD_READ_CR(afu);
	show_reg("Reserved", (val >> (63-7)) & 0xff);
	show_reg("AFU_CR_len", AFUD_CR_LEN(val));
	afu_cr_len = AFUD_CR_LEN(val) * 256;

	val = AFUD_READ_CR_OFF(afu);
	afu_cr_off = val;
	show_reg("AFU_CR_offset", val);

	val = AFUD_READ_PPPSA(afu);
	show_reg("PerProcessPSA_control", (val >> (63-7)) & 0xff);
	show_reg("PerProcessPSA Length", AFUD_PPPSA_LEN(val));

	val = AFUD_READ_PPPSA_OFF(afu);
	show_reg("PerProcessPSA_offset", val);

	val = AFUD_READ_EB(afu);
	show_reg("Reserved", (val >> (63-7)) & 0xff);
	show_reg("AFU_EB_len", AFUD_EB_LEN(val));

	val = AFUD_READ_EB_OFF(afu);
	show_reg("AFU_EB_offset", val);

	for (i = 0; i < afu_cr_num; i++) {
		val = AFUD_READ_LE(afu, afu_cr_off + i * afu_cr_len);
		show_reg("CR Vendor", val & 0xffff);
		show_reg("CR Device", (val >> 16) & 0xffff);
	}
#undef show_reg
}
#define CAPP_UNIT0_ID 0xBA
#define CAPP_UNIT1_ID 0xBE

static u64 get_capp_unit_id(struct device_node *np)
{
	u32 phb_index;

	/*
	 * For chips other than POWER8NVL, we only have CAPP 0,
	 * irrespective of which PHB is used.
	 */
	if (!pvr_version_is(PVR_POWER8NVL))
		return CAPP_UNIT0_ID;

	/*
	 * For POWER8NVL, assume CAPP 0 is attached to PHB0 and
	 * CAPP 1 is attached to PHB1.
	 */
	if (of_property_read_u32(np, "ibm,phb-index", &phb_index))
		return 0;

	if (phb_index == 0)
		return CAPP_UNIT0_ID;

	if (phb_index == 1)
		return CAPP_UNIT1_ID;

	return 0;
}
static int calc_capp_routing(struct pci_dev *dev, u64 *chipid, u64 *capp_unit_id)
{
	struct device_node *np;
	const __be32 *prop;

	if (!(np = pnv_pci_get_phb_node(dev)))
		return -ENODEV;

	while (np && !(prop = of_get_property(np, "ibm,chip-id", NULL)))
		np = of_get_next_parent(np);
	if (!np)
		return -ENODEV;
	*chipid = be32_to_cpup(prop);
	*capp_unit_id = get_capp_unit_id(np);
	of_node_put(np);
	if (!*capp_unit_id) {
		pr_err("cxl: invalid capp unit id\n");
		return -ENODEV;
	}

	return 0;
}
static int init_implementation_adapter_psl_regs(struct cxl *adapter, struct pci_dev *dev)
{
	u64 psl_dsnctl;
	u64 chipid;
	u64 capp_unit_id;
	int rc;

	rc = calc_capp_routing(dev, &chipid, &capp_unit_id);
	if (rc)
		return rc;

	psl_dsnctl = 0x0000900000000000ULL; /* pteupd ttype, scdone */
	psl_dsnctl |= (0x2ULL << (63-38)); /* MMIO hang pulse: 256 us */
	/* Tell PSL where to route data to */
	psl_dsnctl |= (chipid << (63-5));
	psl_dsnctl |= (capp_unit_id << (63-13));

	cxl_p1_write(adapter, CXL_PSL_DSNDCTL, psl_dsnctl);
	cxl_p1_write(adapter, CXL_PSL_RESLCKTO, 0x20000000200ULL);
	/* snoop write mask */
	cxl_p1_write(adapter, CXL_PSL_SNWRALLOC, 0x00000000FFFFFFFFULL);
	/* set fir_accum */
	cxl_p1_write(adapter, CXL_PSL_FIR_CNTL, 0x0800000000000000ULL);
	/* for debugging with trace arrays */
	cxl_p1_write(adapter, CXL_PSL_TRACE, 0x0000FF7C00000000ULL);

	return 0;
}
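/*
 * A worked example of the DSNCTL routing fields above (IBM bit numbering):
 * chipid lands in bits 0:5 (shift 63-5 == 58) and capp_unit_id in bits 6:13
 * (shift 63-13 == 50). So for chipid 0 and CAPP_UNIT0_ID (0xBA) the register
 * value ORs 0xBAULL << 50 on top of the 0x0000900000000000 base.
 */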
static int init_implementation_adapter_xsl_regs(struct cxl *adapter, struct pci_dev *dev)
{
	u64 xsl_dsnctl;
	u64 chipid;
	u64 capp_unit_id;
	int rc;

	rc = calc_capp_routing(dev, &chipid, &capp_unit_id);
	if (rc)
		return rc;

	/* Tell XSL where to route data to */
	xsl_dsnctl = 0x0000600000000000ULL | (chipid << (63-5));
	xsl_dsnctl |= (capp_unit_id << (63-13));
	cxl_p1_write(adapter, CXL_XSL_DSNCTL, xsl_dsnctl);

	return 0;
}
#define TBSYNC_CAL(n) (((u64)n & 0x7) << (63-3))
#define TBSYNC_CNT(n) (((u64)n & 0x7) << (63-6))
/* For the PSL the sync count n is a multiple of 2048 cycles of the 250MHz
 * clock, for 0 < n <= 7: */
#define PSL_2048_250MHZ_CYCLES 1

static void write_timebase_ctrl_psl(struct cxl *adapter)
{
	cxl_p1_write(adapter, CXL_PSL_TB_CTLSTAT,
		     TBSYNC_CNT(2 * PSL_2048_250MHZ_CYCLES));
}
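/*
 * A sketch of the arithmetic, derived from the macros above rather than the
 * CAIA spec: TBSYNC_CNT(2 * PSL_2048_250MHZ_CYCLES) programs a count of 2,
 * i.e. 2 * 2048 cycles at 250MHz, roughly 16.4us between sync pulses.
 */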
#define TBSYNC_ENA (1ULL << 63)
/* For the XSL this is 2**n * 2000 clocks for 0 < n <= 6: */
#define XSL_2000_CLOCKS 1
#define XSL_4000_CLOCKS 2
#define XSL_8000_CLOCKS 3

static void write_timebase_ctrl_xsl(struct cxl *adapter)
{
	cxl_p1_write(adapter, CXL_XSL_TB_CTLSTAT,
		     TBSYNC_ENA |
		     TBSYNC_CAL(3) |
		     TBSYNC_CNT(XSL_4000_CLOCKS));
}
static u64 timebase_read_psl(struct cxl *adapter)
{
	return cxl_p1_read(adapter, CXL_PSL_Timebase);
}

static u64 timebase_read_xsl(struct cxl *adapter)
{
	return cxl_p1_read(adapter, CXL_XSL_Timebase);
}
static void cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev)
{
	u64 psl_tb;
	int delta;
	unsigned int retry = 0;
	struct device_node *np;

	adapter->psl_timebase_synced = false;

	if (!(np = pnv_pci_get_phb_node(dev)))
		return;

	/* Do not fail when CAPP timebase sync is not supported by OPAL */
	of_node_get(np);
	if (! of_get_property(np, "ibm,capp-timebase-sync", NULL)) {
		of_node_put(np);
		dev_info(&dev->dev, "PSL timebase inactive: OPAL support missing\n");
		return;
	}
	of_node_put(np);

	/*
	 * Setup PSL Timebase Control and Status register
	 * with the recommended Timebase Sync Count value
	 */
	adapter->native->sl_ops->write_timebase_ctrl(adapter);

	/* Enable PSL Timebase */
	cxl_p1_write(adapter, CXL_PSL_Control, 0x0000000000000000);
	cxl_p1_write(adapter, CXL_PSL_Control, CXL_PSL_Control_tb);

	/* Wait until CORE TB and PSL TB difference <= 16usecs */
	do {
		msleep(1);
		if (retry++ > 5) {
			dev_info(&dev->dev, "PSL timebase can't synchronize\n");
			return;
		}
		psl_tb = adapter->native->sl_ops->timebase_read(adapter);
		delta = mftb() - psl_tb;
		if (delta < 0)
			delta = -delta;
	} while (tb_to_ns(delta) > 16000);

	adapter->psl_timebase_synced = true;
}
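/*
 * Note on the loop above: mftb() reads the core timebase (typically 512MHz on
 * POWER8), so tb_to_ns(delta) > 16000 simply means "the PSL and core
 * timebases disagree by more than 16us", the same bound quoted in the comment
 * before the loop.
 */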
static int init_implementation_afu_psl_regs(struct cxl_afu *afu)
{
	/* read/write masks for this slice */
	cxl_p1n_write(afu, CXL_PSL_APCALLOC_A, 0xFFFFFFFEFEFEFEFEULL);
	/* APC read/write masks for this slice */
	cxl_p1n_write(afu, CXL_PSL_COALLOC_A, 0xFF000000FEFEFEFEULL);
	/* for debugging with trace arrays */
	cxl_p1n_write(afu, CXL_PSL_SLICE_TRACE, 0x0000FFFF00000000ULL);
	cxl_p1n_write(afu, CXL_PSL_RXCTL_A, CXL_PSL_RXCTL_AFUHP_4S);

	return 0;
}
int cxl_pci_setup_irq(struct cxl *adapter, unsigned int hwirq,
		unsigned int virq)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);

	return pnv_cxl_ioda_msi_setup(dev, hwirq, virq);
}
int cxl_update_image_control(struct cxl *adapter)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
	int rc;
	int vsec;
	u8 image_state;

	if (!(vsec = find_cxl_vsec(dev))) {
		dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n");
		return -ENODEV;
	}

	if ((rc = CXL_READ_VSEC_IMAGE_STATE(dev, vsec, &image_state))) {
		dev_err(&dev->dev, "failed to read image state: %i\n", rc);
		return rc;
	}

	if (adapter->perst_loads_image)
		image_state |= CXL_VSEC_PERST_LOADS_IMAGE;
	else
		image_state &= ~CXL_VSEC_PERST_LOADS_IMAGE;

	if (adapter->perst_select_user)
		image_state |= CXL_VSEC_PERST_SELECT_USER;
	else
		image_state &= ~CXL_VSEC_PERST_SELECT_USER;

	if ((rc = CXL_WRITE_VSEC_IMAGE_STATE(dev, vsec, image_state))) {
		dev_err(&dev->dev, "failed to update image control: %i\n", rc);
		return rc;
	}

	return 0;
}
int cxl_pci_alloc_one_irq(struct cxl *adapter)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);

	return pnv_cxl_alloc_hwirqs(dev, 1);
}

void cxl_pci_release_one_irq(struct cxl *adapter, int hwirq)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);

	return pnv_cxl_release_hwirqs(dev, hwirq, 1);
}

int cxl_pci_alloc_irq_ranges(struct cxl_irq_ranges *irqs,
			struct cxl *adapter, unsigned int num)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);

	return pnv_cxl_alloc_hwirq_ranges(irqs, dev, num);
}

void cxl_pci_release_irq_ranges(struct cxl_irq_ranges *irqs,
			struct cxl *adapter)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);

	pnv_cxl_release_hwirq_ranges(irqs, dev);
}
static int setup_cxl_bars(struct pci_dev *dev)
{
	/* Safety check in case we get backported to < 3.17 without M64 */
	if ((p1_base(dev) < 0x100000000ULL) ||
	    (p2_base(dev) < 0x100000000ULL)) {
		dev_err(&dev->dev, "ABORTING: M32 BAR assignment incompatible with CXL\n");
		return -ENODEV;
	}

	/*
	 * BAR 4/5 has a special meaning for CXL and must be programmed with a
	 * special value corresponding to the CXL protocol address range.
	 * For POWER 8 that means bits 48:49 must be set to 10
	 */
	pci_write_config_dword(dev, PCI_BASE_ADDRESS_4, 0x00000000);
	pci_write_config_dword(dev, PCI_BASE_ADDRESS_5, 0x00020000);

	return 0;
}
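/*
 * To make the magic numbers above concrete: BAR4/BAR5 form one 64-bit BAR,
 * with BAR5 holding the upper 32 bits. Writing 0x00020000 to BAR5 sets
 * address bit 49 and leaves bit 48 clear, which is the "bits 48:49 = 10"
 * encoding the comment refers to.
 */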
#ifdef CONFIG_CXL_BIMODAL

struct cxl_switch_work {
	struct pci_dev *dev;
	struct work_struct work;
	int vsec;
	int mode;
};

static void switch_card_to_cxl(struct work_struct *work)
{
	struct cxl_switch_work *switch_work =
		container_of(work, struct cxl_switch_work, work);
	struct pci_dev *dev = switch_work->dev;
	struct pci_bus *bus = dev->bus;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pci_dev *bridge;
	struct pnv_php_slot *php_slot;
	unsigned int devfn;
	u8 val;
	int rc;
641 dev_info(&bus->dev, "cxl: Preparing for mode switch...\n");
642 bridge = list_first_entry_or_null(&hose->bus->devices, struct pci_dev,
645 dev_WARN(&bus->dev, "cxl: Couldn't find root port!\n");
649 php_slot = pnv_php_find_slot(pci_device_to_OF_node(bridge));
651 dev_err(&bus->dev, "cxl: Failed to find slot hotplug "
652 "information. You may need to upgrade "
653 "skiboot. Aborting.\n");
657 rc = CXL_READ_VSEC_MODE_CONTROL(dev, switch_work->vsec, &val);
659 dev_err(&bus->dev, "cxl: Failed to read CAPI mode control: %i\n", rc);
664 /* Release the reference obtained in cxl_check_and_switch_mode() */
667 dev_dbg(&bus->dev, "cxl: Removing PCI devices from kernel\n");
668 pci_lock_rescan_remove();
669 pci_hp_remove_devices(bridge->subordinate);
670 pci_unlock_rescan_remove();
672 /* Switch the CXL protocol on the card */
673 if (switch_work->mode == CXL_BIMODE_CXL) {
674 dev_info(&bus->dev, "cxl: Switching card to CXL mode\n");
675 val &= ~CXL_VSEC_PROTOCOL_MASK;
676 val |= CXL_VSEC_PROTOCOL_256TB | CXL_VSEC_PROTOCOL_ENABLE;
677 rc = pnv_cxl_enable_phb_kernel_api(hose, true);
679 dev_err(&bus->dev, "cxl: Failed to enable kernel API"
680 " on real PHB, aborting\n");
684 dev_WARN(&bus->dev, "cxl: Switching card to PCI mode not supported!\n");
688 rc = CXL_WRITE_VSEC_MODE_CONTROL_BUS(bus, devfn, switch_work->vsec, val);
690 dev_err(&bus->dev, "cxl: Failed to configure CXL protocol: %i\n", rc);
695 * The CAIA spec (v1.1, Section 10.6 Bi-modal Device Support) states
696 * we must wait 100ms after this mode switch before touching PCIe config
702 * Hot reset to cause the card to come back in cxl mode. A
703 * OPAL_RESET_PCI_LINK would be sufficient, but currently lacks support
704 * in skiboot, so we use a hot reset instead.
706 * We call pci_set_pcie_reset_state() on the bridge, as a CAPI card is
707 * guaranteed to sit directly under the root port, and setting the reset
708 * state on a device directly under the root port is equivalent to doing
709 * it on the root port iself.
711 dev_info(&bus->dev, "cxl: Configuration write complete, resetting card\n");
712 pci_set_pcie_reset_state(bridge, pcie_hot_reset);
713 pci_set_pcie_reset_state(bridge, pcie_deassert_reset);
715 dev_dbg(&bus->dev, "cxl: Offlining slot\n");
716 rc = pnv_php_set_slot_power_state(&php_slot->slot, OPAL_PCI_SLOT_OFFLINE);
718 dev_err(&bus->dev, "cxl: OPAL offlining call failed: %i\n", rc);
722 dev_dbg(&bus->dev, "cxl: Onlining and probing slot\n");
723 rc = pnv_php_set_slot_power_state(&php_slot->slot, OPAL_PCI_SLOT_ONLINE);
725 dev_err(&bus->dev, "cxl: OPAL onlining call failed: %i\n", rc);
729 pci_lock_rescan_remove();
730 pci_hp_add_devices(bridge->subordinate);
731 pci_unlock_rescan_remove();
733 dev_info(&bus->dev, "cxl: CAPI mode switch completed\n");
738 /* Release the reference obtained in cxl_check_and_switch_mode() */
int cxl_check_and_switch_mode(struct pci_dev *dev, int mode, int vsec)
{
	struct cxl_switch_work *work;
	u8 val;
	int rc;

	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return -ENODEV;

	if (!vsec) {
		vsec = find_cxl_vsec(dev);
		if (!vsec) {
			dev_info(&dev->dev, "CXL VSEC not found\n");
			return -ENODEV;
		}
	}

	rc = CXL_READ_VSEC_MODE_CONTROL(dev, vsec, &val);
	if (rc) {
		dev_err(&dev->dev, "Failed to read current mode control: %i", rc);
		return rc;
	}

	if (mode == CXL_BIMODE_PCI) {
		if (!(val & CXL_VSEC_PROTOCOL_ENABLE)) {
			dev_info(&dev->dev, "Card is already in PCI mode\n");
			return 0;
		}
		/*
		 * TODO: Before it's safe to switch the card back to PCI mode
		 * we need to disable the CAPP and make sure any cachelines the
		 * card holds have been flushed out. Needs skiboot support.
		 */
		dev_WARN(&dev->dev, "CXL mode switch to PCI unsupported!\n");
		return -EIO;
	}

	if (val & CXL_VSEC_PROTOCOL_ENABLE) {
		dev_info(&dev->dev, "Card is already in CXL mode\n");
		return 0;
	}

	dev_info(&dev->dev, "Card is in PCI mode, scheduling kernel thread "
			    "to switch to CXL mode\n");

	work = kmalloc(sizeof(struct cxl_switch_work), GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	pci_dev_get(dev);
	work->dev = dev;
	work->vsec = vsec;
	work->mode = mode;
	INIT_WORK(&work->work, switch_card_to_cxl);

	schedule_work(&work->work);

	/*
	 * We return a failure now to abort the driver init. Once the
	 * link has been cycled and the card is in cxl mode we will
	 * come back (possibly using the generic cxl driver), but
	 * return success as the card should then be in cxl mode.
	 *
	 * TODO: What if the card comes back in PCI mode even after
	 *       the switch? Don't want to spin endlessly.
	 */
	return -EBUSY;
}
EXPORT_SYMBOL_GPL(cxl_check_and_switch_mode);
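/*
 * Expected calling pattern (a sketch, not mandated by this file): a driver
 * that finds its card in PCI mode calls
 *	cxl_check_and_switch_mode(dev, CXL_BIMODE_CXL, 0);
 * and treats the -EBUSY return as "switch scheduled, abort this probe". After
 * the link is cycled the card re-probes in CXL mode, where the same call
 * returns 0.
 */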
#endif /* CONFIG_CXL_BIMODAL */
static int setup_cxl_protocol_area(struct pci_dev *dev)
{
	u8 val;
	int rc;
	int vsec = find_cxl_vsec(dev);

	if (!vsec) {
		dev_info(&dev->dev, "CXL VSEC not found\n");
		return -ENODEV;
	}

	rc = CXL_READ_VSEC_MODE_CONTROL(dev, vsec, &val);
	if (rc) {
		dev_err(&dev->dev, "Failed to read current mode control: %i\n", rc);
		return rc;
	}

	if (!(val & CXL_VSEC_PROTOCOL_ENABLE)) {
		dev_err(&dev->dev, "Card not in CAPI mode!\n");
		return -EIO;
	}

	if ((val & CXL_VSEC_PROTOCOL_MASK) != CXL_VSEC_PROTOCOL_256TB) {
		val &= ~CXL_VSEC_PROTOCOL_MASK;
		val |= CXL_VSEC_PROTOCOL_256TB;
		rc = CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val);
		if (rc) {
			dev_err(&dev->dev, "Failed to set CXL protocol area: %i\n", rc);
			return rc;
		}
	}

	return 0;
}
static int pci_map_slice_regs(struct cxl_afu *afu, struct cxl *adapter, struct pci_dev *dev)
{
	u64 p1n_base, p2n_base, afu_desc;
	const u64 p1n_size = 0x100;
	const u64 p2n_size = 0x1000;

	p1n_base = p1_base(dev) + 0x10000 + (afu->slice * p1n_size);
	p2n_base = p2_base(dev) + (afu->slice * p2n_size);
	afu->psn_phys = p2_base(dev) + (adapter->native->ps_off + (afu->slice * adapter->ps_size));
	afu_desc = p2_base(dev) + adapter->native->afu_desc_off + (afu->slice * adapter->native->afu_desc_size);

	if (!(afu->native->p1n_mmio = ioremap(p1n_base, p1n_size)))
		goto err;
	if (!(afu->p2n_mmio = ioremap(p2n_base, p2n_size)))
		goto err1;
	if (afu_desc) {
		if (!(afu->native->afu_desc_mmio = ioremap(afu_desc, adapter->native->afu_desc_size)))
			goto err2;
	}

	return 0;
err2:
	iounmap(afu->p2n_mmio);
err1:
	iounmap(afu->native->p1n_mmio);
err:
	dev_err(&afu->dev, "Error mapping AFU MMIO regions\n");
	return -ENOMEM;
}
static void pci_unmap_slice_regs(struct cxl_afu *afu)
{
	if (afu->p2n_mmio) {
		iounmap(afu->p2n_mmio);
		afu->p2n_mmio = NULL;
	}
	if (afu->native->p1n_mmio) {
		iounmap(afu->native->p1n_mmio);
		afu->native->p1n_mmio = NULL;
	}
	if (afu->native->afu_desc_mmio) {
		iounmap(afu->native->afu_desc_mmio);
		afu->native->afu_desc_mmio = NULL;
	}
}
void cxl_pci_release_afu(struct device *dev)
{
	struct cxl_afu *afu = to_cxl_afu(dev);

	pr_devel("%s\n", __func__);

	idr_destroy(&afu->contexts_idr);
	cxl_release_spa(afu);

	kfree(afu->native);
	kfree(afu);
}
/* Expects AFU struct to have recently been zeroed out */
static int cxl_read_afu_descriptor(struct cxl_afu *afu)
{
	u64 val;

	val = AFUD_READ_INFO(afu);
	afu->pp_irqs = AFUD_NUM_INTS_PER_PROC(val);
	afu->max_procs_virtualised = AFUD_NUM_PROCS(val);
	afu->crs_num = AFUD_NUM_CRS(val);

	if (AFUD_AFU_DIRECTED(val))
		afu->modes_supported |= CXL_MODE_DIRECTED;
	if (AFUD_DEDICATED_PROCESS(val))
		afu->modes_supported |= CXL_MODE_DEDICATED;
	if (AFUD_TIME_SLICED(val))
		afu->modes_supported |= CXL_MODE_TIME_SLICED;

	val = AFUD_READ_PPPSA(afu);
	afu->pp_size = AFUD_PPPSA_LEN(val) * 4096;
	afu->psa = AFUD_PPPSA_PSA(val);
	if ((afu->pp_psa = AFUD_PPPSA_PP(val)))
		afu->native->pp_offset = AFUD_READ_PPPSA_OFF(afu);

	val = AFUD_READ_CR(afu);
	afu->crs_len = AFUD_CR_LEN(val) * 256;
	afu->crs_offset = AFUD_READ_CR_OFF(afu);

	/* eb_len is in multiple of 4K */
	afu->eb_len = AFUD_EB_LEN(AFUD_READ_EB(afu)) * 4096;
	afu->eb_offset = AFUD_READ_EB_OFF(afu);

	/* eb_off is 4K aligned so lower 12 bits are always zero */
	if (EXTRACT_PPC_BITS(afu->eb_offset, 0, 11) != 0) {
		dev_warn(&afu->dev,
			 "Invalid AFU error buffer offset %Lx\n",
			 afu->eb_offset);
		dev_info(&afu->dev,
			 "Ignoring AFU error buffer in the descriptor\n");
		/* indicate that no afu buffer exists */
		afu->eb_len = 0;
	}

	return 0;
}
static int cxl_afu_descriptor_looks_ok(struct cxl_afu *afu)
{
	int i, rc;
	u32 val;

	if (afu->psa && afu->adapter->ps_size <
			(afu->native->pp_offset + afu->pp_size*afu->max_procs_virtualised)) {
		dev_err(&afu->dev, "per-process PSA can't fit inside the PSA!\n");
		return -ENODEV;
	}

	if (afu->pp_psa && (afu->pp_size < PAGE_SIZE))
		dev_warn(&afu->dev, "AFU uses < PAGE_SIZE per-process PSA!");

	for (i = 0; i < afu->crs_num; i++) {
		rc = cxl_ops->afu_cr_read32(afu, i, 0, &val);
		if (rc || val == 0) {
			dev_err(&afu->dev, "ABORTING: AFU configuration record %i is invalid\n", i);
			return -EINVAL;
		}
	}

	if ((afu->modes_supported & ~CXL_MODE_DEDICATED) && afu->max_procs_virtualised == 0) {
		/*
		 * We could also check this for the dedicated process model
		 * since the architecture indicates it should be set to 1, but
		 * in that case we ignore the value and I'd rather not risk
		 * breaking any existing dedicated process AFUs that left it as
		 * 0 (not that I'm aware of any). It is clearly an error for an
		 * AFU directed AFU to set this to 0, and would have previously
		 * triggered a bug resulting in the maximum not being enforced
		 * at all since idr_alloc treats 0 as no maximum.
		 */
		dev_err(&afu->dev, "AFU does not support any processes\n");
		return -EINVAL;
	}

	return 0;
}
static int sanitise_afu_regs(struct cxl_afu *afu)
{
	u64 reg;

	/*
	 * Clear out any regs that contain either an IVTE or address or may be
	 * waiting on an acknowledgement to try to be a bit safer as we bring
	 * it online
	 */
	reg = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	if ((reg & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
		dev_warn(&afu->dev, "WARNING: AFU was not disabled: %#016llx\n", reg);
		if (cxl_ops->afu_reset(afu))
			return -EIO;
		if (cxl_afu_disable(afu))
			return -EIO;
		if (cxl_psl_purge(afu))
			return -EIO;
	}

	cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_PSL_AMBAR_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_PSL_SPOffset_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_HAURP_An, 0x0000000000000000);
	cxl_p2n_write(afu, CXL_CSRP_An, 0x0000000000000000);
	cxl_p2n_write(afu, CXL_AURP1_An, 0x0000000000000000);
	cxl_p2n_write(afu, CXL_AURP0_An, 0x0000000000000000);
	cxl_p2n_write(afu, CXL_SSTP1_An, 0x0000000000000000);
	cxl_p2n_write(afu, CXL_SSTP0_An, 0x0000000000000000);
	reg = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
	if (reg) {
		dev_warn(&afu->dev, "AFU had pending DSISR: %#016llx\n", reg);
		if (reg & CXL_PSL_DSISR_TRANS)
			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
		else
			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
	}
	if (afu->adapter->native->sl_ops->register_serr_irq) {
		reg = cxl_p1n_read(afu, CXL_PSL_SERR_An);
		if (reg) {
			dev_warn(&afu->dev, "AFU had pending SERR: %#016llx\n", reg);
			cxl_p1n_write(afu, CXL_PSL_SERR_An, reg & ~0xffff);
		}
	}
	reg = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
	if (reg) {
		dev_warn(&afu->dev, "AFU had pending error status: %#016llx\n", reg);
		cxl_p2n_write(afu, CXL_PSL_ErrStat_An, reg);
	}

	return 0;
}
#define ERR_BUFF_MAX_COPY_SIZE PAGE_SIZE

/*
 * Called from sysfs and reads the afu error info buffer. The h/w only supports
 * 4/8 byte aligned access. So in case the requested offset/count aren't 8 byte
 * aligned the function uses a bounce buffer which can be max PAGE_SIZE.
 */
ssize_t cxl_pci_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
				loff_t off, size_t count)
{
	loff_t aligned_start, aligned_end;
	size_t aligned_length;
	void *tbuf;
	const void __iomem *ebuf = afu->native->afu_desc_mmio + afu->eb_offset;

	if (count == 0 || off < 0 || (size_t)off >= afu->eb_len)
		return 0;

	/* calculate aligned read window */
	count = min((size_t)(afu->eb_len - off), count);
	aligned_start = round_down(off, 8);
	aligned_end = round_up(off + count, 8);
	aligned_length = aligned_end - aligned_start;

	/* max we can copy in one read is PAGE_SIZE */
	if (aligned_length > ERR_BUFF_MAX_COPY_SIZE) {
		aligned_length = ERR_BUFF_MAX_COPY_SIZE;
		count = ERR_BUFF_MAX_COPY_SIZE - (off & 0x7);
	}

	/* use bounce buffer for copy */
	tbuf = (void *)__get_free_page(GFP_TEMPORARY);
	if (!tbuf)
		return -ENOMEM;

	/* perform aligned read from the mmio region */
	memcpy_fromio(tbuf, ebuf + aligned_start, aligned_length);
	memcpy(buf, tbuf + (off & 0x7), count);

	free_page((unsigned long)tbuf);

	return count;
}
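/*
 * Worked example of the alignment logic above: a request with off = 5 and
 * count = 7 gives aligned_start = 0, aligned_end = 16, aligned_length = 16;
 * sixteen bytes are read into the bounce buffer and the caller's seven bytes
 * are copied out starting at tbuf + 5 (off & 0x7).
 */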
static int pci_configure_afu(struct cxl_afu *afu, struct cxl *adapter, struct pci_dev *dev)
{
	int rc;

	if ((rc = pci_map_slice_regs(afu, adapter, dev)))
		return rc;

	if ((rc = sanitise_afu_regs(afu)))
		goto err1;

	/* We need to reset the AFU before we can read the AFU descriptor */
	if ((rc = cxl_ops->afu_reset(afu)))
		goto err1;

	if (cxl_verbose)
		dump_afu_descriptor(afu);

	if ((rc = cxl_read_afu_descriptor(afu)))
		goto err1;

	if ((rc = cxl_afu_descriptor_looks_ok(afu)))
		goto err1;

	if (adapter->native->sl_ops->afu_regs_init)
		if ((rc = adapter->native->sl_ops->afu_regs_init(afu)))
			goto err1;

	if (adapter->native->sl_ops->register_serr_irq)
		if ((rc = adapter->native->sl_ops->register_serr_irq(afu)))
			goto err1;

	if ((rc = cxl_native_register_psl_irq(afu)))
		goto err2;

	return 0;

err2:
	if (adapter->native->sl_ops->release_serr_irq)
		adapter->native->sl_ops->release_serr_irq(afu);
err1:
	pci_unmap_slice_regs(afu);
	return rc;
}
static void pci_deconfigure_afu(struct cxl_afu *afu)
{
	cxl_native_release_psl_irq(afu);
	if (afu->adapter->native->sl_ops->release_serr_irq)
		afu->adapter->native->sl_ops->release_serr_irq(afu);
	pci_unmap_slice_regs(afu);
}
static int pci_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev)
{
	struct cxl_afu *afu;
	int rc = -ENOMEM;

	afu = cxl_alloc_afu(adapter, slice);
	if (!afu)
		return -ENOMEM;

	afu->native = kzalloc(sizeof(struct cxl_afu_native), GFP_KERNEL);
	if (!afu->native)
		goto err_free_afu;

	mutex_init(&afu->native->spa_mutex);

	rc = dev_set_name(&afu->dev, "afu%i.%i", adapter->adapter_num, slice);
	if (rc)
		goto err_free_native;

	rc = pci_configure_afu(afu, adapter, dev);
	if (rc)
		goto err_free_native;

	/* Don't care if this fails */
	cxl_debugfs_afu_add(afu);

	/*
	 * After we call this function we must not free the afu directly, even
	 * if it returns an error!
	 */
	if ((rc = cxl_register_afu(afu)))
		goto err_put1;

	if ((rc = cxl_sysfs_afu_add(afu)))
		goto err_put1;

	adapter->afu[afu->slice] = afu;

	if ((rc = cxl_pci_vphb_add(afu)))
		dev_info(&afu->dev, "Can't register vPHB\n");

	return 0;

err_put1:
	pci_deconfigure_afu(afu);
	cxl_debugfs_afu_remove(afu);
	device_unregister(&afu->dev);
	return rc;

err_free_native:
	kfree(afu->native);
err_free_afu:
	kfree(afu);
	return rc;
}
static void cxl_pci_remove_afu(struct cxl_afu *afu)
{
	pr_devel("%s\n", __func__);

	if (!afu)
		return;

	cxl_pci_vphb_remove(afu);
	cxl_sysfs_afu_remove(afu);
	cxl_debugfs_afu_remove(afu);

	spin_lock(&afu->adapter->afu_list_lock);
	afu->adapter->afu[afu->slice] = NULL;
	spin_unlock(&afu->adapter->afu_list_lock);

	cxl_context_detach_all(afu);
	cxl_ops->afu_deactivate_mode(afu, afu->current_mode);

	pci_deconfigure_afu(afu);
	device_unregister(&afu->dev);
}
int cxl_pci_reset(struct cxl *adapter)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
	int rc;

	if (adapter->perst_same_image) {
		dev_warn(&dev->dev,
			 "cxl: refusing to reset/reflash when perst_reloads_same_image is set.\n");
		return -EINVAL;
	}

	dev_info(&dev->dev, "CXL reset\n");

	/* pcie_warm_reset requests a fundamental pci reset which includes a
	 * PERST assert/deassert. PERST triggers a loading of the image
	 * if "user" or "factory" is selected in sysfs */
	if ((rc = pci_set_pcie_reset_state(dev, pcie_warm_reset))) {
		dev_err(&dev->dev, "cxl: pcie_warm_reset failed\n");
		return rc;
	}

	return rc;
}
static int cxl_map_adapter_regs(struct cxl *adapter, struct pci_dev *dev)
{
	/* priv 1 is BAR2 and priv 2 is BAR0, per the wrappers above; the
	 * original code had the two resource names swapped */
	if (pci_request_region(dev, 2, "priv 1 regs"))
		goto err1;
	if (pci_request_region(dev, 0, "priv 2 regs"))
		goto err2;

	pr_devel("cxl_map_adapter_regs: p1: %#016llx %#llx, p2: %#016llx %#llx",
			p1_base(dev), p1_size(dev), p2_base(dev), p2_size(dev));

	if (!(adapter->native->p1_mmio = ioremap(p1_base(dev), p1_size(dev))))
		goto err3;

	if (!(adapter->native->p2_mmio = ioremap(p2_base(dev), p2_size(dev))))
		goto err4;

	return 0;

err4:
	iounmap(adapter->native->p1_mmio);
	adapter->native->p1_mmio = NULL;
err3:
	pci_release_region(dev, 0);
err2:
	pci_release_region(dev, 2);
err1:
	return -ENOMEM;
}
static void cxl_unmap_adapter_regs(struct cxl *adapter)
{
	if (adapter->native->p1_mmio) {
		iounmap(adapter->native->p1_mmio);
		adapter->native->p1_mmio = NULL;
		pci_release_region(to_pci_dev(adapter->dev.parent), 2);
	}
	if (adapter->native->p2_mmio) {
		iounmap(adapter->native->p2_mmio);
		adapter->native->p2_mmio = NULL;
		pci_release_region(to_pci_dev(adapter->dev.parent), 0);
	}
}
static int cxl_read_vsec(struct cxl *adapter, struct pci_dev *dev)
{
	int vsec;
	u32 afu_desc_off, afu_desc_size;
	u32 ps_off, ps_size;
	u16 vseclen;
	u8 image_state;

	if (!(vsec = find_cxl_vsec(dev))) {
		dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n");
		return -ENODEV;
	}

	CXL_READ_VSEC_LENGTH(dev, vsec, &vseclen);
	if (vseclen < CXL_VSEC_MIN_SIZE) {
		dev_err(&dev->dev, "ABORTING: CXL VSEC too short\n");
		return -EINVAL;
	}

	CXL_READ_VSEC_STATUS(dev, vsec, &adapter->vsec_status);
	CXL_READ_VSEC_PSL_REVISION(dev, vsec, &adapter->psl_rev);
	CXL_READ_VSEC_CAIA_MAJOR(dev, vsec, &adapter->caia_major);
	CXL_READ_VSEC_CAIA_MINOR(dev, vsec, &adapter->caia_minor);
	CXL_READ_VSEC_BASE_IMAGE(dev, vsec, &adapter->base_image);
	CXL_READ_VSEC_IMAGE_STATE(dev, vsec, &image_state);
	adapter->user_image_loaded = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED);
	adapter->perst_select_user = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED);

	CXL_READ_VSEC_NAFUS(dev, vsec, &adapter->slices);
	CXL_READ_VSEC_AFU_DESC_OFF(dev, vsec, &afu_desc_off);
	CXL_READ_VSEC_AFU_DESC_SIZE(dev, vsec, &afu_desc_size);
	CXL_READ_VSEC_PS_OFF(dev, vsec, &ps_off);
	CXL_READ_VSEC_PS_SIZE(dev, vsec, &ps_size);

	/* Convert everything to bytes, because there is NO WAY I'd look at the
	 * code a month later and forget what units these are in ;-) */
	adapter->native->ps_off = ps_off * 64 * 1024;
	adapter->ps_size = ps_size * 64 * 1024;
	adapter->native->afu_desc_off = afu_desc_off * 64 * 1024;
	adapter->native->afu_desc_size = afu_desc_size * 64 * 1024;

	/* Total IRQs - 1 PSL ERROR - #AFU*(1 slice error + 1 DSI) */
	adapter->user_irqs = pnv_cxl_get_irq_count(dev) - 1 - 2*adapter->slices;

	return 0;
}
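/*
 * Example of the IRQ budget above (the 2048 is illustrative): a card whose
 * PHB provides 2048 MSIs and which carries 2 AFUs ends up with
 * 2048 - 1 - 2*2 = 2043 interrupts available for userspace contexts.
 */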
/*
 * Workaround a PCIe Host Bridge defect on some cards, that can cause
 * malformed Transaction Layer Packet (TLP) errors to be erroneously
 * reported. Mask this error in the Uncorrectable Error Mask Register.
 *
 * The upper nibble of the PSL revision is used to distinguish between
 * different cards. The affected ones have it set to 0.
 */
static void cxl_fixup_malformed_tlp(struct cxl *adapter, struct pci_dev *dev)
{
	int aer;
	u32 data;

	if (adapter->psl_rev & 0xf000)
		return;
	if (!(aer = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR)))
		return;
	pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, &data);
	/* Nothing to do if both errors are already masked */
	if ((data & PCI_ERR_UNC_MALF_TLP) && (data & PCI_ERR_UNC_INTN))
		return;
	data |= PCI_ERR_UNC_MALF_TLP;
	data |= PCI_ERR_UNC_INTN;
	pci_write_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, data);
}
static int cxl_vsec_looks_ok(struct cxl *adapter, struct pci_dev *dev)
{
	if (adapter->vsec_status & CXL_STATUS_SECOND_PORT)
		return -EBUSY;

	if (adapter->vsec_status & CXL_UNSUPPORTED_FEATURES) {
		dev_err(&dev->dev, "ABORTING: CXL requires unsupported features\n");
		return -EINVAL;
	}

	if (!adapter->slices) {
		/* Once we support dynamic reprogramming we can use the card if
		 * it supports loadable AFUs */
		dev_err(&dev->dev, "ABORTING: Device has no AFUs\n");
		return -EINVAL;
	}

	if (!adapter->native->afu_desc_off || !adapter->native->afu_desc_size) {
		dev_err(&dev->dev, "ABORTING: VSEC shows no AFU descriptors\n");
		return -EINVAL;
	}

	if (adapter->ps_size > p2_size(dev) - adapter->native->ps_off) {
		dev_err(&dev->dev, "ABORTING: Problem state size larger than "
			"available in BAR2: 0x%llx > 0x%llx\n",
			 adapter->ps_size, p2_size(dev) - adapter->native->ps_off);
		return -EINVAL;
	}

	return 0;
}
ssize_t cxl_pci_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len)
{
	return pci_read_vpd(to_pci_dev(adapter->dev.parent), 0, len, buf);
}
static void cxl_release_adapter(struct device *dev)
{
	struct cxl *adapter = to_cxl_adapter(dev);

	pr_devel("cxl_release_adapter\n");

	cxl_remove_adapter_nr(adapter);

	kfree(adapter->native);
	kfree(adapter);
}
#define CXL_PSL_ErrIVTE_tberror (0x1ull << (63-31))

static int sanitise_adapter_regs(struct cxl *adapter)
{
	/* Clear PSL tberror bit by writing 1 to it */
	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, CXL_PSL_ErrIVTE_tberror);
	return cxl_tlb_slb_invalidate(adapter);
}
/* This should contain *only* operations that can safely be done in
 * both creation and recovery.
 */
static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev)
{
	int rc;

	adapter->dev.parent = &dev->dev;
	adapter->dev.release = cxl_release_adapter;
	pci_set_drvdata(dev, adapter);

	rc = pci_enable_device(dev);
	if (rc) {
		dev_err(&dev->dev, "pci_enable_device failed: %i\n", rc);
		return rc;
	}

	if ((rc = cxl_read_vsec(adapter, dev)))
		return rc;

	if ((rc = cxl_vsec_looks_ok(adapter, dev)))
		return rc;

	cxl_fixup_malformed_tlp(adapter, dev);

	if ((rc = setup_cxl_bars(dev)))
		return rc;

	if ((rc = setup_cxl_protocol_area(dev)))
		return rc;

	if ((rc = cxl_update_image_control(adapter)))
		return rc;

	if ((rc = cxl_map_adapter_regs(adapter, dev)))
		return rc;

	if ((rc = sanitise_adapter_regs(adapter)))
		goto err;

	if ((rc = adapter->native->sl_ops->adapter_regs_init(adapter, dev)))
		goto err;

	/* Required for devices using CAPP DMA mode, harmless for others */
	pci_set_master(dev);

	if ((rc = pnv_phb_to_cxl_mode(dev, adapter->native->sl_ops->capi_mode)))
		goto err;

	/* If recovery happened, the last step is to turn on snooping.
	 * In the non-recovery case this has no effect */
	if ((rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_SNOOP_ON)))
		goto err;

	/* Ignore error, adapter init is not dependent on timebase sync */
	cxl_setup_psl_timebase(adapter, dev);

	if ((rc = cxl_native_register_psl_err_irq(adapter)))
		goto err;

	return 0;

err:
	cxl_unmap_adapter_regs(adapter);
	return rc;
}
static void cxl_deconfigure_adapter(struct cxl *adapter)
{
	struct pci_dev *pdev = to_pci_dev(adapter->dev.parent);

	cxl_native_release_psl_err_irq(adapter);
	cxl_unmap_adapter_regs(adapter);

	pci_disable_device(pdev);
}
static const struct cxl_service_layer_ops psl_ops = {
	.adapter_regs_init = init_implementation_adapter_psl_regs,
	.afu_regs_init = init_implementation_afu_psl_regs,
	.register_serr_irq = cxl_native_register_serr_irq,
	.release_serr_irq = cxl_native_release_serr_irq,
	.debugfs_add_adapter_sl_regs = cxl_debugfs_add_adapter_psl_regs,
	.debugfs_add_afu_sl_regs = cxl_debugfs_add_afu_psl_regs,
	.psl_irq_dump_registers = cxl_native_psl_irq_dump_regs,
	.err_irq_dump_registers = cxl_native_err_irq_dump_regs,
	.debugfs_stop_trace = cxl_stop_trace,
	.write_timebase_ctrl = write_timebase_ctrl_psl,
	.timebase_read = timebase_read_psl,
	.capi_mode = OPAL_PHB_CAPI_MODE_CAPI,
	.needs_reset_before_disable = true,
};
static const struct cxl_service_layer_ops xsl_ops = {
	.adapter_regs_init = init_implementation_adapter_xsl_regs,
	.debugfs_add_adapter_sl_regs = cxl_debugfs_add_adapter_xsl_regs,
	.write_timebase_ctrl = write_timebase_ctrl_xsl,
	.timebase_read = timebase_read_xsl,
	.capi_mode = OPAL_PHB_CAPI_MODE_DMA,
};
static void set_sl_ops(struct cxl *adapter, struct pci_dev *dev)
{
	if (dev->vendor == PCI_VENDOR_ID_MELLANOX && dev->device == 0x1013) {
		/* Mellanox CX-4 */
		dev_info(&adapter->dev, "Device uses an XSL\n");
		adapter->native->sl_ops = &xsl_ops;
		adapter->min_pe = 1; /* Workaround for CX-4 hardware bug */
	} else {
		dev_info(&adapter->dev, "Device uses a PSL\n");
		adapter->native->sl_ops = &psl_ops;
	}
}
static struct cxl *cxl_pci_init_adapter(struct pci_dev *dev)
{
	struct cxl *adapter;
	int rc;

	adapter = cxl_alloc_adapter();
	if (!adapter)
		return ERR_PTR(-ENOMEM);

	adapter->native = kzalloc(sizeof(struct cxl_native), GFP_KERNEL);
	if (!adapter->native) {
		rc = -ENOMEM;
		goto err_release;
	}

	set_sl_ops(adapter, dev);

	/* Set defaults for parameters which need to persist over
	 * configure/reconfigure
	 */
	adapter->perst_loads_image = true;
	adapter->perst_same_image = false;

	rc = cxl_configure_adapter(adapter, dev);
	if (rc) {
		pci_disable_device(dev);
		cxl_release_adapter(&adapter->dev);
		return ERR_PTR(rc);
	}

	/* Don't care if this one fails: */
	cxl_debugfs_adapter_add(adapter);

	/*
	 * After we call this function we must not free the adapter directly,
	 * even if it returns an error!
	 */
	if ((rc = cxl_register_adapter(adapter)))
		goto err_put1;

	if ((rc = cxl_sysfs_adapter_add(adapter)))
		goto err_put1;

	return adapter;

err_put1:
	/* This should mirror cxl_remove_adapter, except without the
	 * sysfs parts
	 */
	cxl_debugfs_adapter_remove(adapter);
	cxl_deconfigure_adapter(adapter);
	device_unregister(&adapter->dev);
	return ERR_PTR(rc);

err_release:
	cxl_release_adapter(&adapter->dev);
	return ERR_PTR(rc);
}
static void cxl_pci_remove_adapter(struct cxl *adapter)
{
	pr_devel("cxl_remove_adapter\n");

	cxl_sysfs_adapter_remove(adapter);
	cxl_debugfs_adapter_remove(adapter);

	cxl_deconfigure_adapter(adapter);

	device_unregister(&adapter->dev);
}
#define CXL_MAX_PCIEX_PARENT 2

static int cxl_slot_is_switched(struct pci_dev *dev)
{
	struct device_node *np;
	int depth = 0;
	const __be32 *prop;

	if (!(np = pci_device_to_OF_node(dev))) {
		pr_err("cxl: np = NULL\n");
		return -ENODEV;
	}
	of_node_get(np);
	while (np) {
		np = of_get_next_parent(np);
		prop = of_get_property(np, "device_type", NULL);
		if (!prop || strcmp((char *)prop, "pciex"))
			break;
		depth++;
	}
	of_node_put(np);
	return (depth > CXL_MAX_PCIEX_PARENT);
}
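/*
 * In other words: walk up the device tree counting parents whose device_type
 * is "pciex". A card plugged straight into a PHB slot sees at most
 * CXL_MAX_PCIEX_PARENT such nodes; anything deeper implies an intervening
 * switch, which CXL cannot tolerate.
 */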
bool cxl_slot_is_supported(struct pci_dev *dev, int flags)
{
	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return false;

	if ((flags & CXL_SLOT_FLAG_DMA) && (!pvr_version_is(PVR_POWER8NVL))) {
		/*
		 * CAPP DMA mode is technically supported on regular P8, but
		 * will EEH if the card attempts to access memory < 4GB, which
		 * we cannot realistically avoid. We might be able to work
		 * around the issue, but until then return unsupported:
		 */
		return false;
	}

	if (cxl_slot_is_switched(dev))
		return false;

	/*
	 * XXX: This gets a little tricky on regular P8 (not POWER8NVL) since
	 * the CAPP can be connected to PHB 0, 1 or 2 on a first come first
	 * served basis, which is racy to check from here. If we need to
	 * support this in future we might need to consider having this
	 * function effectively reserve it ahead of time.
	 *
	 * Currently, the only user of this API is the Mellanox CX4, which is
	 * only supported on P8NVL due to the above mentioned limitation of
	 * CAPP DMA mode and therefore does not need to worry about this. If the
	 * issue with CAPP DMA mode is later worked around on P8 we might need
	 * to revisit this.
	 */
	return true;
}
EXPORT_SYMBOL_GPL(cxl_slot_is_supported);
static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct cxl *adapter;
	int slice;
	int rc;

	if (cxl_pci_is_vphb_device(dev)) {
		dev_dbg(&dev->dev, "cxl_init_adapter: Ignoring cxl vphb device\n");
		return -ENODEV;
	}

	if (cxl_slot_is_switched(dev)) {
		dev_info(&dev->dev, "Ignoring card on incompatible PCI slot\n");
		return -ENODEV;
	}

	if (cxl_verbose)
		dump_cxl_config_space(dev);

	adapter = cxl_pci_init_adapter(dev);
	if (IS_ERR(adapter)) {
		dev_err(&dev->dev, "cxl_init_adapter failed: %li\n", PTR_ERR(adapter));
		return PTR_ERR(adapter);
	}

	for (slice = 0; slice < adapter->slices; slice++) {
		if ((rc = pci_init_afu(adapter, slice, dev))) {
			dev_err(&dev->dev, "AFU %i failed to initialise: %i\n", slice, rc);
			continue;
		}

		rc = cxl_afu_select_best_mode(adapter->afu[slice]);
		if (rc)
			dev_err(&dev->dev, "AFU %i failed to start: %i\n", slice, rc);
	}

	if (pnv_pci_on_cxl_phb(dev) && adapter->slices >= 1)
		pnv_cxl_phb_set_peer_afu(dev, adapter->afu[0]);

	return 0;
}
static void cxl_remove(struct pci_dev *dev)
{
	struct cxl *adapter = pci_get_drvdata(dev);
	struct cxl_afu *afu;
	int i;

	/*
	 * Lock to prevent someone grabbing a ref through the adapter list as
	 * we are removing it
	 */
	for (i = 0; i < adapter->slices; i++) {
		afu = adapter->afu[i];
		cxl_pci_remove_afu(afu);
	}
	cxl_pci_remove_adapter(adapter);
}
static pci_ers_result_t cxl_vphb_error_detected(struct cxl_afu *afu,
						pci_channel_state_t state)
{
	struct pci_dev *afu_dev;
	pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET;
	pci_ers_result_t afu_result = PCI_ERS_RESULT_NEED_RESET;

	/* There should only be one entry, but go through the list
	 * anyway
	 */
	list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
		if (!afu_dev->driver)
			continue;

		afu_dev->error_state = state;

		if (afu_dev->driver->err_handler)
			afu_result = afu_dev->driver->err_handler->error_detected(afu_dev,
										  state);
		/* Disconnect trumps all, NONE trumps NEED_RESET */
		if (afu_result == PCI_ERS_RESULT_DISCONNECT)
			result = PCI_ERS_RESULT_DISCONNECT;
		else if ((afu_result == PCI_ERS_RESULT_NONE) &&
			 (result == PCI_ERS_RESULT_NEED_RESET))
			result = PCI_ERS_RESULT_NONE;
	}
	return result;
}
static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct cxl *adapter = pci_get_drvdata(pdev);
	struct cxl_afu *afu;
	pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET;
	int i;

	/* At this point, we could still have an interrupt pending.
	 * Let's try to get them out of the way before they do
	 * anything we don't like.
	 */
	schedule();

	/* If we're permanently dead, give up. */
	if (state == pci_channel_io_perm_failure) {
		/* Tell the AFU drivers; but we don't care what they
		 * say, we're going away.
		 */
		for (i = 0; i < adapter->slices; i++) {
			afu = adapter->afu[i];
			/* Only participate in EEH if we are on a virtual PHB */
			if (afu->phb == NULL)
				return PCI_ERS_RESULT_NONE;
			cxl_vphb_error_detected(afu, state);
		}
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Are we reflashing?
	 *
	 * If we reflash, we could come back as something entirely
	 * different, including a non-CAPI card. As such, by default
	 * we don't participate in the process. We'll be unbound and
	 * the slot re-probed. (TODO: check EEH doesn't blindly rebind
	 * us!)
	 *
	 * However, this isn't the entire story: for reliability
	 * reasons, we usually want to reflash the FPGA on PERST in
	 * order to get back to a more reliable known-good state.
	 *
	 * This causes us a bit of a problem: if we reflash we can't
	 * trust that we'll come back the same - we could have a new
	 * image and been PERSTed in order to load that
	 * image. However, most of the time we actually *will* come
	 * back the same - for example a regular EEH event.
	 *
	 * Therefore, we allow the user to assert that the image is
	 * indeed the same and that we should continue on into EEH
	 * anyway.
	 */
	if (adapter->perst_loads_image && !adapter->perst_same_image) {
		/* TODO take the PHB out of CXL mode */
		dev_info(&pdev->dev, "reflashing, so opting out of EEH!\n");
		return PCI_ERS_RESULT_NONE;
	}

	/*
	 * At this point, we want to try to recover.  We'll always
	 * need a complete slot reset: we don't trust any other reset.
	 *
	 * Now, we go through each AFU:
	 *  - We send the driver, if bound, an error_detected callback.
	 *    We expect it to clean up, but it can also tell us to give
	 *    up and permanently detach the card. To simplify things, if
	 *    any bound AFU driver doesn't support EEH, we give up on EEH.
	 *
	 *  - We detach all contexts associated with the AFU. This
	 *    does not free them, but puts them into a CLOSED state
	 *    which causes the associated files to return useful
	 *    errors to userland. It also unmaps, but does not free,
	 *    any IRQs.
	 *
	 *  - We clean up our side: releasing and unmapping resources we hold
	 *    so we can wire them up again when the hardware comes back up.
	 *
	 * Driver authors should note:
	 *
	 *  - Any contexts you create in your kernel driver (except
	 *    those associated with anonymous file descriptors) are
	 *    your responsibility to free and recreate. Likewise with
	 *    any attached resources.
	 *
	 *  - We will take responsibility for re-initialising the
	 *    device context (the one set up for you in
	 *    cxl_pci_enable_device_hook and accessed through
	 *    cxl_get_context). If you've attached IRQs or other
	 *    resources to it, they remain yours to free.
	 *
	 * You can call the same functions to release resources as you
	 * normally would: we make sure that these functions continue
	 * to work when the hardware is down.
	 *
	 * Two examples:
	 *
	 * 1) If you normally free all your resources at the end of
	 *    each request, or if you use anonymous FDs, your
	 *    error_detected callback can simply set a flag to tell
	 *    your driver not to start any new calls. You can then
	 *    clear the flag in the resume callback.
	 *
	 * 2) If you normally allocate your resources on startup:
	 *    * Set a flag in error_detected as above.
	 *    * Let CXL detach your contexts.
	 *    * In slot_reset, free the old resources and allocate new ones.
	 *    * In resume, clear the flag to allow things to start.
	 */
	for (i = 0; i < adapter->slices; i++) {
		afu = adapter->afu[i];

		result = cxl_vphb_error_detected(afu, state);

		/* Only continue if everyone agrees on NEED_RESET */
		if (result != PCI_ERS_RESULT_NEED_RESET)
			return result;

		cxl_context_detach_all(afu);
		cxl_ops->afu_deactivate_mode(afu, afu->current_mode);
		pci_deconfigure_afu(afu);
	}
	cxl_deconfigure_adapter(adapter);

	return result;
}
static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev)
{
	struct cxl *adapter = pci_get_drvdata(pdev);
	struct cxl_afu *afu;
	struct cxl_context *ctx;
	struct pci_dev *afu_dev;
	pci_ers_result_t afu_result = PCI_ERS_RESULT_RECOVERED;
	pci_ers_result_t result = PCI_ERS_RESULT_RECOVERED;
	int i;

	if (cxl_configure_adapter(adapter, pdev))
		goto err;

	for (i = 0; i < adapter->slices; i++) {
		afu = adapter->afu[i];

		if (pci_configure_afu(afu, adapter, pdev))
			goto err;

		if (cxl_afu_select_best_mode(afu))
			goto err;

		list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
			/* Reset the device context.
			 * TODO: make this less disruptive
			 */
			ctx = cxl_get_context(afu_dev);

			if (ctx && cxl_release_context(ctx))
				goto err;

			ctx = cxl_dev_context_init(afu_dev);
			if (IS_ERR(ctx))
				goto err;

			afu_dev->dev.archdata.cxl_ctx = ctx;

			if (cxl_ops->afu_check_and_enable(afu))
				goto err;

			afu_dev->error_state = pci_channel_io_normal;

			/* If there's a driver attached, allow it to
			 * chime in on recovery. Drivers should check
			 * if everything has come back OK, but
			 * shouldn't start new work until we call
			 * their resume function.
			 */
			if (!afu_dev->driver)
				continue;

			if (afu_dev->driver->err_handler &&
			    afu_dev->driver->err_handler->slot_reset)
				afu_result = afu_dev->driver->err_handler->slot_reset(afu_dev);

			if (afu_result == PCI_ERS_RESULT_DISCONNECT)
				result = PCI_ERS_RESULT_DISCONNECT;
		}
	}
	return result;

err:
	/* All the bits that happen in both error_detected and cxl_remove
	 * should be idempotent, so we don't need to worry about leaving a mix
	 * of unconfigured and reconfigured resources.
	 */
	dev_err(&pdev->dev, "EEH recovery failed. Asking to be disconnected.\n");
	return PCI_ERS_RESULT_DISCONNECT;
}
static void cxl_pci_resume(struct pci_dev *pdev)
{
	struct cxl *adapter = pci_get_drvdata(pdev);
	struct cxl_afu *afu;
	struct pci_dev *afu_dev;
	int i;

	/* Everything is back now. Drivers should restart work now.
	 * This is not the place to be checking if everything came back up
	 * properly, because there's no return value: do that in slot_reset.
	 */
	for (i = 0; i < adapter->slices; i++) {
		afu = adapter->afu[i];

		list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
			if (afu_dev->driver && afu_dev->driver->err_handler &&
			    afu_dev->driver->err_handler->resume)
				afu_dev->driver->err_handler->resume(afu_dev);
		}
	}
}
static const struct pci_error_handlers cxl_err_handler = {
	.error_detected = cxl_pci_error_detected,
	.slot_reset = cxl_pci_slot_reset,
	.resume = cxl_pci_resume,
};

struct pci_driver cxl_pci_driver = {
	.name = "cxl-pci",
	.id_table = cxl_pci_tbl,
	.probe = cxl_probe,
	.remove = cxl_remove,
	.shutdown = cxl_remove,
	.err_handler = &cxl_err_handler,
};