/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * - Neither the name of Intel Corporation nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This file contains all of the code that is specific to the HFI chip
 */

#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#define NUM_IB_PORTS 1

uint kdeth_qp;
module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");

uint num_vls = HFI1_MAX_VLS_SUPPORTED;
module_param(num_vls, uint, S_IRUGO);
MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
/*
 * Default time to aggregate two 10K packets from the idle state
 * (timer not running). The timer starts at the end of the first packet,
 * so only the time for one 10K packet and header plus a bit extra is needed.
 * 10 * 1024 + 64 header bytes = 10304 bytes
 * 10304 bytes / 12.5 GB/s = 824.32 ns
 */
uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
module_param(rcv_intr_timeout, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");
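/*
 * Illustrative sketch only (not part of the driver): the 824 ns default
 * above generalizes to other packet sizes. At 12.5 bytes/ns, aggregating
 * one packet plus its 64-byte header takes (bytes + 64) / 12.5 ns, i.e.
 * (bytes + 64) * 2 / 25. The helper name below is hypothetical.
 */
static inline uint example_rcv_timeout_ns(uint payload_bytes)
{
        /* (10 * 1024 + 64) * 2 / 25 = 824, matching the default above */
        return ((payload_bytes + 64) * 2) / 25;
}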
uint rcv_intr_count = 16; /* same as qib */
module_param(rcv_intr_count, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");

ushort link_crc_mask = SUPPORTED_CRCS;
module_param(link_crc_mask, ushort, S_IRUGO);
MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");
uint loopback;
module_param_named(loopback, loopback, uint, S_IRUGO);
MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");
/* Other driver tunables */
uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation */
static ushort crc_14b_sideband = 1;
static uint use_flr = 1;
uint quick_linkup; /* skip LNI */
struct flag_table {
        u64 flag;       /* the flag */
        char *str;      /* description string */
        u16 extra;      /* extra information */
};
/* str must be a string constant */
#define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
#define FLAG_ENTRY0(str, flag) {flag, str, 0}
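/*
 * For illustration: FLAG_ENTRY0("CceCsrParityErr", mask) initializes a
 * struct flag_table as { .flag = mask, .str = "CceCsrParityErr",
 * .extra = 0 }, while FLAG_ENTRY() additionally records consequence
 * bits (such as the SEC_* values below) in .extra.
 */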
/* Send Error Consequences */
#define SEC_WRITE_DROPPED 0x1
#define SEC_PACKET_DROPPED 0x2
#define SEC_SC_HALTED 0x4 /* per-context only */
#define SEC_SPC_FREEZE 0x8 /* per-HFI only */

#define MIN_KERNEL_KCTXTS 2
#define FIRST_KERNEL_KCTXT 1
#define NUM_MAP_REGS 32

/* Bit offset into the GUID which carries HFI id information */
#define GUID_HFI_INDEX_SHIFT 39
/* extract the emulation revision */
#define emulator_rev(dd) ((dd)->irev >> 8)
/* parallel and serial emulation versions are 3 and 4 respectively */
#define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
#define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)
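/*
 * Example (values illustrative only): an irev of 0x0203 decodes to
 * emulation revision 2 running on the parallel emulator (low nibble 3);
 * 0x0104 would be revision 1 on the serial emulator (low nibble 4).
 */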
#define IB_PACKET_TYPE 2ull
#define QW_SHIFT 6ull
#define QPN_WIDTH 7ull

/* LRH.BTH: QW 0, OFFSET 48 - for match */
#define LRH_BTH_QW 0ull
#define LRH_BTH_BIT_OFFSET 48ull
#define LRH_BTH_OFFSET(off) ((LRH_BTH_QW << QW_SHIFT) | (off))
#define LRH_BTH_MATCH_OFFSET LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
#define LRH_BTH_SELECT_OFFSET LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
#define LRH_BTH_MASK 3ull
#define LRH_BTH_VALUE 2ull
/* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
#define LRH_SC_QW 0ull
#define LRH_SC_BIT_OFFSET 56ull
#define LRH_SC_OFFSET(off) ((LRH_SC_QW << QW_SHIFT) | (off))
#define LRH_SC_MATCH_OFFSET LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
#define LRH_SC_MASK 128ull
#define LRH_SC_VALUE 0ull

/* SC[n..0] QW 0, OFFSET 60 - for select */
#define LRH_SC_SELECT_OFFSET ((LRH_SC_QW << QW_SHIFT) | (60ull))

/* QPN[m+n:1] QW 1, OFFSET 1 */
#define QPN_SELECT_OFFSET ((1ull << QW_SHIFT) | (1ull))
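/*
 * Sketch (hypothetical helper, not driver code): the *_OFFSET values
 * above pack a header quadword index and a bit offset within that
 * quadword into a single field, with the bit offset in the low
 * QW_SHIFT bits and the quadword index above it.
 */
static inline u64 example_hdr_offset(u64 qw, u64 bit)
{
        /* e.g. example_hdr_offset(1, 1) == QPN_SELECT_OFFSET */
        return (qw << QW_SHIFT) | bit;
}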
/* defines to build power on SC2VL table */
#define SC2VL_VAL( \
        num, \
        sc0, sc0val, sc1, sc1val, sc2, sc2val, sc3, sc3val, \
        sc4, sc4val, sc5, sc5val, sc6, sc6val, sc7, sc7val) \
(       ((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
        ((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
        ((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
        ((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
        ((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
        ((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
        ((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
        ((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT)   \
)
#define DC_SC_VL_VAL( \
        range, \
        e0, e0val, e1, e1val, e2, e2val, e3, e3val, \
        e4, e4val, e5, e5val, e6, e6val, e7, e7val, \
        e8, e8val, e9, e9val, e10, e10val, e11, e11val, \
        e12, e12val, e13, e13val, e14, e14val, e15, e15val) \
(       ((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
        ((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
        ((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
        ((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
        ((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
        ((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
        ((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
        ((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
        ((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
        ((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
        ((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
        ((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
        ((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
        ((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
        ((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
        ((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
)
/* all CceStatus sub-block freeze bits */
#define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
                | CCE_STATUS_RXE_FROZE_SMASK \
                | CCE_STATUS_TXE_FROZE_SMASK \
                | CCE_STATUS_TXE_PIO_FROZE_SMASK)
/* all CceStatus sub-block TXE pause bits */
#define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
                | CCE_STATUS_TXE_PAUSED_SMASK \
                | CCE_STATUS_SDMA_PAUSED_SMASK)
/* all CceStatus sub-block RXE pause bits */
#define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK
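/*
 * Illustrative sketch (hypothetical helper; the driver's actual freeze
 * handling lives elsewhere in this file): the aggregate masks above let
 * a caller test CceStatus with a single read, e.g. to poll until every
 * sub-block reports frozen.
 */
static inline int example_all_frozen(struct hfi1_devdata *dd)
{
        return (read_csr(dd, CCE_STATUS) & ALL_FROZE) == ALL_FROZE;
}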
static struct flag_table cce_err_status_flags[] = {
/* 0*/ FLAG_ENTRY0("CceCsrParityErr",
        CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
/* 1*/ FLAG_ENTRY0("CceCsrReadBadAddrErr",
        CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
/* 2*/ FLAG_ENTRY0("CceCsrWriteBadAddrErr",
        CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
/* 3*/ FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
        CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 4*/ FLAG_ENTRY0("CceTrgtAccessErr",
        CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
/* 5*/ FLAG_ENTRY0("CceRspdDataParityErr",
        CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
/* 6*/ FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
        CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 7*/ FLAG_ENTRY0("CceCsrCfgBusParityErr",
        CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
/* 8*/ FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
        CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 9*/ FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
        CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
/*10*/ FLAG_ENTRY0("CceCli1AsyncFifoSdmaHdParityErr",
        CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
/*11*/ FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
        CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
/*12*/ FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
        CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
/*13*/ FLAG_ENTRY0("PcicRetryMemCorErr",
        CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
/*14*/ FLAG_ENTRY0("PcicRetrySotMemCorErr",
        CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
/*15*/ FLAG_ENTRY0("PcicPostHdQCorErr",
        CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
/*16*/ FLAG_ENTRY0("PcicPostDatQCorErr",
        CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
/*17*/ FLAG_ENTRY0("PcicCplHdQCorErr",
        CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
/*18*/ FLAG_ENTRY0("PcicCplDatQCorErr",
        CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
/*19*/ FLAG_ENTRY0("PcicNPostHQParityErr",
        CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
/*20*/ FLAG_ENTRY0("PcicNPostDatQParityErr",
        CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
/*21*/ FLAG_ENTRY0("PcicRetryMemUncErr",
        CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
/*22*/ FLAG_ENTRY0("PcicRetrySotMemUncErr",
        CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
/*23*/ FLAG_ENTRY0("PcicPostHdQUncErr",
        CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
/*24*/ FLAG_ENTRY0("PcicPostDatQUncErr",
        CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
/*25*/ FLAG_ENTRY0("PcicCplHdQUncErr",
        CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
/*26*/ FLAG_ENTRY0("PcicCplDatQUncErr",
        CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
/*27*/ FLAG_ENTRY0("PcicTransmitFrontParityErr",
        CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
/*28*/ FLAG_ENTRY0("PcicTransmitBackParityErr",
        CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
/*29*/ FLAG_ENTRY0("PcicReceiveParityErr",
        CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
/*30*/ FLAG_ENTRY0("CceTrgtCplTimeoutErr",
        CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
/*31*/ FLAG_ENTRY0("LATriggered",
        CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
/*32*/ FLAG_ENTRY0("CceSegReadBadAddrErr",
        CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
/*33*/ FLAG_ENTRY0("CceSegWriteBadAddrErr",
        CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
/*34*/ FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
        CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
/*35*/ FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
        CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
/*36*/ FLAG_ENTRY0("CceMsixTableCorErr",
        CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
/*37*/ FLAG_ENTRY0("CceMsixTableUncErr",
        CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
/*38*/ FLAG_ENTRY0("CceIntMapCorErr",
        CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
/*39*/ FLAG_ENTRY0("CceIntMapUncErr",
        CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
/*40*/ FLAG_ENTRY0("CceMsixCsrParityErr",
        CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK),
#define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
static struct flag_table misc_err_status_flags[] = {
/* 0*/ FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
/* 1*/ FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
/* 2*/ FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
/* 3*/ FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
/* 4*/ FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
/* 5*/ FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
/* 6*/ FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
/* 7*/ FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
/* 8*/ FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
/* 9*/ FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
/*10*/ FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
/*11*/ FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
/*12*/ FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
};
/*
 * TXE PIO Error flags and consequences
 */
static struct flag_table pio_err_status_flags[] = {
/* 0*/ FLAG_ENTRY("PioWriteBadCtxt",
        SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
/* 1*/ FLAG_ENTRY("PioWriteAddrParity",
        SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
/* 2*/ FLAG_ENTRY("PioCsrParity",
        SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
/* 3*/ FLAG_ENTRY("PioSbMemFifo0",
        SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
/* 4*/ FLAG_ENTRY("PioSbMemFifo1",
        SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
/* 5*/ FLAG_ENTRY("PioPccFifoParity",
        SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
/* 6*/ FLAG_ENTRY("PioPecFifoParity",
        SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
/* 7*/ FLAG_ENTRY("PioSbrdctlCrrelParity",
        SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
/* 8*/ FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
        SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
/* 9*/ FLAG_ENTRY("PioPktEvictFifoParityErr",
        SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
/*10*/ FLAG_ENTRY("PioSmPktResetParity",
        SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
/*11*/ FLAG_ENTRY("PioVlLenMemBank0Unc",
        SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
/*12*/ FLAG_ENTRY("PioVlLenMemBank1Unc",
        SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
/*13*/ FLAG_ENTRY("PioVlLenMemBank0Cor",
        SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
/*14*/ FLAG_ENTRY("PioVlLenMemBank1Cor",
        SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
/*15*/ FLAG_ENTRY("PioCreditRetFifoParity",
        SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
/*16*/ FLAG_ENTRY("PioPpmcPblFifo",
        SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
/*17*/ FLAG_ENTRY("PioInitSmIn",
        SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
/*18*/ FLAG_ENTRY("PioPktEvictSmOrArbSm",
        SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
/*19*/ FLAG_ENTRY("PioHostAddrMemUnc",
        SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
/*20*/ FLAG_ENTRY("PioHostAddrMemCor",
        SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
/*21*/ FLAG_ENTRY("PioWriteDataParity",
        SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
/*22*/ FLAG_ENTRY("PioStateMachine",
        SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
/*23*/ FLAG_ENTRY("PioWriteQwValidParity",
        SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
/*24*/ FLAG_ENTRY("PioBlockQwCountParity",
        SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
/*25*/ FLAG_ENTRY("PioVlfVlLenParity",
        SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
/*26*/ FLAG_ENTRY("PioVlfSopParity",
        SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
/*27*/ FLAG_ENTRY("PioVlFifoParity",
        SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
/*28*/ FLAG_ENTRY("PioPpmcBqcMemParity",
        SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
/*29*/ FLAG_ENTRY("PioPpmcSopLen",
        SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
/*32*/ FLAG_ENTRY("PioCurrentFreeCntParity",
        SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
/*33*/ FLAG_ENTRY("PioLastReturnedCntParity",
        SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
/*34*/ FLAG_ENTRY("PioPccSopHeadParity",
        SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
/*35*/ FLAG_ENTRY("PioPecSopHeadParityErr",
        SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
/* TXE PIO errors that cause an SPC freeze */
#define ALL_PIO_FREEZE_ERR \
        (SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)
/*
 * TXE SDMA Error flags
 */
static struct flag_table sdma_err_status_flags[] = {
/* 0*/ FLAG_ENTRY0("SDmaRpyTagErr",
        SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
/* 1*/ FLAG_ENTRY0("SDmaCsrParityErr",
        SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
/* 2*/ FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
        SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
/* 3*/ FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
        SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),

/* TXE SDMA errors that cause an SPC freeze */
#define ALL_SDMA_FREEZE_ERR \
        (SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
        | SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
        | SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)

/* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
#define PORT_DISCARD_EGRESS_ERRS \
        (SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
        | SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
        | SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)
/*
 * TXE Egress Error flags
 */
#define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
static struct flag_table egress_err_status_flags[] = {
/* 0*/ FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
/* 1*/ FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
/* 3*/ FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
        SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
/* 4*/ FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
/* 5*/ FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
/* 7*/ FLAG_ENTRY0("TxPioLaunchIntfParityErr",
        SEES(TX_PIO_LAUNCH_INTF_PARITY)),
/* 8*/ FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
        SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
/*11*/ FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
        SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
/*12*/ FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
/*13*/ FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
/*14*/ FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
/*15*/ FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
/*16*/ FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
        SEES(TX_SDMA0_DISALLOWED_PACKET)),
/*17*/ FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
        SEES(TX_SDMA1_DISALLOWED_PACKET)),
/*18*/ FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
        SEES(TX_SDMA2_DISALLOWED_PACKET)),
/*19*/ FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
        SEES(TX_SDMA3_DISALLOWED_PACKET)),
/*20*/ FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
        SEES(TX_SDMA4_DISALLOWED_PACKET)),
/*21*/ FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
        SEES(TX_SDMA5_DISALLOWED_PACKET)),
/*22*/ FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
        SEES(TX_SDMA6_DISALLOWED_PACKET)),
/*23*/ FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
        SEES(TX_SDMA7_DISALLOWED_PACKET)),
/*24*/ FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
        SEES(TX_SDMA8_DISALLOWED_PACKET)),
/*25*/ FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
        SEES(TX_SDMA9_DISALLOWED_PACKET)),
/*26*/ FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
        SEES(TX_SDMA10_DISALLOWED_PACKET)),
/*27*/ FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
        SEES(TX_SDMA11_DISALLOWED_PACKET)),
/*28*/ FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
        SEES(TX_SDMA12_DISALLOWED_PACKET)),
/*29*/ FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
        SEES(TX_SDMA13_DISALLOWED_PACKET)),
/*30*/ FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
        SEES(TX_SDMA14_DISALLOWED_PACKET)),
/*31*/ FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
        SEES(TX_SDMA15_DISALLOWED_PACKET)),
/*32*/ FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
        SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
/*33*/ FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
        SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
/*34*/ FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
        SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
/*35*/ FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
        SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
/*36*/ FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
        SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
/*37*/ FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
        SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
/*38*/ FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
        SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
/*39*/ FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
        SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
/*40*/ FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
        SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
/*41*/ FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
/*42*/ FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
/*43*/ FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
/*44*/ FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
/*45*/ FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
/*46*/ FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
/*47*/ FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
/*48*/ FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
/*49*/ FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
/*50*/ FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
/*51*/ FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
/*52*/ FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
/*53*/ FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
/*54*/ FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
/*55*/ FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
/*56*/ FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
/*57*/ FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
/*58*/ FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
/*59*/ FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
/*60*/ FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
/*61*/ FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
/*62*/ FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
        SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
/*63*/ FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
        SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
/*
 * TXE Egress Error Info flags
 */
#define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
static struct flag_table egress_err_info_flags[] = {
/* 0*/ FLAG_ENTRY0("Reserved", 0ull),
/* 1*/ FLAG_ENTRY0("VLErr", SEEI(VL)),
/* 2*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
/* 3*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
/* 4*/ FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
/* 5*/ FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
/* 6*/ FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
/* 7*/ FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
/* 8*/ FLAG_ENTRY0("RawErr", SEEI(RAW)),
/* 9*/ FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
/*10*/ FLAG_ENTRY0("GRHErr", SEEI(GRH)),
/*11*/ FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
/*12*/ FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
/*13*/ FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
/*14*/ FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
/*15*/ FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
/*16*/ FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
/*17*/ FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
/*18*/ FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
/*19*/ FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
/*20*/ FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
/*21*/ FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
/* TXE Egress errors that cause an SPC freeze */
#define ALL_TXE_EGRESS_FREEZE_ERR \
        (SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
        | SEES(TX_PIO_LAUNCH_INTF_PARITY) \
        | SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
        | SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
        | SEES(TX_LAUNCH_CSR_PARITY) \
        | SEES(TX_SBRD_CTL_CSR_PARITY) \
        | SEES(TX_CONFIG_PARITY) \
        | SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
        | SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
        | SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
        | SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
        | SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
        | SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
        | SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
        | SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
        | SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
        | SEES(TX_CREDIT_RETURN_PARITY))
/*
 * TXE Send error flags
 */
#define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
static struct flag_table send_err_status_flags[] = {
/* 0*/ FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
/* 1*/ FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
/* 2*/ FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
};
/*
 * TXE Send Context Error flags and consequences
 */
static struct flag_table sc_err_status_flags[] = {
/* 0*/ FLAG_ENTRY("InconsistentSop",
        SEC_PACKET_DROPPED | SEC_SC_HALTED,
        SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
/* 1*/ FLAG_ENTRY("DisallowedPacket",
        SEC_PACKET_DROPPED | SEC_SC_HALTED,
        SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
/* 2*/ FLAG_ENTRY("WriteCrossesBoundary",
        SEC_WRITE_DROPPED | SEC_SC_HALTED,
        SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
/* 3*/ FLAG_ENTRY("WriteOverflow",
        SEC_WRITE_DROPPED | SEC_SC_HALTED,
        SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
/* 4*/ FLAG_ENTRY("WriteOutOfBounds",
        SEC_WRITE_DROPPED | SEC_SC_HALTED,
        SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
/*
 * RXE Receive Error flags
 */
#define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
static struct flag_table rxe_err_status_flags[] = {
/* 0*/ FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
/* 1*/ FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
/* 2*/ FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
/* 3*/ FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
/* 4*/ FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
/* 5*/ FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
/* 6*/ FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
/* 7*/ FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
/* 8*/ FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
/* 9*/ FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
/*10*/ FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
/*11*/ FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
/*12*/ FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
/*13*/ FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
/*14*/ FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
/*15*/ FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
/*16*/ FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
        RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
/*17*/ FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
/*18*/ FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
/*19*/ FLAG_ENTRY0("RxRbufBlockListReadUncErr",
        RXES(RBUF_BLOCK_LIST_READ_UNC)),
/*20*/ FLAG_ENTRY0("RxRbufBlockListReadCorErr",
        RXES(RBUF_BLOCK_LIST_READ_COR)),
/*21*/ FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
        RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
/*22*/ FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
        RXES(RBUF_CSR_QENT_CNT_PARITY)),
/*23*/ FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
        RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
/*24*/ FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
        RXES(RBUF_CSR_QVLD_BIT_PARITY)),
/*25*/ FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
/*26*/ FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
/*27*/ FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
        RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
/*28*/ FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
/*29*/ FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
/*30*/ FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
/*31*/ FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
/*32*/ FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
/*33*/ FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
/*34*/ FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
/*35*/ FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
        RXES(RBUF_FL_INITDONE_PARITY)),
/*36*/ FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
        RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
/*37*/ FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
/*38*/ FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
/*39*/ FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
/*40*/ FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
        RXES(LOOKUP_DES_PART1_UNC_COR)),
/*41*/ FLAG_ENTRY0("RxLookupDesPart2ParityErr",
        RXES(LOOKUP_DES_PART2_PARITY)),
/*42*/ FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
/*43*/ FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
/*44*/ FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
/*45*/ FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
/*46*/ FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
/*47*/ FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
/*48*/ FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
/*49*/ FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
/*50*/ FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
/*51*/ FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
/*52*/ FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
/*53*/ FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
/*54*/ FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
/*55*/ FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
/*56*/ FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
/*57*/ FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
/*58*/ FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
/*59*/ FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
/*60*/ FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
/*61*/ FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
/*62*/ FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
/*63*/ FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
};
/* RXE errors that will trigger an SPC freeze */
#define ALL_RXE_FREEZE_ERR \
        (RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
        | RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
        | RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
        | RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)

#define RXE_FREEZE_ABORT_MASK \
        (RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
        RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
        RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)
#define DCCE(name) DCC_ERR_FLG_##name##_SMASK
static struct flag_table dcc_err_flags[] = {
        FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
        FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
        FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
        FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
        FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
        FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
        FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
        FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
        FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
        FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
        FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
        FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
        FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
        FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
        FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
        FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
        FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
        FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
        FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
        FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
        FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
        FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
        FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
        FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
        FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
        FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
        FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
        FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
        FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
        FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
        FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
        FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
        FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
        FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
        FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
        FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
        FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
        FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
        FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
        FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
        FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
        FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
        FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
        FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
        FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
        FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
#define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
static struct flag_table lcb_err_flags[] = {
/* 0*/ FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
/* 1*/ FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
/* 2*/ FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
/* 3*/ FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
        LCBE(ALL_LNS_FAILED_REINIT_TEST)),
/* 4*/ FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
/* 5*/ FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
/* 6*/ FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
/* 7*/ FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
/* 8*/ FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
/* 9*/ FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
/*10*/ FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
/*11*/ FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
/*12*/ FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
/*13*/ FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
        LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
/*14*/ FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
/*15*/ FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
/*16*/ FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
/*17*/ FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
/*18*/ FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
/*19*/ FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
        LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
/*20*/ FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
/*21*/ FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
/*22*/ FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
/*23*/ FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
/*24*/ FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
/*25*/ FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
/*26*/ FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
        LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
/*27*/ FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
/*28*/ FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
        LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
/*29*/ FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
        LCBE(REDUNDANT_FLIT_PARITY_ERR))
};
#define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
static struct flag_table dc8051_err_flags[] = {
        FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
        FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
        FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
        FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
        FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
        FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
        FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
        FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
        FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
                D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
        FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
/*
 * DC8051 Information Error flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
 */
static struct flag_table dc8051_info_err_flags[] = {
        FLAG_ENTRY0("Spico ROM check failed", SPICO_ROM_FAILED),
        FLAG_ENTRY0("Unknown frame received", UNKNOWN_FRAME),
        FLAG_ENTRY0("Target BER not met", TARGET_BER_NOT_MET),
        FLAG_ENTRY0("Serdes internal loopback failure",
                FAILED_SERDES_INTERNAL_LOOPBACK),
        FLAG_ENTRY0("Failed SerDes init", FAILED_SERDES_INIT),
        FLAG_ENTRY0("Failed LNI(Polling)", FAILED_LNI_POLLING),
        FLAG_ENTRY0("Failed LNI(Debounce)", FAILED_LNI_DEBOUNCE),
        FLAG_ENTRY0("Failed LNI(EstbComm)", FAILED_LNI_ESTBCOMM),
        FLAG_ENTRY0("Failed LNI(OptEq)", FAILED_LNI_OPTEQ),
        FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
        FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
        FLAG_ENTRY0("Failed LNI(ConfigLT)", FAILED_LNI_CONFIGLT)
};
/*
 * DC8051 Information Host Information flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
 */
static struct flag_table dc8051_info_host_msg_flags[] = {
        FLAG_ENTRY0("Host request done", 0x0001),
        FLAG_ENTRY0("BC SMA message", 0x0002),
        FLAG_ENTRY0("BC PWR_MGM message", 0x0004),
        FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
        FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
        FLAG_ENTRY0("External device config request", 0x0020),
        FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
        FLAG_ENTRY0("LinkUp achieved", 0x0080),
        FLAG_ENTRY0("Link going down", 0x0100),
static u32 encoded_size(u32 size);
static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
                               u8 *continuous);
static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
                                  u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
static void read_vc_remote_link_width(struct hfi1_devdata *dd,
                                      u8 *remote_tx_rate, u16 *link_widths);
static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
                                     u8 *flag_bits, u16 *link_widths);
static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
                                  u8 *device_rev);
static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed);
static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
                            u8 *tx_polarity_inversion,
                            u8 *rx_polarity_inversion, u8 *max_rate);
static void handle_sdma_eng_err(struct hfi1_devdata *dd,
                                unsigned int context, u64 err_status);
static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
static void handle_dcc_err(struct hfi1_devdata *dd,
                           unsigned int context, u64 err_status);
static void handle_lcb_err(struct hfi1_devdata *dd,
                           unsigned int context, u64 err_status);
static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void set_partition_keys(struct hfi1_pportdata *ppd);
static const char *link_state_name(u32 state);
static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
                                          u32 state);
static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
                           u64 *out_data);
static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
static int thermal_init(struct hfi1_devdata *dd);

static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
                                  int msecs);
static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
static void handle_temp_err(struct hfi1_devdata *dd);
static void dc_shutdown(struct hfi1_devdata *dd);
static void dc_start(struct hfi1_devdata *dd);
/*
 * Error interrupt table entry. This is used as input to the interrupt
 * "clear down" routine used for all second tier error interrupt
 * registers. Second tier interrupt registers have a single bit
 * representing them in the top-level CceIntStatus.
 */
struct err_reg_info {
        u32 status;     /* status CSR offset */
        u32 clear;      /* clear CSR offset */
        u32 mask;       /* mask CSR offset */
        void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
        const char *desc;
};
#define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START)
#define NUM_DC_ERRS (IS_DC_END - IS_DC_START)
#define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START)
/*
 * Helpers for building HFI and DC error interrupt table entries. Different
 * helpers are needed because of inconsistent register names.
 */
#define EE(reg, handler, desc) \
        { reg##_STATUS, reg##_CLEAR, reg##_MASK, \
          handler, desc }
#define DC_EE1(reg, handler, desc) \
        { reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
#define DC_EE2(reg, handler, desc) \
        { reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
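/*
 * Sketch of the "clear down" flow these entries feed (hypothetical
 * helper; the driver's actual routine is not shown in this excerpt):
 * read the second-tier status CSR, acknowledge exactly the bits seen,
 * then pass them to the per-register handler.
 */
static void example_clear_down(struct hfi1_devdata *dd,
                               const struct err_reg_info *eri, u32 source)
{
        u64 reg = read_csr(dd, eri->status);

        write_csr(dd, eri->clear, reg); /* ack only what was captured */
        if (eri->handler)
                eri->handler(dd, source, reg);
}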
/*
 * Table of the "misc" grouping of error interrupts. Each entry refers to
 * another register containing more information.
 */
static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
/* 0*/ EE(CCE_ERR, handle_cce_err, "CceErr"),
/* 1*/ EE(RCV_ERR, handle_rxe_err, "RxeErr"),
/* 2*/ EE(MISC_ERR, handle_misc_err, "MiscErr"),
/* 3*/ { 0, 0, 0, NULL }, /* reserved */
/* 4*/ EE(SEND_PIO_ERR, handle_pio_err, "PioErr"),
/* 5*/ EE(SEND_DMA_ERR, handle_sdma_err, "SDmaErr"),
/* 6*/ EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
/* 7*/ EE(SEND_ERR, handle_txe_err, "TxeErr")
        /* the rest are reserved */
};
/*
 * Index into the Various section of the interrupt sources
 * corresponding to the Critical Temperature interrupt.
 */
#define TCRIT_INT_SOURCE 4
/*
 * SDMA error interrupt entry - refers to another register containing more
 * information.
 */
static const struct err_reg_info sdma_eng_err =
        EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");
static const struct err_reg_info various_err[NUM_VARIOUS] = {
/* 0*/ { 0, 0, 0, NULL }, /* PbcInt */
/* 1*/ { 0, 0, 0, NULL }, /* GpioAssertInt */
/* 2*/ EE(ASIC_QSFP1, handle_qsfp_int, "QSFP1"),
/* 3*/ EE(ASIC_QSFP2, handle_qsfp_int, "QSFP2"),
/* 4*/ { 0, 0, 0, NULL }, /* TCritInt */
        /* rest are reserved */
};
/*
 * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
 * register cannot be derived from the MTU value because 10K is not
 * a power of 2. Therefore, we need a constant. Everything else can
 * be derived.
 */
#define DCC_CFG_PORT_MTU_CAP_10240 7
/*
 * Table of the DC grouping of error interrupts. Each entry refers to
 * another register containing more information.
 */
static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
/* 0*/ DC_EE1(DCC_ERR, handle_dcc_err, "DCC Err"),
/* 1*/ DC_EE2(DC_LCB_ERR, handle_lcb_err, "LCB Err"),
/* 2*/ DC_EE2(DC_DC8051_ERR, handle_8051_interrupt, "DC8051 Interrupt"),
/* 3*/ /* dc_lbm_int - special, see is_dc_int() */
        /* the rest are reserved */
};
/*
 * csr to read for name (if applicable)
 * offset into dd or ppd to store the counter's value
 * accessor for stat element, context either dd or ppd
 */
u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
               int mode, u64 data);
#define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
#define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159

#define CNTR_ELEM(name, csr, offset, flags, accessor) \

#define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
        (counter * 8 + RCV_COUNTER_ARRAY32), \
        0, flags | CNTR_32BIT, \
        port_access_u32_csr)

#define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
        (counter * 8 + RCV_COUNTER_ARRAY32), \
        0, flags | CNTR_32BIT, \

#define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
        (counter * 8 + RCV_COUNTER_ARRAY64), \
        port_access_u64_csr)

#define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
        (counter * 8 + RCV_COUNTER_ARRAY64), \

#define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
#define OVR_ELM(ctx) \
CNTR_ELEM("RcvHdrOvr" #ctx, \
        (RCV_HDR_OVFL_CNT + ctx * 0x100), \
        0, CNTR_NORMAL, port_access_u64_csr)

#define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
        (counter * 8 + SEND_COUNTER_ARRAY32), \
        0, flags | CNTR_32BIT, \
        port_access_u32_csr)

#define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
        (counter * 8 + SEND_COUNTER_ARRAY64), \
        port_access_u64_csr)

#define TX64_DEV_CNTR_ELEM(name, counter, flags) \
        counter * 8 + SEND_COUNTER_ARRAY64, \

#define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
        (counter * 8 + CCE_COUNTER_ARRAY32), \
        0, flags | CNTR_32BIT, \

#define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
        (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
        0, flags | CNTR_32BIT, \

#define DC_PERF_CNTR(name, counter, flags) \

#define DC_PERF_CNTR_LCB(name, counter, flags) \

#define SW_IBP_CNTR(name, cntr) \
u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
{
        u64 val;

        if (dd->flags & HFI1_PRESENT) {
                val = readq((void __iomem *)dd->kregbase + offset);
                return val;
        }
        return -1;      /* device absent: mimic an all-ones CSR read (assumed) */
}

void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
{
        if (dd->flags & HFI1_PRESENT)
                writeq(value, (void __iomem *)dd->kregbase + offset);
}

void __iomem *get_csr_addr(
        struct hfi1_devdata *dd,
        u32 offset)
{
        return (void __iomem *)dd->kregbase + offset;
}
static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
                                 int mode, u64 value)
{
        u64 ret;

        if (mode == CNTR_MODE_R) {
                ret = read_csr(dd, csr);
        } else if (mode == CNTR_MODE_W) {
                write_csr(dd, csr, value);
                ret = value;
        } else {
                dd_dev_err(dd, "Invalid cntr register access mode");
                return 0;
        }

        hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
        return ret;
}
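/*
 * Typical use (illustrative): CSR-backed counters read through the
 * helper above with CNTR_MODE_R (value ignored) and are zeroed by
 * writing 0 with CNTR_MODE_W.
 */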
static u64 dev_access_u32_csr(const struct cntr_entry *entry,
                              void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = context;
        u64 csr = entry->csr;

        if (entry->flags & CNTR_SDMA) {
                if (vl == CNTR_INVALID_VL)
                        return 0;
                csr += 0x100 * vl;      /* per-engine CSR stride (assumed) */
        } else {
                if (vl != CNTR_INVALID_VL)
                        return 0;
        }
        return read_write_csr(dd, csr, mode, data);
}
static u64 access_sde_err_cnt(const struct cntr_entry *entry,
                              void *context, int idx, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        if (dd->per_sdma && idx < dd->num_sdma)
                return dd->per_sdma[idx].err_cnt;
        return 0;
}

static u64 access_sde_int_cnt(const struct cntr_entry *entry,
                              void *context, int idx, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        if (dd->per_sdma && idx < dd->num_sdma)
                return dd->per_sdma[idx].sdma_int_cnt;
        return 0;
}

static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
                                   void *context, int idx, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        if (dd->per_sdma && idx < dd->num_sdma)
                return dd->per_sdma[idx].idle_int_cnt;
        return 0;
}

static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
                                       void *context, int idx, int mode,
                                       u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        if (dd->per_sdma && idx < dd->num_sdma)
                return dd->per_sdma[idx].progress_int_cnt;
        return 0;
}
static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
                              int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = context;
        u64 val;
        u64 csr = entry->csr;

        if (entry->flags & CNTR_VL) {
                if (vl == CNTR_INVALID_VL)
                        return 0;
                csr += 8 * vl;          /* per-VL counter stride (assumed) */
        } else {
                if (vl != CNTR_INVALID_VL)
                        return 0;
        }

        val = read_write_csr(dd, csr, mode, data);
        return val;
}
static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
                              int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = context;
        u32 csr = entry->csr;
        int ret = 0;

        if (vl != CNTR_INVALID_VL)
                return 0;
        if (mode == CNTR_MODE_R)
                ret = read_lcb_csr(dd, csr, &data);
        else if (mode == CNTR_MODE_W)
                ret = write_lcb_csr(dd, csr, data);

        if (ret) {
                dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
                return 0;
        }

        hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
        return data;
}
static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
                               int vl, int mode, u64 data)
{
        struct hfi1_pportdata *ppd = context;

        if (vl != CNTR_INVALID_VL)
                return 0;
        return read_write_csr(ppd->dd, entry->csr, mode, data);
}

static u64 port_access_u64_csr(const struct cntr_entry *entry,
                               void *context, int vl, int mode, u64 data)
{
        struct hfi1_pportdata *ppd = context;
        u64 val;
        u64 csr = entry->csr;

        if (entry->flags & CNTR_VL) {
                if (vl == CNTR_INVALID_VL)
                        return 0;
                csr += 8 * vl;          /* per-VL counter stride (assumed) */
        } else {
                if (vl != CNTR_INVALID_VL)
                        return 0;
        }
        val = read_write_csr(ppd->dd, csr, mode, data);
        return val;
}
/* Software defined */
static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
                                u64 data)
{
        u64 ret;

        if (mode == CNTR_MODE_R) {
                ret = *cntr;
        } else if (mode == CNTR_MODE_W) {
                *cntr = data;
                ret = data;
        } else {
                dd_dev_err(dd, "Invalid cntr sw access mode");
                return 0;
        }

        hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);
        return ret;
}
static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
                                 int vl, int mode, u64 data)
{
        struct hfi1_pportdata *ppd = context;

        if (vl != CNTR_INVALID_VL)
                return 0;
        return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
}

static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
                                 int vl, int mode, u64 data)
{
        struct hfi1_pportdata *ppd = context;

        if (vl != CNTR_INVALID_VL)
                return 0;
        return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
}

static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
                                       void *context, int vl, int mode,
                                       u64 data)
{
        struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;

        if (vl != CNTR_INVALID_VL)
                return 0;
        return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
}
static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
                                   void *context, int vl, int mode, u64 data)
{
        struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
        u64 zero = 0;
        u64 *counter;

        if (vl == CNTR_INVALID_VL)
                counter = &ppd->port_xmit_discards;
        else if (vl >= 0 && vl < C_VL_COUNT)
                counter = &ppd->port_xmit_discards_vl[vl];
        else
                counter = &zero;        /* VL index out of range */

        return read_write_sw(ppd->dd, counter, mode, data);
}
static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
                                       void *context, int vl, int mode,
                                       u64 data)
{
        struct hfi1_pportdata *ppd = context;

        if (vl != CNTR_INVALID_VL)
                return 0;

        return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
                             mode, data);
}

static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
                                      void *context, int vl, int mode, u64 data)
{
        struct hfi1_pportdata *ppd = context;

        if (vl != CNTR_INVALID_VL)
                return 0;

        return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
                             mode, data);
}
u64 get_all_cpu_total(u64 __percpu *cntr)
{
        int cpu;
        u64 counter = 0;

        /* sum over possible (not just online) CPUs so nothing is lost */
        for_each_possible_cpu(cpu)
                counter += *per_cpu_ptr(cntr, cpu);
        return counter;
}
static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
                          u64 __percpu *cntr,
                          int vl, int mode, u64 data)
{
        u64 ret = 0;

        if (vl != CNTR_INVALID_VL)
                return 0;

        if (mode == CNTR_MODE_R) {
                ret = get_all_cpu_total(cntr) - *z_val;
        } else if (mode == CNTR_MODE_W) {
                /* A write can only zero the counter */
                if (data == 0)
                        *z_val = get_all_cpu_total(cntr);
                else
                        dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
        } else {
                dd_dev_err(dd, "Invalid cntr sw cpu access mode");
                return 0;
        }

        return ret;
}
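
/*
 * Sketch of the zero-baseline scheme used by read_write_cpu() (the names
 * below are illustrative, not new API):
 *
 *      total = get_all_cpu_total(cntr);
 *      read  -> total - *z_val;            CNTR_MODE_R
 *      zero  -> *z_val = total;            CNTR_MODE_W with data == 0
 *
 * The per-CPU counters themselves are never written; "zeroing" merely
 * advances the baseline, which avoids having to reset every CPU's copy.
 */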
static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
                              void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = context;

        return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
                              mode, data);
}
static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = context;

        return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
                              mode, data);
}
static u64 access_sw_pio_wait(const struct cntr_entry *entry,
                              void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = context;

        return dd->verbs_dev.n_piowait;
}
static u64 access_sw_pio_drain(const struct cntr_entry *entry,
                               void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->verbs_dev.n_piodrain;
}
static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
                              void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = context;

        return dd->verbs_dev.n_txwait;
}
static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
                               void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = context;

        return dd->verbs_dev.n_kmem_wait;
}
static u64 access_sw_send_schedule(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
                              mode, data);
}
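
/*
 * The remainder of this block is a large family of trivial accessors,
 * one per hardware error-status bit: each returns one slot of a
 * software counter array, with the array index mirroring the bit
 * position it shadows (highest bit first within each group).
 */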
/* Software counters for the error status bits within MISC_ERR_STATUS */
static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->misc_err_status_cnt[12];
}

static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->misc_err_status_cnt[11];
}

static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->misc_err_status_cnt[10];
}

static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->misc_err_status_cnt[9];
}

static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->misc_err_status_cnt[8];
}

static u64 access_misc_efuse_read_bad_addr_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->misc_err_status_cnt[7];
}

static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->misc_err_status_cnt[6];
}

static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->misc_err_status_cnt[5];
}

static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->misc_err_status_cnt[4];
}

static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->misc_err_status_cnt[3];
}

static u64 access_misc_csr_write_bad_addr_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->misc_err_status_cnt[2];
}

static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->misc_err_status_cnt[1];
}

static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->misc_err_status_cnt[0];
}
/*
 * Software counter for the aggregate of
 * individual CceErrStatus counters
 */
static u64 access_sw_cce_err_status_aggregated_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_cce_err_status_aggregate;
}
/*
 * Software counters corresponding to each of the
 * error status bits within CceErrStatus
 */
static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->cce_err_status_cnt[40];
}

static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->cce_err_status_cnt[39];
}

static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->cce_err_status_cnt[38];
}

static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->cce_err_status_cnt[37];
}

static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->cce_err_status_cnt[36];
}

static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->cce_err_status_cnt[35];
}

static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->cce_err_status_cnt[34];
}

static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->cce_err_status_cnt[33];
}

static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->cce_err_status_cnt[32];
}

static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->cce_err_status_cnt[31];
}

static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->cce_err_status_cnt[30];
}

static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->cce_err_status_cnt[29];
}

static u64 access_pcic_transmit_back_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->cce_err_status_cnt[28];
}

static u64 access_pcic_transmit_front_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->cce_err_status_cnt[27];
}

static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->cce_err_status_cnt[26];
}

static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->cce_err_status_cnt[25];
}

static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->cce_err_status_cnt[24];
}

static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->cce_err_status_cnt[23];
}

static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->cce_err_status_cnt[22];
}

static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->cce_err_status_cnt[21];
}

static u64 access_pcic_n_post_dat_q_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->cce_err_status_cnt[20];
}

static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->cce_err_status_cnt[19];
}

static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->cce_err_status_cnt[18];
}

static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->cce_err_status_cnt[17];
}

static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->cce_err_status_cnt[16];
}

static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->cce_err_status_cnt[15];
}

static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->cce_err_status_cnt[14];
}

static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->cce_err_status_cnt[13];
}

static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->cce_err_status_cnt[12];
}

static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->cce_err_status_cnt[11];
}

static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->cce_err_status_cnt[10];
}

static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->cce_err_status_cnt[9];
}

static u64 access_cce_cli2_async_fifo_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->cce_err_status_cnt[8];
}

static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->cce_err_status_cnt[7];
}

static u64 access_cce_cli0_async_fifo_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->cce_err_status_cnt[6];
}

static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->cce_err_status_cnt[5];
}

static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->cce_err_status_cnt[4];
}

static u64 access_cce_trgt_async_fifo_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->cce_err_status_cnt[3];
}

static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->cce_err_status_cnt[2];
}

static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->cce_err_status_cnt[1];
}

static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->cce_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within RcvErrStatus
 */
static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[63];
}

static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[62];
}

static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[61];
}

static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[60];
}

static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[59];
}

static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[58];
}

static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[57];
}

static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[56];
}

static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[55];
}

static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[54];
}

static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[53];
}

static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[52];
}

static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[51];
}

static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[50];
}

static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[49];
}

static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[48];
}

static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[47];
}

static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[46];
}

static u64 access_rx_hq_intr_csr_parity_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[45];
}

static u64 access_rx_lookup_csr_parity_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[44];
}

static u64 access_rx_lookup_rcv_array_cor_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[43];
}

static u64 access_rx_lookup_rcv_array_unc_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[42];
}

static u64 access_rx_lookup_des_part2_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[41];
}

static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[40];
}

static u64 access_rx_lookup_des_part1_unc_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[39];
}

static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[38];
}

static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[37];
}

static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[36];
}

static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[35];
}

static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[34];
}

static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[33];
}

static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[32];
}

static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[31];
}

static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[30];
}

static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[29];
}

static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[28];
}

static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[27];
}

static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[26];
}

static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[25];
}

static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[24];
}

static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[23];
}

static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[22];
}

static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[21];
}

static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[20];
}

static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[19];
}

static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[18];
}

static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[17];
}

static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[16];
}

static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[15];
}

static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[14];
}

static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[13];
}

static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[12];
}

static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[11];
}

static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[10];
}

static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[9];
}

static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[8];
}

static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[7];
}

static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[6];
}

static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[5];
}

static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[4];
}

static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[3];
}

static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[2];
}

static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[1];
}

static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->rcv_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within SendPioErrStatus
 */
static u64 access_pio_pec_sop_head_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[35];
}

static u64 access_pio_pcc_sop_head_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[34];
}

static u64 access_pio_last_returned_cnt_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[33];
}

static u64 access_pio_current_free_cnt_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[32];
}

static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[31];
}

static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[30];
}

static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[29];
}

static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[28];
}

static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[27];
}

static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[26];
}

static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[25];
}

static u64 access_pio_block_qw_count_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[24];
}

static u64 access_pio_write_qw_valid_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[23];
}

static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[22];
}

static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[21];
}

static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[20];
}

static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[19];
}

static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[18];
}

static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[17];
}

static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[16];
}

static u64 access_pio_credit_ret_fifo_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[15];
}

static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[14];
}

static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[13];
}

static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[12];
}

static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[11];
}

static u64 access_pio_sm_pkt_reset_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[10];
}

static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[9];
}

static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[8];
}

static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[7];
}

static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[6];
}

static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[5];
}

static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[4];
}

static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[3];
}

static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[2];
}

static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[1];
}

static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_pio_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within SendDmaErrStatus
 */
static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_dma_err_status_cnt[3];
}

static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_dma_err_status_cnt[2];
}

static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_dma_err_status_cnt[1];
}

static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_dma_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within SendEgressErrStatus
 */
static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[63];
}

static u64 access_tx_read_sdma_memory_csr_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[62];
}

static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[61];
}

static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[60];
}

static u64 access_tx_read_sdma_memory_cor_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[59];
}

static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[58];
}

static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[57];
}

static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[56];
}

static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[55];
}

static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[54];
}

static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[53];
}

static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[52];
}

static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[51];
}

static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[50];
}

static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[49];
}

static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[48];
}

static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[47];
}

static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[46];
}

static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[45];
}

static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[44];
}

static u64 access_tx_read_sdma_memory_unc_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[43];
}

static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[42];
}

static u64 access_tx_credit_return_partiy_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[41];
}

static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[40];
}

static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[39];
}

static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[38];
}

static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[37];
}

static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[36];
}

static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[35];
}

static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[34];
}

static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[33];
}

static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[32];
}

static u64 access_tx_sdma15_disallowed_packet_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[31];
}

static u64 access_tx_sdma14_disallowed_packet_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[30];
}

static u64 access_tx_sdma13_disallowed_packet_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[29];
}

static u64 access_tx_sdma12_disallowed_packet_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[28];
}

static u64 access_tx_sdma11_disallowed_packet_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[27];
}

static u64 access_tx_sdma10_disallowed_packet_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[26];
}

static u64 access_tx_sdma9_disallowed_packet_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[25];
}

static u64 access_tx_sdma8_disallowed_packet_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[24];
}

static u64 access_tx_sdma7_disallowed_packet_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[23];
}

static u64 access_tx_sdma6_disallowed_packet_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[22];
}

static u64 access_tx_sdma5_disallowed_packet_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[21];
}

static u64 access_tx_sdma4_disallowed_packet_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[20];
}

static u64 access_tx_sdma3_disallowed_packet_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[19];
}

static u64 access_tx_sdma2_disallowed_packet_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[18];
}

static u64 access_tx_sdma1_disallowed_packet_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[17];
}

static u64 access_tx_sdma0_disallowed_packet_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[16];
}

static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[15];
}

static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[14];
}

static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[13];
}

static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[12];
}

static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[11];
}

static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[10];
}

static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[9];
}

static u64 access_tx_sdma_launch_intf_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[8];
}

static u64 access_tx_pio_launch_intf_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[7];
}

static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[6];
}

static u64 access_tx_incorrect_link_state_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[5];
}

static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[4];
}

static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[3];
}

static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[2];
}

static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[1];
}

static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within SendErrStatus
 */
static u64 access_send_csr_write_bad_addr_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_err_status_cnt[2];
}

static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_err_status_cnt[1];
}

static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within SendCtxtErrStatus
 */
static u64 access_pio_write_out_of_bounds_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_ctxt_err_status_cnt[4];
}

static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_ctxt_err_status_cnt[3];
}

static u64 access_pio_write_crosses_boundary_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_ctxt_err_status_cnt[2];
}

static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_ctxt_err_status_cnt[1];
}

static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_ctxt_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within SendDmaEngErrStatus
 */
static u64 access_sdma_header_request_fifo_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[23];
}

static u64 access_sdma_header_storage_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[22];
}

static u64 access_sdma_packet_tracking_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[21];
}

static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[20];
}

static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[19];
}

static u64 access_sdma_header_request_fifo_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[18];
}

static u64 access_sdma_header_storage_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[17];
}

static u64 access_sdma_packet_tracking_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[16];
}

static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[15];
}

static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[14];
}

static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
				       void *context, int vl, int mode,
				       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[13];
}

static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[12];
}

static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[11];
}

static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[10];
}

static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[9];
}

static u64 access_sdma_packet_desc_overflow_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[8];
}

static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
					       void *context, int vl,
					       int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[7];
}

static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
				    void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[6];
}

static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
					void *context, int vl, int mode,
					u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[5];
}

static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[4];
}

static u64 access_sdma_tail_out_of_bounds_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[3];
}

static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
					void *context, int vl, int mode,
					u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[2];
}

static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[1];
}

static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
					void *context, int vl, int mode,
					u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[0];
}
#define def_access_sw_cpu(cntr) \
static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry,	      \
				void *context, int vl, int mode, u64 data)    \
{									      \
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;	      \
	return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr,	      \
			      ppd->ibport_data.rvp.cntr, vl,		      \
			      mode, data);				      \
}

def_access_sw_cpu(rc_acks);
def_access_sw_cpu(rc_qacks);
def_access_sw_cpu(rc_delayed_comp);
#define def_access_ibp_counter(cntr) \
static u64 access_ibp_##cntr(const struct cntr_entry *entry,		      \
			     void *context, int vl, int mode, u64 data)	      \
{									      \
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;	      \
									      \
	if (vl != CNTR_INVALID_VL)					      \
		return 0;						      \
									      \
	return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr,	      \
			     mode, data);				      \
}

def_access_ibp_counter(loop_pkts);
def_access_ibp_counter(rc_resends);
def_access_ibp_counter(rnr_naks);
def_access_ibp_counter(other_naks);
def_access_ibp_counter(rc_timeouts);
def_access_ibp_counter(pkt_drops);
def_access_ibp_counter(dmawait);
def_access_ibp_counter(rc_seqnak);
def_access_ibp_counter(rc_dupreq);
def_access_ibp_counter(rdma_seq);
def_access_ibp_counter(unaligned);
def_access_ibp_counter(seq_naks);
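/*
 * For reference, each def_access_ibp_counter() invocation above expands
 * to an accessor of the following shape; e.g.
 * def_access_ibp_counter(loop_pkts) yields:
 *
 *	static u64 access_ibp_loop_pkts(const struct cntr_entry *entry,
 *					void *context, int vl, int mode,
 *					u64 data)
 *	{
 *		struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
 *
 *		if (vl != CNTR_INVALID_VL)
 *			return 0;
 *
 *		return read_write_sw(ppd->dd,
 *				     &ppd->ibport_data.rvp.n_loop_pkts,
 *				     mode, data);
 *	}
 */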
static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
[C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
[C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
			CNTR_NORMAL),
[C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
			CNTR_NORMAL),
[C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
			RCV_TID_FLOW_GEN_MISMATCH_CNT,
			CNTR_NORMAL),
[C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
			CNTR_NORMAL),
[C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
			RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
[C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
			CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
[C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
			CNTR_NORMAL),
[C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
			CNTR_NORMAL),
[C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
			CNTR_NORMAL),
[C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
			CNTR_NORMAL),
[C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
			CNTR_NORMAL),
[C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
			CNTR_NORMAL),
[C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
			CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
[C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
			CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
[C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
			CNTR_SYNTH),
[C_DC_RCV_ERR] = DC_PERF_CNTR(DcRecvErr, DCC_ERR_PORTRCV_ERR_CNT, CNTR_SYNTH),
[C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
			CNTR_SYNTH),
[C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
			CNTR_SYNTH),
[C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
			CNTR_SYNTH),
[C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
			DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
[C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
			DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
			CNTR_SYNTH),
[C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
			DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
[C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
			CNTR_SYNTH),
[C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
			CNTR_SYNTH),
[C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
			CNTR_SYNTH),
[C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
			CNTR_SYNTH),
[C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
			CNTR_SYNTH),
[C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
			CNTR_SYNTH),
[C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
			CNTR_SYNTH),
[C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
[C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
[C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
			CNTR_SYNTH),
[C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
			CNTR_SYNTH),
[C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_TOTAL_CRC] =
	DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
			 CNTR_SYNTH),
[C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
			CNTR_SYNTH),
[C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
			CNTR_SYNTH),
[C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
			CNTR_SYNTH),
[C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
			CNTR_SYNTH),
[C_DC_CRC_MULT_LN] =
	DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
			 CNTR_SYNTH),
[C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
			CNTR_SYNTH),
[C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
			CNTR_SYNTH),
[C_DC_SEQ_CRC_CNT] =
	DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
			 CNTR_SYNTH),
[C_DC_ESC0_ONLY_CNT] =
	DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
			 CNTR_SYNTH),
[C_DC_ESC0_PLUS1_CNT] =
	DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
			 CNTR_SYNTH),
[C_DC_ESC0_PLUS2_CNT] =
	DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
			 CNTR_SYNTH),
[C_DC_REINIT_FROM_PEER_CNT] =
	DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
			 CNTR_SYNTH),
[C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
			CNTR_SYNTH),
[C_DC_MISC_FLG_CNT] =
	DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
			 CNTR_SYNTH),
[C_DC_PRF_GOOD_LTP_CNT] =
	DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
[C_DC_PRF_ACCEPTED_LTP_CNT] =
	DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
			 CNTR_SYNTH),
[C_DC_PRF_RX_FLIT_CNT] =
	DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
[C_DC_PRF_TX_FLIT_CNT] =
	DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
[C_DC_PRF_CLK_CNTR] =
	DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
[C_DC_PG_DBG_FLIT_CRDTS_CNT] =
	DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
[C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
	DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
			 CNTR_SYNTH),
[C_DC_PG_STS_TX_SBE_CNT] =
	DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
[C_DC_PG_STS_TX_MBE_CNT] =
	DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
			 CNTR_SYNTH),
[C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
			    access_sw_cpu_intr),
[C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
			       access_sw_cpu_rcv_limit),
[C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
			    access_sw_vtx_wait),
[C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
			    access_sw_pio_wait),
[C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL,
			     access_sw_pio_drain),
[C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
			     access_sw_kmem_wait),
[C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
			      access_sw_send_schedule),
[C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
				      SEND_DMA_DESC_FETCHED_CNT, 0,
				      CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
				      dev_access_u32_csr),
[C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
			     CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
			     access_sde_int_cnt),
[C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
			     CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
			     access_sde_err_cnt),
[C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
				  CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
				  access_sde_idle_int_cnt),
[C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
				      CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
				      access_sde_progress_int_cnt),
/* MISC_ERR_STATUS */
[C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_pll_lock_fail_err_cnt),
[C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_mbist_fail_err_cnt),
[C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_invalid_eep_cmd_err_cnt),
[C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_efuse_done_parity_err_cnt),
[C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_efuse_write_err_cnt),
[C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
				0, CNTR_NORMAL,
				access_misc_efuse_read_bad_addr_err_cnt),
[C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_efuse_csr_parity_err_cnt),
[C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_fw_auth_failed_err_cnt),
[C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_key_mismatch_err_cnt),
[C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_sbus_write_failed_err_cnt),
[C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_csr_write_bad_addr_err_cnt),
[C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_csr_read_bad_addr_err_cnt),
[C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_csr_parity_err_cnt),
/* CceErrStatus */
[C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
				CNTR_NORMAL,
				access_sw_cce_err_status_aggregated_cnt),
[C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_cce_msix_csr_parity_err_cnt),
[C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
				CNTR_NORMAL,
				access_cce_int_map_unc_err_cnt),
[C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
				CNTR_NORMAL,
				access_cce_int_map_cor_err_cnt),
[C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
				CNTR_NORMAL,
				access_cce_msix_table_unc_err_cnt),
[C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
				CNTR_NORMAL,
				access_cce_msix_table_cor_err_cnt),
[C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
				0, CNTR_NORMAL,
				access_cce_rxdma_conv_fifo_parity_err_cnt),
[C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
				0, CNTR_NORMAL,
				access_cce_rcpl_async_fifo_parity_err_cnt),
[C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
				CNTR_NORMAL,
				access_cce_seg_write_bad_addr_err_cnt),
[C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
				CNTR_NORMAL,
				access_cce_seg_read_bad_addr_err_cnt),
[C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
				CNTR_NORMAL,
				access_la_triggered_cnt),
[C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
				CNTR_NORMAL,
				access_cce_trgt_cpl_timeout_err_cnt),
[C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_receive_parity_err_cnt),
[C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_transmit_back_parity_err_cnt),
[C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
				0, CNTR_NORMAL,
				access_pcic_transmit_front_parity_err_cnt),
[C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_cpl_dat_q_unc_err_cnt),
[C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_cpl_hd_q_unc_err_cnt),
[C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_post_dat_q_unc_err_cnt),
[C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_post_hd_q_unc_err_cnt),
[C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_retry_sot_mem_unc_err_cnt),
[C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_retry_mem_unc_err),
[C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_n_post_dat_q_parity_err_cnt),
[C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_n_post_h_q_parity_err_cnt),
[C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_cpl_dat_q_cor_err_cnt),
[C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_cpl_hd_q_cor_err_cnt),
[C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_post_dat_q_cor_err_cnt),
[C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_post_hd_q_cor_err_cnt),
[C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_retry_sot_mem_cor_err_cnt),
[C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_retry_mem_cor_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
				"CceCli1AsyncFifoDbgParityError", 0, 0,
				CNTR_NORMAL,
				access_cce_cli1_async_fifo_dbg_parity_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
				"CceCli1AsyncFifoRxdmaParityError", 0, 0,
				CNTR_NORMAL,
				access_cce_cli1_async_fifo_rxdma_parity_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
				"CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
				CNTR_NORMAL,
				access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
				"CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
				CNTR_NORMAL,
				access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
[C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
				0, CNTR_NORMAL,
				access_cce_cli2_async_fifo_parity_err_cnt),
[C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
				CNTR_NORMAL,
				access_cce_csr_cfg_bus_parity_err_cnt),
[C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
				0, CNTR_NORMAL,
				access_cce_cli0_async_fifo_parity_err_cnt),
[C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
				CNTR_NORMAL,
				access_cce_rspd_data_parity_err_cnt),
[C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
				CNTR_NORMAL,
				access_cce_trgt_access_err_cnt),
[C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
				0, CNTR_NORMAL,
				access_cce_trgt_async_fifo_parity_err_cnt),
[C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
				CNTR_NORMAL,
				access_cce_csr_write_bad_addr_err_cnt),
[C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
				CNTR_NORMAL,
				access_cce_csr_read_bad_addr_err_cnt),
[C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_ccs_csr_parity_err_cnt),
/* RcvErrStatus */
[C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_csr_parity_err_cnt),
[C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
				CNTR_NORMAL,
				access_rx_csr_write_bad_addr_err_cnt),
[C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
				CNTR_NORMAL,
				access_rx_csr_read_bad_addr_err_cnt),
[C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_csr_unc_err_cnt),
[C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_dq_fsm_encoding_err_cnt),
[C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_eq_fsm_encoding_err_cnt),
[C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_csr_parity_err_cnt),
[C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_data_cor_err_cnt),
[C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_data_unc_err_cnt),
[C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_data_fifo_rd_cor_err_cnt),
[C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_data_fifo_rd_unc_err_cnt),
[C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_hdr_fifo_rd_cor_err_cnt),
[C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_hdr_fifo_rd_unc_err_cnt),
[C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_desc_part2_cor_err_cnt),
[C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_desc_part2_unc_err_cnt),
[C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_desc_part1_cor_err_cnt),
[C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_desc_part1_unc_err_cnt),
[C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
				CNTR_NORMAL,
				access_rx_hq_intr_fsm_err_cnt),
[C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_hq_intr_csr_parity_err_cnt),
[C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_lookup_csr_parity_err_cnt),
[C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_lookup_rcv_array_cor_err_cnt),
[C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_lookup_rcv_array_unc_err_cnt),
[C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
				0, CNTR_NORMAL,
				access_rx_lookup_des_part2_parity_err_cnt),
[C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
				0, CNTR_NORMAL,
				access_rx_lookup_des_part1_unc_cor_err_cnt),
[C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_lookup_des_part1_unc_err_cnt),
[C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_next_free_buf_cor_err_cnt),
[C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_next_free_buf_unc_err_cnt),
[C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
				"RxRbufFlInitWrAddrParityErr", 0, 0,
				CNTR_NORMAL,
				access_rbuf_fl_init_wr_addr_parity_err_cnt),
[C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
				0, CNTR_NORMAL,
				access_rx_rbuf_fl_initdone_parity_err_cnt),
[C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
				0, CNTR_NORMAL,
				access_rx_rbuf_fl_write_addr_parity_err_cnt),
[C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_fl_rd_addr_parity_err_cnt),
[C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_empty_err_cnt),
[C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_full_err_cnt),
[C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
				CNTR_NORMAL,
				access_rbuf_bad_lookup_err_cnt),
[C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
				CNTR_NORMAL,
				access_rbuf_ctx_id_parity_err_cnt),
[C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
				CNTR_NORMAL,
				access_rbuf_csr_qeopdw_parity_err_cnt),
[C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
				"RxRbufCsrQNumOfPktParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
[C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
				"RxRbufCsrQTlPtrParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
[C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
				0, CNTR_NORMAL,
				access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
[C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
				0, CNTR_NORMAL,
				access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
[C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
				0, 0, CNTR_NORMAL,
				access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
[C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
				0, CNTR_NORMAL,
				access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
[C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
				"RxRbufCsrQHeadBufNumParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
[C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
				0, CNTR_NORMAL,
				access_rx_rbuf_block_list_read_cor_err_cnt),
[C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
				0, CNTR_NORMAL,
				access_rx_rbuf_block_list_read_unc_err_cnt),
[C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_lookup_des_cor_err_cnt),
[C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_lookup_des_unc_err_cnt),
[C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
				"RxRbufLookupDesRegUncCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
[C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_lookup_des_reg_unc_err_cnt),
[C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_free_list_cor_err_cnt),
[C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_free_list_unc_err_cnt),
[C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rcv_fsm_encoding_err_cnt),
[C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_flag_cor_err_cnt),
[C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_flag_unc_err_cnt),
[C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dc_sop_eop_parity_err_cnt),
[C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rcv_csr_parity_err_cnt),
[C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rcv_qp_map_table_cor_err_cnt),
[C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rcv_qp_map_table_unc_err_cnt),
[C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rcv_data_cor_err_cnt),
[C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rcv_data_unc_err_cnt),
[C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rcv_hdr_cor_err_cnt),
[C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rcv_hdr_unc_err_cnt),
[C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dc_intf_parity_err_cnt),
[C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_csr_cor_err_cnt),
/* SendPioErrStatus */
[C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_pec_sop_head_parity_err_cnt),
[C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_pcc_sop_head_parity_err_cnt),
[C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
				0, 0, CNTR_NORMAL,
				access_pio_last_returned_cnt_parity_err_cnt),
[C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
				0, CNTR_NORMAL,
				access_pio_current_free_cnt_parity_err_cnt),
[C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
				CNTR_NORMAL,
				access_pio_reserved_31_err_cnt),
[C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
				CNTR_NORMAL,
				access_pio_reserved_30_err_cnt),
[C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
				CNTR_NORMAL,
				access_pio_ppmc_sop_len_err_cnt),
[C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_ppmc_bqc_mem_parity_err_cnt),
[C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_vl_fifo_parity_err_cnt),
[C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_vlf_sop_parity_err_cnt),
[C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_vlf_v1_len_parity_err_cnt),
[C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_block_qw_count_parity_err_cnt),
[C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_write_qw_valid_parity_err_cnt),
[C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
				CNTR_NORMAL,
				access_pio_state_machine_err_cnt),
[C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_write_data_parity_err_cnt),
[C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
				CNTR_NORMAL,
				access_pio_host_addr_mem_cor_err_cnt),
[C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
				CNTR_NORMAL,
				access_pio_host_addr_mem_unc_err_cnt),
[C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
				CNTR_NORMAL,
				access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
[C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
				CNTR_NORMAL,
				access_pio_init_sm_in_err_cnt),
[C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
				CNTR_NORMAL,
				access_pio_ppmc_pbl_fifo_err_cnt),
[C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
				0, CNTR_NORMAL,
				access_pio_credit_ret_fifo_parity_err_cnt),
[C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
				CNTR_NORMAL,
				access_pio_v1_len_mem_bank1_cor_err_cnt),
[C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
				CNTR_NORMAL,
				access_pio_v1_len_mem_bank0_cor_err_cnt),
[C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
				CNTR_NORMAL,
				access_pio_v1_len_mem_bank1_unc_err_cnt),
[C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
				CNTR_NORMAL,
				access_pio_v1_len_mem_bank0_unc_err_cnt),
[C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_sm_pkt_reset_parity_err_cnt),
[C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_pkt_evict_fifo_parity_err_cnt),
[C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
				"PioSbrdctrlCrrelFifoParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
[C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_sbrdctl_crrel_parity_err_cnt),
[C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_pec_fifo_parity_err_cnt),
[C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_pcc_fifo_parity_err_cnt),
[C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
				CNTR_NORMAL,
				access_pio_sb_mem_fifo1_err_cnt),
[C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
				CNTR_NORMAL,
				access_pio_sb_mem_fifo0_err_cnt),
[C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_csr_parity_err_cnt),
[C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_write_addr_parity_err_cnt),
[C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
				CNTR_NORMAL,
				access_pio_write_bad_ctxt_err_cnt),
/* SendDmaErrStatus */
[C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
				0, CNTR_NORMAL,
				access_sdma_pcie_req_tracking_cor_err_cnt),
[C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
				0, CNTR_NORMAL,
				access_sdma_pcie_req_tracking_unc_err_cnt),
[C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_csr_parity_err_cnt),
[C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_rpy_tag_err_cnt),
/* SendEgressErrStatus */
[C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
				CNTR_NORMAL,
				access_tx_read_pio_memory_csr_unc_err_cnt),
[C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
				0, CNTR_NORMAL,
				access_tx_read_sdma_memory_csr_err_cnt),
[C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_egress_fifo_cor_err_cnt),
[C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_read_pio_memory_cor_err_cnt),
[C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_read_sdma_memory_cor_err_cnt),
[C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_sb_hdr_cor_err_cnt),
[C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
				CNTR_NORMAL,
				access_tx_credit_overrun_err_cnt),
[C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_fifo8_cor_err_cnt),
[C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_fifo7_cor_err_cnt),
[C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_fifo6_cor_err_cnt),
[C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_fifo5_cor_err_cnt),
[C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_fifo4_cor_err_cnt),
[C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_fifo3_cor_err_cnt),
[C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_fifo2_cor_err_cnt),
[C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_fifo1_cor_err_cnt),
[C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_fifo0_cor_err_cnt),
[C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
				CNTR_NORMAL,
				access_tx_credit_return_vl_err_cnt),
[C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
				CNTR_NORMAL,
				access_tx_hcrc_insertion_err_cnt),
[C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
				CNTR_NORMAL,
				access_tx_egress_fifo_unc_err_cnt),
[C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
				CNTR_NORMAL,
				access_tx_read_pio_memory_unc_err_cnt),
[C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
				CNTR_NORMAL,
				access_tx_read_sdma_memory_unc_err_cnt),
[C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
				CNTR_NORMAL,
				access_tx_sb_hdr_unc_err_cnt),
[C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
				CNTR_NORMAL,
				access_tx_credit_return_partiy_err_cnt),
[C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_launch_fifo8_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_launch_fifo7_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_launch_fifo6_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_launch_fifo5_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_launch_fifo4_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_launch_fifo3_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_launch_fifo2_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_launch_fifo1_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_launch_fifo0_unc_or_parity_err_cnt),
[C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma15_disallowed_packet_err_cnt),
[C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma14_disallowed_packet_err_cnt),
[C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma13_disallowed_packet_err_cnt),
[C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma12_disallowed_packet_err_cnt),
[C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma11_disallowed_packet_err_cnt),
[C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma10_disallowed_packet_err_cnt),
[C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma9_disallowed_packet_err_cnt),
[C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma8_disallowed_packet_err_cnt),
[C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma7_disallowed_packet_err_cnt),
[C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma6_disallowed_packet_err_cnt),
[C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma5_disallowed_packet_err_cnt),
[C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma4_disallowed_packet_err_cnt),
[C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma3_disallowed_packet_err_cnt),
[C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma2_disallowed_packet_err_cnt),
[C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma1_disallowed_packet_err_cnt),
[C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma0_disallowed_packet_err_cnt),
[C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
				CNTR_NORMAL,
				access_tx_config_parity_err_cnt),
[C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_tx_sbrd_ctl_csr_parity_err_cnt),
[C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_csr_parity_err_cnt),
[C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
				CNTR_NORMAL,
				access_tx_illegal_vl_err_cnt),
[C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
				"TxSbrdCtlStateMachineParityErr", 0, 0,
				CNTR_NORMAL,
				access_tx_sbrd_ctl_state_machine_parity_err_cnt),
[C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
				CNTR_NORMAL,
				access_egress_reserved_10_err_cnt),
[C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
				CNTR_NORMAL,
				access_egress_reserved_9_err_cnt),
[C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma_launch_intf_parity_err_cnt),
[C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
				CNTR_NORMAL,
				access_tx_pio_launch_intf_parity_err_cnt),
[C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
				CNTR_NORMAL,
				access_egress_reserved_6_err_cnt),
[C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
				CNTR_NORMAL,
				access_tx_incorrect_link_state_err_cnt),
[C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
				CNTR_NORMAL,
				access_tx_linkdown_err_cnt),
[C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
				"EgressFifoUnderrunOrParityErr", 0, 0,
				CNTR_NORMAL,
				access_tx_egress_fifo_underrun_or_parity_err_cnt),
[C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
				CNTR_NORMAL,
				access_egress_reserved_2_err_cnt),
[C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
				CNTR_NORMAL,
				access_tx_pkt_integrity_mem_unc_err_cnt),
[C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_pkt_integrity_mem_cor_err_cnt),
/* SendErrStatus */
[C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
				CNTR_NORMAL,
				access_send_csr_write_bad_addr_err_cnt),
[C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
				CNTR_NORMAL,
				access_send_csr_read_bad_addr_err_cnt),
[C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_send_csr_parity_cnt),
/* SendCtxtErrStatus */
[C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
				CNTR_NORMAL,
				access_pio_write_out_of_bounds_err_cnt),
[C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
				CNTR_NORMAL,
				access_pio_write_overflow_err_cnt),
[C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
				0, 0, CNTR_NORMAL,
				access_pio_write_crosses_boundary_err_cnt),
[C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
				CNTR_NORMAL,
				access_pio_disallowed_packet_err_cnt),
[C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
				CNTR_NORMAL,
				access_pio_inconsistent_sop_err_cnt),
/* SendDmaEngErrStatus */
[C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
				0, 0, CNTR_NORMAL,
				access_sdma_header_request_fifo_cor_err_cnt),
[C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_header_storage_cor_err_cnt),
[C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_packet_tracking_cor_err_cnt),
[C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_assembly_cor_err_cnt),
[C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_desc_table_cor_err_cnt),
[C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
				0, 0, CNTR_NORMAL,
				access_sdma_header_request_fifo_unc_err_cnt),
[C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_header_storage_unc_err_cnt),
[C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_packet_tracking_unc_err_cnt),
[C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_assembly_unc_err_cnt),
[C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_desc_table_unc_err_cnt),
[C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_timeout_err_cnt),
[C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_header_length_err_cnt),
[C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_header_address_err_cnt),
[C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_header_select_err_cnt),
[C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
				CNTR_NORMAL,
				access_sdma_reserved_9_err_cnt),
[C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_packet_desc_overflow_err_cnt),
[C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_length_mismatch_err_cnt),
[C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_halt_err_cnt),
[C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_mem_read_err_cnt),
[C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_first_desc_err_cnt),
[C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_tail_out_of_bounds_err_cnt),
[C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_too_long_err_cnt),
[C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_gen_mismatch_err_cnt),
[C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_wrong_dw_err_cnt),
};
static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
[C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
			CNTR_NORMAL),
[C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
			CNTR_NORMAL),
[C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
			CNTR_NORMAL),
[C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
			CNTR_NORMAL),
[C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
			CNTR_NORMAL),
[C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
			CNTR_NORMAL),
[C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
			CNTR_NORMAL),
[C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
[C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
[C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
[C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
[C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
[C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
			access_sw_link_dn_cnt),
[C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
			access_sw_link_up_cnt),
[C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
			access_sw_unknown_frame_cnt),
[C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
			access_sw_xmit_discards),
[C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
			CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
			access_sw_xmit_discards),
[C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
			access_xmit_constraint_errs),
[C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
			access_rcv_constraint_errs),
[C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
[C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
[C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
[C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
[C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
[C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
[C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
[C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
[C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
[C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
[C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
[C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
[C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
			access_sw_cpu_rc_acks),
[C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
			access_sw_cpu_rc_qacks),
[C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
			access_sw_cpu_rc_delayed_comp),
[OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
[OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
[OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
[OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
[OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
[OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
[OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
[OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
[OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
[OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
[OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
[OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
[OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
[OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
[OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
[OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
[OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
[OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
[OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
[OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
[OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
[OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
[OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
[OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
[OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
[OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
[OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
[OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
[OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
[OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
[OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
[OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
[OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
[OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
[OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
[OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
[OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
[OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
[OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
[OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
[OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
[OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
[OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
[OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
[OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
[OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
[OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
[OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
[OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
[OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
[OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
[OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
[OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
[OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
[OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
[OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
[OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
[OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
[OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
[OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
[OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
[OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
[OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
[OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
[OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
[OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
[OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
[OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
[OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
[OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
[OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
[OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
[OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
[OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
[OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
[OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
[OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
[OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
[OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
[OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
};
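/*
 * Illustrative sketch of walking the counter tables above; not part of
 * the driver proper. It relies only on the name member of struct
 * cntr_entry, which every element above initializes through the
 * CNTR_ELEM()-family macros.
 */
#if 0	/* usage sketch, not built */
static void example_list_counter_names(void)
{
	int i;

	for (i = 0; i < DEV_CNTR_LAST; i++)
		pr_info("dev cntr %d: %s\n", i, dev_cntrs[i].name);
	for (i = 0; i < PORT_CNTR_LAST; i++)
		pr_info("port cntr %d: %s\n", i, port_cntrs[i].name);
}
#endif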
/* ======================================================================== */

/* return true if this is chip revision A */
int is_ax(struct hfi1_devdata *dd)
{
	u8 chip_rev_minor =
		dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
			& CCE_REVISION_CHIP_REV_MINOR_MASK;
	return (chip_rev_minor & 0xf0) == 0;
}

/* return true if this is chip revision B */
int is_bx(struct hfi1_devdata *dd)
{
	u8 chip_rev_minor =
		dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
			& CCE_REVISION_CHIP_REV_MINOR_MASK;
	return (chip_rev_minor & 0xf0) == 0x10;
}
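/*
 * Examples: a minor-revision field of 0x00 or 0x01 decodes as an A-step
 * part (upper nibble 0x0), while 0x10 or 0x11 decodes as a B-step part
 * (upper nibble 0x1); the low nibble distinguishes steppings within a
 * step, e.g. A0 vs A1.
 */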
/*
 * Append string s to buffer buf.  Arguments curp and len are the current
 * position and remaining length, respectively.
 *
 * return 0 on success, 1 on out of room
 */
static int append_str(char *buf, char **curp, int *lenp, const char *s)
{
	char *p = *curp;
	int len = *lenp;
	int result = 0; /* success */
	char c;

	/* add a comma, if not first in the buffer */
	if (p != buf) {
		if (len == 0) {
			result = 1; /* out of room */
			goto done;
		}
		*p++ = ',';
		len--;
	}

	/* copy the string */
	while ((c = *s++) != 0) {
		if (len == 0) {
			result = 1; /* out of room */
			goto done;
		}
		*p++ = c;
		len--;
	}

done:
	/* write return values */
	*curp = p;
	*lenp = len;

	return result;
}
/*
 * Using the given flag table, print a comma separated string into
 * the buffer.  End in '*' if the buffer is too short.
 */
static char *flag_string(char *buf, int buf_len, u64 flags,
			 struct flag_table *table, int table_size)
{
	char extra[32];
	char *p = buf;
	int len = buf_len;
	int no_room = 0;
	int i;

	/* make sure there is at least 2 so we can form "*" */
	if (len < 2)
		return "";

	len--;	/* leave room for a nul */
	for (i = 0; i < table_size; i++) {
		if (flags & table[i].flag) {
			no_room = append_str(buf, &p, &len, table[i].str);
			if (no_room)
				break;
			flags &= ~table[i].flag;
		}
	}

	/* any undocumented bits left? */
	if (!no_room && flags) {
		snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
		no_room = append_str(buf, &p, &len, extra);
	}

	/* add * if ran out of room */
	if (no_room) {
		/* may need to back up to add space for a '*' */
		if (len == 0)
			--p;
		*p++ = '*';
	}

	/* add final nul - space already allocated above */
	*p = 0;
	return buf;
}
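/*
 * Usage sketch for flag_string(); illustrative only, not part of the
 * driver. It decodes a raw error-status value against one of the flag
 * tables defined earlier in this file; the 96-byte buffer size is an
 * arbitrary choice for the example.
 */
#if 0	/* usage sketch, not built */
static void example_decode_cce_err_status(u64 reg)
{
	char buf[96];

	pr_info("CceErrStatus: %s\n",
		flag_string(buf, sizeof(buf), reg, cce_err_status_flags,
			    ARRAY_SIZE(cce_err_status_flags)));
}
#endif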
/* first 8 CCE error interrupt source names */
static const char * const cce_misc_names[] = {
	"CceErrInt",	/* 0 */
	"RxeErrInt",	/* 1 */
	"MiscErrInt",	/* 2 */
	"Reserved3",	/* 3 */
	"PioErrInt",	/* 4 */
	"SDmaErrInt",	/* 5 */
	"EgressErrInt",	/* 6 */
	"TxeErrInt"	/* 7 */
};
/*
 * Return the miscellaneous error interrupt name.
 */
static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
{
	if (source < ARRAY_SIZE(cce_misc_names))
		strncpy(buf, cce_misc_names[source], bsize);
	else
		snprintf(buf, bsize, "Reserved%u",
			 source + IS_GENERAL_ERR_START);
	return buf;
}

/*
 * Return the SDMA engine error interrupt name.
 */
static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
{
	snprintf(buf, bsize, "SDmaEngErrInt%u", source);
	return buf;
}

/*
 * Return the send context error interrupt name.
 */
static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
{
	snprintf(buf, bsize, "SendCtxtErrInt%u", source);
	return buf;
}
5236 static const char * const various_names[] = {
5245 * Return the various interrupt name.
5247 static char *is_various_name(char *buf, size_t bsize, unsigned int source)
5249 if (source < ARRAY_SIZE(various_names))
5250 strncpy(buf, various_names[source], bsize);
5252 snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
5257 * Return the DC interrupt name.
5259 static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
5261 static const char * const dc_int_names[] = {
5265 "lbm" /* local block merge */
5268 if (source < ARRAY_SIZE(dc_int_names))
5269 snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
5271 snprintf(buf, bsize, "DCInt%u", source);
5275 static const char * const sdma_int_names[] = {
5282 * Return the SDMA engine interrupt name.
5284 static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
5286 /* what interrupt */
5287 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
5289 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
5291 if (likely(what < 3))
5292 snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
5294 snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
5299 * Return the receive available interrupt name.
5301 static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
5303 snprintf(buf, bsize, "RcvAvailInt%u", source);
5308 * Return the receive urgent interrupt name.
5310 static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
5312 snprintf(buf, bsize, "RcvUrgentInt%u", source);
5317 * Return the send credit interrupt name.
5319 static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
5321 snprintf(buf, bsize, "SendCreditInt%u", source);
5326 * Return the reserved interrupt name.
5328 static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
5330 snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
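/*
 * Each of the is_*_name() helpers above writes into the caller-supplied
 * buffer and returns it, so the helpers can be used directly as
 * arguments to dd_dev_info() and friends.
 */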
5334 static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
5336 return flag_string(buf, buf_len, flags,
5337 cce_err_status_flags,
5338 ARRAY_SIZE(cce_err_status_flags));
5341 static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
5343 return flag_string(buf, buf_len, flags,
5344 rxe_err_status_flags,
5345 ARRAY_SIZE(rxe_err_status_flags));
5348 static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
5350 return flag_string(buf, buf_len, flags, misc_err_status_flags,
5351 ARRAY_SIZE(misc_err_status_flags));
5354 static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
5356 return flag_string(buf, buf_len, flags,
5357 pio_err_status_flags,
5358 ARRAY_SIZE(pio_err_status_flags));
5361 static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
5363 return flag_string(buf, buf_len, flags,
5364 sdma_err_status_flags,
5365 ARRAY_SIZE(sdma_err_status_flags));
5368 static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
5370 return flag_string(buf, buf_len, flags,
5371 egress_err_status_flags,
5372 ARRAY_SIZE(egress_err_status_flags));
5375 static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
5377 return flag_string(buf, buf_len, flags,
5378 egress_err_info_flags,
5379 ARRAY_SIZE(egress_err_info_flags));
5382 static char *send_err_status_string(char *buf, int buf_len, u64 flags)
5384 return flag_string(buf, buf_len, flags,
5385 send_err_status_flags,
5386 ARRAY_SIZE(send_err_status_flags));
5389 static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5395 * For most of these errors, there is nothing that can be done except
5396 * report or record it.
5398 dd_dev_info(dd, "CCE Error: %s\n",
5399 cce_err_status_string(buf, sizeof(buf), reg));
5401 if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
5402 is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
5403 /* this error requires a manual drop into SPC freeze mode */
5405 start_freeze_handling(dd->pport, FREEZE_SELF);
5408 for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
5409 if (reg & (1ull << i)) {
5410 incr_cntr64(&dd->cce_err_status_cnt[i]);
5411 /* maintain a counter over all cce_err_status errors */
5412 incr_cntr64(&dd->sw_cce_err_status_aggregate);
5418 * Check counters for receive errors that do not have an interrupt
5419 * associated with them.
5421 #define RCVERR_CHECK_TIME 10 /* seconds */
5422 static void update_rcverr_timer(unsigned long opaque)
5424 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
5425 struct hfi1_pportdata *ppd = dd->pport;
5426 u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
5428 if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
5429 ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
5430 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
5431 set_link_down_reason(
5432 ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
5433 OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
5434 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
5436 dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;
5438 mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5441 static int init_rcverr(struct hfi1_devdata *dd)
5443 setup_timer(&dd->rcverr_timer, update_rcverr_timer, (unsigned long)dd);
5444 /* Assume the hardware counter has been reset */
5445 dd->rcv_ovfl_cnt = 0;
5446 return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5449 static void free_rcverr(struct hfi1_devdata *dd)
5451 if (dd->rcverr_timer.data)
5452 del_timer_sync(&dd->rcverr_timer);
5453 dd->rcverr_timer.data = 0;
5456 static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5461 dd_dev_info(dd, "Receive Error: %s\n",
5462 rxe_err_status_string(buf, sizeof(buf), reg));
5464 if (reg & ALL_RXE_FREEZE_ERR) {
5468 * Freeze mode recovery is disabled for the errors
5469 * in RXE_FREEZE_ABORT_MASK
5471 if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
5472 flags = FREEZE_ABORT;
5474 start_freeze_handling(dd->pport, flags);
5477 for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
5478 if (reg & (1ull << i))
5479 incr_cntr64(&dd->rcv_err_status_cnt[i]);
5483 static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5488 dd_dev_info(dd, "Misc Error: %s",
5489 misc_err_status_string(buf, sizeof(buf), reg));
5490 for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
5491 if (reg & (1ull << i))
5492 incr_cntr64(&dd->misc_err_status_cnt[i]);
5496 static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5501 dd_dev_info(dd, "PIO Error: %s\n",
5502 pio_err_status_string(buf, sizeof(buf), reg));
5504 if (reg & ALL_PIO_FREEZE_ERR)
5505 start_freeze_handling(dd->pport, 0);
5507 for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
5508 if (reg & (1ull << i))
5509 incr_cntr64(&dd->send_pio_err_status_cnt[i]);
5513 static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5518 dd_dev_info(dd, "SDMA Error: %s\n",
5519 sdma_err_status_string(buf, sizeof(buf), reg));
5521 if (reg & ALL_SDMA_FREEZE_ERR)
5522 start_freeze_handling(dd->pport, 0);
5524 for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
5525 if (reg & (1ull << i))
5526 incr_cntr64(&dd->send_dma_err_status_cnt[i]);
5530 static inline void __count_port_discards(struct hfi1_pportdata *ppd)
5532 incr_cntr64(&ppd->port_xmit_discards);
5535 static void count_port_inactive(struct hfi1_devdata *dd)
5537 __count_port_discards(dd->pport);
5541 * We have had a "disallowed packet" error during egress. Determine the
5542 * integrity check which failed, and update the relevant error counter, etc.
5544 * Note that the SEND_EGRESS_ERR_INFO register has only a single
5545 * bit of state per integrity check, and so we can miss the reason for an
5546 * egress error if more than one packet fails the same integrity check
5547 * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
5549 static void handle_send_egress_err_info(struct hfi1_devdata *dd,
5552 struct hfi1_pportdata *ppd = dd->pport;
5553 u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
5554 u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
5557 /* clear down all observed info as quickly as possible after read */
5558 write_csr(dd, SEND_EGRESS_ERR_INFO, info);
5561 "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
5562 info, egress_err_info_string(buf, sizeof(buf), info), src);
5564 /* Eventually add other counters for each bit */
5565 if (info & PORT_DISCARD_EGRESS_ERRS) {
5569 * Count all applicable bits as individual errors and
5570 * attribute them to the packet that triggered this handler.
5571 * This may not be completely accurate due to limitations
5572 * on the available hardware error information. There is
5573 * a single information register and any number of error
5574 * packets may have occurred and contributed to it before
5575 * this routine is called. This means that:
5576 * a) If multiple packets with the same error occur before
5577 * this routine is called, earlier packets are missed.
5578 * There is only a single bit for each error type.
5579 * b) Errors may not be attributed to the correct VL.
5580 * The driver is attributing all bits in the info register
5581 * to the packet that triggered this call, but bits
5582 * could be an accumulation of different packets with different VLs.
5584 * c) A single error packet may have multiple counts attached
5585 * to it. There is no way for the driver to know if
5586 * multiple bits set in the info register are due to a
5587 * single packet or multiple packets. The driver assumes multiple packets.
5590 weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS);
5591 for (i = 0; i < weight; i++) {
5592 __count_port_discards(ppd);
5593 if (vl >= 0 && vl < TXE_NUM_DATA_VL)
5594 incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
5596 incr_cntr64(&ppd->port_xmit_discards_vl
5603 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5604 * register. Does it represent a 'port inactive' error?
5606 static inline int port_inactive_err(u64 posn)
5608 return (posn >= SEES(TX_LINKDOWN) &&
5609 posn <= SEES(TX_INCORRECT_LINK_STATE));
5613 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5614 * register. Does it represent a 'disallowed packet' error?
5616 static inline int disallowed_pkt_err(int posn)
5618 return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
5619 posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
5623 * Input value is a bit position of one of the SDMA engine disallowed
5624 * packet errors. Return which engine. Use of this must be guarded by
5625 * disallowed_pkt_err().
5627 static inline int disallowed_pkt_engine(int posn)
5629 return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
5633 * Translate an SDMA engine to a VL. Return -1 if the translation cannot be done.
5636 static int engine_to_vl(struct hfi1_devdata *dd, int engine)
5638 struct sdma_vl_map *m;
5642 if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
5646 m = rcu_dereference(dd->sdma_map);
5647 vl = m->engine_to_vl[engine];
5654 * Translate the send context (software index) into a VL. Return -1 if the
5655 * translation cannot be done.
5657 static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
5659 struct send_context_info *sci;
5660 struct send_context *sc;
5663 sci = &dd->send_contexts[sw_index];
5665 /* there is no information for user (PSM) and ack contexts */
5666 if (sci->type != SC_KERNEL)
5672 if (dd->vld[15].sc == sc)
5674 for (i = 0; i < num_vls; i++)
5675 if (dd->vld[i].sc == sc)
5681 static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5683 u64 reg_copy = reg, handled = 0;
5687 if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
5688 start_freeze_handling(dd->pport, 0);
5689 else if (is_ax(dd) &&
5690 (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
5691 (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
5692 start_freeze_handling(dd->pport, 0);
5695 int posn = fls64(reg_copy);
5696 /* fls64() returns a 1-based offset, we want it zero based */
5697 int shift = posn - 1;
5698 u64 mask = 1ULL << shift;
5700 if (port_inactive_err(shift)) {
5701 count_port_inactive(dd);
5703 } else if (disallowed_pkt_err(shift)) {
5704 int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));
5706 handle_send_egress_err_info(dd, vl);
5715 dd_dev_info(dd, "Egress Error: %s\n",
5716 egress_err_status_string(buf, sizeof(buf), reg));
5718 for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
5719 if (reg & (1ull << i))
5720 incr_cntr64(&dd->send_egress_err_status_cnt[i]);
5724 static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5729 dd_dev_info(dd, "Send Error: %s\n",
5730 send_err_status_string(buf, sizeof(buf), reg));
5732 for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
5733 if (reg & (1ull << i))
5734 incr_cntr64(&dd->send_err_status_cnt[i]);
5739 * The maximum number of times the error clear down will loop before
5740 * blocking a repeating error. This value is arbitrary.
5742 #define MAX_CLEAR_COUNT 20
5745 * Clear and handle an error register. All error interrupts are funneled
5746 * through here to have a central location to correctly handle single-
5747 * or multi-shot errors.
5749 * For non per-context registers, call this routine with a context value
5750 * of 0 so the per-context offset is zero.
5752 * If the handler loops too many times, assume that something is wrong
5753 * and can't be fixed, so mask the error bits.
5755 static void interrupt_clear_down(struct hfi1_devdata *dd,
5757 const struct err_reg_info *eri)
5762 /* read in a loop until no more errors are seen */
5765 reg = read_kctxt_csr(dd, context, eri->status);
5768 write_kctxt_csr(dd, context, eri->clear, reg);
5769 if (likely(eri->handler))
5770 eri->handler(dd, context, reg);
5772 if (count > MAX_CLEAR_COUNT) {
5775 dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
5778 * Read-modify-write so any other masked bits stay masked.
5781 mask = read_kctxt_csr(dd, context, eri->mask);
5782 mask &= ~reg;
5783 write_kctxt_csr(dd, context, eri->mask, mask);
5790 * CCE block "misc" interrupt. Source is < 16.
5792 static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
5794 const struct err_reg_info *eri = &misc_errs[source];
5797 interrupt_clear_down(dd, 0, eri);
5799 dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
5804 static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
5806 return flag_string(buf, buf_len, flags,
5807 sc_err_status_flags,
5808 ARRAY_SIZE(sc_err_status_flags));
5812 * Send context error interrupt. Source (hw_context) is < 160.
5814 * All send context errors cause the send context to halt. The normal
5815 * clear-down mechanism cannot be used because we cannot clear the
5816 * error bits until several other long-running items are done first.
5817 * This is OK because with the context halted, nothing else is going
5818 * to happen on it anyway.
5820 static void is_sendctxt_err_int(struct hfi1_devdata *dd,
5821 unsigned int hw_context)
5823 struct send_context_info *sci;
5824 struct send_context *sc;
5830 sw_index = dd->hw_to_sw[hw_context];
5831 if (sw_index >= dd->num_send_contexts) {
5833 "out of range sw index %u for send context %u\n",
5834 sw_index, hw_context);
5837 sci = &dd->send_contexts[sw_index];
5840 dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
5841 sw_index, hw_context);
5845 /* tell the software that a halt has begun */
5846 sc_stop(sc, SCF_HALTED);
5848 status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
5850 dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
5851 send_context_err_status_string(flags, sizeof(flags),
5854 if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
5855 handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));
5858 * Automatically restart halted kernel contexts out of interrupt
5859 * context. User contexts must ask the driver to restart the context.
5861 if (sc->type != SC_USER)
5862 queue_work(dd->pport->hfi1_wq, &sc->halt_work);
5865 * Update the counters for the corresponding status bits.
5866 * Note that these particular counters are aggregated over all
5869 for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
5870 if (status & (1ull << i))
5871 incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
5875 static void handle_sdma_eng_err(struct hfi1_devdata *dd,
5876 unsigned int source, u64 status)
5878 struct sdma_engine *sde;
5881 sde = &dd->per_sdma[source];
5882 #ifdef CONFIG_SDMA_VERBOSITY
5883 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5884 slashstrip(__FILE__), __LINE__, __func__);
5885 dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
5886 sde->this_idx, source, (unsigned long long)status);
5889 sdma_engine_error(sde, status);
5892 * Update the counters for the corresponding status bits.
5893 * Note that these particular counters are aggregated over
5894 * all 16 DMA engines.
5896 for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
5897 if (status & (1ull << i))
5898 incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
5903 * CCE block SDMA error interrupt. Source is < 16.
5905 static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
5907 #ifdef CONFIG_SDMA_VERBOSITY
5908 struct sdma_engine *sde = &dd->per_sdma[source];
5910 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5911 slashstrip(__FILE__), __LINE__, __func__);
5912 dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
5914 sdma_dumpstate(sde);
5916 interrupt_clear_down(dd, source, &sdma_eng_err);
5920 * CCE block "various" interrupt. Source is < 8.
5922 static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
5924 const struct err_reg_info *eri = &various_err[source];
5927 * TCritInt cannot go through interrupt_clear_down()
5928 * because it is not a second tier interrupt. The handler
5929 * should be called directly.
5931 if (source == TCRIT_INT_SOURCE)
5932 handle_temp_err(dd);
5933 else if (eri->handler)
5934 interrupt_clear_down(dd, 0, eri);
5937 "%s: Unimplemented/reserved interrupt %d\n",
5941 static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
5943 /* src_ctx is always zero */
5944 struct hfi1_pportdata *ppd = dd->pport;
5945 unsigned long flags;
5946 u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
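/*
 * The INVERT CSR flips the sense of the level-sensitive ModPresent
 * pin, re-arming the interrupt so the opposite transition (plug-in
 * after removal, removal after plug-in) is detected.
 */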
5948 if (reg & QSFP_HFI0_MODPRST_N) {
5949 if (!qsfp_mod_present(ppd)) {
5950 dd_dev_info(dd, "%s: QSFP module removed\n",
5953 ppd->driver_link_ready = 0;
5955 * Cable removed, reset all our information about the
5956 * cache and cable capabilities
5959 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5961 * We don't set cache_refresh_required here as we expect
5962 * an interrupt when a cable is inserted
5964 ppd->qsfp_info.cache_valid = 0;
5965 ppd->qsfp_info.reset_needed = 0;
5966 ppd->qsfp_info.limiting_active = 0;
5967 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
5969 /* Invert the ModPresent pin now to detect plug-in */
5970 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
5971 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
5973 if ((ppd->offline_disabled_reason >
5975 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
5976 (ppd->offline_disabled_reason ==
5977 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
5978 ppd->offline_disabled_reason =
5980 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
5982 if (ppd->host_link_state == HLS_DN_POLL) {
5984 * The link is still in POLL. This means
5985 * that the normal link down processing
5986 * will not happen. We have to do it here
5987 * before turning the DC off.
5989 queue_work(ppd->hfi1_wq, &ppd->link_down_work);
5992 dd_dev_info(dd, "%s: QSFP module inserted\n",
5995 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5996 ppd->qsfp_info.cache_valid = 0;
5997 ppd->qsfp_info.cache_refresh_required = 1;
5998 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
6002 * Stop inversion of ModPresent pin to detect
6003 * removal of the cable
6005 qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
6006 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6007 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6009 ppd->offline_disabled_reason =
6010 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
6014 if (reg & QSFP_HFI0_INT_N) {
6015 dd_dev_info(dd, "%s: Interrupt received from QSFP module\n",
6017 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6018 ppd->qsfp_info.check_interrupt_flags = 1;
6019 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
6022 /* Schedule the QSFP work only if there is a cable attached. */
6023 if (qsfp_mod_present(ppd))
6024 queue_work(ppd->hfi1_wq, &ppd->qsfp_info.qsfp_work);
6027 static int request_host_lcb_access(struct hfi1_devdata *dd)
6031 ret = do_8051_command(dd, HCMD_MISC,
6032 (u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
6033 LOAD_DATA_FIELD_ID_SHIFT, NULL);
6034 if (ret != HCMD_SUCCESS) {
6035 dd_dev_err(dd, "%s: command failed with error %d\n",
6038 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6041 static int request_8051_lcb_access(struct hfi1_devdata *dd)
6045 ret = do_8051_command(dd, HCMD_MISC,
6046 (u64)HCMD_MISC_GRANT_LCB_ACCESS <<
6047 LOAD_DATA_FIELD_ID_SHIFT, NULL);
6048 if (ret != HCMD_SUCCESS) {
6049 dd_dev_err(dd, "%s: command failed with error %d\n",
6052 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6056 * Set the LCB selector - allow host access. The DCC selector always
6057 * points to the host.
6059 static inline void set_host_lcb_access(struct hfi1_devdata *dd)
6061 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6062 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK |
6063 DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
6067 * Clear the LCB selector - allow 8051 access. The DCC selector always
6068 * points to the host.
6070 static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
6072 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6073 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
6077 * Acquire LCB access from the 8051. If the host already has access,
6078 * just increment a counter. Otherwise, inform the 8051 that the
6079 * host is taking access.
6083 * -EBUSY if the 8051 has control and cannot be disturbed
6084 * -errno if unable to acquire access from the 8051
6086 int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6088 struct hfi1_pportdata *ppd = dd->pport;
6092 * Use the host link state lock so the operation of this routine
6093 * { link state check, selector change, count increment } can occur
6094 * as a unit against a link state change. Otherwise there is a
6095 * race between the state change and the count increment.
6098 mutex_lock(&ppd->hls_lock);
6100 while (!mutex_trylock(&ppd->hls_lock))
6104 /* this access is valid only when the link is up */
6105 if ((ppd->host_link_state & HLS_UP) == 0) {
6106 dd_dev_info(dd, "%s: link state %s not up\n",
6107 __func__, link_state_name(ppd->host_link_state));
6112 if (dd->lcb_access_count == 0) {
6113 ret = request_host_lcb_access(dd);
6116 "%s: unable to acquire LCB access, err %d\n",
6120 set_host_lcb_access(dd);
6122 dd->lcb_access_count++;
6124 mutex_unlock(&ppd->hls_lock);
6129 * Release LCB access by decrementing the use count. If the count is moving
6130 * from 1 to 0, inform 8051 that it has control back.
6134 * -errno if unable to release access to the 8051
6136 int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6141 * Use the host link state lock because the acquire needed it.
6142 * Here, we only need to keep { selector change, count decrement } as a unit.
6146 mutex_lock(&dd->pport->hls_lock);
6148 while (!mutex_trylock(&dd->pport->hls_lock))
6152 if (dd->lcb_access_count == 0) {
6153 dd_dev_err(dd, "%s: LCB access count is zero. Skipping.\n",
6158 if (dd->lcb_access_count == 1) {
6159 set_8051_lcb_access(dd);
6160 ret = request_8051_lcb_access(dd);
6163 "%s: unable to release LCB access, err %d\n",
6165 /* restore host access if the grant didn't work */
6166 set_host_lcb_access(dd);
6170 dd->lcb_access_count--;
6172 mutex_unlock(&dd->pport->hls_lock);
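/*
 * Typical usage (sketch only): bracket direct LCB CSR access with the
 * acquire/release pair so the host and the 8051 never drive the LCB
 * selector at the same time:
 *
 *	if (acquire_lcb_access(dd, 1) == 0) {
 *		reg = read_csr(dd, DC_LCB_STS_ROUND_TRIP_LTP_CNT);
 *		release_lcb_access(dd, 1);
 *	}
 */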
6177 * Initialize LCB access variables and state. Called during driver load,
6178 * after most of the initialization is finished.
6180 * The DC default is LCB access on for the host. The driver defaults to
6181 * leaving access to the 8051. Assign access now - this constrains the call
6182 * to this routine to be after all LCB set-up is done. In particular, after
6183 * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
6185 static void init_lcb_access(struct hfi1_devdata *dd)
6187 dd->lcb_access_count = 0;
6191 * Write a response back to an 8051 request.
6193 static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
6195 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
6196 DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK |
6198 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT |
6199 (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
6203 * Handle host requests from the 8051.
6205 * This is a work-queue function outside of the interrupt.
6207 void handle_8051_request(struct work_struct *work)
6209 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6211 struct hfi1_devdata *dd = ppd->dd;
6214 u8 type, i, lanes, *cache = ppd->qsfp_info.cache;
6215 u8 cdr_ctrl_byte = cache[QSFP_CDR_CTRL_BYTE_OFFS];
6217 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
6218 if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
6219 return; /* no request */
6221 /* zero out COMPLETED so the response is seen */
6222 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
6224 /* extract request details */
6225 type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
6226 & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
6227 data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
6228 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
6231 case HREQ_LOAD_CONFIG:
6232 case HREQ_SAVE_CONFIG:
6233 case HREQ_READ_CONFIG:
6234 case HREQ_SET_TX_EQ_ABS:
6235 case HREQ_SET_TX_EQ_REL:
6236 dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
6238 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6243 for (i = 0; lanes; lanes >>= 1, i++) {
6248 if (cache[QSFP_MOD_PWR_OFFS] & 0x8 &&
6249 cache[QSFP_CDR_INFO_OFFS] & 0x80)
6250 cdr_ctrl_byte |= (1 << (i + 4));
6252 /* disable TX CDR */
6253 if (cache[QSFP_MOD_PWR_OFFS] & 0x8 &&
6254 cache[QSFP_CDR_INFO_OFFS] & 0x80)
6255 cdr_ctrl_byte &= ~(1 << (i + 4));
6260 if (cache[QSFP_MOD_PWR_OFFS] & 0x4 &&
6261 cache[QSFP_CDR_INFO_OFFS] & 0x40)
6262 cdr_ctrl_byte |= (1 << i);
6264 /* disable RX CDR */
6265 if (cache[QSFP_MOD_PWR_OFFS] & 0x4 &&
6266 cache[QSFP_CDR_INFO_OFFS] & 0x40)
6267 cdr_ctrl_byte &= ~(1 << i);
6270 qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_CDR_CTRL_BYTE_OFFS,
6272 hreq_response(dd, HREQ_SUCCESS, data);
6273 refresh_qsfp_cache(ppd, &ppd->qsfp_info);
6276 case HREQ_CONFIG_DONE:
6277 hreq_response(dd, HREQ_SUCCESS, 0);
6280 case HREQ_INTERFACE_TEST:
6281 hreq_response(dd, HREQ_SUCCESS, data);
6285 dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
6286 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6291 static void write_global_credit(struct hfi1_devdata *dd,
6292 u8 vau, u16 total, u16 shared)
6294 write_csr(dd, SEND_CM_GLOBAL_CREDIT,
6296 SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT) |
6298 SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT) |
6299 ((u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT));
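/*
 * SEND_CM_GLOBAL_CREDIT packs three fields into one CSR: the total
 * credit limit, the shared credit limit, and the allocation unit
 * exponent (vAU).
 */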
6303 * Set up initial VL15 credits of the remote. Assumes the rest of
6304 * the CM credit registers are zero from a previous global or credit reset.
6306 void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf)
6308 /* leave shared count at zero for both global and VL15 */
6309 write_global_credit(dd, vau, vl15buf, 0);
6311 /* We may need some credits for another VL when sending packets
6312 * with the snoop interface. Dividing it down the middle for VL15
6313 * and VL0 should suffice.
6315 if (unlikely(dd->hfi1_snoop.mode_flag == HFI1_PORT_SNOOP_MODE)) {
6316 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)(vl15buf >> 1)
6317 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6318 write_csr(dd, SEND_CM_CREDIT_VL, (u64)(vl15buf >> 1)
6319 << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT);
6321 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
6322 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6327 * Zero all credit details from the previous connection and
6328 * reset the CM manager's internal counters.
6330 void reset_link_credits(struct hfi1_devdata *dd)
6334 /* remove all previous VL credit limits */
6335 for (i = 0; i < TXE_NUM_DATA_VL; i++)
6336 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
6337 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
6338 write_global_credit(dd, 0, 0, 0);
6339 /* reset the CM block */
6340 pio_send_control(dd, PSC_CM_RESET);
6343 /* convert a vCU to a CU */
6344 static u32 vcu_to_cu(u8 vcu)
6349 /* convert a CU to a vCU */
6350 static u8 cu_to_vcu(u32 cu)
6355 /* convert a vAU to an AU */
6356 static u32 vau_to_au(u8 vau)
6358 return 8 * (1 << vau);
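/*
 * An allocation unit (AU) is 8 * 2^vAU bytes: vAU 0 is AU 8, vAU 1 is
 * AU 16, and the hardwired transmit vAU of 3 used in handle_verify_cap()
 * below is AU 64.
 */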
6361 static void set_linkup_defaults(struct hfi1_pportdata *ppd)
6363 ppd->sm_trap_qp = 0x0;
6368 * Graceful LCB shutdown. This leaves the LCB FIFOs in reset.
6370 static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
6374 /* clear lcb run: LCB_CFG_RUN.EN = 0 */
6375 write_csr(dd, DC_LCB_CFG_RUN, 0);
6376 /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
6377 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
6378 1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
6379 /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
6380 dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
6381 reg = read_csr(dd, DCC_CFG_RESET);
6382 write_csr(dd, DCC_CFG_RESET, reg |
6383 (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT) |
6384 (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT));
6385 (void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
6387 udelay(1); /* must hold for the longer of 16cclks or 20ns */
6388 write_csr(dd, DCC_CFG_RESET, reg);
6389 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6394 * This routine should be called after the link has been transitioned to
6395 * OFFLINE (OFFLINE state has the side effect of putting the SerDes into reset).
6398 * The expectation is that the caller of this routine would have taken
6399 * care of properly transitioning the link into the correct state.
6401 static void dc_shutdown(struct hfi1_devdata *dd)
6403 unsigned long flags;
6405 spin_lock_irqsave(&dd->dc8051_lock, flags);
6406 if (dd->dc_shutdown) {
6407 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6410 dd->dc_shutdown = 1;
6411 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6412 /* Shutdown the LCB */
6413 lcb_shutdown(dd, 1);
6415 * Going to OFFLINE would have caused the 8051 to put the
6416 * SerDes into reset already. Just need to shut down the 8051,
6419 write_csr(dd, DC_DC8051_CFG_RST, 0x1);
6423 * Calling this after the DC has been brought out of reset should not be harmful.
6426 static void dc_start(struct hfi1_devdata *dd)
6428 unsigned long flags;
6431 spin_lock_irqsave(&dd->dc8051_lock, flags);
6432 if (!dd->dc_shutdown)
6434 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6435 /* Take the 8051 out of reset */
6436 write_csr(dd, DC_DC8051_CFG_RST, 0ull);
6437 /* Wait until 8051 is ready */
6438 ret = wait_fm_ready(dd, TIMEOUT_8051_START);
6440 dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
6443 /* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
6444 write_csr(dd, DCC_CFG_RESET, 0x10);
6445 /* lcb_shutdown() with abort=1 does not restore these */
6446 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6447 spin_lock_irqsave(&dd->dc8051_lock, flags);
6448 dd->dc_shutdown = 0;
6450 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6454 * These LCB adjustments are for the Aurora SerDes core in the FPGA.
6456 static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
6458 u64 rx_radr, tx_radr;
6461 if (dd->icode != ICODE_FPGA_EMULATION)
6465 * These LCB defaults on emulator _s are good, nothing to do here:
6466 * LCB_CFG_TX_FIFOS_RADR
6467 * LCB_CFG_RX_FIFOS_RADR
6469 * LCB_CFG_IGNORE_LOST_RCLK
6471 if (is_emulator_s(dd))
6473 /* else this is _p */
6475 version = emulator_rev(dd);
6477 version = 0x2d; /* all B0 use 0x2d or higher settings */
6479 if (version <= 0x12) {
6480 /* release 0x12 and below */
6483 * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
6484 * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
6485 * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
6488 0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6489 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6490 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6492 * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
6493 * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
6495 tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6496 } else if (version <= 0x18) {
6497 /* release 0x13 up to 0x18 */
6498 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6500 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6501 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6502 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6503 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6504 } else if (version == 0x19) {
6506 /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
6508 0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6509 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6510 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6511 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6512 } else if (version == 0x1a) {
6514 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6516 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6517 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6518 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6519 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6520 write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
6522 /* release 0x1b and higher */
6523 /* LCB_CFG_RX_FIFOS_RADR = 0x877 */
6525 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6526 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6527 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6528 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6531 write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
6532 /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
6533 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
6534 DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
6535 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
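/*
 * Summary of the version-dependent values programmed above:
 *	rev <= 0x12: RX_FIFOS_RADR 0xa99, TX RST_VAL 6
 *	0x13 - 0x18: RX_FIFOS_RADR 0x988, TX RST_VAL 7
 *	rev == 0x19: RX_FIFOS_RADR 0xa99, TX RST_VAL 3
 *	rev == 0x1a: RX_FIFOS_RADR 0x988, TX RST_VAL 7, plus LN_DCLK = 1
 *	rev >= 0x1b: RX_FIFOS_RADR 0x877, TX RST_VAL 3
 */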
6539 * Handle a SMA idle message
6541 * This is a work-queue function outside of the interrupt.
6543 void handle_sma_message(struct work_struct *work)
6545 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6547 struct hfi1_devdata *dd = ppd->dd;
6552 * msg is bytes 1-4 of the 40-bit idle message - the command code is in the low byte.
6555 ret = read_idle_sma(dd, &msg);
6558 dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
6560 * React to the SMA message. Byte[1] (0 for us) is the command.
6562 switch (msg & 0xff) {
6565 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6568 * Only expected in INIT or ARMED, discard otherwise.
6570 if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
6571 ppd->neighbor_normal = 1;
6573 case SMA_IDLE_ACTIVE:
6575 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6578 * Can activate the node. Discard otherwise.
6580 if (ppd->host_link_state == HLS_UP_ARMED &&
6581 ppd->is_active_optimize_enabled) {
6582 ppd->neighbor_normal = 1;
6583 ret = set_link_state(ppd, HLS_UP_ACTIVE);
6587 "%s: received Active SMA idle message, couldn't set link to Active\n",
6593 "%s: received unexpected SMA idle message 0x%llx\n",
6599 static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
6602 unsigned long flags;
6604 spin_lock_irqsave(&dd->rcvctrl_lock, flags);
6605 rcvctrl = read_csr(dd, RCV_CTRL);
6608 write_csr(dd, RCV_CTRL, rcvctrl);
6609 spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
6612 static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
6614 adjust_rcvctrl(dd, add, 0);
6617 static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
6619 adjust_rcvctrl(dd, 0, clear);
6623 * Called from all interrupt handlers to start handling an SPC freeze.
6625 void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
6627 struct hfi1_devdata *dd = ppd->dd;
6628 struct send_context *sc;
6631 if (flags & FREEZE_SELF)
6632 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6634 /* enter frozen mode */
6635 dd->flags |= HFI1_FROZEN;
6637 /* notify all SDMA engines that they are going into a freeze */
6638 sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
6640 /* do halt pre-handling on all enabled send contexts */
6641 for (i = 0; i < dd->num_send_contexts; i++) {
6642 sc = dd->send_contexts[i].sc;
6643 if (sc && (sc->flags & SCF_ENABLED))
6644 sc_stop(sc, SCF_FROZEN | SCF_HALTED);
6647 /* Send context are frozen. Notify user space */
6648 hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
6650 if (flags & FREEZE_ABORT) {
6652 "Aborted freeze recovery. Please REBOOT system\n");
6655 /* queue non-interrupt handler */
6656 queue_work(ppd->hfi1_wq, &ppd->freeze_work);
6660 * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
6661 * depending on the "freeze" parameter.
6663 * No need to return an error if it times out, our only option
6664 * is to proceed anyway.
6666 static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
6668 unsigned long timeout;
6671 timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
6673 reg = read_csr(dd, CCE_STATUS);
6675 /* waiting until all indicators are set */
6676 if ((reg & ALL_FROZE) == ALL_FROZE)
6677 return; /* all done */
6679 /* waiting until all indicators are clear */
6680 if ((reg & ALL_FROZE) == 0)
6681 return; /* all done */
6684 if (time_after(jiffies, timeout)) {
6686 "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
6687 freeze ? "" : "un", reg & ALL_FROZE,
6688 freeze ? ALL_FROZE : 0ull);
6691 usleep_range(80, 120);
6696 * Do all freeze handling for the RXE block.
6698 static void rxe_freeze(struct hfi1_devdata *dd)
6703 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6705 /* disable all receive contexts */
6706 for (i = 0; i < dd->num_rcv_contexts; i++)
6707 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, i);
6711 * Unfreeze handling for the RXE block - kernel contexts only.
6712 * This will also enable the port. User contexts will do unfreeze
6713 * handling on a per-context basis as they call into the driver.
6716 static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
6721 /* enable all kernel contexts */
6722 for (i = 0; i < dd->n_krcv_queues; i++) {
6723 rcvmask = HFI1_RCVCTRL_CTXT_ENB;
6724 /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
6725 rcvmask |= HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, DMA_RTAIL) ?
6726 HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
6727 hfi1_rcvctrl(dd, rcvmask, i);
6731 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6735 * Non-interrupt SPC freeze handling.
6737 * This is a work-queue function outside of the triggering interrupt.
6739 void handle_freeze(struct work_struct *work)
6741 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6743 struct hfi1_devdata *dd = ppd->dd;
6745 /* wait for freeze indicators on all affected blocks */
6746 wait_for_freeze_status(dd, 1);
6748 /* SPC is now frozen */
6750 /* do send PIO freeze steps */
6753 /* do send DMA freeze steps */
6756 /* do send egress freeze steps - nothing to do */
6758 /* do receive freeze steps */
6762 * Unfreeze the hardware - clear the freeze, wait for each
6763 * block's frozen bit to clear, then clear the frozen flag.
6765 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6766 wait_for_freeze_status(dd, 0);
6769 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6770 wait_for_freeze_status(dd, 1);
6771 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6772 wait_for_freeze_status(dd, 0);
6775 /* do send PIO unfreeze steps for kernel contexts */
6776 pio_kernel_unfreeze(dd);
6778 /* do send DMA unfreeze steps */
6781 /* do send egress unfreeze steps - nothing to do */
6783 /* do receive unfreeze steps for kernel contexts */
6784 rxe_kernel_unfreeze(dd);
6787 * The unfreeze procedure touches global device registers when
6788 * it disables and re-enables RXE. Mark the device unfrozen
6789 * after all that is done so other parts of the driver waiting
6790 * for the device to unfreeze don't do things out of order.
6792 * The above implies that the meaning of HFI1_FROZEN flag is
6793 * "Device has gone into freeze mode and freeze mode handling
6794 * is still in progress."
6796 * The flag will be removed when freeze mode processing has
6799 dd->flags &= ~HFI1_FROZEN;
6800 wake_up(&dd->event_queue);
6802 /* no longer frozen */
6806 * Handle a link up interrupt from the 8051.
6808 * This is a work-queue function outside of the interrupt.
6810 void handle_link_up(struct work_struct *work)
6812 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6814 set_link_state(ppd, HLS_UP_INIT);
6816 /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
6817 read_ltp_rtt(ppd->dd);
6819 * OPA specifies that certain counters are cleared on a transition
6820 * to link up, so do that.
6822 clear_linkup_counters(ppd->dd);
6824 * And (re)set link up default values.
6826 set_linkup_defaults(ppd);
6828 /* enforce link speed enabled */
6829 if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
6830 /* oops - current speed is not enabled, bounce */
6832 "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
6833 ppd->link_speed_active, ppd->link_speed_enabled);
6834 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
6835 OPA_LINKDOWN_REASON_SPEED_POLICY);
6836 set_link_state(ppd, HLS_DN_OFFLINE);
6843 * Several pieces of LNI information were cached for SMA in ppd.
6844 * Reset these on link down.
6846 static void reset_neighbor_info(struct hfi1_pportdata *ppd)
6848 ppd->neighbor_guid = 0;
6849 ppd->neighbor_port_number = 0;
6850 ppd->neighbor_type = 0;
6851 ppd->neighbor_fm_security = 0;
6855 * Handle a link down interrupt from the 8051.
6857 * This is a work-queue function outside of the interrupt.
6859 void handle_link_down(struct work_struct *work)
6861 u8 lcl_reason, neigh_reason = 0;
6862 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6865 if ((ppd->host_link_state &
6866 (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
6867 ppd->port_type == PORT_TYPE_FIXED)
6868 ppd->offline_disabled_reason =
6869 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);
6871 /* Go offline first, then deal with reading/writing through 8051 */
6872 set_link_state(ppd, HLS_DN_OFFLINE);
6875 read_planned_down_reason_code(ppd->dd, &neigh_reason);
6878 * If no reason, assume peer-initiated but missed
6879 * LinkGoingDown idle flits.
6881 if (neigh_reason == 0)
6882 lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
6884 set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
6886 reset_neighbor_info(ppd);
6888 /* disable the port */
6889 clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6892 * If there is no cable attached, turn the DC off. Otherwise,
6893 * start the link bring up.
6895 if (!qsfp_mod_present(ppd)) {
6896 dc_shutdown(ppd->dd);
6903 void handle_link_bounce(struct work_struct *work)
6905 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6909 * Only do something if the link is currently up.
6911 if (ppd->host_link_state & HLS_UP) {
6912 set_link_state(ppd, HLS_DN_OFFLINE);
6916 dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
6917 __func__, link_state_name(ppd->host_link_state));
6922 * Mask conversion: Capability exchange to Port LTP. The capability
6923 * exchange has an implicit 16b CRC that is mandatory.
6925 static int cap_to_port_ltp(int cap)
6927 int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
6929 if (cap & CAP_CRC_14B)
6930 port_ltp |= PORT_LTP_CRC_MODE_14;
6931 if (cap & CAP_CRC_48B)
6932 port_ltp |= PORT_LTP_CRC_MODE_48;
6933 if (cap & CAP_CRC_12B_16B_PER_LANE)
6934 port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
6940 * Convert an OPA Port LTP mask to capability mask
6942 int port_ltp_to_cap(int port_ltp)
6946 if (port_ltp & PORT_LTP_CRC_MODE_14)
6947 cap_mask |= CAP_CRC_14B;
6948 if (port_ltp & PORT_LTP_CRC_MODE_48)
6949 cap_mask |= CAP_CRC_48B;
6950 if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
6951 cap_mask |= CAP_CRC_12B_16B_PER_LANE;
6957 * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
6959 static int lcb_to_port_ltp(int lcb_crc)
6963 if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
6964 port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
6965 else if (lcb_crc == LCB_CRC_48B)
6966 port_ltp = PORT_LTP_CRC_MODE_48;
6967 else if (lcb_crc == LCB_CRC_14B)
6968 port_ltp = PORT_LTP_CRC_MODE_14;
6970 port_ltp = PORT_LTP_CRC_MODE_16;
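/*
 * Three CRC-mode encodings are in play in the helpers above: CAP_CRC_*
 * (capability exchange bits), PORT_LTP_CRC_MODE_* (OPA portinfo bits),
 * and LCB_CRC_* (the DC LCB hardware value). The 16b mode is always
 * implied in the capability exchange, so cap_to_port_ltp() starts from
 * PORT_LTP_CRC_MODE_16.
 */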
6976 * Our neighbor has indicated that we are allowed to act as a fabric
6977 * manager, so place the full management partition key in the second
6978 * (0-based) pkey array position (see OPAv1, section 20.2.2.6.8). Note
6979 * that we should already have the limited management partition key in
6980 * array element 1, and also that the port is not yet up when
6981 * add_full_mgmt_pkey() is invoked.
6983 static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
6985 struct hfi1_devdata *dd = ppd->dd;
6987 /* Sanity check - ppd->pkeys[2] should be 0, or already initialized */
6988 if (!((ppd->pkeys[2] == 0) || (ppd->pkeys[2] == FULL_MGMT_P_KEY)))
6989 dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
6990 __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
6991 ppd->pkeys[2] = FULL_MGMT_P_KEY;
6992 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
6996 * Convert the given link width to the OPA link width bitmask.
6998 static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
7003 * Simulator and quick linkup do not set the width.
7004 * Just set it to 4x without complaint.
7006 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
7007 return OPA_LINK_WIDTH_4X;
7008 return 0; /* no lanes up */
7009 case 1: return OPA_LINK_WIDTH_1X;
7010 case 2: return OPA_LINK_WIDTH_2X;
7011 case 3: return OPA_LINK_WIDTH_3X;
7013 dd_dev_info(dd, "%s: invalid width %d, using 4\n",
7016 case 4: return OPA_LINK_WIDTH_4X;
7021 * Do a population count on the bottom nibble.
7023 static const u8 bit_counts[16] = {
7024 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
7027 static inline u8 nibble_to_count(u8 nibble)
7029 return bit_counts[nibble & 0xf];
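/* e.g. nibble_to_count(0xb) == 3, since 0xb is binary 1011 */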
7033 * Read the active lane information from the 8051 registers and return
7036 * Active lane information is found in these 8051 registers:
7040 static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
7046 u8 tx_polarity_inversion;
7047 u8 rx_polarity_inversion;
7050 /* read the active lanes */
7051 read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
7052 &rx_polarity_inversion, &max_rate);
7053 read_local_lni(dd, &enable_lane_rx);
7055 /* convert to counts */
7056 tx = nibble_to_count(enable_lane_tx);
7057 rx = nibble_to_count(enable_lane_rx);
7060 * Set link_speed_active here, overriding what was set in
7061 * handle_verify_cap(). The ASIC 8051 firmware does not correctly
7062 * set the max_rate field in handle_verify_cap until v0.19.
7064 if ((dd->icode == ICODE_RTL_SILICON) &&
7065 (dd->dc8051_ver < dc8051_ver(0, 19))) {
7066 /* max_rate: 0 = 12.5G, 1 = 25G */
7069 dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
7073 "%s: unexpected max rate %d, using 25Gb\n",
7074 __func__, (int)max_rate);
7077 dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
7083 "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
7084 enable_lane_tx, tx, enable_lane_rx, rx);
7085 *tx_width = link_width_to_bits(dd, tx);
7086 *rx_width = link_width_to_bits(dd, rx);
7090 * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
7091 * Valid after the end of VerifyCap and during LinkUp. Does not change
7092 * after link up; look elsewhere for downgrade information.
7095 * + bits [7:4] contain the number of active transmitters
7096 * + bits [3:0] contain the number of active receivers
7097 * These are numbers 1 through 4 and can be different values if the
7098 * link is asymmetric.
7100 * verify_cap_local_fm_link_width[0] retains its original value.
7102 static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
7106 u8 misc_bits, local_flags;
7107 u16 active_tx, active_rx;
7109 read_vc_local_link_width(dd, &misc_bits, &local_flags, &widths);
7111 rx = (widths >> 8) & 0xf;
7113 *tx_width = link_width_to_bits(dd, tx);
7114 *rx_width = link_width_to_bits(dd, rx);
7116 /* print the active widths */
7117 get_link_widths(dd, &active_tx, &active_rx);
7121 * Set ppd->link_width_active and ppd->link_width_downgrade_active using
7122 * hardware information when the link first comes up.
7124 * The link width is not available until after VerifyCap.AllFramesReceived
7125 * (the trigger for handle_verify_cap), so this is outside that routine
7126 * and should be called when the 8051 signals linkup.
7128 void get_linkup_link_widths(struct hfi1_pportdata *ppd)
7130 u16 tx_width, rx_width;
7132 /* get end-of-LNI link widths */
7133 get_linkup_widths(ppd->dd, &tx_width, &rx_width);
7135 /* use tx_width as the link is supposed to be symmetric on link up */
7136 ppd->link_width_active = tx_width;
7137 /* link width downgrade active (LWD.A) starts out matching LW.A */
7138 ppd->link_width_downgrade_tx_active = ppd->link_width_active;
7139 ppd->link_width_downgrade_rx_active = ppd->link_width_active;
7140 /* per OPA spec, on link up LWD.E resets to LWD.S */
7141 ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
7142 /* cache the active egress rate (units [10^6 bits/sec]) */
7143 ppd->current_egress_rate = active_egress_rate(ppd);
7147 * Handle a verify capabilities interrupt from the 8051.
7149 * This is a work-queue function outside of the interrupt.
7151 void handle_verify_cap(struct work_struct *work)
7153 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7155 struct hfi1_devdata *dd = ppd->dd;
7157 u8 power_management;
7167 u16 active_tx, active_rx;
7168 u8 partner_supported_crc;
7172 set_link_state(ppd, HLS_VERIFY_CAP);
7174 lcb_shutdown(dd, 0);
7175 adjust_lcb_for_fpga_serdes(dd);
7178 * These are now valid:
7179 * remote VerifyCap fields in the general LNI config
7180 * CSR DC8051_STS_REMOTE_GUID
7181 * CSR DC8051_STS_REMOTE_NODE_TYPE
7182 * CSR DC8051_STS_REMOTE_FM_SECURITY
7183 * CSR DC8051_STS_REMOTE_PORT_NO
7186 read_vc_remote_phy(dd, &power_management, &continious);
7187 read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
7188 &partner_supported_crc);
7189 read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
7190 read_remote_device_id(dd, &device_id, &device_rev);
7192 * And the 'MgmtAllowed' information, which is exchanged during
7193 * LNI, is also available at this point.
7195 read_mgmt_allowed(dd, &ppd->mgmt_allowed);
7196 /* print the active widths */
7197 get_link_widths(dd, &active_tx, &active_rx);
7199 "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
7200 (int)power_management, (int)continious);
7202 "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
7203 (int)vau, (int)z, (int)vcu, (int)vl15buf,
7204 (int)partner_supported_crc);
7205 dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
7206 (u32)remote_tx_rate, (u32)link_widths);
7207 dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
7208 (u32)device_id, (u32)device_rev);
7210 * The peer vAU value just read is the peer receiver value. HFI does
7211 * not support a transmit vAU of 0 (AU == 8). We advertised that
7212 * with Z=1 in the fabric capabilities sent to the peer. The peer
7213 * will see our Z=1, and, if it advertised a vAU of 0, will move its
7214 * receive to vAU of 1 (AU == 16). Do the same here. We do not care
7215 * about the peer Z value - our sent vAU is 3 (hardwired) and is not
7216 * subject to the Z value exception.
7218 if (vau == 0)
7219 vau = 1;
7220 set_up_vl15(dd, vau, vl15buf);
7222 /* set up the LCB CRC mode */
7223 crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
7225 /* order is important: use the lowest bit in common */
7226 if (crc_mask & CAP_CRC_14B)
7227 crc_val = LCB_CRC_14B;
7228 else if (crc_mask & CAP_CRC_48B)
7229 crc_val = LCB_CRC_48B;
7230 else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
7231 crc_val = LCB_CRC_12B_16B_PER_LANE;
7233 crc_val = LCB_CRC_16B;
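/*
 * Example: if we enabled 14B and 48B while the peer supports 48B and
 * per-lane, crc_mask is just CAP_CRC_48B and the link runs 48B CRC;
 * 16B is the fallback when the masks have nothing else in common.
 */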
7235 dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
7236 write_csr(dd, DC_LCB_CFG_CRC_MODE,
7237 (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
7239 /* set (14b only) or clear sideband credit */
7240 reg = read_csr(dd, SEND_CM_CTRL);
7241 if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
7242 write_csr(dd, SEND_CM_CTRL,
7243 reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7245 write_csr(dd, SEND_CM_CTRL,
7246 reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7249 ppd->link_speed_active = 0; /* invalid value */
7250 if (dd->dc8051_ver < dc8051_ver(0, 20)) {
7251 /* remote_tx_rate: 0 = 12.5G, 1 = 25G */
7252 switch (remote_tx_rate) {
7254 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7257 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7261 /* actual rate is highest bit of the ANDed rates */
7262 u8 rate = remote_tx_rate & ppd->local_tx_rate;
7265 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7267 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7269 if (ppd->link_speed_active == 0) {
7270 dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
7271 __func__, (int)remote_tx_rate);
7272 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7276 * Cache the values of the supported, enabled, and active
7277 * LTP CRC modes to return in 'portinfo' queries. But the bit
7278 * flags that are returned in the portinfo query differ from
7279 * what's in the link_crc_mask, crc_sizes, and crc_val
7280 * variables. Convert these here.
7282 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
7283 /* supported crc modes */
7284 ppd->port_ltp_crc_mode |=
7285 cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
7286 /* enabled crc modes */
7287 ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
7288 /* active crc mode */
7290 /* set up the remote credit return table */
7291 assign_remote_cm_au_table(dd, vcu);
7294 * The LCB is reset on entry to handle_verify_cap(), so this must
7295 * be applied on every link up.
7297 * Adjust LCB error kill enable to kill the link if
7298 * these RBUF errors are seen:
7299 * REPLAY_BUF_MBE_SMASK
7300 * FLIT_INPUT_BUF_MBE_SMASK
7302 if (is_ax(dd)) { /* fixed in B0 */
7303 reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
7304 reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
7305 | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
7306 write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
7309 /* pull LCB fifos out of reset - all fifo clocks must be stable */
7310 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
7312 /* give 8051 access to the LCB CSRs */
7313 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
7314 set_8051_lcb_access(dd);
7316 ppd->neighbor_guid =
7317 read_csr(dd, DC_DC8051_STS_REMOTE_GUID);
7318 ppd->neighbor_port_number = read_csr(dd, DC_DC8051_STS_REMOTE_PORT_NO) &
7319 DC_DC8051_STS_REMOTE_PORT_NO_VAL_SMASK;
7320 ppd->neighbor_type =
7321 read_csr(dd, DC_DC8051_STS_REMOTE_NODE_TYPE) &
7322 DC_DC8051_STS_REMOTE_NODE_TYPE_VAL_MASK;
7323 ppd->neighbor_fm_security =
7324 read_csr(dd, DC_DC8051_STS_REMOTE_FM_SECURITY) &
7325 DC_DC8051_STS_LOCAL_FM_SECURITY_DISABLED_MASK;
7327 "Neighbor Guid: %llx Neighbor type %d MgmtAllowed %d FM security bypass %d\n",
7328 ppd->neighbor_guid, ppd->neighbor_type,
7329 ppd->mgmt_allowed, ppd->neighbor_fm_security);
7330 if (ppd->mgmt_allowed)
7331 add_full_mgmt_pkey(ppd);
7333 /* tell the 8051 to go to LinkUp */
7334 set_link_state(ppd, HLS_GOING_UP);
7338 * Apply the link width downgrade enabled policy against the current active
7341 * Called when the enabled policy changes or the active link widths change.
7343 void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
7350 /* use the hls lock to avoid a race with actual link up */
7353 mutex_lock(&ppd->hls_lock);
7354 /* only apply if the link is up */
7355 if (!(ppd->host_link_state & HLS_UP)) {
7356 /* still going up... wait and retry */
7357 if (ppd->host_link_state & HLS_GOING_UP) {
7358 if (++tries < 1000) {
7359 mutex_unlock(&ppd->hls_lock);
7360 usleep_range(100, 120); /* arbitrary */
7364 "%s: giving up waiting for link state change\n",
7370 lwde = ppd->link_width_downgrade_enabled;
7372 if (refresh_widths) {
7373 get_link_widths(ppd->dd, &tx, &rx);
7374 ppd->link_width_downgrade_tx_active = tx;
7375 ppd->link_width_downgrade_rx_active = rx;
7379 /* downgrade is disabled */
7381 /* bounce if not at starting active width */
7382 if ((ppd->link_width_active !=
7383 ppd->link_width_downgrade_tx_active) ||
7384 (ppd->link_width_active !=
7385 ppd->link_width_downgrade_rx_active)) {
7387 "Link downgrade is disabled and link has downgraded, downing link\n");
7389 " original 0x%x, tx active 0x%x, rx active 0x%x\n",
7390 ppd->link_width_active,
7391 ppd->link_width_downgrade_tx_active,
7392 ppd->link_width_downgrade_rx_active);
7395 } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 ||
7396 (lwde & ppd->link_width_downgrade_rx_active) == 0) {
7397 /* Tx or Rx is outside the enabled policy */
7399 "Link is outside of downgrade allowed, downing link\n");
7401 " enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
7402 lwde, ppd->link_width_downgrade_tx_active,
7403 ppd->link_width_downgrade_rx_active);
7408 mutex_unlock(&ppd->hls_lock);
7411 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
7412 OPA_LINKDOWN_REASON_WIDTH_POLICY);
7413 set_link_state(ppd, HLS_DN_OFFLINE);
7420 * Handle a link downgrade interrupt from the 8051.
7422 * This is a work-queue function outside of the interrupt.
7424 void handle_link_downgrade(struct work_struct *work)
7426 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7427 link_downgrade_work);
7429 dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
7430 apply_link_downgrade_policy(ppd, 1);
7433 static char *dcc_err_string(char *buf, int buf_len, u64 flags)
7435 return flag_string(buf, buf_len, flags, dcc_err_flags,
7436 ARRAY_SIZE(dcc_err_flags));
7439 static char *lcb_err_string(char *buf, int buf_len, u64 flags)
7441 return flag_string(buf, buf_len, flags, lcb_err_flags,
7442 ARRAY_SIZE(lcb_err_flags));
7445 static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
7447 return flag_string(buf, buf_len, flags, dc8051_err_flags,
7448 ARRAY_SIZE(dc8051_err_flags));
7451 static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
7453 return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
7454 ARRAY_SIZE(dc8051_info_err_flags));
7457 static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
7459 return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
7460 ARRAY_SIZE(dc8051_info_host_msg_flags));
7463 static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
7465 struct hfi1_pportdata *ppd = dd->pport;
7466 u64 info, err, host_msg;
7467 int queue_link_down = 0;
7470 /* look at the flags */
7471 if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
7472 /* 8051 information set by firmware */
7473 /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
7474 info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
7475 err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
7476 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
7478 DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
7479 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
7482 * Handle error flags.
7484 if (err & FAILED_LNI) {
7486 * LNI error indications are cleared by the 8051
7487 * only when starting polling. Only pay attention
7488 * to them when in the states that occur during polling.
7491 if (ppd->host_link_state
7492 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
7493 queue_link_down = 1;
7494 dd_dev_info(dd, "Link error: %s\n",
7495 dc8051_info_err_string(buf,
7500 err &= ~(u64)FAILED_LNI;
7502 /* unknown frames can happen during LNI, just count */
7503 if (err & UNKNOWN_FRAME) {
7504 ppd->unknown_frame_count++;
7505 err &= ~(u64)UNKNOWN_FRAME;
7508 /* report remaining errors, but do not do anything */
7509 dd_dev_err(dd, "8051 info error: %s\n",
7510 dc8051_info_err_string(buf, sizeof(buf),
7515 * Handle host message flags.
7517 if (host_msg & HOST_REQ_DONE) {
7519 * Presently, the driver does a busy wait for
7520 * host requests to complete. This is only an
7521 * informational message.
7522 * NOTE: The 8051 clears the host message
7523 * information *on the next 8051 command*.
7524 * Therefore, when linkup is achieved,
7525 * this flag will still be set.
7527 host_msg &= ~(u64)HOST_REQ_DONE;
7529 if (host_msg & BC_SMA_MSG) {
7530 queue_work(ppd->hfi1_wq, &ppd->sma_message_work);
7531 host_msg &= ~(u64)BC_SMA_MSG;
7533 if (host_msg & LINKUP_ACHIEVED) {
7534 dd_dev_info(dd, "8051: Link up\n");
7535 queue_work(ppd->hfi1_wq, &ppd->link_up_work);
7536 host_msg &= ~(u64)LINKUP_ACHIEVED;
7538 if (host_msg & EXT_DEVICE_CFG_REQ) {
7539 queue_work(ppd->hfi1_wq, &ppd->dc_host_req_work);
7540 host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
7542 if (host_msg & VERIFY_CAP_FRAME) {
7543 queue_work(ppd->hfi1_wq, &ppd->link_vc_work);
7544 host_msg &= ~(u64)VERIFY_CAP_FRAME;
7546 if (host_msg & LINK_GOING_DOWN) {
7547 const char *extra = "";
7548 /* no downgrade action needed if going down */
7549 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7550 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7551 extra = " (ignoring downgrade)";
7553 dd_dev_info(dd, "8051: Link down%s\n", extra);
7554 queue_link_down = 1;
7555 host_msg &= ~(u64)LINK_GOING_DOWN;
7557 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7558 queue_work(ppd->hfi1_wq, &ppd->link_downgrade_work);
7559 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7562 /* report remaining messages, but do not do anything */
7563 dd_dev_info(dd, "8051 info host message: %s\n",
7564 dc8051_info_host_msg_string(buf,
7569 reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
7571 if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
7573 * Lost the 8051 heartbeat. If this happens, we
7574 * receive constant interrupts about it. Disable
7575 * the interrupt after the first.
7577 dd_dev_err(dd, "Lost 8051 heartbeat\n");
7578 write_csr(dd, DC_DC8051_ERR_EN,
7579 read_csr(dd, DC_DC8051_ERR_EN) &
7580 ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
7582 reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
7585 /* report the error, but do not do anything */
7586 dd_dev_err(dd, "8051 error: %s\n",
7587 dc8051_err_string(buf, sizeof(buf), reg));
7590 if (queue_link_down) {
7592 * if the link is already going down or disabled, do not queue another.
7595 if ((ppd->host_link_state &
7596 (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
7597 ppd->link_enabled == 0) {
7598 dd_dev_info(dd, "%s: not queuing link down\n",
7601 queue_work(ppd->hfi1_wq, &ppd->link_down_work);
7606 static const char * const fm_config_txt[] = {
7608 "BadHeadDist: Distance violation between two head flits",
7610 "BadTailDist: Distance violation between two tail flits",
7612 "BadCtrlDist: Distance violation between two credit control flits",
7614 "BadCrdAck: Credits return for unsupported VL",
7616 "UnsupportedVLMarker: Received VL Marker",
7618 "BadPreempt: Exceeded the preemption nesting level",
7620 "BadControlFlit: Received unsupported control flit",
7623 "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
7626 static const char * const port_rcv_txt[] = {
7628 "BadPktLen: Illegal PktLen",
7630 "PktLenTooLong: Packet longer than PktLen",
7632 "PktLenTooShort: Packet shorter than PktLen",
7634 "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
7636 "BadDLID: Illegal DLID (0, doesn't match HFI)",
7638 "BadL2: Illegal L2 opcode",
7640 "BadSC: Unsupported SC",
7642 "BadRC: Illegal RC",
7644 "PreemptError: Preempting with same VL",
7646 "PreemptVL15: Preempting a VL15 packet",
7649 #define OPA_LDR_FMCONFIG_OFFSET 16
7650 #define OPA_LDR_PORTRCV_OFFSET 0
7651 static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7653 u64 info, hdr0, hdr1;
7656 struct hfi1_pportdata *ppd = dd->pport;
7660 if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
7661 if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
7662 info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
7663 dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
7664 /* set status bit */
7665 dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
7667 reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
7670 if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
7671 struct hfi1_pportdata *ppd = dd->pport;
7672 /* this counter saturates at (2^32) - 1 */
7673 if (ppd->link_downed < (u32)UINT_MAX)
7675 reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
7678 if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
7679 u8 reason_valid = 1;
7681 info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
7682 if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
7683 dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
7684 /* set status bit */
7685 dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
7695 extra = fm_config_txt[info];
7698 extra = fm_config_txt[info];
7699 if (ppd->port_error_action &
7700 OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
7703 * lcl_reason cannot be derived from info
7707 OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
7712 snprintf(buf, sizeof(buf), "reserved%lld", info);
7717 if (reason_valid && !do_bounce) {
7718 do_bounce = ppd->port_error_action &
7719 (1 << (OPA_LDR_FMCONFIG_OFFSET + info));
7720 lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
7723 /* just report this */
7724 dd_dev_info(dd, "DCC Error: fmconfig error: %s\n", extra);
7725 reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
7728 if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
7729 u8 reason_valid = 1;
7731 info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
7732 hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
7733 hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
7734 if (!(dd->err_info_rcvport.status_and_code &
7735 OPA_EI_STATUS_SMASK)) {
7736 dd->err_info_rcvport.status_and_code =
7737 info & OPA_EI_CODE_SMASK;
7738 /* set status bit */
7739 dd->err_info_rcvport.status_and_code |=
7740 OPA_EI_STATUS_SMASK;
7742 * save first 2 flits in the packet that caused the problem.
7745 dd->err_info_rcvport.packet_flit1 = hdr0;
7746 dd->err_info_rcvport.packet_flit2 = hdr1;
7759 extra = port_rcv_txt[info];
7763 snprintf(buf, sizeof(buf), "reserved%lld", info);
7768 if (reason_valid && !do_bounce) {
7769 do_bounce = ppd->port_error_action &
7770 (1 << (OPA_LDR_PORTRCV_OFFSET + info));
7771 lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
7774 /* just report this */
7775 dd_dev_info(dd, "DCC Error: PortRcv error: %s\n", extra);
7776 dd_dev_info(dd, " hdr0 0x%llx, hdr1 0x%llx\n",
7779 reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
7782 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
7783 /* informative only */
7784 dd_dev_info(dd, "8051 access to LCB blocked\n");
7785 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
7787 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
7788 /* informative only */
7789 dd_dev_info(dd, "host access to LCB blocked\n");
7790 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
7793 /* report any remaining errors */
7795 dd_dev_info(dd, "DCC Error: %s\n",
7796 dcc_err_string(buf, sizeof(buf), reg));
7798 if (lcl_reason == 0)
7799 lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
7802 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
7803 set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
7804 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
7808 static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7812 dd_dev_info(dd, "LCB Error: %s\n",
7813 lcb_err_string(buf, sizeof(buf), reg));
7817 * CCE block DC interrupt. Source is < 8.
7819 static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
7821 const struct err_reg_info *eri = &dc_errs[source];
7824 interrupt_clear_down(dd, 0, eri);
7825 } else if (source == 3 /* dc_lbm_int */) {
7827 * This indicates that a parity error has occurred on the
7828 * address/control lines presented to the LBM. The error
7829 * is a single pulse, there is no associated error flag,
7830 * and it is non-maskable. This is because if a parity
7831 * error occurs on the request the request is dropped.
7832 * This should never occur, but it is nice to know if it ever does.
7835 dd_dev_err(dd, "Parity error in DC LBM block\n");
7837 dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
7842 * TX block send credit interrupt. Source is < 160.
7844 static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
7846 sc_group_release_update(dd, source);
7850 * TX block SDMA interrupt. Source is < 48.
7852 * SDMA interrupts are grouped by type:
7855 * N - 2N-1 = SDmaProgress
7856 * 2N - 3N-1 = SDmaIdle
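* Worked example (assuming TXE_NUM_SDMA_ENGINES == 16): source 35
* decodes as what = 35 / 16 = 2 (SDmaIdle) on engine
* which = 35 % 16 = 3.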
7858 static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
7860 /* what interrupt */
7861 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
7863 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
7865 #ifdef CONFIG_SDMA_VERBOSITY
7866 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
7867 slashstrip(__FILE__), __LINE__, __func__);
7868 sdma_dumpstate(&dd->per_sdma[which]);
7871 if (likely(what < 3 && which < dd->num_sdma)) {
7872 sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
7874 /* should not happen */
7875 dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
7880 * RX block receive available interrupt. Source is < 160.
7882 static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
7884 struct hfi1_ctxtdata *rcd;
7887 if (likely(source < dd->num_rcv_contexts)) {
7888 rcd = dd->rcd[source];
7890 if (source < dd->first_user_ctxt)
7891 rcd->do_interrupt(rcd, 0);
7893 handle_user_interrupt(rcd);
7896 /* received an interrupt, but no rcd */
7897 err_detail = "dataless";
7899 /* received an interrupt, but are not using that context */
7900 err_detail = "out of range";
7902 dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
7903 err_detail, source);
7907 * RX block receive urgent interrupt. Source is < 160.
7909 static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
7911 struct hfi1_ctxtdata *rcd;
7914 if (likely(source < dd->num_rcv_contexts)) {
7915 rcd = dd->rcd[source];
7917 /* only pay attention to user urgent interrupts */
7918 if (source >= dd->first_user_ctxt)
7919 handle_user_interrupt(rcd);
7922 /* received an interrupt, but no rcd */
7923 err_detail = "dataless";
7925 /* received an interrupt, but are not using that context */
7926 err_detail = "out of range";
7928 dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
7929 err_detail, source);
7933 * Reserved range interrupt. Should not be called in normal operation.
7935 static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
7939 dd_dev_err(dd, "unexpected %s interrupt\n",
7940 is_reserved_name(name, sizeof(name), source));
7943 static const struct is_table is_table[] = {
7946 * name func interrupt func
7948 { IS_GENERAL_ERR_START, IS_GENERAL_ERR_END,
7949 is_misc_err_name, is_misc_err_int },
7950 { IS_SDMAENG_ERR_START, IS_SDMAENG_ERR_END,
7951 is_sdma_eng_err_name, is_sdma_eng_err_int },
7952 { IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
7953 is_sendctxt_err_name, is_sendctxt_err_int },
7954 { IS_SDMA_START, IS_SDMA_END,
7955 is_sdma_eng_name, is_sdma_eng_int },
7956 { IS_VARIOUS_START, IS_VARIOUS_END,
7957 is_various_name, is_various_int },
7958 { IS_DC_START, IS_DC_END,
7959 is_dc_name, is_dc_int },
7960 { IS_RCVAVAIL_START, IS_RCVAVAIL_END,
7961 is_rcv_avail_name, is_rcv_avail_int },
7962 { IS_RCVURGENT_START, IS_RCVURGENT_END,
7963 is_rcv_urgent_name, is_rcv_urgent_int },
7964 { IS_SENDCREDIT_START, IS_SENDCREDIT_END,
7965 is_send_credit_name, is_send_credit_int},
7966 { IS_RESERVED_START, IS_RESERVED_END,
7967 is_reserved_name, is_reserved_int},
7971 * Interrupt source interrupt - called when the given source has an interrupt.
7972 * Source is a bit index into an array of 64-bit integers.
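* For example, source 70 refers to bit 70 % 64 = 6 of the second
* (70 / 64 = 1) 64-bit interrupt status CSR.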
7974 static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
7976 const struct is_table *entry;
7978 /* avoids a double compare by walking the table in-order */
7979 for (entry = &is_table[0]; entry->is_name; entry++) {
7980 if (source < entry->end) {
7981 trace_hfi1_interrupt(dd, entry, source);
7982 entry->is_int(dd, source - entry->start);
7986 /* fell off the end */
7987 dd_dev_err(dd, "invalid interrupt source %u\n", source);
7991 * General interrupt handler. This is able to correctly handle
7992 * all interrupts in case INTx is used.
7994 static irqreturn_t general_interrupt(int irq, void *data)
7996 struct hfi1_devdata *dd = data;
7997 u64 regs[CCE_NUM_INT_CSRS];
8001 this_cpu_inc(*dd->int_counter);
8003 /* phase 1: scan and clear all handled interrupts */
8004 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
8005 if (dd->gi_mask[i] == 0) {
8006 regs[i] = 0; /* used later */
8009 regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
8011 /* only clear if anything is set */
8013 write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
8016 /* phase 2: call the appropriate handler */
8017 for_each_set_bit(bit, (unsigned long *)&regs[0],
8018 CCE_NUM_INT_CSRS * 64) {
8019 is_interrupt(dd, bit);
8025 static irqreturn_t sdma_interrupt(int irq, void *data)
8027 struct sdma_engine *sde = data;
8028 struct hfi1_devdata *dd = sde->dd;
8031 #ifdef CONFIG_SDMA_VERBOSITY
8032 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
8033 slashstrip(__FILE__), __LINE__, __func__);
8034 sdma_dumpstate(sde);
8037 this_cpu_inc(*dd->int_counter);
8039 /* This read_csr is really bad in the hot path */
8040 status = read_csr(dd,
8041 CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
8043 if (likely(status)) {
8044 /* clear the interrupt(s) */
8046 CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
8049 /* handle the interrupt(s) */
8050 sdma_engine_interrupt(sde, status);
8052 dd_dev_err(dd, "SDMA engine %u interrupt, but no status bits set\n",
8059 * Clear the receive interrupt. Use a read of the interrupt clear CSR
8060 * to ensure that the write completed. This does NOT guarantee that
8061 * queued DMA writes to memory from the chip are pushed.
8063 static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
8065 struct hfi1_devdata *dd = rcd->dd;
8066 u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
8068 mmiowb(); /* make sure everything before is written */
8069 write_csr(dd, addr, rcd->imask);
8070 /* force the above write on the chip and get a value back */
8071 (void)read_csr(dd, addr);
8074 /* force the receive interrupt */
8075 void force_recv_intr(struct hfi1_ctxtdata *rcd)
8077 write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
8081 * Return non-zero if a packet is present.
8083 * This routine is called when rechecking for packets after the RcvAvail
8084 * interrupt has been cleared down. First, do a quick check of memory for
8085 * a packet present. If not found, use an expensive CSR read of the context
8086 * tail to determine the actual tail. The CSR read is necessary because there
8087 * is no method to push pending DMAs to memory other than an interrupt and we
8088 * are trying to determine if we need to force an interrupt.
8090 static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
8095 if (!HFI1_CAP_IS_KSET(DMA_RTAIL))
8096 present = (rcd->seq_cnt ==
8097 rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
8098 else /* is RDMA rtail */
8099 present = (rcd->head != get_rcvhdrtail(rcd));
8104 /* fall back to a CSR read, correct independent of DMA_RTAIL */
8105 tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
8106 return rcd->head != tail;
8110 * Receive packet IRQ handler. This routine expects to be on its own IRQ.
8111 * This routine will try to handle packets immediately (latency), but if
8112 * it finds too many, it will invoke the thread handler (bandwidth). The
8113 * chip receive interrupt is *not* cleared down until this or the thread (if
8114 * invoked) is finished. The intent is to avoid extra interrupts while we
8115 * are processing packets anyway.
8117 static irqreturn_t receive_context_interrupt(int irq, void *data)
8119 struct hfi1_ctxtdata *rcd = data;
8120 struct hfi1_devdata *dd = rcd->dd;
8124 trace_hfi1_receive_interrupt(dd, rcd->ctxt);
8125 this_cpu_inc(*dd->int_counter);
8126 aspm_ctx_disable(rcd);
8128 /* receive interrupt remains blocked while processing packets */
8129 disposition = rcd->do_interrupt(rcd, 0);
8132 * Too many packets were seen while processing packets in this
8133 * IRQ handler. Invoke the handler thread. The receive interrupt
8136 if (disposition == RCV_PKT_LIMIT)
8137 return IRQ_WAKE_THREAD;
8140 * The packet processor detected no more packets. Clear the receive
8141 * interrupt and recheck for a packet that may have arrived
8142 * after the previous check and interrupt clear. If a packet arrived,
8143 * force another interrupt.
8145 clear_recv_intr(rcd);
8146 present = check_packet_present(rcd);
8148 force_recv_intr(rcd);
8154 * Receive packet thread handler. This expects to be invoked with the
8155 * receive interrupt still blocked.
8157 static irqreturn_t receive_context_thread(int irq, void *data)
8159 struct hfi1_ctxtdata *rcd = data;
8162 /* receive interrupt is still blocked from the IRQ handler */
8163 (void)rcd->do_interrupt(rcd, 1);
8166 * The packet processor will only return if it detected no more
8167 * packets. Hold IRQs here so we can safely clear the interrupt and
8168 * recheck for a packet that may have arrived after the previous
8169 * check and the interrupt clear. If a packet arrived, force another interrupt.
8172 local_irq_disable();
8173 clear_recv_intr(rcd);
8174 present = check_packet_present(rcd);
8176 force_recv_intr(rcd);
8182 /* ========================================================================= */
8184 u32 read_physical_state(struct hfi1_devdata *dd)
8188 reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
8189 return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
8190 & DC_DC8051_STS_CUR_STATE_PORT_MASK;
8193 u32 read_logical_state(struct hfi1_devdata *dd)
8197 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8198 return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
8199 & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
8202 static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
8206 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8207 /* clear current state, set new state */
8208 reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
8209 reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
8210 write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
8214 * Use the 8051 to read an LCB CSR.
8216 static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
8221 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8222 if (acquire_lcb_access(dd, 0) == 0) {
8223 *data = read_csr(dd, addr);
8224 release_lcb_access(dd, 0);
8230 /* register is an index of LCB registers: (offset - base) / 8 */
8231 regno = (addr - DC_LCB_CFG_RUN) >> 3;
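/* e.g., an LCB CSR at offset DC_LCB_CFG_RUN + 0x10 is register index 2 */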
8232 ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
8233 if (ret != HCMD_SUCCESS)
8239 * Read an LCB CSR. Access may not be in host control, so check.
8240 * Return 0 on success, -EBUSY on failure.
8242 int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
8244 struct hfi1_pportdata *ppd = dd->pport;
8246 /* if up, go through the 8051 for the value */
8247 if (ppd->host_link_state & HLS_UP)
8248 return read_lcb_via_8051(dd, addr, data);
8249 /* if going up or down, no access */
8250 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8252 /* otherwise, host has access */
8253 *data = read_csr(dd, addr);
8258 * Use the 8051 to write an LCB CSR.
8260 static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
8265 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
8266 (dd->dc8051_ver < dc8051_ver(0, 20))) {
8267 if (acquire_lcb_access(dd, 0) == 0) {
8268 write_csr(dd, addr, data);
8269 release_lcb_access(dd, 0);
8275 /* register is an index of LCB registers: (offset - base) / 8 */
8276 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8277 ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
8278 if (ret != HCMD_SUCCESS)
8284 * Write an LCB CSR. Access may not be in host control, so check.
8285 * Return 0 on success, -EBUSY on failure.
8287 int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
8289 struct hfi1_pportdata *ppd = dd->pport;
8291 /* if up, go through the 8051 for the value */
8292 if (ppd->host_link_state & HLS_UP)
8293 return write_lcb_via_8051(dd, addr, data);
8294 /* if going up or down, no access */
8295 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8297 /* otherwise, host has access */
8298 write_csr(dd, addr, data);
8304 * < 0 = Linux error, not able to get access
8305 * > 0 = 8051 command RETURN_CODE
8307 static int do_8051_command(
8308 struct hfi1_devdata *dd,
8315 unsigned long flags;
8316 unsigned long timeout;
8318 hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
8321 * Alternative to holding the lock for a long time:
8322 * - keep busy wait - have other users bounce off
8324 spin_lock_irqsave(&dd->dc8051_lock, flags);
8326 /* We can't send any commands to the 8051 if it's in reset */
8327 if (dd->dc_shutdown) {
8328 return_code = -ENODEV;
8333 * If an 8051 host command timed out previously, then the 8051 is stuck.
8336 * On first timeout, attempt to reset and restart the entire DC
8337 * block (including 8051). (Is this too big of a hammer?)
8339 * If the 8051 times out a second time, the reset did not bring it
8340 * back to healthy life. In that case, fail any subsequent commands.
8342 if (dd->dc8051_timed_out) {
8343 if (dd->dc8051_timed_out > 1) {
8345 "Previous 8051 host command timed out, skipping command %u\n",
8347 return_code = -ENXIO;
8350 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8353 spin_lock_irqsave(&dd->dc8051_lock, flags);
8357 * If there is no timeout, then the 8051 command interface is
8358 * waiting for a command.
8362 * When writing an LCB CSR, out_data contains the full value to
8363 * be written, while in_data contains the relative LCB
8364 * address in 7:0. Do the work here, rather than the caller,
8365 * of distributing the write data to where it needs to go:
8368 * 39:00 -> in_data[47:8]
8369 * 47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
8370 * 63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
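* Worked example with a hypothetical out_data of 0x123456789abcdef0:
* bits 39:0 (0x789abcdef0) are merged into in_data[47:8], bits 47:40
* (0x56) land in RETURN_CODE, and bits 63:48 (0x1234) in RSP_DATA.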
8372 if (type == HCMD_WRITE_LCB_CSR) {
8373 in_data |= ((*out_data) & 0xffffffffffull) << 8;
8374 reg = ((((*out_data) >> 40) & 0xff) <<
8375 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
8376 | ((((*out_data) >> 48) & 0xffff) <<
8377 DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
8378 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
8382 * Do two writes: the first to stabilize the type and req_data, the
8383 * second to activate.
8385 reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
8386 << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
8387 | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
8388 << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
8389 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8390 reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
8391 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8393 /* wait for completion, alternate: interrupt */
8394 timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
8396 reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
8397 completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
8400 if (time_after(jiffies, timeout)) {
8401 dd->dc8051_timed_out++;
8402 dd_dev_err(dd, "8051 host command %u timeout\n", type);
8405 return_code = -ETIMEDOUT;
8412 *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
8413 & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
8414 if (type == HCMD_READ_LCB_CSR) {
8415 /* top 16 bits are in a different register */
8416 *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
8417 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
8419 - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
8422 return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
8423 & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
8424 dd->dc8051_timed_out = 0;
8426 * Clear command for next user.
8428 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
8431 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8436 static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
8438 return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
8441 int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
8442 u8 lane_id, u32 config_data)
8447 data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
8448 | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
8449 | (u64)config_data << LOAD_DATA_DATA_SHIFT;
8450 ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
8451 if (ret != HCMD_SUCCESS) {
8453 "load 8051 config: field id %d, lane %d, err %d\n",
8454 (int)field_id, (int)lane_id, ret);
8460 * Read the 8051 firmware "registers". Use the RAM directly. Always
8461 * set the result, even on error.
8462 * Return 0 on success, -errno on failure
8464 int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
8471 /* address start depends on the lane_id */
8473 addr = (4 * NUM_GENERAL_FIELDS)
8474 + (lane_id * 4 * NUM_LANE_FIELDS);
8477 addr += field_id * 4;
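/* e.g., lane 1, field 2: addr = 4 * NUM_GENERAL_FIELDS + 4 * NUM_LANE_FIELDS + 8 */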
8479 /* read is in 8-byte chunks, hardware will truncate the address down */
8480 ret = read_8051_data(dd, addr, 8, &big_data);
8483 /* extract the 4 bytes we want */
8484 if (addr & 0x4)
8485 *result = (u32)(big_data >> 32);
8486 else
8487 *result = (u32)big_data;
8490 dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
8491 __func__, lane_id, field_id);
8497 static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
8502 frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
8503 | power_management << POWER_MANAGEMENT_SHIFT;
8504 return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
8505 GENERAL_CONFIG, frame);
8508 static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
8509 u16 vl15buf, u8 crc_sizes)
8513 frame = (u32)vau << VAU_SHIFT
8515 | (u32)vcu << VCU_SHIFT
8516 | (u32)vl15buf << VL15BUF_SHIFT
8517 | (u32)crc_sizes << CRC_SIZES_SHIFT;
8518 return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
8519 GENERAL_CONFIG, frame);
8522 static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
8523 u8 *flag_bits, u16 *link_widths)
8527 read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8529 *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
8530 *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
8531 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8534 static int write_vc_local_link_width(struct hfi1_devdata *dd,
8541 frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
8542 | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
8543 | (u32)link_widths << LINK_WIDTH_SHIFT;
8544 return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8548 static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
8553 frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
8554 | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
8555 return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
8558 static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
8563 read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
8564 *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
8565 *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
8566 & REMOTE_DEVICE_REV_MASK;
8569 void read_misc_status(struct hfi1_devdata *dd, u8 *ver_a, u8 *ver_b)
8573 read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
8574 *ver_a = (frame >> STS_FM_VERSION_A_SHIFT) & STS_FM_VERSION_A_MASK;
8575 *ver_b = (frame >> STS_FM_VERSION_B_SHIFT) & STS_FM_VERSION_B_MASK;
8578 static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
8583 read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
8584 *power_management = (frame >> POWER_MANAGEMENT_SHIFT)
8585 & POWER_MANAGEMENT_MASK;
8586 *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
8587 & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
8590 static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
8591 u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
8595 read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
8596 *vau = (frame >> VAU_SHIFT) & VAU_MASK;
8597 *z = (frame >> Z_SHIFT) & Z_MASK;
8598 *vcu = (frame >> VCU_SHIFT) & VCU_MASK;
8599 *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
8600 *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
8603 static void read_vc_remote_link_width(struct hfi1_devdata *dd,
8609 read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
8611 *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
8612 & REMOTE_TX_RATE_MASK;
8613 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8616 static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
8620 read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
8621 *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
8624 static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed)
8628 read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame);
8629 *mgmt_allowed = (frame >> MGMT_ALLOWED_SHIFT) & MGMT_ALLOWED_MASK;
8632 static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
8634 read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
8637 static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
8639 read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
8642 void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
8648 if (dd->pport->host_link_state & HLS_UP) {
8649 ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
8652 *link_quality = (frame >> LINK_QUALITY_SHIFT)
8653 & LINK_QUALITY_MASK;
8657 static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
8661 read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
8662 *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
8665 static int read_tx_settings(struct hfi1_devdata *dd,
8667 u8 *tx_polarity_inversion,
8668 u8 *rx_polarity_inversion,
8674 ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
8675 *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
8676 & ENABLE_LANE_TX_MASK;
8677 *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
8678 & TX_POLARITY_INVERSION_MASK;
8679 *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
8680 & RX_POLARITY_INVERSION_MASK;
8681 *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
8685 static int write_tx_settings(struct hfi1_devdata *dd,
8687 u8 tx_polarity_inversion,
8688 u8 rx_polarity_inversion,
8693 /* no need to mask, all variable sizes match field widths */
8694 frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
8695 | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
8696 | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
8697 | max_rate << MAX_RATE_SHIFT;
8698 return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
8701 static void check_fabric_firmware_versions(struct hfi1_devdata *dd)
8703 u32 frame, version, prod_id;
8707 for (lane = 0; lane < 4; lane++) {
8708 ret = read_8051_config(dd, SPICO_FW_VERSION, lane, &frame);
8711 "Unable to read lane %d firmware details\n",
8715 version = (frame >> SPICO_ROM_VERSION_SHIFT)
8716 & SPICO_ROM_VERSION_MASK;
8717 prod_id = (frame >> SPICO_ROM_PROD_ID_SHIFT)
8718 & SPICO_ROM_PROD_ID_MASK;
8720 "Lane %d firmware: version 0x%04x, prod_id 0x%04x\n",
8721 lane, version, prod_id);
8726 * Read an idle LCB message.
8728 * Returns 0 on success, -EINVAL on error
8730 static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
8734 ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out);
8735 if (ret != HCMD_SUCCESS) {
8736 dd_dev_err(dd, "read idle message: type %d, err %d\n",
8740 dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
8741 /* return only the payload as we already know the type */
8742 *data_out >>= IDLE_PAYLOAD_SHIFT;
8747 * Read an idle SMA message, to be done in response to a notification from the 8051.
8750 * Returns 0 on success, -EINVAL on error
8752 static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
8754 return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT,
8759 * Send an idle LCB message.
8761 * Returns 0 on success, -EINVAL on error
8763 static int send_idle_message(struct hfi1_devdata *dd, u64 data)
8767 dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
8768 ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
8769 if (ret != HCMD_SUCCESS) {
8770 dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
8778 * Send an idle SMA message.
8780 * Returns 0 on success, -EINVAL on error
8782 int send_idle_sma(struct hfi1_devdata *dd, u64 message)
8786 data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
8787 ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
8788 return send_idle_message(dd, data);
8792 * Initialize the LCB then do a quick link up. This may or may not be in loopback.
8795 * return 0 on success, -errno on error
8797 static int do_quick_linkup(struct hfi1_devdata *dd)
8800 unsigned long timeout;
8803 lcb_shutdown(dd, 0);
8806 /* LCB_CFG_LOOPBACK.VAL = 2 */
8807 /* LCB_CFG_LANE_WIDTH.VAL = 0 */
8808 write_csr(dd, DC_LCB_CFG_LOOPBACK,
8809 IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
8810 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
8813 /* start the LCBs */
8814 /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
8815 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
8817 /* simulator only loopback steps */
8818 if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8819 /* LCB_CFG_RUN.EN = 1 */
8820 write_csr(dd, DC_LCB_CFG_RUN,
8821 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
8823 /* watch LCB_STS_LINK_TRANSFER_ACTIVE */
8824 timeout = jiffies + msecs_to_jiffies(10);
8826 reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE);
8829 if (time_after(jiffies, timeout)) {
8831 "timeout waiting for LINK_TRANSFER_ACTIVE\n");
8837 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
8838 1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
8843 * When doing quick linkup and not in loopback, both
8844 * sides must be done with LCB set-up before either
8845 * starts the quick linkup. Put a delay here so that
8846 * both sides can be started and have a chance to be
8847 * done with LCB set up before resuming.
8850 "Pausing for peer to be finished with LCB set up\n");
8852 dd_dev_err(dd, "Continuing with quick linkup\n");
8855 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
8856 set_8051_lcb_access(dd);
8859 * State "quick" LinkUp request sets the physical link state to
8860 * LinkUp without a verify capability sequence.
8861 * This state is in simulator v37 and later.
8863 ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
8864 if (ret != HCMD_SUCCESS) {
8866 "%s: set physical link state to quick LinkUp failed with return %d\n",
8869 set_host_lcb_access(dd);
8870 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
8877 return 0; /* success */
8881 * Set the SerDes to internal loopback mode.
8882 * Returns 0 on success, -errno on error.
8884 static int set_serdes_loopback_mode(struct hfi1_devdata *dd)
8888 ret = set_physical_link_state(dd, PLS_INTERNAL_SERDES_LOOPBACK);
8889 if (ret == HCMD_SUCCESS)
8892 "Set physical link state to SerDes Loopback failed with return %d\n",
8900 * Do all special steps to set up loopback.
8902 static int init_loopback(struct hfi1_devdata *dd)
8904 dd_dev_info(dd, "Entering loopback mode\n");
8906 /* all loopbacks should disable self GUID check */
8907 write_csr(dd, DC_DC8051_CFG_MODE,
8908 (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
8911 * The simulator has only one loopback option - LCB. Switch
8912 * to that option, which includes quick link up.
8914 * Accept all valid loopback values.
8916 if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) &&
8917 (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
8918 loopback == LOOPBACK_CABLE)) {
8919 loopback = LOOPBACK_LCB;
8924 /* handle serdes loopback */
8925 if (loopback == LOOPBACK_SERDES) {
8926 /* internal serdes loopback needs quick linkup on RTL */
8927 if (dd->icode == ICODE_RTL_SILICON)
8929 return set_serdes_loopback_mode(dd);
8932 /* LCB loopback - handled at poll time */
8933 if (loopback == LOOPBACK_LCB) {
8934 quick_linkup = 1; /* LCB is always quick linkup */
8936 /* not supported in emulation due to emulation RTL changes */
8937 if (dd->icode == ICODE_FPGA_EMULATION) {
8939 "LCB loopback not supported in emulation\n");
8945 /* external cable loopback requires no extra steps */
8946 if (loopback == LOOPBACK_CABLE)
8949 dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
8954 * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
8955 * used in the Verify Capability link width attribute.
8957 static u16 opa_to_vc_link_widths(u16 opa_widths)
8962 static const struct link_bits {
8965 } opa_link_xlate[] = {
8966 { OPA_LINK_WIDTH_1X, 1 << (1 - 1) },
8967 { OPA_LINK_WIDTH_2X, 1 << (2 - 1) },
8968 { OPA_LINK_WIDTH_3X, 1 << (3 - 1) },
8969 { OPA_LINK_WIDTH_4X, 1 << (4 - 1) },
8972 for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
8973 if (opa_widths & opa_link_xlate[i].from)
8974 result |= opa_link_xlate[i].to;
8980 * Set link attributes before moving to polling.
8982 static int set_local_link_attributes(struct hfi1_pportdata *ppd)
8984 struct hfi1_devdata *dd = ppd->dd;
8986 u8 tx_polarity_inversion;
8987 u8 rx_polarity_inversion;
8990 /* reset our fabric serdes to clear any lingering problems */
8991 fabric_serdes_reset(dd);
8993 /* set the local tx rate - need to read-modify-write */
8994 ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
8995 &rx_polarity_inversion, &ppd->local_tx_rate);
8997 goto set_local_link_attributes_fail;
8999 if (dd->dc8051_ver < dc8051_ver(0, 20)) {
9000 /* set the tx rate to the fastest enabled */
9001 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9002 ppd->local_tx_rate = 1;
9003 else
9004 ppd->local_tx_rate = 0;
9006 /* set the tx rate to all enabled */
9007 ppd->local_tx_rate = 0;
9008 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9009 ppd->local_tx_rate |= 2;
9010 if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
9011 ppd->local_tx_rate |= 1;
9014 enable_lane_tx = 0xF; /* enable all four lanes */
9015 ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
9016 rx_polarity_inversion, ppd->local_tx_rate);
9017 if (ret != HCMD_SUCCESS)
9018 goto set_local_link_attributes_fail;
9021 * DC supports continuous updates.
9023 ret = write_vc_local_phy(dd,
9024 0 /* no power management */,
9025 1 /* continuous updates */);
9026 if (ret != HCMD_SUCCESS)
9027 goto set_local_link_attributes_fail;
9029 /* z=1 in the next call: AU of 0 is not supported by the hardware */
9030 ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
9031 ppd->port_crc_mode_enabled);
9032 if (ret != HCMD_SUCCESS)
9033 goto set_local_link_attributes_fail;
9035 ret = write_vc_local_link_width(dd, 0, 0,
9036 opa_to_vc_link_widths(
9037 ppd->link_width_enabled));
9038 if (ret != HCMD_SUCCESS)
9039 goto set_local_link_attributes_fail;
9041 /* let peer know who we are */
9042 ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
9043 if (ret == HCMD_SUCCESS)
9046 set_local_link_attributes_fail:
9048 "Failed to set local link attributes, return 0x%x\n",
9054 * Call this to start the link. Schedule a retry if the cable is not
9055 * present or if unable to start polling. Do not do anything if the
9056 * link is disabled. Returns 0 if the link is disabled or moved to polling.
9058 int start_link(struct hfi1_pportdata *ppd)
9060 if (!ppd->link_enabled) {
9061 dd_dev_info(ppd->dd,
9062 "%s: stopping link start because link is disabled\n",
9066 if (!ppd->driver_link_ready) {
9067 dd_dev_info(ppd->dd,
9068 "%s: stopping link start because driver is not ready\n",
9073 if (qsfp_mod_present(ppd) || loopback == LOOPBACK_SERDES ||
9074 loopback == LOOPBACK_LCB ||
9075 ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9076 return set_link_state(ppd, HLS_DN_POLL);
9078 dd_dev_info(ppd->dd,
9079 "%s: stopping link start because no cable is present\n",
9084 static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
9086 struct hfi1_devdata *dd = ppd->dd;
9088 unsigned long timeout;
9091 * Check for QSFP interrupt for t_init (SFF 8679)
9093 timeout = jiffies + msecs_to_jiffies(2000);
9095 mask = read_csr(dd, dd->hfi1_id ?
9096 ASIC_QSFP2_IN : ASIC_QSFP1_IN);
9097 if (!(mask & QSFP_HFI0_INT_N)) {
9098 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR :
9099 ASIC_QSFP1_CLEAR, QSFP_HFI0_INT_N);
9102 if (time_after(jiffies, timeout)) {
9103 dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
9111 static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
9113 struct hfi1_devdata *dd = ppd->dd;
9116 mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
9118 mask |= (u64)QSFP_HFI0_INT_N;
9120 mask &= ~(u64)QSFP_HFI0_INT_N;
9121 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
9124 void reset_qsfp(struct hfi1_pportdata *ppd)
9126 struct hfi1_devdata *dd = ppd->dd;
9127 u64 mask, qsfp_mask;
9129 /* Disable INT_N from triggering QSFP interrupts */
9130 set_qsfp_int_n(ppd, 0);
9132 /* Reset the QSFP */
9133 mask = (u64)QSFP_HFI0_RESET_N;
9134 qsfp_mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE);
9136 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE, qsfp_mask);
9138 qsfp_mask = read_csr(dd,
9139 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
9142 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9148 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9150 wait_for_qsfp_init(ppd);
9153 * Allow INT_N to trigger the QSFP interrupt to watch
9154 * for alarms and warnings
9156 set_qsfp_int_n(ppd, 1);
9159 static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
9160 u8 *qsfp_interrupt_status)
9162 struct hfi1_devdata *dd = ppd->dd;
9164 if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
9165 (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
9166 dd_dev_info(dd, "%s: QSFP cable on fire\n",
9169 if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
9170 (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
9171 dd_dev_info(dd, "%s: QSFP cable temperature too low\n",
9174 if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
9175 (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
9176 dd_dev_info(dd, "%s: QSFP supply voltage too high\n",
9179 if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
9180 (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
9181 dd_dev_info(dd, "%s: QSFP supply voltage too low\n",
9184 /* Byte 2 is vendor specific */
9186 if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
9187 (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
9188 dd_dev_info(dd, "%s: Cable RX channel 1/2 power too high\n",
9191 if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
9192 (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
9193 dd_dev_info(dd, "%s: Cable RX channel 1/2 power too low\n",
9196 if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
9197 (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
9198 dd_dev_info(dd, "%s: Cable RX channel 3/4 power too high\n",
9201 if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
9202 (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
9203 dd_dev_info(dd, "%s: Cable RX channel 3/4 power too low\n",
9206 if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
9207 (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
9208 dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too high\n",
9211 if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
9212 (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
9213 dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too low\n",
9216 if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
9217 (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
9218 dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too high\n",
9221 if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
9222 (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
9223 dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too low\n",
9226 if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
9227 (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
9228 dd_dev_info(dd, "%s: Cable TX channel 1/2 power too high\n",
9231 if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
9232 (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
9233 dd_dev_info(dd, "%s: Cable TX channel 1/2 power too low\n",
9236 if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
9237 (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
9238 dd_dev_info(dd, "%s: Cable TX channel 3/4 power too high\n",
9241 if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
9242 (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
9243 dd_dev_info(dd, "%s: Cable TX channel 3/4 power too low\n",
9246 /* Bytes 9-10 and 11-12 are reserved */
9247 /* Bytes 13-15 are vendor specific */
9252 /* This routine will only be scheduled if the QSFP module is present */
9253 void qsfp_event(struct work_struct *work)
9255 struct qsfp_data *qd;
9256 struct hfi1_pportdata *ppd;
9257 struct hfi1_devdata *dd;
9259 qd = container_of(work, struct qsfp_data, qsfp_work);
9264 if (!qsfp_mod_present(ppd))
9268 * Turn DC back on after the cable has been
9269 * re-inserted. Up until now, the DC has been in
9270 * reset to save power.
9274 if (qd->cache_refresh_required) {
9275 set_qsfp_int_n(ppd, 0);
9277 wait_for_qsfp_init(ppd);
9280 * Allow INT_N to trigger the QSFP interrupt to watch
9281 * for alarms and warnings
9283 set_qsfp_int_n(ppd, 1);
9290 if (qd->check_interrupt_flags) {
9291 u8 qsfp_interrupt_status[16] = {0,};
9293 if (qsfp_read(ppd, dd->hfi1_id, 6,
9294 &qsfp_interrupt_status[0], 16) != 16) {
9296 "%s: Failed to read status of QSFP module\n",
9299 unsigned long flags;
9301 handle_qsfp_error_conditions(
9302 ppd, qsfp_interrupt_status);
9303 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
9304 ppd->qsfp_info.check_interrupt_flags = 0;
9305 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
9311 static void init_qsfp_int(struct hfi1_devdata *dd)
9313 struct hfi1_pportdata *ppd = dd->pport;
9314 u64 qsfp_mask, cce_int_mask;
9315 const int qsfp1_int_smask = QSFP1_INT % 64;
9316 const int qsfp2_int_smask = QSFP2_INT % 64;
9319 * disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0
9320 * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR,
9321 * therefore just one of QSFP1_INT/QSFP2_INT can be used to find
9322 * the index of the appropriate CSR in the CCEIntMask CSR array
9324 cce_int_mask = read_csr(dd, CCE_INT_MASK +
9325 (8 * (QSFP1_INT / 64)));
9327 cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
9328 write_csr(dd, CCE_INT_MASK + (8 * (QSFP1_INT / 64)),
9331 cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
9332 write_csr(dd, CCE_INT_MASK + (8 * (QSFP2_INT / 64)),
9336 qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
9337 /* Clear current status to avoid spurious interrupts */
9338 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9340 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
9343 set_qsfp_int_n(ppd, 0);
9345 /* Handle active low nature of INT_N and MODPRST_N pins */
9346 if (qsfp_mod_present(ppd))
9347 qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
9349 dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
9354 * Do a one-time initialize of the LCB block.
9356 static void init_lcb(struct hfi1_devdata *dd)
9358 /* simulator does not correctly handle LCB cclk loopback, skip */
9359 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9362 /* the DC has been reset earlier in the driver load */
9364 /* set LCB for cclk loopback on the port */
9365 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
9366 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
9367 write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
9368 write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
9369 write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
9370 write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
9371 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
9374 int bringup_serdes(struct hfi1_pportdata *ppd)
9376 struct hfi1_devdata *dd = ppd->dd;
9380 if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
9381 add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
9386 guid = dd->base_guid + ppd->port - 1;
9390 /* Set linkinit_reason on power up per OPA spec */
9391 ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
9393 /* one-time init of the LCB */
9397 ret = init_loopback(dd);
9402 /* tune the SERDES to a ballpark setting for
9403 * optimal signal and bit error rate.
9404 * Needs to be done before starting the link.
9408 return start_link(ppd);
9411 void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
9413 struct hfi1_devdata *dd = ppd->dd;
9416 * Shut down the link and keep it down. First clear the flag that
9417 * says the driver wants to allow the link to be up (driver_link_ready).
9418 * Then make sure the link is not automatically restarted
9419 * (link_enabled). Cancel any pending restart. And finally
9422 ppd->driver_link_ready = 0;
9423 ppd->link_enabled = 0;
9425 ppd->offline_disabled_reason =
9426 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
9427 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
9428 OPA_LINKDOWN_REASON_SMA_DISABLED);
9429 set_link_state(ppd, HLS_DN_OFFLINE);
9431 /* disable the port */
9432 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9435 static inline int init_cpu_counters(struct hfi1_devdata *dd)
9437 struct hfi1_pportdata *ppd;
9440 ppd = (struct hfi1_pportdata *)(dd + 1);
9441 for (i = 0; i < dd->num_pports; i++, ppd++) {
9442 ppd->ibport_data.rvp.rc_acks = NULL;
9443 ppd->ibport_data.rvp.rc_qacks = NULL;
9444 ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
9445 ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
9446 ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
9447 if (!ppd->ibport_data.rvp.rc_acks ||
9448 !ppd->ibport_data.rvp.rc_delayed_comp ||
9449 !ppd->ibport_data.rvp.rc_qacks)
9456 static const char * const pt_names[] = {
9462 static const char *pt_name(u32 type)
9464 return type >= ARRAY_SIZE(pt_names) ? "unknown" : pt_names[type];
9468 * index is the index into the receive array
9470 void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
9471 u32 type, unsigned long pa, u16 order)
9474 void __iomem *base = (dd->rcvarray_wc ? dd->rcvarray_wc :
9475 (dd->kregbase + RCV_ARRAY));
9477 if (!(dd->flags & HFI1_PRESENT))
9480 if (type == PT_INVALID) {
9482 } else if (type > PT_INVALID) {
9484 "unexpected receive array type %u for index %u, not handled\n",
9489 hfi1_cdbg(TID, "type %s, index 0x%x, pa 0x%lx, bsize 0x%lx",
9490 pt_name(type), index, pa, (unsigned long)order);
9492 #define RT_ADDR_SHIFT 12 /* 4KB kernel address boundary */
9493 reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
9494 | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
9495 | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
9496 << RCV_ARRAY_RT_ADDR_SHIFT;
9497 writeq(reg, base + (index * 8));
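/*
* The 64-bit entry just written packs the write-enable bit, the
* buffer-size order, and the 4 KB-aligned physical address.
*/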
9499 if (type == PT_EAGER)
9501 * Eager entries are written one-by-one so we have to push them
9502 * after we write the entry.
9509 void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
9511 struct hfi1_devdata *dd = rcd->dd;
9514 /* this could be optimized */
9515 for (i = rcd->eager_base; i < rcd->eager_base +
9516 rcd->egrbufs.alloced; i++)
9517 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9519 for (i = rcd->expected_base;
9520 i < rcd->expected_base + rcd->expected_count; i++)
9521 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9524 int hfi1_get_base_kinfo(struct hfi1_ctxtdata *rcd,
9525 struct hfi1_ctxt_info *kinfo)
9527 kinfo->runtime_flags = (HFI1_MISC_GET() << HFI1_CAP_USER_SHIFT) |
9528 HFI1_CAP_UGET(MASK) | HFI1_CAP_KGET(K2U);
9532 struct hfi1_message_header *hfi1_get_msgheader(
9533 struct hfi1_devdata *dd, __le32 *rhf_addr)
9535 u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr));
9537 return (struct hfi1_message_header *)
9538 (rhf_addr - dd->rhf_offset + offset);
9541 static const char * const ib_cfg_name_strings[] = {
9542 "HFI1_IB_CFG_LIDLMC",
9543 "HFI1_IB_CFG_LWID_DG_ENB",
9544 "HFI1_IB_CFG_LWID_ENB",
9546 "HFI1_IB_CFG_SPD_ENB",
9548 "HFI1_IB_CFG_RXPOL_ENB",
9549 "HFI1_IB_CFG_LREV_ENB",
9550 "HFI1_IB_CFG_LINKLATENCY",
9551 "HFI1_IB_CFG_HRTBT",
9552 "HFI1_IB_CFG_OP_VLS",
9553 "HFI1_IB_CFG_VL_HIGH_CAP",
9554 "HFI1_IB_CFG_VL_LOW_CAP",
9555 "HFI1_IB_CFG_OVERRUN_THRESH",
9556 "HFI1_IB_CFG_PHYERR_THRESH",
9557 "HFI1_IB_CFG_LINKDEFAULT",
9558 "HFI1_IB_CFG_PKEYS",
9560 "HFI1_IB_CFG_LSTATE",
9561 "HFI1_IB_CFG_VL_HIGH_LIMIT",
9562 "HFI1_IB_CFG_PMA_TICKS",
9566 static const char *ib_cfg_name(int which)
9568 if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
9570 return ib_cfg_name_strings[which];
9573 int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
9575 struct hfi1_devdata *dd = ppd->dd;
9579 case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
9580 val = ppd->link_width_enabled;
9582 case HFI1_IB_CFG_LWID: /* currently active Link-width */
9583 val = ppd->link_width_active;
9585 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
9586 val = ppd->link_speed_enabled;
9588 case HFI1_IB_CFG_SPD: /* current Link speed */
9589 val = ppd->link_speed_active;
9592 case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
9593 case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
9594 case HFI1_IB_CFG_LINKLATENCY:
9597 case HFI1_IB_CFG_OP_VLS:
9598 val = ppd->vls_operational;
9600 case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
9601 val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
9603 case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
9604 val = VL_ARB_LOW_PRIO_TABLE_SIZE;
9606 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
9607 val = ppd->overrun_threshold;
9609 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
9610 val = ppd->phy_error_threshold;
9612 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
9613 val = dd->link_default;
9616 case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
9617 case HFI1_IB_CFG_PMA_TICKS:
9620 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
9623 "%s: which %s: not implemented\n",
9625 ib_cfg_name(which));
9633 * The largest MAD packet size.
9635 #define MAX_MAD_PACKET 2048
9638 * Return the maximum header bytes that can go on the _wire_
9639 * for this device. This count includes the ICRC which is
9640 * not part of the packet held in memory but is appended by the HW.
9642 * This is dependent on the device's receive header entry size.
9643 * HFI allows this to be set per-receive context, but the
9644 * driver presently enforces a global value.
9646 u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
9649 * The maximum non-payload (MTU) bytes in LRH.PktLen are
9650 * the Receive Header Entry Size minus the PBC (or RHF) size
9651 * plus one DW for the ICRC appended by HW.
9653 * dd->rcd[0].rcvhdrqentsize is in DW.
9654 * We use rcd[0] as all contexts will have the same value. Also,
9655 * the first kernel context would have been allocated by now so
9656 * we are guaranteed a valid value.
9658 return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
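/*
 * Worked example (entry size assumed for illustration): with a
 * rcvhdrqentsize of 32 DWs, the maximum wire header is
 * (32 - 2 + 1) << 2 = 124 bytes.
 */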
9663 * @ppd - per port data
9665 * Set the MTU by limiting how many DWs may be sent. The SendLenCheck*
9666 * registers compare against LRH.PktLen, so use the max bytes included in the LRH.
9669 * This routine changes all VL values except VL15, which it maintains at the same value.
9672 static void set_send_length(struct hfi1_pportdata *ppd)
9674 struct hfi1_devdata *dd = ppd->dd;
9675 u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
9676 u32 maxvlmtu = dd->vld[15].mtu;
9677 u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
9678 & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
9679 SEND_LEN_CHECK1_LEN_VL15_SHIFT;
9682 for (i = 0; i < ppd->vls_supported; i++) {
9683 if (dd->vld[i].mtu > maxvlmtu)
9684 maxvlmtu = dd->vld[i].mtu;
9686 len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
9687 & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
9688 ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
9690 len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
9691 & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
9692 ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
9694 write_csr(dd, SEND_LEN_CHECK0, len1);
9695 write_csr(dd, SEND_LEN_CHECK1, len2);
9696 /* adjust kernel credit return thresholds based on new MTUs */
9697 /* all kernel receive contexts have the same hdrqentsize */
9698 for (i = 0; i < ppd->vls_supported; i++) {
9699 sc_set_cr_threshold(dd->vld[i].sc,
9700 sc_mtu_to_threshold(dd->vld[i].sc,
9705 sc_set_cr_threshold(dd->vld[15].sc,
9706 sc_mtu_to_threshold(dd->vld[15].sc,
9708 dd->rcd[0]->rcvhdrqentsize));
9710 /* Adjust maximum MTU for the port in DC */
9711 dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
9712 (ilog2(maxvlmtu >> 8) + 1);
9713 len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
9714 len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
9715 len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
9716 DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
9717 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
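/*
 * Worked example of the DC MTU cap encoding above (MTU assumed for
 * illustration): an 8192-byte max VL MTU encodes as
 * ilog2(8192 >> 8) + 1 = ilog2(32) + 1 = 6; a 10240-byte MTU uses
 * the dedicated DCC_CFG_PORT_MTU_CAP_10240 encoding instead.
 */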
9720 static void set_lidlmc(struct hfi1_pportdata *ppd)
9724 struct hfi1_devdata *dd = ppd->dd;
9725 u32 mask = ~((1U << ppd->lmc) - 1);
9726 u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
9728 if (dd->hfi1_snoop.mode_flag)
9729 dd_dev_info(dd, "Set lid/lmc while snooping");
9731 c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
9732 | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
9733 c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
9734 << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
9735 ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
9736 << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
9737 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
9740 * Iterate over all the send contexts and set their SLID check
9742 sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
9743 SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
9744 (((ppd->lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
9745 SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
9747 for (i = 0; i < dd->chip_send_contexts; i++) {
9748 hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
9750 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
9753 /* Now we have to do the same thing for the sdma engines */
9754 sdma_update_lmc(dd, mask, ppd->lid);
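/*
 * Illustrative sketch only, not driver code: the SLID check implied by
 * the mask programmed above. With lmc == 2 the low two bits are
 * "don't care", so LIDs base..base+3 all pass the check.
 */
static inline int example_slid_matches(u32 slid, u32 base_lid, u8 lmc)
{
	u32 mask = ~((1U << lmc) - 1);

	return (slid & mask) == (base_lid & mask);
}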
9757 static int wait_phy_linkstate(struct hfi1_devdata *dd, u32 state, u32 msecs)
9759 unsigned long timeout;
9762 timeout = jiffies + msecs_to_jiffies(msecs);
9764 curr_state = read_physical_state(dd);
9765 if (curr_state == state)
9767 if (time_after(jiffies, timeout)) {
9769 "timeout waiting for phy link state 0x%x, current state is 0x%x\n",
9773 usleep_range(1950, 2050); /* sleep 2ms-ish */
9780 * Helper for set_link_state(). Do not call except from that routine.
9781 * Expects ppd->hls_mutex to be held.
9783 * @rem_reason value to be sent to the neighbor
9785 * LinkDownReasons only set if transition succeeds.
9787 static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
9789 struct hfi1_devdata *dd = ppd->dd;
9790 u32 pstate, previous_state;
9791 u32 last_local_state;
9792 u32 last_remote_state;
9797 previous_state = ppd->host_link_state;
9798 ppd->host_link_state = HLS_GOING_OFFLINE;
9799 pstate = read_physical_state(dd);
9800 if (pstate == PLS_OFFLINE) {
9801 do_transition = 0; /* in right state */
9802 do_wait = 0; /* ...no need to wait */
9803 } else if ((pstate & 0xff) == PLS_OFFLINE) {
9804 do_transition = 0; /* in an offline transient state */
9805 do_wait = 1; /* ...wait for it to settle */
9807 do_transition = 1; /* need to move to offline */
9808 do_wait = 1; /* ...will need to wait */
9811 if (do_transition) {
9812 ret = set_physical_link_state(dd,
9813 (rem_reason << 8) | PLS_OFFLINE);
9815 if (ret != HCMD_SUCCESS) {
9817 "Failed to transition to Offline link state, return %d\n",
9821 if (ppd->offline_disabled_reason ==
9822 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
9823 ppd->offline_disabled_reason =
9824 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
9828 /* it can take a while for the link to go down */
9829 ret = wait_phy_linkstate(dd, PLS_OFFLINE, 10000);
9834 /* make sure the logical state is also down */
9835 wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
9838 * Now in charge of LCB - must be after the physical state is
9839 * offline.quiet and before host_link_state is changed.
9841 set_host_lcb_access(dd);
9842 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
9843 ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
9845 if (ppd->port_type == PORT_TYPE_QSFP &&
9846 ppd->qsfp_info.limiting_active &&
9847 qsfp_mod_present(ppd)) {
9848 set_qsfp_tx(ppd, 0);
9852 * The LNI has a mandatory wait time after the physical state
9853 * moves to Offline.Quiet. The wait time may be different
9854 * depending on how the link went down. The 8051 firmware
9855 * will observe the needed wait time and only move to ready
9856 * when that is completed. The largest of the quiet timeouts
9857 * is 6s, so wait that long and then at least 0.5s more for
9858 * other transitions, and another 0.5s for a buffer.
9860 ret = wait_fm_ready(dd, 7000);
9863 "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
9864 /* state is really offline, so make it so */
9865 ppd->host_link_state = HLS_DN_OFFLINE;
9870 * The state is now offline and the 8051 is ready to accept host requests:
9872 * - change our state
9873 * - notify others if we were previously in a linkup state
9875 ppd->host_link_state = HLS_DN_OFFLINE;
9876 if (previous_state & HLS_UP) {
9877 /* went down while link was up */
9878 handle_linkup_change(dd, 0);
9879 } else if (previous_state
9880 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
9881 /* went down while attempting link up */
9882 /* byte 1 of last_*_state is the failure reason */
9883 read_last_local_state(dd, &last_local_state);
9884 read_last_remote_state(dd, &last_remote_state);
9886 "LNI failure last states: local 0x%08x, remote 0x%08x\n",
9887 last_local_state, last_remote_state);
9890 /* the active link width (downgrade) is 0 on link down */
9891 ppd->link_width_active = 0;
9892 ppd->link_width_downgrade_tx_active = 0;
9893 ppd->link_width_downgrade_rx_active = 0;
9894 ppd->current_egress_rate = 0;
9898 /* return the link state name */
9899 static const char *link_state_name(u32 state)
9902 int n = ilog2(state);
9903 static const char * const names[] = {
9904 [__HLS_UP_INIT_BP] = "INIT",
9905 [__HLS_UP_ARMED_BP] = "ARMED",
9906 [__HLS_UP_ACTIVE_BP] = "ACTIVE",
9907 [__HLS_DN_DOWNDEF_BP] = "DOWNDEF",
9908 [__HLS_DN_POLL_BP] = "POLL",
9909 [__HLS_DN_DISABLE_BP] = "DISABLE",
9910 [__HLS_DN_OFFLINE_BP] = "OFFLINE",
9911 [__HLS_VERIFY_CAP_BP] = "VERIFY_CAP",
9912 [__HLS_GOING_UP_BP] = "GOING_UP",
9913 [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
9914 [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
9917 name = n < ARRAY_SIZE(names) ? names[n] : NULL;
9918 return name ? name : "unknown";
9921 /* return the link state reason name */
9922 static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
9924 if (state == HLS_UP_INIT) {
9925 switch (ppd->linkinit_reason) {
9926 case OPA_LINKINIT_REASON_LINKUP:
9928 case OPA_LINKINIT_REASON_FLAPPING:
9929 return "(FLAPPING)";
9930 case OPA_LINKINIT_OUTSIDE_POLICY:
9931 return "(OUTSIDE_POLICY)";
9932 case OPA_LINKINIT_QUARANTINED:
9933 return "(QUARANTINED)";
9934 case OPA_LINKINIT_INSUFIC_CAPABILITY:
9935 return "(INSUFIC_CAPABILITY)";
9944 * driver_physical_state - convert the driver's notion of a port's
9945 * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
9946 * Return -1 (converted to a u32) to indicate error.
9948 u32 driver_physical_state(struct hfi1_pportdata *ppd)
9950 switch (ppd->host_link_state) {
9954 return IB_PORTPHYSSTATE_LINKUP;
9956 return IB_PORTPHYSSTATE_POLLING;
9957 case HLS_DN_DISABLE:
9958 return IB_PORTPHYSSTATE_DISABLED;
9959 case HLS_DN_OFFLINE:
9960 return OPA_PORTPHYSSTATE_OFFLINE;
9961 case HLS_VERIFY_CAP:
9962 return IB_PORTPHYSSTATE_POLLING;
9964 return IB_PORTPHYSSTATE_POLLING;
9965 case HLS_GOING_OFFLINE:
9966 return OPA_PORTPHYSSTATE_OFFLINE;
9967 case HLS_LINK_COOLDOWN:
9968 return OPA_PORTPHYSSTATE_OFFLINE;
9969 case HLS_DN_DOWNDEF:
9971 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
9972 ppd->host_link_state);
9978 * driver_logical_state - convert the driver's notion of a port's
9979 * state (an HLS_*) into a logical state (a IB_PORT_*). Return -1
9980 * (converted to a u32) to indicate error.
9982 u32 driver_logical_state(struct hfi1_pportdata *ppd)
9984 if (ppd->host_link_state && !(ppd->host_link_state & HLS_UP))
9985 return IB_PORT_DOWN;
9987 switch (ppd->host_link_state & HLS_UP) {
9989 return IB_PORT_INIT;
9991 return IB_PORT_ARMED;
9993 return IB_PORT_ACTIVE;
9995 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
9996 ppd->host_link_state);
10001 void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
10002 u8 neigh_reason, u8 rem_reason)
10004 if (ppd->local_link_down_reason.latest == 0 &&
10005 ppd->neigh_link_down_reason.latest == 0) {
10006 ppd->local_link_down_reason.latest = lcl_reason;
10007 ppd->neigh_link_down_reason.latest = neigh_reason;
10008 ppd->remote_link_down_reason = rem_reason;
10013 * Change the physical and/or logical link state.
10015 * Do not call this routine while inside an interrupt. It contains
10016 * calls to routines that can take multiple seconds to finish.
10018 * Returns 0 on success, -errno on failure.
10020 int set_link_state(struct hfi1_pportdata *ppd, u32 state)
10022 struct hfi1_devdata *dd = ppd->dd;
10023 struct ib_event event = {.device = NULL};
10025 int was_up, is_down;
10026 int orig_new_state, poll_bounce;
10028 mutex_lock(&ppd->hls_lock);
10030 orig_new_state = state;
10031 if (state == HLS_DN_DOWNDEF)
10032 state = dd->link_default;
10034 /* interpret poll -> poll as a link bounce */
10035 poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
10036 state == HLS_DN_POLL;
10038 dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
10039 link_state_name(ppd->host_link_state),
10040 link_state_name(orig_new_state),
10041 poll_bounce ? "(bounce) " : "",
10042 link_state_reason_name(ppd, state));
10044 was_up = !!(ppd->host_link_state & HLS_UP);
10047 * If we're going to a (HLS_*) link state that implies the logical
10048 * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
10049 * reset is_sm_config_started to 0.
10051 if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
10052 ppd->is_sm_config_started = 0;
10055 * Do nothing if the states match. Let a poll to poll link bounce go through.
10058 if (ppd->host_link_state == state && !poll_bounce)
10063 if (ppd->host_link_state == HLS_DN_POLL &&
10064 (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
10066 * Quick link up jumps from polling to here.
10068 * Whether in normal or loopback mode, the
10069 * simulator jumps from polling to link up.
10070 * Accept that here.
10073 } else if (ppd->host_link_state != HLS_GOING_UP) {
10077 ppd->host_link_state = HLS_UP_INIT;
10078 ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
10080 /* logical state didn't change, stay at going_up */
10081 ppd->host_link_state = HLS_GOING_UP;
10083 "%s: logical state did not change to INIT\n",
10086 /* clear old transient LINKINIT_REASON code */
10087 if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
10088 ppd->linkinit_reason =
10089 OPA_LINKINIT_REASON_LINKUP;
10091 /* enable the port */
10092 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
10094 handle_linkup_change(dd, 1);
10098 if (ppd->host_link_state != HLS_UP_INIT)
10101 ppd->host_link_state = HLS_UP_ARMED;
10102 set_logical_state(dd, LSTATE_ARMED);
10103 ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
10105 /* logical state didn't change, stay at init */
10106 ppd->host_link_state = HLS_UP_INIT;
10108 "%s: logical state did not change to ARMED\n",
10112 * The simulator does not currently implement SMA messages,
10113 * so neighbor_normal is not set. Set it here when we first move to Armed.
10116 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
10117 ppd->neighbor_normal = 1;
10119 case HLS_UP_ACTIVE:
10120 if (ppd->host_link_state != HLS_UP_ARMED)
10123 ppd->host_link_state = HLS_UP_ACTIVE;
10124 set_logical_state(dd, LSTATE_ACTIVE);
10125 ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
10127 /* logical state didn't change, stay at armed */
10128 ppd->host_link_state = HLS_UP_ARMED;
10130 "%s: logical state did not change to ACTIVE\n",
10133 /* tell all engines to go running */
10134 sdma_all_running(dd);
10136 /* Signal the IB layer that the port has gone active */
10137 event.device = &dd->verbs_dev.rdi.ibdev;
10138 event.element.port_num = ppd->port;
10139 event.event = IB_EVENT_PORT_ACTIVE;
10143 if ((ppd->host_link_state == HLS_DN_DISABLE ||
10144 ppd->host_link_state == HLS_DN_OFFLINE) &&
10147 /* Hand LED control to the DC */
10148 write_csr(dd, DCC_CFG_LED_CNTRL, 0);
10150 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10151 u8 tmp = ppd->link_enabled;
10153 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10155 ppd->link_enabled = tmp;
10158 ppd->remote_link_down_reason = 0;
10160 if (ppd->driver_link_ready)
10161 ppd->link_enabled = 1;
10164 set_all_slowpath(ppd->dd);
10165 ret = set_local_link_attributes(ppd);
10169 ppd->port_error_action = 0;
10170 ppd->host_link_state = HLS_DN_POLL;
10172 if (quick_linkup) {
10173 /* quick linkup does not go into polling */
10174 ret = do_quick_linkup(dd);
10176 ret1 = set_physical_link_state(dd, PLS_POLLING);
10177 if (ret1 != HCMD_SUCCESS) {
10179 "Failed to transition to Polling link state, return 0x%x\n",
10184 ppd->offline_disabled_reason =
10185 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
10187 * If an error occurred above, go back to offline. The
10188 * caller may reschedule another attempt.
10191 goto_offline(ppd, 0);
10193 case HLS_DN_DISABLE:
10194 /* link is disabled */
10195 ppd->link_enabled = 0;
10197 /* allow any state to transition to disabled */
10199 /* must transition to offline first */
10200 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10201 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10204 ppd->remote_link_down_reason = 0;
10207 ret1 = set_physical_link_state(dd, PLS_DISABLED);
10208 if (ret1 != HCMD_SUCCESS) {
10210 "Failed to transition to Disabled link state, return 0x%x\n",
10215 ppd->host_link_state = HLS_DN_DISABLE;
10218 case HLS_DN_OFFLINE:
10219 if (ppd->host_link_state == HLS_DN_DISABLE)
10222 /* allow any state to transition to offline */
10223 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10225 ppd->remote_link_down_reason = 0;
10227 case HLS_VERIFY_CAP:
10228 if (ppd->host_link_state != HLS_DN_POLL)
10230 ppd->host_link_state = HLS_VERIFY_CAP;
10233 if (ppd->host_link_state != HLS_VERIFY_CAP)
10236 ret1 = set_physical_link_state(dd, PLS_LINKUP);
10237 if (ret1 != HCMD_SUCCESS) {
10239 "Failed to transition to link up state, return 0x%x\n",
10244 ppd->host_link_state = HLS_GOING_UP;
10247 case HLS_GOING_OFFLINE: /* transient within goto_offline() */
10248 case HLS_LINK_COOLDOWN: /* transient within goto_offline() */
10250 dd_dev_info(dd, "%s: state 0x%x: not supported\n",
10256 is_down = !!(ppd->host_link_state & (HLS_DN_POLL |
10257 HLS_DN_DISABLE | HLS_DN_OFFLINE));
10259 if (was_up && is_down && ppd->local_link_down_reason.sma == 0 &&
10260 ppd->neigh_link_down_reason.sma == 0) {
10261 ppd->local_link_down_reason.sma =
10262 ppd->local_link_down_reason.latest;
10263 ppd->neigh_link_down_reason.sma =
10264 ppd->neigh_link_down_reason.latest;
10270 dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
10271 __func__, link_state_name(ppd->host_link_state),
10272 link_state_name(state));
10276 mutex_unlock(&ppd->hls_lock);
10279 ib_dispatch_event(&event);
10284 int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
10290 case HFI1_IB_CFG_LIDLMC:
10293 case HFI1_IB_CFG_VL_HIGH_LIMIT:
10295 * The VL Arbitrator high limit is sent in units of 4k
10296 * bytes, while HFI stores it in units of 64 bytes.
10299 reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
10300 << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
10301 write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
10303 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10304 /* HFI only supports POLL as the default link down state */
10305 if (val != HLS_DN_POLL)
10308 case HFI1_IB_CFG_OP_VLS:
10309 if (ppd->vls_operational != val) {
10310 ppd->vls_operational = val;
10316 * For link width, link width downgrade, and speed enable, always AND
10317 * the setting with what is actually supported. This has two benefits.
10318 * First, enabled can't have unsupported values, no matter what the
10319 * SM or FM might want. Second, the ALL_SUPPORTED wildcards that mean
10320 * "fill in with your supported value" have all the bits in the
10321 * field set, so simply ANDing with supported has the desired result.
10323 case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
10324 ppd->link_width_enabled = val & ppd->link_width_supported;
10326 case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
10327 ppd->link_width_downgrade_enabled =
10328 val & ppd->link_width_downgrade_supported;
10330 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
10331 ppd->link_speed_enabled = val & ppd->link_speed_supported;
10333 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
10335 * HFI does not follow IB specs; save this value
10336 * so we can report it if asked.
10338 ppd->overrun_threshold = val;
10340 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10342 * HFI does not follow IB specs; save this value
10343 * so we can report it if asked.
10345 ppd->phy_error_threshold = val;
10348 case HFI1_IB_CFG_MTU:
10349 set_send_length(ppd);
10352 case HFI1_IB_CFG_PKEYS:
10353 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
10354 set_partition_keys(ppd);
10358 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10359 dd_dev_info(ppd->dd,
10360 "%s: which %s, val 0x%x: not implemented\n",
10361 __func__, ib_cfg_name(which), val);
10367 /* begin functions related to vl arbitration table caching */
10368 static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
10372 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10373 VL_ARB_LOW_PRIO_TABLE_SIZE);
10374 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10375 VL_ARB_HIGH_PRIO_TABLE_SIZE);
10378 * Note that we always return values directly from the
10379 * 'vl_arb_cache' (and do no CSR reads) in response to a
10380 * 'Get(VLArbTable)'. This is obviously correct after a
10381 * 'Set(VLArbTable)', since the cache will then be up to
10382 * date. But it's also correct prior to any 'Set(VLArbTable)'
10383 * since then both the cache, and the relevant h/w registers
10387 for (i = 0; i < MAX_PRIO_TABLE; i++)
10388 spin_lock_init(&ppd->vl_arb_cache[i].lock);
10392 * vl_arb_lock_cache
10394 * All other vl_arb_* functions should be called only after locking the cache.
10397 static inline struct vl_arb_cache *
10398 vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
10400 if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
10402 spin_lock(&ppd->vl_arb_cache[idx].lock);
10403 return &ppd->vl_arb_cache[idx];
10406 static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
10408 spin_unlock(&ppd->vl_arb_cache[idx].lock);
10411 static void vl_arb_get_cache(struct vl_arb_cache *cache,
10412 struct ib_vl_weight_elem *vl)
10414 memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
10417 static void vl_arb_set_cache(struct vl_arb_cache *cache,
10418 struct ib_vl_weight_elem *vl)
10420 memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10423 static int vl_arb_match_cache(struct vl_arb_cache *cache,
10424 struct ib_vl_weight_elem *vl)
10426 return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10429 /* end functions related to vl arbitration table caching */
10431 static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
10432 u32 size, struct ib_vl_weight_elem *vl)
10434 struct hfi1_devdata *dd = ppd->dd;
10436 unsigned int i, is_up = 0;
10437 int drain, ret = 0;
10439 mutex_lock(&ppd->hls_lock);
10441 if (ppd->host_link_state & HLS_UP)
10444 drain = !is_ax(dd) && is_up;
10448 * Before adjusting VL arbitration weights, empty per-VL
10449 * FIFOs, otherwise a packet whose VL weight is being
10450 * set to 0 could get stuck in a FIFO with no chance to
10453 ret = stop_drain_data_vls(dd);
10458 "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
10463 for (i = 0; i < size; i++, vl++) {
10465 * NOTE: The low priority shift and mask are used here, but
10466 * they are the same for both the low and high registers.
10468 reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
10469 << SEND_LOW_PRIORITY_LIST_VL_SHIFT)
10470 | (((u64)vl->weight
10471 & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
10472 << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
10473 write_csr(dd, target + (i * 8), reg);
10475 pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
10478 open_fill_data_vls(dd); /* reopen all VLs */
10481 mutex_unlock(&ppd->hls_lock);
10487 * Read one credit merge VL register.
10489 static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
10490 struct vl_limit *vll)
10492 u64 reg = read_csr(dd, csr);
10494 vll->dedicated = cpu_to_be16(
10495 (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
10496 & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
10497 vll->shared = cpu_to_be16(
10498 (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
10499 & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
10503 * Read the current credit merge limits.
10505 static int get_buffer_control(struct hfi1_devdata *dd,
10506 struct buffer_control *bc, u16 *overall_limit)
10511 /* not all entries are filled in */
10512 memset(bc, 0, sizeof(*bc));
10514 /* OPA and HFI have a 1-1 mapping */
10515 for (i = 0; i < TXE_NUM_DATA_VL; i++)
10516 read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);
10518 /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
10519 read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
10521 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10522 bc->overall_shared_limit = cpu_to_be16(
10523 (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
10524 & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
10526 *overall_limit = (reg
10527 >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
10528 & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
10529 return sizeof(struct buffer_control);
10532 static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10537 /* each register contains 16 SC->VLnt mappings, 4 bits each */
10538 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
10539 for (i = 0; i < sizeof(u64); i++) {
10540 u8 byte = *(((u8 *)&reg) + i);
10542 dp->vlnt[2 * i] = byte & 0xf;
10543 dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
10546 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
10547 for (i = 0; i < sizeof(u64); i++) {
10548 u8 byte = *(((u8 *)&reg) + i);
10550 dp->vlnt[16 + (2 * i)] = byte & 0xf;
10551 dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
10553 return sizeof(struct sc2vlnt);
10556 static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
10557 struct ib_vl_weight_elem *vl)
10561 for (i = 0; i < nelems; i++, vl++) {
10567 static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10569 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
10571 0, dp->vlnt[0] & 0xf,
10572 1, dp->vlnt[1] & 0xf,
10573 2, dp->vlnt[2] & 0xf,
10574 3, dp->vlnt[3] & 0xf,
10575 4, dp->vlnt[4] & 0xf,
10576 5, dp->vlnt[5] & 0xf,
10577 6, dp->vlnt[6] & 0xf,
10578 7, dp->vlnt[7] & 0xf,
10579 8, dp->vlnt[8] & 0xf,
10580 9, dp->vlnt[9] & 0xf,
10581 10, dp->vlnt[10] & 0xf,
10582 11, dp->vlnt[11] & 0xf,
10583 12, dp->vlnt[12] & 0xf,
10584 13, dp->vlnt[13] & 0xf,
10585 14, dp->vlnt[14] & 0xf,
10586 15, dp->vlnt[15] & 0xf));
10587 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
10588 DC_SC_VL_VAL(31_16,
10589 16, dp->vlnt[16] & 0xf,
10590 17, dp->vlnt[17] & 0xf,
10591 18, dp->vlnt[18] & 0xf,
10592 19, dp->vlnt[19] & 0xf,
10593 20, dp->vlnt[20] & 0xf,
10594 21, dp->vlnt[21] & 0xf,
10595 22, dp->vlnt[22] & 0xf,
10596 23, dp->vlnt[23] & 0xf,
10597 24, dp->vlnt[24] & 0xf,
10598 25, dp->vlnt[25] & 0xf,
10599 26, dp->vlnt[26] & 0xf,
10600 27, dp->vlnt[27] & 0xf,
10601 28, dp->vlnt[28] & 0xf,
10602 29, dp->vlnt[29] & 0xf,
10603 30, dp->vlnt[30] & 0xf,
10604 31, dp->vlnt[31] & 0xf));
10607 static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
10611 dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
10612 what, (int)limit, idx);
10615 /* change only the shared limit portion of SendCmGlobalCredit */
10616 static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
10620 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10621 reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
10622 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
10623 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10626 /* change only the total credit limit portion of SendCmGlobalCredit */
10627 static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
10631 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10632 reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
10633 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
10634 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10637 /* set the given per-VL shared limit */
10638 static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
10643 if (vl < TXE_NUM_DATA_VL)
10644 addr = SEND_CM_CREDIT_VL + (8 * vl);
10646 addr = SEND_CM_CREDIT_VL15;
10648 reg = read_csr(dd, addr);
10649 reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
10650 reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
10651 write_csr(dd, addr, reg);
10654 /* set the given per-VL dedicated limit */
10655 static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
10660 if (vl < TXE_NUM_DATA_VL)
10661 addr = SEND_CM_CREDIT_VL + (8 * vl);
10663 addr = SEND_CM_CREDIT_VL15;
10665 reg = read_csr(dd, addr);
10666 reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
10667 reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
10668 write_csr(dd, addr, reg);
10671 /* spin until the given per-VL status mask bits clear */
10672 static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
10675 unsigned long timeout;
10678 timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
10680 reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
10683 return; /* success */
10684 if (time_after(jiffies, timeout))
10685 break; /* timed out */
10690 "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
10691 which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
10693 * If this occurs, it is likely there was a credit loss on the link.
10694 * The only recovery from that is a link bounce.
10697 "Continuing anyway. A credit loss may occur. Suggest a link bounce\n");
10701 * The number of credits on the VLs may be changed while everything
10702 * is "live", but the following algorithm must be followed due to
10703 * how the hardware is actually implemented. In particular,
10704 * Return_Credit_Status[] is the only correct status check.
10706 * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
10707 * set Global_Shared_Credit_Limit = 0
10709 * mask0 = all VLs that are changing either dedicated or shared limits
10710 * set Shared_Limit[mask0] = 0
10711 * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
10712 * if (changing any dedicated limit)
10713 * mask1 = all VLs that are lowering dedicated limits
10714 * lower Dedicated_Limit[mask1]
10715 * spin until Return_Credit_Status[mask1] == 0
10716 * raise Dedicated_Limits
10717 * raise Shared_Limits
10718 * raise Global_Shared_Credit_Limit
10720 * lower = if the new limit is lower, set the limit to the new value
10721 * raise = if the new limit is higher than the current value (may be changed
10722 * earlier in the algorithm), set the new limit to the new value
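/*
 * Illustrative sketch only, not driver code: the "lower" and "raise"
 * operations named in the algorithm above. Each moves a limit in one
 * direction only, which is what lets the hardware drain credits safely
 * between the steps.
 */
static inline u16 example_lower_limit(u16 cur, u16 new_limit)
{
	return new_limit < cur ? new_limit : cur;
}

static inline u16 example_raise_limit(u16 cur, u16 new_limit)
{
	return new_limit > cur ? new_limit : cur;
}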
10724 int set_buffer_control(struct hfi1_pportdata *ppd,
10725 struct buffer_control *new_bc)
10727 struct hfi1_devdata *dd = ppd->dd;
10728 u64 changing_mask, ld_mask, stat_mask;
10730 int i, use_all_mask;
10731 int this_shared_changing;
10732 int vl_count = 0, ret;
10734 * A0: add the variable any_shared_limit_changing below and in the
10735 * algorithm above. If removing A0 support, it can be removed.
10737 int any_shared_limit_changing;
10738 struct buffer_control cur_bc;
10739 u8 changing[OPA_MAX_VLS];
10740 u8 lowering_dedicated[OPA_MAX_VLS];
10743 const u64 all_mask =
10744 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
10745 | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
10746 | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
10747 | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
10748 | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
10749 | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
10750 | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
10751 | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
10752 | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
10754 #define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
10755 #define NUM_USABLE_VLS 16 /* look at VL15 and less */
10757 /* find the new total credits, do sanity check on unused VLs */
10758 for (i = 0; i < OPA_MAX_VLS; i++) {
10760 new_total += be16_to_cpu(new_bc->vl[i].dedicated);
10763 nonzero_msg(dd, i, "dedicated",
10764 be16_to_cpu(new_bc->vl[i].dedicated));
10765 nonzero_msg(dd, i, "shared",
10766 be16_to_cpu(new_bc->vl[i].shared));
10767 new_bc->vl[i].dedicated = 0;
10768 new_bc->vl[i].shared = 0;
10770 new_total += be16_to_cpu(new_bc->overall_shared_limit);
10772 /* fetch the current values */
10773 get_buffer_control(dd, &cur_bc, &cur_total);
10776 * Create the masks we will use.
10778 memset(changing, 0, sizeof(changing));
10779 memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
10781 * NOTE: Assumes that the individual VL bits are adjacent and in increasing order.
10785 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
10789 any_shared_limit_changing = 0;
10790 for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
10793 this_shared_changing = new_bc->vl[i].shared
10794 != cur_bc.vl[i].shared;
10795 if (this_shared_changing)
10796 any_shared_limit_changing = 1;
10797 if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated ||
10798 this_shared_changing) {
10800 changing_mask |= stat_mask;
10803 if (be16_to_cpu(new_bc->vl[i].dedicated) <
10804 be16_to_cpu(cur_bc.vl[i].dedicated)) {
10805 lowering_dedicated[i] = 1;
10806 ld_mask |= stat_mask;
10810 /* bracket the credit change with a total adjustment */
10811 if (new_total > cur_total)
10812 set_global_limit(dd, new_total);
10815 * Start the credit change algorithm.
10818 if ((be16_to_cpu(new_bc->overall_shared_limit) <
10819 be16_to_cpu(cur_bc.overall_shared_limit)) ||
10820 (is_ax(dd) && any_shared_limit_changing)) {
10821 set_global_shared(dd, 0);
10822 cur_bc.overall_shared_limit = 0;
10826 for (i = 0; i < NUM_USABLE_VLS; i++) {
10831 set_vl_shared(dd, i, 0);
10832 cur_bc.vl[i].shared = 0;
10836 wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
10839 if (change_count > 0) {
10840 for (i = 0; i < NUM_USABLE_VLS; i++) {
10844 if (lowering_dedicated[i]) {
10845 set_vl_dedicated(dd, i,
10846 be16_to_cpu(new_bc->
10848 cur_bc.vl[i].dedicated =
10849 new_bc->vl[i].dedicated;
10853 wait_for_vl_status_clear(dd, ld_mask, "dedicated");
10855 /* now raise all dedicated that are going up */
10856 for (i = 0; i < NUM_USABLE_VLS; i++) {
10860 if (be16_to_cpu(new_bc->vl[i].dedicated) >
10861 be16_to_cpu(cur_bc.vl[i].dedicated))
10862 set_vl_dedicated(dd, i,
10863 be16_to_cpu(new_bc->
10868 /* next raise all shared that are going up */
10869 for (i = 0; i < NUM_USABLE_VLS; i++) {
10873 if (be16_to_cpu(new_bc->vl[i].shared) >
10874 be16_to_cpu(cur_bc.vl[i].shared))
10875 set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
10878 /* finally raise the global shared */
10879 if (be16_to_cpu(new_bc->overall_shared_limit) >
10880 be16_to_cpu(cur_bc.overall_shared_limit))
10881 set_global_shared(dd,
10882 be16_to_cpu(new_bc->overall_shared_limit));
10884 /* bracket the credit change with a total adjustment */
10885 if (new_total < cur_total)
10886 set_global_limit(dd, new_total);
10889 * Determine the actual number of operational VLs using the number of
10890 * dedicated and shared credits for each VL.
10892 if (change_count > 0) {
10893 for (i = 0; i < TXE_NUM_DATA_VL; i++)
10894 if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 ||
10895 be16_to_cpu(new_bc->vl[i].shared) > 0)
10897 ppd->actual_vls_operational = vl_count;
10898 ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
10899 ppd->actual_vls_operational :
10900 ppd->vls_operational,
10903 ret = pio_map_init(dd, ppd->port - 1, vl_count ?
10904 ppd->actual_vls_operational :
10905 ppd->vls_operational, NULL);
10913 * Read the given fabric manager table. Return the size of the
10914 * table (in bytes) on success, and a negative error code on
10917 int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
10921 struct vl_arb_cache *vlc;
10924 case FM_TBL_VL_HIGH_ARB:
10927 * OPA specifies 128 elements (of 2 bytes each), though
10928 * HFI supports only 16 elements in h/w.
10930 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
10931 vl_arb_get_cache(vlc, t);
10932 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
10934 case FM_TBL_VL_LOW_ARB:
10937 * OPA specifies 128 elements (of 2 bytes each), though
10938 * HFI supports only 16 elements in h/w.
10940 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
10941 vl_arb_get_cache(vlc, t);
10942 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
10944 case FM_TBL_BUFFER_CONTROL:
10945 size = get_buffer_control(ppd->dd, t, NULL);
10947 case FM_TBL_SC2VLNT:
10948 size = get_sc2vlnt(ppd->dd, t);
10950 case FM_TBL_VL_PREEMPT_ELEMS:
10952 /* OPA specifies 128 elements, of 2 bytes each */
10953 get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
10955 case FM_TBL_VL_PREEMPT_MATRIX:
10958 * OPA specifies that this is the same size as the VL
10959 * arbitration tables (i.e., 256 bytes).
10969 * Write the given fabric manager table.
10971 int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
10974 struct vl_arb_cache *vlc;
10977 case FM_TBL_VL_HIGH_ARB:
10978 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
10979 if (vl_arb_match_cache(vlc, t)) {
10980 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
10983 vl_arb_set_cache(vlc, t);
10984 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
10985 ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
10986 VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
10988 case FM_TBL_VL_LOW_ARB:
10989 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
10990 if (vl_arb_match_cache(vlc, t)) {
10991 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
10994 vl_arb_set_cache(vlc, t);
10995 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
10996 ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
10997 VL_ARB_LOW_PRIO_TABLE_SIZE, t);
10999 case FM_TBL_BUFFER_CONTROL:
11000 ret = set_buffer_control(ppd, t);
11002 case FM_TBL_SC2VLNT:
11003 set_sc2vlnt(ppd->dd, t);
11012 * Disable all data VLs.
11014 * Return 0 if disabled, non-zero if the VLs cannot be disabled.
11016 static int disable_data_vls(struct hfi1_devdata *dd)
11021 pio_send_control(dd, PSC_DATA_VL_DISABLE);
11027 * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
11028 * Just re-enables all data VLs (the "fill" part happens
11029 * automatically - the name was chosen for symmetry with
11030 * stop_drain_data_vls()).
11032 * Return 0 if successful, non-zero if the VLs cannot be enabled.
11034 int open_fill_data_vls(struct hfi1_devdata *dd)
11039 pio_send_control(dd, PSC_DATA_VL_ENABLE);
11045 * drain_data_vls() - assumes that disable_data_vls() has been called;
11046 * waits for the occupancy (of per-VL FIFOs) of all contexts and SDMA
11047 * engines to drop to 0.
11049 static void drain_data_vls(struct hfi1_devdata *dd)
11053 pause_for_credit_return(dd);
11057 * stop_drain_data_vls() - disable, then drain all per-VL fifos.
11059 * Use open_fill_data_vls() to resume using data VLs. This pair is
11060 * meant to be used like this:
11062 * stop_drain_data_vls(dd);
11063 * // do things with per-VL resources
11064 * open_fill_data_vls(dd);
11066 int stop_drain_data_vls(struct hfi1_devdata *dd)
11070 ret = disable_data_vls(dd);
11072 drain_data_vls(dd);
11078 * Convert a nanosecond time to a cclock count. No matter how slow
11079 * the cclock, a non-zero ns will always have a non-zero result.
11081 u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
11085 if (dd->icode == ICODE_FPGA_EMULATION)
11086 cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
11087 else /* simulation pretends to be ASIC */
11088 cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
11089 if (ns && !cclocks) /* if ns nonzero, must be at least 1 */
11095 * Convert a cclock count to nanoseconds. No matter how slow
11096 * the cclock, a non-zero cclocks will always have a non-zero result.
11098 u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
11102 if (dd->icode == ICODE_FPGA_EMULATION)
11103 ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
11104 else /* simulation pretends to be ASIC */
11105 ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
11106 if (cclocks && !ns)
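/*
 * Worked example of the conversions above (cclock period assumed for
 * illustration): with an 800ps cclock, cclock_to_ns(1) computes
 * (1 * 800) / 1000 = 0, which the non-zero check bumps to 1 so a
 * non-zero count never reports 0ns.
 */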
11112 * Dynamically adjust the receive interrupt timeout for a context based on
11113 * incoming packet rate.
11115 * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
11117 static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
11119 struct hfi1_devdata *dd = rcd->dd;
11120 u32 timeout = rcd->rcvavail_timeout;
11123 * This algorithm doubles or halves the timeout depending on whether
11124 * the number of packets received in this interrupt was less than, or
11125 * greater than or equal to, the interrupt count.
11127 * The calculations below do not allow a steady state to be achieved.
11128 * Only at the endpoints is it possible to have an unchanging timeout.
11131 if (npkts < rcv_intr_count) {
11133 * Not enough packets arrived before the timeout, adjust
11134 * timeout downward.
11136 if (timeout < 2) /* already at minimum? */
11141 * More than enough packets arrived before the timeout, adjust
11144 if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
11146 timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
11149 rcd->rcvavail_timeout = timeout;
11151 * timeout cannot be larger than rcv_intr_timeout_csr which has already
11152 * been verified to be in range
11154 write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
11156 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
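/*
 * Illustrative sketch only, not driver code: the doubling/halving rule
 * implemented above. "max" stands in for dd->rcv_intr_timeout_csr.
 */
static inline u32 example_next_timeout(u32 timeout, u32 npkts,
					u32 intr_count, u32 max)
{
	if (npkts < intr_count)			/* too few packets: shrink */
		return timeout < 2 ? timeout : timeout >> 1;
	return timeout >= max ? max : min(timeout << 1, max);	/* grow */
}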
11159 void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
11160 u32 intr_adjust, u32 npkts)
11162 struct hfi1_devdata *dd = rcd->dd;
11164 u32 ctxt = rcd->ctxt;
11167 * Need to write timeout register before updating RcvHdrHead to ensure
11168 * that a new value is used when the HW decides to restart counting.
11171 adjust_rcv_timeout(rcd, npkts);
11173 reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
11174 << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
11175 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
11178 reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
11179 (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
11180 << RCV_HDR_HEAD_HEAD_SHIFT);
11181 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11185 u32 hdrqempty(struct hfi1_ctxtdata *rcd)
11189 head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
11190 & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
11192 if (rcd->rcvhdrtail_kvaddr)
11193 tail = get_rcvhdrtail(rcd);
11195 tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
11197 return head == tail;
11201 * Context Control and Receive Array encoding for buffer size:
11210 * 0x8 512 KB (Receive Array only)
11211 * 0x9 1 MB (Receive Array only)
11212 * 0xa 2 MB (Receive Array only)
11214 * 0xB-0xF - reserved (Receive Array only)
11217 * This routine assumes that the value has already been sanity checked.
11219 static u32 encoded_size(u32 size)
11222 case 4 * 1024: return 0x1;
11223 case 8 * 1024: return 0x2;
11224 case 16 * 1024: return 0x3;
11225 case 32 * 1024: return 0x4;
11226 case 64 * 1024: return 0x5;
11227 case 128 * 1024: return 0x6;
11228 case 256 * 1024: return 0x7;
11229 case 512 * 1024: return 0x8;
11230 case 1 * 1024 * 1024: return 0x9;
11231 case 2 * 1024 * 1024: return 0xa;
11233 return 0x1; /* if invalid, go with the minimum size */
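/*
 * Illustrative sketch only, not driver code: the inverse of
 * encoded_size(), valid for the encodings 0x1 (4KB) through 0xa (2MB).
 */
static inline u32 example_decoded_size(u32 encoded)
{
	return (2 * 1024) << encoded;	/* 0x1 -> 4KB, ..., 0xa -> 2MB */
}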
11236 void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
11238 struct hfi1_ctxtdata *rcd;
11240 int did_enable = 0;
11242 rcd = dd->rcd[ctxt];
11246 hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
11248 rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
11249 /* if the context already enabled, don't do the extra steps */
11250 if ((op & HFI1_RCVCTRL_CTXT_ENB) &&
11251 !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
11252 /* reset the tail and hdr addresses, and sequence count */
11253 write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
11254 rcd->rcvhdrq_phys);
11255 if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
11256 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11257 rcd->rcvhdrqtailaddr_phys);
11260 /* reset the cached receive header queue head value */
11264 * Zero the receive header queue so we don't get false
11265 * positives when checking the sequence number. The
11266 * sequence numbers could land exactly on the same spot.
11267 * E.g. a rcd restart before the receive header wrapped.
11269 memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
11271 /* starting timeout */
11272 rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
11274 /* enable the context */
11275 rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
11277 /* clean the egr buffer size first */
11278 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11279 rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
11280 & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
11281 << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
11283 /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
11284 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
11287 /* zero RcvEgrIndexHead */
11288 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
11290 /* set eager count and base index */
11291 reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
11292 & RCV_EGR_CTRL_EGR_CNT_MASK)
11293 << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
11294 (((rcd->eager_base >> RCV_SHIFT)
11295 & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
11296 << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
11297 write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
11300 * Set TID (expected) count and base index.
11301 * rcd->expected_count is set to individual RcvArray entries,
11302 * not pairs, and the CSR takes a pair-count in groups of
11303 * four, so divide by 8.
11305 reg = (((rcd->expected_count >> RCV_SHIFT)
11306 & RCV_TID_CTRL_TID_PAIR_CNT_MASK)
11307 << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
11308 (((rcd->expected_base >> RCV_SHIFT)
11309 & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
11310 << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
11311 write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
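/*
 * Worked example (count assumed for illustration): an expected_count
 * of 2048 individual RcvArray entries is 1024 pairs; in groups of
 * four that is 2048 / 8 = 256, the value written to TID_PAIR_CNT.
 */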
11312 if (ctxt == HFI1_CTRL_CTXT)
11313 write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
11315 if (op & HFI1_RCVCTRL_CTXT_DIS) {
11316 write_csr(dd, RCV_VL15, 0);
11318 * When the receive context is being disabled, turn on tail
11319 * update with a dummy tail address and then disable it.
11322 if (dd->rcvhdrtail_dummy_physaddr) {
11323 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11324 dd->rcvhdrtail_dummy_physaddr);
11325 /* Enabling RcvCtxtCtrl.TailUpd is intentional. */
11326 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11329 rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
11331 if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
11332 rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11333 if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
11334 rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11335 if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_phys)
11336 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11337 if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
11338 /* See comment on RcvCtxtCtrl.TailUpd above */
11339 if (!(op & HFI1_RCVCTRL_CTXT_DIS))
11340 rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11342 if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
11343 rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11344 if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
11345 rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11346 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
11348 * In one-packet-per-eager mode, the size comes from
11349 * the RcvArray entry.
11351 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11352 rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11354 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
11355 rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11356 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
11357 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11358 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
11359 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11360 if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
11361 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11362 if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
11363 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11364 rcd->rcvctrl = rcvctrl;
11365 hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
11366 write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcd->rcvctrl);
11368 /* work around sticky RcvCtxtStatus.BlockedRHQFull */
11370 (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
11371 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11373 dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
11375 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11376 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
11377 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
11378 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11379 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11380 dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
11381 ctxt, reg, reg == 0 ? "not" : "still");
11387 * The interrupt timeout and count must be set after
11388 * the context is enabled to take effect.
11390 /* set interrupt timeout */
11391 write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
11392 (u64)rcd->rcvavail_timeout <<
11393 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11395 /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
11396 reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
11397 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11400 if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
11402 * If the context has been disabled and the Tail Update has
11403 * been cleared, set the RCV_HDR_TAIL_ADDR CSR to dummy address
11404 * so it doesn't contain an address that is invalid.
11406 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11407 dd->rcvhdrtail_dummy_physaddr);
11410 u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
11416 ret = dd->cntrnameslen;
11417 *namep = dd->cntrnames;
11419 const struct cntr_entry *entry;
11422 ret = (dd->ndevcntrs) * sizeof(u64);
11424 /* Get the start of the block of counters */
11425 *cntrp = dd->cntrs;
11428 * Now go and fill in each counter in the block.
11430 for (i = 0; i < DEV_CNTR_LAST; i++) {
11431 entry = &dev_cntrs[i];
11432 hfi1_cdbg(CNTR, "reading %s", entry->name);
11433 if (entry->flags & CNTR_DISABLED) {
11435 hfi1_cdbg(CNTR, "\tDisabled\n");
11437 if (entry->flags & CNTR_VL) {
11438 hfi1_cdbg(CNTR, "\tPer VL\n");
11439 for (j = 0; j < C_VL_COUNT; j++) {
11440 val = entry->rw_cntr(entry,
11446 "\t\tRead 0x%llx for %d\n",
11448 dd->cntrs[entry->offset + j] =
11451 } else if (entry->flags & CNTR_SDMA) {
11453 "\t Per SDMA Engine\n");
11454 for (j = 0; j < dd->chip_sdma_engines;
11457 entry->rw_cntr(entry, dd, j,
11460 "\t\tRead 0x%llx for %d\n",
11462 dd->cntrs[entry->offset + j] =
11466 val = entry->rw_cntr(entry, dd,
11469 dd->cntrs[entry->offset] = val;
11470 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11479 * Used by sysfs to create files for hfi stats to read
11481 u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp)
11487 ret = ppd->dd->portcntrnameslen;
11488 *namep = ppd->dd->portcntrnames;
11490 const struct cntr_entry *entry;
11493 ret = ppd->dd->nportcntrs * sizeof(u64);
11494 *cntrp = ppd->cntrs;
11496 for (i = 0; i < PORT_CNTR_LAST; i++) {
11497 entry = &port_cntrs[i];
11498 hfi1_cdbg(CNTR, "reading %s", entry->name);
11499 if (entry->flags & CNTR_DISABLED) {
11501 hfi1_cdbg(CNTR, "\tDisabled\n");
11505 if (entry->flags & CNTR_VL) {
11506 hfi1_cdbg(CNTR, "\tPer VL");
11507 for (j = 0; j < C_VL_COUNT; j++) {
11508 val = entry->rw_cntr(entry, ppd, j,
11513 "\t\tRead 0x%llx for %d",
11515 ppd->cntrs[entry->offset + j] = val;
11518 val = entry->rw_cntr(entry, ppd,
11522 ppd->cntrs[entry->offset] = val;
11523 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11530 static void free_cntrs(struct hfi1_devdata *dd)
11532 struct hfi1_pportdata *ppd;
11535 if (dd->synth_stats_timer.data)
11536 del_timer_sync(&dd->synth_stats_timer);
11537 dd->synth_stats_timer.data = 0;
11538 ppd = (struct hfi1_pportdata *)(dd + 1);
11539 for (i = 0; i < dd->num_pports; i++, ppd++) {
11541 kfree(ppd->scntrs);
11542 free_percpu(ppd->ibport_data.rvp.rc_acks);
11543 free_percpu(ppd->ibport_data.rvp.rc_qacks);
11544 free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
11546 ppd->scntrs = NULL;
11547 ppd->ibport_data.rvp.rc_acks = NULL;
11548 ppd->ibport_data.rvp.rc_qacks = NULL;
11549 ppd->ibport_data.rvp.rc_delayed_comp = NULL;
11551 kfree(dd->portcntrnames);
11552 dd->portcntrnames = NULL;
11557 kfree(dd->cntrnames);
11558 dd->cntrnames = NULL;
11561 #define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
11562 #define CNTR_32BIT_MAX 0x00000000FFFFFFFF
11564 static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
11565 u64 *psval, void *context, int vl)
11570 if (entry->flags & CNTR_DISABLED) {
11571 dd_dev_err(dd, "Counter %s not enabled", entry->name);
11575 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11577 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
11579 /* If it's a synthetic counter there is more work we need to do */
11580 if (entry->flags & CNTR_SYNTH) {
11581 if (sval == CNTR_MAX) {
11582 /* No need to read already saturated */
11586 if (entry->flags & CNTR_32BIT) {
11587 /* 32bit counters can wrap multiple times */
11588 u64 upper = sval >> 32;
11589 u64 lower = (sval << 32) >> 32;
11591 if (lower > val) { /* hw wrapped */
11592 if (upper == CNTR_32BIT_MAX)
11598 if (val != CNTR_MAX)
11599 val = (upper << 32) | val;
11602 /* If we rolled we are saturated */
11603 if ((val < sval) || (val > CNTR_MAX))
11610 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
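/*
 * Illustrative sketch only, not driver code: the 32-bit extension
 * performed above. The saved software value supplies the upper half,
 * which is bumped when the hardware value wrapped past the saved
 * lower half (saturation handling omitted).
 */
static inline u64 example_extend_32bit(u64 saved, u32 hw_val)
{
	u64 upper = saved >> 32;
	u32 lower = (u32)saved;

	if (hw_val < lower)	/* hardware wrapped since last read */
		upper++;
	return (upper << 32) | hw_val;
}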
11615 static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
11616 struct cntr_entry *entry,
11617 u64 *psval, void *context, int vl, u64 data)
11621 if (entry->flags & CNTR_DISABLED) {
11622 dd_dev_err(dd, "Counter %s not enabled", entry->name);
11626 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11628 if (entry->flags & CNTR_SYNTH) {
11630 if (entry->flags & CNTR_32BIT) {
11631 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11632 (data << 32) >> 32);
11633 val = data; /* return the full 64bit value */
11635 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11639 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
11644 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11649 u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
11651 struct cntr_entry *entry;
11654 entry = &dev_cntrs[index];
11655 sval = dd->scntrs + entry->offset;
11657 if (vl != CNTR_INVALID_VL)
11660 return read_dev_port_cntr(dd, entry, sval, dd, vl);
11663 u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
11665 struct cntr_entry *entry;
11668 entry = &dev_cntrs[index];
11669 sval = dd->scntrs + entry->offset;
11671 if (vl != CNTR_INVALID_VL)
11674 return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
11677 u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
11679 struct cntr_entry *entry;
11682 entry = &port_cntrs[index];
11683 sval = ppd->scntrs + entry->offset;
11685 if (vl != CNTR_INVALID_VL)
11688 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11689 (index <= C_RCV_HDR_OVF_LAST)) {
11690 /* We do not want to bother for disabled contexts */
11694 return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
11697 u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
11699 struct cntr_entry *entry;
11702 entry = &port_cntrs[index];
11703 sval = ppd->scntrs + entry->offset;
11705 if (vl != CNTR_INVALID_VL)
11708 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11709 (index <= C_RCV_HDR_OVF_LAST)) {
11710 /* We do not want to bother for disabled contexts */
11714 return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
11717 static void update_synth_timer(unsigned long opaque)
11724 struct hfi1_pportdata *ppd;
11725 struct cntr_entry *entry;
11727 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
11730 * Rather than keep beating on the CSRs, pick a minimal set that we can
11731 * check to watch for potential roll over. We can do this by looking at
11732 * the number of flits sent/received. If the total flit count exceeds
11733 * 32 bits then we have to iterate over all the counters and update them.
11735 entry = &dev_cntrs[C_DC_RCV_FLITS];
11736 cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
11738 entry = &dev_cntrs[C_DC_XMIT_FLITS];
11739 cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
11743 "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
11744 dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
11746 if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
11748 * May not be strictly necessary to update but it won't hurt and
11749 * simplifies the logic here.
11752 hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
11755 total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
11757 "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
11758 total_flits, (u64)CNTR_32BIT_MAX);
11759 if (total_flits >= CNTR_32BIT_MAX) {
11760 hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
11767 hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
11768 for (i = 0; i < DEV_CNTR_LAST; i++) {
11769 entry = &dev_cntrs[i];
11770 if (entry->flags & CNTR_VL) {
11771 for (vl = 0; vl < C_VL_COUNT; vl++)
11772 read_dev_cntr(dd, i, vl);
11774 read_dev_cntr(dd, i, CNTR_INVALID_VL);
11777 ppd = (struct hfi1_pportdata *)(dd + 1);
11778 for (i = 0; i < dd->num_pports; i++, ppd++) {
11779 for (j = 0; j < PORT_CNTR_LAST; j++) {
11780 entry = &port_cntrs[j];
11781 if (entry->flags & CNTR_VL) {
11782 for (vl = 0; vl < C_VL_COUNT; vl++)
11783 read_port_cntr(ppd, j, vl);
11785 read_port_cntr(ppd, j, CNTR_INVALID_VL);
11791 * We want the value in the register. The goal is to keep track
11792 * of the number of "ticks" not the counter value. In other
11793 * words if the register rolls we want to notice it and go ahead
11794 * and force an update.
11796 entry = &dev_cntrs[C_DC_XMIT_FLITS];
11797 dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
11800 entry = &dev_cntrs[C_DC_RCV_FLITS];
11801 dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
11804 hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
11805 dd->unit, dd->last_tx, dd->last_rx);
11808 hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
11811 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
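/*
 * Illustrative sketch (not part of the driver): the tripwire test used
 * by update_synth_timer() above, restated as a standalone predicate.
 * It returns nonzero when the flit deltas since the last synthesis
 * could have wrapped a 32-bit hardware counter and a full sweep of all
 * counters is therefore required.
 */
static inline int __maybe_unused synth_update_needed(u64 cur_tx, u64 cur_rx,
						     u64 last_tx, u64 last_rx)
{
	u64 total_flits;

	/* the tripwire registers themselves wrapped - force an update */
	if (cur_tx < last_tx || cur_rx < last_rx)
		return 1;
	total_flits = (cur_tx - last_tx) + (cur_rx - last_rx);
	return total_flits >= CNTR_32BIT_MAX;
}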
#define C_MAX_NAME 13 /* 12 chars + one for \0 */
11815 static int init_cntrs(struct hfi1_devdata *dd)
11817 int i, rcv_ctxts, j;
11820 char name[C_MAX_NAME];
11821 struct hfi1_pportdata *ppd;
11822 const char *bit_type_32 = ",32";
11823 const int bit_type_32_sz = strlen(bit_type_32);
11825 /* set up the stats timer; the add_timer is done at the end */
11826 setup_timer(&dd->synth_stats_timer, update_synth_timer,
11827 (unsigned long)dd);
11829 /***********************/
11830 /* per device counters */
11831 /***********************/
	/* size names and determine how many we have */
11837 for (i = 0; i < DEV_CNTR_LAST; i++) {
11838 if (dev_cntrs[i].flags & CNTR_DISABLED) {
11839 hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
11843 if (dev_cntrs[i].flags & CNTR_VL) {
11844 dev_cntrs[i].offset = dd->ndevcntrs;
11845 for (j = 0; j < C_VL_COUNT; j++) {
11846 snprintf(name, C_MAX_NAME, "%s%d",
11847 dev_cntrs[i].name, vl_from_idx(j));
11848 sz += strlen(name);
11849 /* Add ",32" for 32-bit counters */
11850 if (dev_cntrs[i].flags & CNTR_32BIT)
11851 sz += bit_type_32_sz;
11855 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
11856 dev_cntrs[i].offset = dd->ndevcntrs;
11857 for (j = 0; j < dd->chip_sdma_engines; j++) {
11858 snprintf(name, C_MAX_NAME, "%s%d",
11859 dev_cntrs[i].name, j);
11860 sz += strlen(name);
11861 /* Add ",32" for 32-bit counters */
11862 if (dev_cntrs[i].flags & CNTR_32BIT)
11863 sz += bit_type_32_sz;
11868 /* +1 for newline. */
11869 sz += strlen(dev_cntrs[i].name) + 1;
11870 /* Add ",32" for 32-bit counters */
11871 if (dev_cntrs[i].flags & CNTR_32BIT)
11872 sz += bit_type_32_sz;
11873 dev_cntrs[i].offset = dd->ndevcntrs;
11878 /* allocate space for the counter values */
11879 dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
11883 dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
11887 /* allocate space for the counter names */
11888 dd->cntrnameslen = sz;
11889 dd->cntrnames = kmalloc(sz, GFP_KERNEL);
11890 if (!dd->cntrnames)
11893 /* fill in the names */
11894 for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
11895 if (dev_cntrs[i].flags & CNTR_DISABLED) {
11897 } else if (dev_cntrs[i].flags & CNTR_VL) {
11898 for (j = 0; j < C_VL_COUNT; j++) {
11899 snprintf(name, C_MAX_NAME, "%s%d",
11902 memcpy(p, name, strlen(name));
11905 /* Counter is 32 bits */
11906 if (dev_cntrs[i].flags & CNTR_32BIT) {
11907 memcpy(p, bit_type_32, bit_type_32_sz);
11908 p += bit_type_32_sz;
11913 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
11914 for (j = 0; j < dd->chip_sdma_engines; j++) {
11915 snprintf(name, C_MAX_NAME, "%s%d",
11916 dev_cntrs[i].name, j);
11917 memcpy(p, name, strlen(name));
11920 /* Counter is 32 bits */
11921 if (dev_cntrs[i].flags & CNTR_32BIT) {
11922 memcpy(p, bit_type_32, bit_type_32_sz);
11923 p += bit_type_32_sz;
11929 memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
11930 p += strlen(dev_cntrs[i].name);
11932 /* Counter is 32 bits */
11933 if (dev_cntrs[i].flags & CNTR_32BIT) {
11934 memcpy(p, bit_type_32, bit_type_32_sz);
11935 p += bit_type_32_sz;
11942 /*********************/
11943 /* per port counters */
11944 /*********************/
11947 * Go through the counters for the overflows and disable the ones we
11948 * don't need. This varies based on platform so we need to do it
11949 * dynamically here.
11951 rcv_ctxts = dd->num_rcv_contexts;
11952 for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
11953 i <= C_RCV_HDR_OVF_LAST; i++) {
11954 port_cntrs[i].flags |= CNTR_DISABLED;
	/* size port counter names and determine how many we have */
11959 dd->nportcntrs = 0;
11960 for (i = 0; i < PORT_CNTR_LAST; i++) {
11961 if (port_cntrs[i].flags & CNTR_DISABLED) {
11962 hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
11966 if (port_cntrs[i].flags & CNTR_VL) {
11967 port_cntrs[i].offset = dd->nportcntrs;
11968 for (j = 0; j < C_VL_COUNT; j++) {
11969 snprintf(name, C_MAX_NAME, "%s%d",
11970 port_cntrs[i].name, vl_from_idx(j));
11971 sz += strlen(name);
11972 /* Add ",32" for 32-bit counters */
11973 if (port_cntrs[i].flags & CNTR_32BIT)
11974 sz += bit_type_32_sz;
11979 /* +1 for newline */
11980 sz += strlen(port_cntrs[i].name) + 1;
11981 /* Add ",32" for 32-bit counters */
11982 if (port_cntrs[i].flags & CNTR_32BIT)
11983 sz += bit_type_32_sz;
11984 port_cntrs[i].offset = dd->nportcntrs;
11989 /* allocate space for the counter names */
11990 dd->portcntrnameslen = sz;
11991 dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
11992 if (!dd->portcntrnames)
11995 /* fill in port cntr names */
11996 for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
11997 if (port_cntrs[i].flags & CNTR_DISABLED)
12000 if (port_cntrs[i].flags & CNTR_VL) {
12001 for (j = 0; j < C_VL_COUNT; j++) {
12002 snprintf(name, C_MAX_NAME, "%s%d",
12003 port_cntrs[i].name, vl_from_idx(j));
12004 memcpy(p, name, strlen(name));
12007 /* Counter is 32 bits */
12008 if (port_cntrs[i].flags & CNTR_32BIT) {
12009 memcpy(p, bit_type_32, bit_type_32_sz);
12010 p += bit_type_32_sz;
12016 memcpy(p, port_cntrs[i].name,
12017 strlen(port_cntrs[i].name));
12018 p += strlen(port_cntrs[i].name);
12020 /* Counter is 32 bits */
12021 if (port_cntrs[i].flags & CNTR_32BIT) {
12022 memcpy(p, bit_type_32, bit_type_32_sz);
12023 p += bit_type_32_sz;
12030 /* allocate per port storage for counter values */
12031 ppd = (struct hfi1_pportdata *)(dd + 1);
12032 for (i = 0; i < dd->num_pports; i++, ppd++) {
12033 ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12037 ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12042 /* CPU counters need to be allocated and zeroed */
12043 if (init_cpu_counters(dd))
12046 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12053 static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
12055 switch (chip_lstate) {
12058 "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
12062 return IB_PORT_DOWN;
12064 return IB_PORT_INIT;
12066 return IB_PORT_ARMED;
12067 case LSTATE_ACTIVE:
12068 return IB_PORT_ACTIVE;
12072 u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
12074 /* look at the HFI meta-states only */
12075 switch (chip_pstate & 0xf0) {
12077 dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
12081 return IB_PORTPHYSSTATE_DISABLED;
12083 return OPA_PORTPHYSSTATE_OFFLINE;
12085 return IB_PORTPHYSSTATE_POLLING;
12086 case PLS_CONFIGPHY:
12087 return IB_PORTPHYSSTATE_TRAINING;
12089 return IB_PORTPHYSSTATE_LINKUP;
12091 return IB_PORTPHYSSTATE_PHY_TEST;
12095 /* return the OPA port logical state name */
12096 const char *opa_lstate_name(u32 lstate)
12098 static const char * const port_logical_names[] = {
12104 "PORT_ACTIVE_DEFER",
12106 if (lstate < ARRAY_SIZE(port_logical_names))
12107 return port_logical_names[lstate];
12111 /* return the OPA port physical state name */
12112 const char *opa_pstate_name(u32 pstate)
12114 static const char * const port_physical_names[] = {
12121 "PHYS_LINK_ERR_RECOVER",
12128 if (pstate < ARRAY_SIZE(port_physical_names))
12129 return port_physical_names[pstate];
12134 * Read the hardware link state and set the driver's cached value of it.
12135 * Return the (new) current value.
12137 u32 get_logical_state(struct hfi1_pportdata *ppd)
12141 new_state = chip_to_opa_lstate(ppd->dd, read_logical_state(ppd->dd));
12142 if (new_state != ppd->lstate) {
12143 dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
12144 opa_lstate_name(new_state), new_state);
12145 ppd->lstate = new_state;
12148 * Set port status flags in the page mapped into userspace
12149 * memory. Do it here to ensure a reliable state - this is
12150 * the only function called by all state handling code.
12151 * Always set the flags due to the fact that the cache value
12152 * might have been changed explicitly outside of this
12155 if (ppd->statusp) {
12156 switch (ppd->lstate) {
12159 *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
12160 HFI1_STATUS_IB_READY);
12162 case IB_PORT_ARMED:
12163 *ppd->statusp |= HFI1_STATUS_IB_CONF;
12165 case IB_PORT_ACTIVE:
12166 *ppd->statusp |= HFI1_STATUS_IB_READY;
12170 return ppd->lstate;
12174 * wait_logical_linkstate - wait for an IB link state change to occur
12175 * @ppd: port device
12176 * @state: the state to wait for
12177 * @msecs: the number of milliseconds to wait
12179 * Wait up to msecs milliseconds for IB link state change to occur.
12180 * For now, take the easy polling route.
12181 * Returns 0 if state reached, otherwise -ETIMEDOUT.
12183 static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12186 unsigned long timeout;
12188 timeout = jiffies + msecs_to_jiffies(msecs);
12190 if (get_logical_state(ppd) == state)
12192 if (time_after(jiffies, timeout))
12196 dd_dev_err(ppd->dd, "timeout waiting for link state 0x%x\n", state);
12201 u8 hfi1_ibphys_portstate(struct hfi1_pportdata *ppd)
12206 pstate = read_physical_state(ppd->dd);
12207 ib_pstate = chip_to_opa_pstate(ppd->dd, pstate);
12208 if (ppd->last_pstate != ib_pstate) {
12209 dd_dev_info(ppd->dd,
12210 "%s: physical state changed to %s (0x%x), phy 0x%x\n",
12211 __func__, opa_pstate_name(ib_pstate), ib_pstate,
12213 ppd->last_pstate = ib_pstate;
 * Read/modify/write ASIC_QSFP register bits as selected by mask
 * data: 0 or 1 in the positions depending on what needs to be written
 * dir: 0 for read, 1 for write
 * mask: select the pins to operate on by setting the corresponding bits
12226 u64 hfi1_gpio_mod(struct hfi1_devdata *dd, u32 target, u32 data, u32 dir,
12229 u64 qsfp_oe, target_oe;
12231 target_oe = target ? ASIC_QSFP2_OE : ASIC_QSFP1_OE;
12233 /* We are writing register bits, so lock access */
12237 qsfp_oe = read_csr(dd, target_oe);
12238 qsfp_oe = (qsfp_oe & ~(u64)mask) | (u64)dir;
12239 write_csr(dd, target_oe, qsfp_oe);
12241 /* We are exclusively reading bits here, but it is unlikely
12242 * we'll get valid data when we set the direction of the pin
	 * in the same call, so a reader should call this function again
12244 * to get valid data
12246 return read_csr(dd, target ? ASIC_QSFP2_IN : ASIC_QSFP1_IN);
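/*
 * Usage sketch (hypothetical, not driver code): with mask and dir both
 * zero, hfi1_gpio_mod() leaves the output-enable bits untouched and
 * simply samples the QSFP input pins - the second, read-only call the
 * comment above recommends.
 */
static u64 __maybe_unused qsfp_sample_inputs(struct hfi1_devdata *dd,
					     u32 target)
{
	return hfi1_gpio_mod(dd, target, 0, 0, 0);
}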
12249 #define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
12250 (r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12252 #define SET_STATIC_RATE_CONTROL_SMASK(r) \
12253 (r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12255 int hfi1_init_ctxt(struct send_context *sc)
12258 struct hfi1_devdata *dd = sc->dd;
12260 u8 set = (sc->type == SC_USER ?
12261 HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
12262 HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
12263 reg = read_kctxt_csr(dd, sc->hw_context,
12264 SEND_CTXT_CHECK_ENABLE);
12266 CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
12268 SET_STATIC_RATE_CONTROL_SMASK(reg);
12269 write_kctxt_csr(dd, sc->hw_context,
12270 SEND_CTXT_CHECK_ENABLE, reg);
12275 int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
12280 if (dd->icode != ICODE_RTL_SILICON) {
12281 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
12282 dd_dev_info(dd, "%s: tempsense not supported by HW\n",
12286 reg = read_csr(dd, ASIC_STS_THERM);
12287 temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
12288 ASIC_STS_THERM_CURR_TEMP_MASK);
12289 temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
12290 ASIC_STS_THERM_LO_TEMP_MASK);
12291 temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
12292 ASIC_STS_THERM_HI_TEMP_MASK);
12293 temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
12294 ASIC_STS_THERM_CRIT_TEMP_MASK);
12295 /* triggers is a 3-bit value - 1 bit per trigger. */
12296 temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
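/*
 * Usage sketch (hypothetical, not driver code): callers pass a
 * stack-allocated struct hfi1_temp and consume the decoded fields; a
 * zero return indicates the thermal sensor was read successfully.
 */
static void __maybe_unused log_temp_example(struct hfi1_devdata *dd)
{
	struct hfi1_temp temp;

	if (hfi1_tempsense_rd(dd, &temp) == 0)
		dd_dev_info(dd, "temp now %u, critical limit %u\n",
			    (unsigned int)temp.curr,
			    (unsigned int)temp.crit_lim);
}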
12301 /* ========================================================================= */
12304 * Enable/disable chip from delivering interrupts.
12306 void set_intr_state(struct hfi1_devdata *dd, u32 enable)
12311 * In HFI, the mask needs to be 1 to allow interrupts.
12314 /* enable all interrupts */
12315 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12316 write_csr(dd, CCE_INT_MASK + (8 * i), ~(u64)0);
12320 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12321 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
12326 * Clear all interrupt sources on the chip.
12328 static void clear_all_interrupts(struct hfi1_devdata *dd)
12332 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12333 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0);
12335 write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
12336 write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
12337 write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
12338 write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
12339 write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
12340 write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
12341 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
12342 for (i = 0; i < dd->chip_send_contexts; i++)
12343 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
12344 for (i = 0; i < dd->chip_sdma_engines; i++)
12345 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
12347 write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
12348 write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
12349 write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
12352 /* Move to pcie.c? */
12353 static void disable_intx(struct pci_dev *pdev)
12358 static void clean_up_interrupts(struct hfi1_devdata *dd)
12362 /* remove irqs - must happen before disabling/turning off */
12363 if (dd->num_msix_entries) {
12365 struct hfi1_msix_entry *me = dd->msix_entries;
12367 for (i = 0; i < dd->num_msix_entries; i++, me++) {
12368 if (!me->arg) /* => no irq, no affinity */
12370 hfi1_put_irq_affinity(dd, &dd->msix_entries[i]);
12371 free_irq(me->msix.vector, me->arg);
12375 if (dd->requested_intx_irq) {
12376 free_irq(dd->pcidev->irq, dd);
12377 dd->requested_intx_irq = 0;
12381 /* turn off interrupts */
12382 if (dd->num_msix_entries) {
12384 pci_disable_msix(dd->pcidev);
12387 disable_intx(dd->pcidev);
12390 /* clean structures */
12391 kfree(dd->msix_entries);
12392 dd->msix_entries = NULL;
12393 dd->num_msix_entries = 0;
12397 * Remap the interrupt source from the general handler to the given MSI-X
12400 static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
	/* clear from the handled mask of the general interrupt */
	m = isrc / 64;
	n = isrc % 64;
	dd->gi_mask[m] &= ~((u64)1 << n);
	/* direct the chip source to the given MSI-X interrupt */
	m = isrc / 8;
	n = isrc % 8;
	reg = read_csr(dd, CCE_INT_MAP + (8 * m));
12414 reg &= ~((u64)0xff << (8 * n));
12415 reg |= ((u64)msix_intr & 0xff) << (8 * n);
12416 write_csr(dd, CCE_INT_MAP + (8 * m), reg);
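/*
 * Worked example (illustrative): interrupt source 72 sits in
 * dd->gi_mask[1], bit 8 (72 / 64 == 1, 72 % 64 == 8), and occupies
 * byte 0 of the tenth CCE_INT_MAP CSR (72 / 8 == 9, 72 % 8 == 0), so
 * remap_intr(dd, 72, 3) clears that mask bit and writes 3 into that
 * map byte.
 */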
12419 static void remap_sdma_interrupts(struct hfi1_devdata *dd,
12420 int engine, int msix_intr)
	 * SDMA engine interrupt sources are grouped by type, rather than
	 * by engine. Per-engine interrupts are as follows:
12429 remap_intr(dd, IS_SDMA_START + 0 * TXE_NUM_SDMA_ENGINES + engine,
12431 remap_intr(dd, IS_SDMA_START + 1 * TXE_NUM_SDMA_ENGINES + engine,
12433 remap_intr(dd, IS_SDMA_START + 2 * TXE_NUM_SDMA_ENGINES + engine,
12437 static int request_intx_irq(struct hfi1_devdata *dd)
12441 snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME "_%d",
12443 ret = request_irq(dd->pcidev->irq, general_interrupt,
12444 IRQF_SHARED, dd->intx_name, dd);
12446 dd_dev_err(dd, "unable to request INTx interrupt, err %d\n",
12449 dd->requested_intx_irq = 1;
12453 static int request_msix_irqs(struct hfi1_devdata *dd)
12455 int first_general, last_general;
12456 int first_sdma, last_sdma;
12457 int first_rx, last_rx;
12460 /* calculate the ranges we are going to use */
12462 last_general = first_general + 1;
12463 first_sdma = last_general;
12464 last_sdma = first_sdma + dd->num_sdma;
12465 first_rx = last_sdma;
12466 last_rx = first_rx + dd->n_krcv_queues;
12469 * Sanity check - the code expects all SDMA chip source
12470 * interrupts to be in the same CSR, starting at bit 0. Verify
12471 * that this is true by checking the bit location of the start.
12473 BUILD_BUG_ON(IS_SDMA_START % 64);
12475 for (i = 0; i < dd->num_msix_entries; i++) {
12476 struct hfi1_msix_entry *me = &dd->msix_entries[i];
12477 const char *err_info;
12478 irq_handler_t handler;
12479 irq_handler_t thread = NULL;
12482 struct hfi1_ctxtdata *rcd = NULL;
12483 struct sdma_engine *sde = NULL;
12485 /* obtain the arguments to request_irq */
12486 if (first_general <= i && i < last_general) {
12487 idx = i - first_general;
12488 handler = general_interrupt;
12490 snprintf(me->name, sizeof(me->name),
12491 DRIVER_NAME "_%d", dd->unit);
12492 err_info = "general";
12493 me->type = IRQ_GENERAL;
12494 } else if (first_sdma <= i && i < last_sdma) {
12495 idx = i - first_sdma;
12496 sde = &dd->per_sdma[idx];
12497 handler = sdma_interrupt;
12499 snprintf(me->name, sizeof(me->name),
12500 DRIVER_NAME "_%d sdma%d", dd->unit, idx);
12502 remap_sdma_interrupts(dd, idx, i);
12503 me->type = IRQ_SDMA;
12504 } else if (first_rx <= i && i < last_rx) {
12505 idx = i - first_rx;
12506 rcd = dd->rcd[idx];
12507 /* no interrupt if no rcd */
12511 * Set the interrupt register and mask for this
12512 * context's interrupt.
12514 rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
12515 rcd->imask = ((u64)1) <<
12516 ((IS_RCVAVAIL_START + idx) % 64);
12517 handler = receive_context_interrupt;
12518 thread = receive_context_thread;
12520 snprintf(me->name, sizeof(me->name),
12521 DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
12522 err_info = "receive context";
12523 remap_intr(dd, IS_RCVAVAIL_START + idx, i);
12524 me->type = IRQ_RCVCTXT;
			/* not in our expected range - complain, then ignore it */
12530 "Unexpected extra MSI-X interrupt %d\n", i);
12533 /* no argument, no interrupt */
12536 /* make sure the name is terminated */
12537 me->name[sizeof(me->name) - 1] = 0;
12539 ret = request_threaded_irq(me->msix.vector, handler, thread, 0,
12543 "unable to allocate %s interrupt, vector %d, index %d, err %d\n",
12544 err_info, me->msix.vector, idx, ret);
		 * assign arg after request_irq call, so it will be cleaned up
12553 ret = hfi1_get_irq_affinity(dd, me);
12556 "unable to pin IRQ %d\n", ret);
12563 * Set the general handler to accept all interrupts, remap all
12564 * chip interrupts back to MSI-X 0.
12566 static void reset_interrupts(struct hfi1_devdata *dd)
12570 /* all interrupts handled by the general handler */
12571 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12572 dd->gi_mask[i] = ~(u64)0;
12574 /* all chip interrupts map to MSI-X 0 */
12575 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
12576 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
12579 static int set_up_interrupts(struct hfi1_devdata *dd)
12581 struct hfi1_msix_entry *entries;
12582 u32 total, request;
12584 int single_interrupt = 0; /* we expect to have all the interrupts */
12588 * 1 general, "slow path" interrupt (includes the SDMA engines
12589 * slow source, SDMACleanupDone)
12590 * N interrupts - one per used SDMA engine
	 * M interrupts - one per kernel receive context
12593 total = 1 + dd->num_sdma + dd->n_krcv_queues;
12595 entries = kcalloc(total, sizeof(*entries), GFP_KERNEL);
12600 /* 1-1 MSI-X entry assignment */
12601 for (i = 0; i < total; i++)
12602 entries[i].msix.entry = i;
12604 /* ask for MSI-X interrupts */
12606 request_msix(dd, &request, entries);
12608 if (request == 0) {
12610 /* dd->num_msix_entries already zero */
12612 single_interrupt = 1;
12613 dd_dev_err(dd, "MSI-X failed, using INTx interrupts\n");
12616 dd->num_msix_entries = request;
12617 dd->msix_entries = entries;
12619 if (request != total) {
12620 /* using MSI-X, with reduced interrupts */
12623 "cannot handle reduced interrupt case, want %u, got %u\n",
12628 dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
12631 /* mask all interrupts */
12632 set_intr_state(dd, 0);
12633 /* clear all pending interrupts */
12634 clear_all_interrupts(dd);
12636 /* reset general handler mask, chip MSI-X mappings */
12637 reset_interrupts(dd);
12639 if (single_interrupt)
12640 ret = request_intx_irq(dd);
12642 ret = request_msix_irqs(dd);
12649 clean_up_interrupts(dd);
12654 * Set up context values in dd. Sets:
12656 * num_rcv_contexts - number of contexts being used
12657 * n_krcv_queues - number of kernel contexts
12658 * first_user_ctxt - first non-kernel context in array of contexts
12659 * freectxts - number of free user contexts
12660 * num_send_contexts - number of PIO send contexts being used
12662 static int set_up_context_variables(struct hfi1_devdata *dd)
12664 int num_kernel_contexts;
12665 int total_contexts;
12670 * Kernel contexts: (to be fixed later):
12671 * - min or 2 or 1 context/numa
12672 * - Context 0 - control context (VL15/multicast/error)
12673 * - Context 1 - default context
12677 * Don't count context 0 in n_krcvqs since
	 * it isn't used for normal verbs traffic.
12680 * krcvqs will reflect number of kernel
12681 * receive contexts above 0.
12683 num_kernel_contexts = n_krcvqs + MIN_KERNEL_KCTXTS - 1;
12685 num_kernel_contexts = num_online_nodes() + 1;
12686 num_kernel_contexts =
12687 max_t(int, MIN_KERNEL_KCTXTS, num_kernel_contexts);
12689 * Every kernel receive context needs an ACK send context.
	 * One send context is allocated for each VL{0-7} and VL15
12692 if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
12694 "Reducing # kernel rcv contexts to: %d, from %d\n",
12695 (int)(dd->chip_send_contexts - num_vls - 1),
12696 (int)num_kernel_contexts);
12697 num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
12700 * User contexts: (to be fixed later)
	 * - default to 1 user context per CPU if num_user_contexts is negative
12704 if (num_user_contexts < 0)
12705 num_user_contexts = num_online_cpus();
12707 total_contexts = num_kernel_contexts + num_user_contexts;
12710 * Adjust the counts given a global max.
12712 if (total_contexts > dd->chip_rcv_contexts) {
12714 "Reducing # user receive contexts to: %d, from %d\n",
12715 (int)(dd->chip_rcv_contexts - num_kernel_contexts),
12716 (int)num_user_contexts);
12717 num_user_contexts = dd->chip_rcv_contexts - num_kernel_contexts;
12719 total_contexts = num_kernel_contexts + num_user_contexts;
12722 /* the first N are kernel contexts, the rest are user contexts */
12723 dd->num_rcv_contexts = total_contexts;
12724 dd->n_krcv_queues = num_kernel_contexts;
12725 dd->first_user_ctxt = num_kernel_contexts;
12726 dd->num_user_contexts = num_user_contexts;
12727 dd->freectxts = num_user_contexts;
12729 "rcv contexts: chip %d, used %d (kernel %d, user %d)\n",
12730 (int)dd->chip_rcv_contexts,
12731 (int)dd->num_rcv_contexts,
12732 (int)dd->n_krcv_queues,
12733 (int)dd->num_rcv_contexts - dd->n_krcv_queues);
12736 * Receive array allocation:
12737 * All RcvArray entries are divided into groups of 8. This
12738 * is required by the hardware and will speed up writes to
12739 * consecutive entries by using write-combining of the entire
	 * The number of groups is evenly divided among all contexts;
	 * any leftover groups will be given to the first N user
12746 dd->rcv_entries.group_size = RCV_INCREMENT;
12747 ngroups = dd->chip_rcv_array_count / dd->rcv_entries.group_size;
12748 dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
12749 dd->rcv_entries.nctxt_extra = ngroups -
12750 (dd->num_rcv_contexts * dd->rcv_entries.ngroups);
12751 dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
12752 dd->rcv_entries.ngroups,
12753 dd->rcv_entries.nctxt_extra);
12754 if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
12755 MAX_EAGER_ENTRIES * 2) {
12756 dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
12757 dd->rcv_entries.group_size;
12759 "RcvArray group count too high, change to %u\n",
12760 dd->rcv_entries.ngroups);
12761 dd->rcv_entries.nctxt_extra = 0;
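	/*
	 * Worked example (illustrative, with hypothetical sizes): a
	 * 2048-entry RcvArray at group_size 8 yields 256 groups; shared
	 * among 10 receive contexts that is ngroups = 25 groups per
	 * context, with nctxt_extra = 256 - 10 * 25 = 6 groups left
	 * over for the first user contexts.
	 */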
12764 * PIO send contexts
12766 ret = init_sc_pools_and_sizes(dd);
12767 if (ret >= 0) { /* success */
12768 dd->num_send_contexts = ret;
12771 "send contexts: chip %d, used %d (kernel %d, ack %d, user %d)\n",
12772 dd->chip_send_contexts,
12773 dd->num_send_contexts,
12774 dd->sc_sizes[SC_KERNEL].count,
12775 dd->sc_sizes[SC_ACK].count,
12776 dd->sc_sizes[SC_USER].count);
12777 ret = 0; /* success */
12784 * Set the device/port partition key table. The MAD code
12785 * will ensure that, at least, the partial management
12786 * partition key is present in the table.
12788 static void set_partition_keys(struct hfi1_pportdata *ppd)
12790 struct hfi1_devdata *dd = ppd->dd;
12794 dd_dev_info(dd, "Setting partition keys\n");
12795 for (i = 0; i < hfi1_get_npkeys(dd); i++) {
12796 reg |= (ppd->pkeys[i] &
12797 RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
12799 RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
12800 /* Each register holds 4 PKey values. */
12801 if ((i % 4) == 3) {
12802 write_csr(dd, RCV_PARTITION_KEY +
12803 ((i - 3) * 2), reg);
12808 /* Always enable HW pkeys check when pkeys table is set */
12809 add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
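/*
 * Worked example (illustrative): keys 0-3 are packed into the register
 * at RCV_PARTITION_KEY + 0 and keys 4-7 into RCV_PARTITION_KEY + 8,
 * each 16-bit key in bit position (i % 4) * 16, which is why the
 * accumulated value is flushed only when (i % 4) == 3.
 */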
12813 * These CSRs and memories are uninitialized on reset and must be
12814 * written before reading to set the ECC/parity bits.
12816 * NOTE: All user context CSRs that are not mmaped write-only
12817 * (e.g. the TID flows) must be initialized even if the driver never
12820 static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
12825 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
12826 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
12828 /* SendCtxtCreditReturnAddr */
12829 for (i = 0; i < dd->chip_send_contexts; i++)
12830 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
12832 /* PIO Send buffers */
12833 /* SDMA Send buffers */
12835 * These are not normally read, and (presently) have no method
	 * to be read, so they are not pre-initialized
12840 /* RcvHdrTailAddr */
12841 /* RcvTidFlowTable */
12842 for (i = 0; i < dd->chip_rcv_contexts; i++) {
12843 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
12844 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
12845 for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
12846 write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0);
12850 for (i = 0; i < dd->chip_rcv_array_count; i++)
12851 write_csr(dd, RCV_ARRAY + (8 * i),
12852 RCV_ARRAY_RT_WRITE_ENABLE_SMASK);
12854 /* RcvQPMapTable */
12855 for (i = 0; i < 32; i++)
12856 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
12860 * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
12862 static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
12865 unsigned long timeout;
12868 /* is the condition present? */
12869 reg = read_csr(dd, CCE_STATUS);
12870 if ((reg & status_bits) == 0)
12873 /* clear the condition */
12874 write_csr(dd, CCE_CTRL, ctrl_bits);
12876 /* wait for the condition to clear */
12877 timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
12879 reg = read_csr(dd, CCE_STATUS);
12880 if ((reg & status_bits) == 0)
12882 if (time_after(jiffies, timeout)) {
12884 "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
12885 status_bits, reg & status_bits);
12892 /* set CCE CSRs to chip reset defaults */
12893 static void reset_cce_csrs(struct hfi1_devdata *dd)
12897 /* CCE_REVISION read-only */
12898 /* CCE_REVISION2 read-only */
12899 /* CCE_CTRL - bits clear automatically */
12900 /* CCE_STATUS read-only, use CceCtrl to clear */
12901 clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
12902 clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
12903 clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
12904 for (i = 0; i < CCE_NUM_SCRATCH; i++)
12905 write_csr(dd, CCE_SCRATCH + (8 * i), 0);
12906 /* CCE_ERR_STATUS read-only */
12907 write_csr(dd, CCE_ERR_MASK, 0);
12908 write_csr(dd, CCE_ERR_CLEAR, ~0ull);
12909 /* CCE_ERR_FORCE leave alone */
12910 for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
12911 write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
12912 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
12913 /* CCE_PCIE_CTRL leave alone */
12914 for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
12915 write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
12916 write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
12917 CCE_MSIX_TABLE_UPPER_RESETCSR);
12919 for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
12920 /* CCE_MSIX_PBA read-only */
12921 write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
12922 write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
12924 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
12925 write_csr(dd, CCE_INT_MAP, 0);
12926 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
12927 /* CCE_INT_STATUS read-only */
12928 write_csr(dd, CCE_INT_MASK + (8 * i), 0);
12929 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
12930 /* CCE_INT_FORCE leave alone */
12931 /* CCE_INT_BLOCKED read-only */
12933 for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
12934 write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
12937 /* set ASIC CSRs to chip reset defaults */
12938 static void reset_asic_csrs(struct hfi1_devdata *dd)
12943 * If the HFIs are shared between separate nodes or VMs,
12944 * then more will need to be done here. One idea is a module
12945 * parameter that returns early, letting the first power-on or
12946 * a known first load do the reset and blocking all others.
12949 if (!(dd->flags & HFI1_DO_INIT_ASIC))
12952 if (dd->icode != ICODE_FPGA_EMULATION) {
12953 /* emulation does not have an SBus - leave these alone */
12955 * All writes to ASIC_CFG_SBUS_REQUEST do something.
12957 * o The reset is not zero if aimed at the core. See the
12958 * SBus documentation for details.
12959 * o If the SBus firmware has been updated (e.g. by the BIOS),
12960 * will the reset revert that?
12962 /* ASIC_CFG_SBUS_REQUEST leave alone */
12963 write_csr(dd, ASIC_CFG_SBUS_EXECUTE, 0);
12965 /* ASIC_SBUS_RESULT read-only */
12966 write_csr(dd, ASIC_STS_SBUS_COUNTERS, 0);
12967 for (i = 0; i < ASIC_NUM_SCRATCH; i++)
12968 write_csr(dd, ASIC_CFG_SCRATCH + (8 * i), 0);
12969 write_csr(dd, ASIC_CFG_MUTEX, 0); /* this will clear it */
12971 /* We might want to retain this state across FLR if we ever use it */
12972 write_csr(dd, ASIC_CFG_DRV_STR, 0);
12974 /* ASIC_CFG_THERM_POLL_EN leave alone */
12975 /* ASIC_STS_THERM read-only */
12976 /* ASIC_CFG_RESET leave alone */
12978 write_csr(dd, ASIC_PCIE_SD_HOST_CMD, 0);
12979 /* ASIC_PCIE_SD_HOST_STATUS read-only */
12980 write_csr(dd, ASIC_PCIE_SD_INTRPT_DATA_CODE, 0);
12981 write_csr(dd, ASIC_PCIE_SD_INTRPT_ENABLE, 0);
12982 /* ASIC_PCIE_SD_INTRPT_PROGRESS read-only */
12983 write_csr(dd, ASIC_PCIE_SD_INTRPT_STATUS, ~0ull); /* clear */
12984 /* ASIC_HFI0_PCIE_SD_INTRPT_RSPD_DATA read-only */
12985 /* ASIC_HFI1_PCIE_SD_INTRPT_RSPD_DATA read-only */
12986 for (i = 0; i < 16; i++)
12987 write_csr(dd, ASIC_PCIE_SD_INTRPT_LIST + (8 * i), 0);
12989 /* ASIC_GPIO_IN read-only */
12990 write_csr(dd, ASIC_GPIO_OE, 0);
12991 write_csr(dd, ASIC_GPIO_INVERT, 0);
12992 write_csr(dd, ASIC_GPIO_OUT, 0);
12993 write_csr(dd, ASIC_GPIO_MASK, 0);
12994 /* ASIC_GPIO_STATUS read-only */
12995 write_csr(dd, ASIC_GPIO_CLEAR, ~0ull);
12996 /* ASIC_GPIO_FORCE leave alone */
12998 /* ASIC_QSFP1_IN read-only */
12999 write_csr(dd, ASIC_QSFP1_OE, 0);
13000 write_csr(dd, ASIC_QSFP1_INVERT, 0);
13001 write_csr(dd, ASIC_QSFP1_OUT, 0);
13002 write_csr(dd, ASIC_QSFP1_MASK, 0);
13003 /* ASIC_QSFP1_STATUS read-only */
13004 write_csr(dd, ASIC_QSFP1_CLEAR, ~0ull);
13005 /* ASIC_QSFP1_FORCE leave alone */
13007 /* ASIC_QSFP2_IN read-only */
13008 write_csr(dd, ASIC_QSFP2_OE, 0);
13009 write_csr(dd, ASIC_QSFP2_INVERT, 0);
13010 write_csr(dd, ASIC_QSFP2_OUT, 0);
13011 write_csr(dd, ASIC_QSFP2_MASK, 0);
13012 /* ASIC_QSFP2_STATUS read-only */
13013 write_csr(dd, ASIC_QSFP2_CLEAR, ~0ull);
13014 /* ASIC_QSFP2_FORCE leave alone */
13016 write_csr(dd, ASIC_EEP_CTL_STAT, ASIC_EEP_CTL_STAT_RESETCSR);
13017 /* this also writes a NOP command, clearing paging mode */
13018 write_csr(dd, ASIC_EEP_ADDR_CMD, 0);
13019 write_csr(dd, ASIC_EEP_DATA, 0);
13022 /* set MISC CSRs to chip reset defaults */
13023 static void reset_misc_csrs(struct hfi1_devdata *dd)
13027 for (i = 0; i < 32; i++) {
13028 write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
13029 write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
13030 write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
13033 * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
	 * only be written in 128-byte chunks
13036 /* init RSA engine to clear lingering errors */
13037 write_csr(dd, MISC_CFG_RSA_CMD, 1);
13038 write_csr(dd, MISC_CFG_RSA_MU, 0);
13039 write_csr(dd, MISC_CFG_FW_CTRL, 0);
13040 /* MISC_STS_8051_DIGEST read-only */
13041 /* MISC_STS_SBM_DIGEST read-only */
13042 /* MISC_STS_PCIE_DIGEST read-only */
13043 /* MISC_STS_FAB_DIGEST read-only */
13044 /* MISC_ERR_STATUS read-only */
13045 write_csr(dd, MISC_ERR_MASK, 0);
13046 write_csr(dd, MISC_ERR_CLEAR, ~0ull);
13047 /* MISC_ERR_FORCE leave alone */
13050 /* set TXE CSRs to chip reset defaults */
13051 static void reset_txe_csrs(struct hfi1_devdata *dd)
13058 write_csr(dd, SEND_CTRL, 0);
13059 __cm_reset(dd, 0); /* reset CM internal state */
13060 /* SEND_CONTEXTS read-only */
13061 /* SEND_DMA_ENGINES read-only */
13062 /* SEND_PIO_MEM_SIZE read-only */
13063 /* SEND_DMA_MEM_SIZE read-only */
13064 write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
13065 pio_reset_all(dd); /* SEND_PIO_INIT_CTXT */
13066 /* SEND_PIO_ERR_STATUS read-only */
13067 write_csr(dd, SEND_PIO_ERR_MASK, 0);
13068 write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
13069 /* SEND_PIO_ERR_FORCE leave alone */
13070 /* SEND_DMA_ERR_STATUS read-only */
13071 write_csr(dd, SEND_DMA_ERR_MASK, 0);
13072 write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
13073 /* SEND_DMA_ERR_FORCE leave alone */
13074 /* SEND_EGRESS_ERR_STATUS read-only */
13075 write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
13076 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
13077 /* SEND_EGRESS_ERR_FORCE leave alone */
13078 write_csr(dd, SEND_BTH_QP, 0);
13079 write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
13080 write_csr(dd, SEND_SC2VLT0, 0);
13081 write_csr(dd, SEND_SC2VLT1, 0);
13082 write_csr(dd, SEND_SC2VLT2, 0);
13083 write_csr(dd, SEND_SC2VLT3, 0);
13084 write_csr(dd, SEND_LEN_CHECK0, 0);
13085 write_csr(dd, SEND_LEN_CHECK1, 0);
13086 /* SEND_ERR_STATUS read-only */
13087 write_csr(dd, SEND_ERR_MASK, 0);
13088 write_csr(dd, SEND_ERR_CLEAR, ~0ull);
13089 /* SEND_ERR_FORCE read-only */
13090 for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
13091 write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
13092 for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
13093 write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
13094 for (i = 0; i < dd->chip_send_contexts / NUM_CONTEXTS_PER_SET; i++)
13095 write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
13096 for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
13097 write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
13098 for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
13099 write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
13100 write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
13101 write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR);
13102 /* SEND_CM_CREDIT_USED_STATUS read-only */
13103 write_csr(dd, SEND_CM_TIMER_CTRL, 0);
13104 write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
13105 write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
13106 write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
13107 write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
13108 for (i = 0; i < TXE_NUM_DATA_VL; i++)
13109 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
13110 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
13111 /* SEND_CM_CREDIT_USED_VL read-only */
13112 /* SEND_CM_CREDIT_USED_VL15 read-only */
13113 /* SEND_EGRESS_CTXT_STATUS read-only */
13114 /* SEND_EGRESS_SEND_DMA_STATUS read-only */
13115 write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
13116 /* SEND_EGRESS_ERR_INFO read-only */
13117 /* SEND_EGRESS_ERR_SOURCE read-only */
13120 * TXE Per-Context CSRs
13122 for (i = 0; i < dd->chip_send_contexts; i++) {
13123 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13124 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
13125 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13126 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
13127 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
13128 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
13129 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
13130 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
13131 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
13132 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13133 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
13134 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
13138 * TXE Per-SDMA CSRs
13140 for (i = 0; i < dd->chip_sdma_engines; i++) {
13141 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13142 /* SEND_DMA_STATUS read-only */
13143 write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
13144 write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
13145 write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
13146 /* SEND_DMA_HEAD read-only */
13147 write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
13148 write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
13149 /* SEND_DMA_IDLE_CNT read-only */
13150 write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
13151 write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
13152 /* SEND_DMA_DESC_FETCHED_CNT read-only */
13153 /* SEND_DMA_ENG_ERR_STATUS read-only */
13154 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
13155 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
13156 /* SEND_DMA_ENG_ERR_FORCE leave alone */
13157 write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
13158 write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
13159 write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
13160 write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
13161 write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
13162 write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
13163 write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
13169 * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
13171 static void init_rbufs(struct hfi1_devdata *dd)
	 * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
	 * clear.
13182 reg = read_csr(dd, RCV_STATUS);
13183 if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
13184 | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
13187 * Give up after 1ms - maximum wait time.
13189 * RBuf size is 148KiB. Slowest possible is PCIe Gen1 x1 at
13190 * 250MB/s bandwidth. Lower rate to 66% for overhead to get:
		 * 148 KiB / (66% * 250 MB/s) = 920 us
13193 if (count++ > 500) {
13195 "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
13199 udelay(2); /* do not busy-wait the CSR */
13202 /* start the init - expect RcvCtrl to be 0 */
13203 write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
	 * Read to force the write of RcvCtrl.RxRbufInit. There is a brief
	 * period after the write before RcvStatus.RxRbufInitDone is valid.
	 * The delay in the first run through the loop below is sufficient and
	 * required before the first read of RcvStatus.RxRbufInitDone.
13211 read_csr(dd, RCV_CTRL);
13213 /* wait for the init to finish */
13216 /* delay is required first time through - see above */
13217 udelay(2); /* do not busy-wait the CSR */
13218 reg = read_csr(dd, RCV_STATUS);
13219 if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
13222 /* give up after 100us - slowest possible at 33MHz is 73us */
13223 if (count++ > 50) {
13225 "%s: RcvStatus.RxRbufInit not set, continuing\n",
13232 /* set RXE CSRs to chip reset defaults */
13233 static void reset_rxe_csrs(struct hfi1_devdata *dd)
13240 write_csr(dd, RCV_CTRL, 0);
13242 /* RCV_STATUS read-only */
13243 /* RCV_CONTEXTS read-only */
13244 /* RCV_ARRAY_CNT read-only */
13245 /* RCV_BUF_SIZE read-only */
13246 write_csr(dd, RCV_BTH_QP, 0);
13247 write_csr(dd, RCV_MULTICAST, 0);
13248 write_csr(dd, RCV_BYPASS, 0);
13249 write_csr(dd, RCV_VL15, 0);
13250 /* this is a clear-down */
13251 write_csr(dd, RCV_ERR_INFO,
13252 RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
13253 /* RCV_ERR_STATUS read-only */
13254 write_csr(dd, RCV_ERR_MASK, 0);
13255 write_csr(dd, RCV_ERR_CLEAR, ~0ull);
13256 /* RCV_ERR_FORCE leave alone */
13257 for (i = 0; i < 32; i++)
13258 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13259 for (i = 0; i < 4; i++)
13260 write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
13261 for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
13262 write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
13263 for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
13264 write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
13265 for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++) {
13266 write_csr(dd, RCV_RSM_CFG + (8 * i), 0);
13267 write_csr(dd, RCV_RSM_SELECT + (8 * i), 0);
13268 write_csr(dd, RCV_RSM_MATCH + (8 * i), 0);
13270 for (i = 0; i < 32; i++)
13271 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
13274 * RXE Kernel and User Per-Context CSRs
13276 for (i = 0; i < dd->chip_rcv_contexts; i++) {
13278 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13279 /* RCV_CTXT_STATUS read-only */
13280 write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
13281 write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
13282 write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
13283 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13284 write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
13285 write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
13286 write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
13287 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13288 write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
13289 write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
13292 /* RCV_HDR_TAIL read-only */
13293 write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
13294 /* RCV_EGR_INDEX_TAIL read-only */
13295 write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
13296 /* RCV_EGR_OFFSET_TAIL read-only */
13297 for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
13298 write_uctxt_csr(dd, i,
13299 RCV_TID_FLOW_TABLE + (8 * j), 0);
13305 * Set sc2vl tables.
13307 * They power on to zeros, so to avoid send context errors
13308 * they need to be set:
13310 * SC 0-7 -> VL 0-7 (respectively)
13315 static void init_sc2vl_tables(struct hfi1_devdata *dd)
13318 /* init per architecture spec, constrained by hardware capability */
13320 /* HFI maps sent packets */
13321 write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
13327 write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
13333 write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
13339 write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
13346 /* DC maps received packets */
13347 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
13349 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
13350 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
13351 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
13353 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
13354 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
13356 /* initialize the cached sc2vl values consistently with h/w */
13357 for (i = 0; i < 32; i++) {
13358 if (i < 8 || i == 15)
13359 *((u8 *)(dd->sc2vl) + i) = (u8)i;
13361 *((u8 *)(dd->sc2vl) + i) = 0;
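	/*
	 * Worked example (illustrative): after the loop above, byte 5
	 * of the cached table holds 5 (a data SC), byte 15 holds 15
	 * (the VL15 management SC), and byte 20 holds 0, mirroring the
	 * SEND_SC2VLT and DCC table programming above.
	 */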
13366 * Read chip sizes and then reset parts to sane, disabled, values. We cannot
13367 * depend on the chip going through a power-on reset - a driver may be loaded
13368 * and unloaded many times.
13370 * Do not write any CSR values to the chip in this routine - there may be
13371 * a reset following the (possible) FLR in this routine.
13374 static void init_chip(struct hfi1_devdata *dd)
13379 * Put the HFI CSRs in a known state.
13380 * Combine this with a DC reset.
13382 * Stop the device from doing anything while we do a
13383 * reset. We know there are no other active users of
	 * the device since we are now in charge. Turn off
	 * all outbound and inbound traffic and make sure
13386 * the device does not generate any interrupts.
13389 /* disable send contexts and SDMA engines */
13390 write_csr(dd, SEND_CTRL, 0);
13391 for (i = 0; i < dd->chip_send_contexts; i++)
13392 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13393 for (i = 0; i < dd->chip_sdma_engines; i++)
13394 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13395 /* disable port (turn off RXE inbound traffic) and contexts */
13396 write_csr(dd, RCV_CTRL, 0);
13397 for (i = 0; i < dd->chip_rcv_contexts; i++)
		write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13399 /* mask all interrupt sources */
13400 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13401 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
13404 * DC Reset: do a full DC reset before the register clear.
13405 * A recommended length of time to hold is one CSR read,
13406 * so reread the CceDcCtrl. Then, hold the DC in reset
13407 * across the clear.
13409 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
13410 (void)read_csr(dd, CCE_DC_CTRL);
13414 * A FLR will reset the SPC core and part of the PCIe.
13415 * The parts that need to be restored have already been
13418 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13420 /* do the FLR, the DC reset will remain */
13423 /* restore command and BARs */
13424 restore_pci_variables(dd);
13427 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13429 restore_pci_variables(dd);
13432 reset_asic_csrs(dd);
13434 dd_dev_info(dd, "Resetting CSRs with writes\n");
13435 reset_cce_csrs(dd);
13436 reset_txe_csrs(dd);
13437 reset_rxe_csrs(dd);
13438 reset_asic_csrs(dd);
13439 reset_misc_csrs(dd);
13441 /* clear the DC reset */
13442 write_csr(dd, CCE_DC_CTRL, 0);
13444 /* Set the LED off */
13448 * Clear the QSFP reset.
13449 * An FLR enforces a 0 on all out pins. The driver does not touch
13450 * ASIC_QSFPn_OUT otherwise. This leaves RESET_N low and
	 * anything plugged in constantly in reset, if it pays attention
	 * to RESET_N. Prime examples of this are optical cables. Set all pins high.
13454 * I2CCLK and I2CDAT will change per direction, and INT_N and
13455 * MODPRS_N are input only and their value is ignored.
13457 write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
13458 write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
13461 static void init_early_variables(struct hfi1_devdata *dd)
13465 /* assign link credit variables */
13467 dd->link_credits = CM_GLOBAL_CREDITS;
13469 dd->link_credits--;
13470 dd->vcu = cu_to_vcu(hfi1_cu);
13471 /* enough room for 8 MAD packets plus header - 17K */
13472 dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
13473 if (dd->vl15_init > dd->link_credits)
13474 dd->vl15_init = dd->link_credits;
13476 write_uninitialized_csrs_and_memories(dd);
13478 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
13479 for (i = 0; i < dd->num_pports; i++) {
13480 struct hfi1_pportdata *ppd = &dd->pport[i];
13482 set_partition_keys(ppd);
13484 init_sc2vl_tables(dd);
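/*
 * Worked example (illustrative, assuming vau_to_au(0) == 8 bytes): the
 * "8 MAD packets plus header" sizing above works out to
 * (8 * (2048 + 128)) / 8 = 2176 AU credits for VL15, which is then
 * clamped to the link's global credit count.
 */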
13487 static void init_kdeth_qp(struct hfi1_devdata *dd)
13489 /* user changed the KDETH_QP */
13490 if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
13491 /* out of range or illegal value */
13492 dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
13495 if (kdeth_qp == 0) /* not set, or failed range check */
13496 kdeth_qp = DEFAULT_KDETH_QP;
13498 write_csr(dd, SEND_BTH_QP,
13499 (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK) <<
13500 SEND_BTH_QP_KDETH_QP_SHIFT);
13502 write_csr(dd, RCV_BTH_QP,
13503 (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK) <<
13504 RCV_BTH_QP_KDETH_QP_SHIFT);
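/*
 * Worked example (illustrative): a kdeth_qp prefix of 0x80 (the
 * default) means QPNs 0x800000 through 0x80ffff are treated as KDETH
 * packets; the same 8-bit prefix is programmed into both the send
 * (SEND_BTH_QP) and receive (RCV_BTH_QP) sides above.
 */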
13509 * @dd - device data
13510 * @first_ctxt - first context
 * @last_ctxt - last context
 *
 * This routine sets the qpn mapping table that
13514 * is indexed by qpn[8:1].
13516 * The routine will round robin the 256 settings
13517 * from first_ctxt to last_ctxt.
13519 * The first/last looks ahead to having specialized
13520 * receive contexts for mgmt and bypass. Normal
 * verbs traffic will be assumed to be on a range
13522 * of receive contexts.
13524 static void init_qpmap_table(struct hfi1_devdata *dd,
13529 u64 regno = RCV_QP_MAP_TABLE;
13531 u64 ctxt = first_ctxt;
13533 for (i = 0; i < 256;) {
13534 reg |= ctxt << (8 * (i % 8));
13537 if (ctxt > last_ctxt)
13540 write_csr(dd, regno, reg);
13546 write_csr(dd, regno, reg);
13548 add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
13549 | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
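/*
 * Worked example (illustrative): with first_ctxt = 1 and last_ctxt = 3
 * the loop above fills the 256 one-byte entries with the repeating
 * pattern 1, 2, 3, 1, 2, ..., so a packet whose qpn[8:1] == 4 is
 * steered to receive context 2.
 */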
13553 * init_qos - init RX qos
13554 * @dd - device data
13557 * This routine initializes Rule 0 and the
13558 * RSM map table to implement qos.
13560 * If all of the limit tests succeed,
13561 * qos is applied based on the array
 * interpretation of krcvqs where entry 0 is VL0.
13565 * The number of vl bits (n) and the number of qpn
13566 * bits (m) are computed to feed both the RSM map table
13567 * and the single rule.
13570 static void init_qos(struct hfi1_devdata *dd, u32 first_ctxt)
13573 unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
13576 u8 rxcontext = is_ax(dd) ? 0 : 0xff; /* 0 is default if a0 ver. */
13579 if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
13583 for (i = 0; i < min_t(unsigned, num_vls, krcvqsset); i++)
13584 if (krcvqs[i] > max_by_vl)
13585 max_by_vl = krcvqs[i];
13586 if (max_by_vl > 32)
13588 qpns_per_vl = __roundup_pow_of_two(max_by_vl);
13589 /* determine bits vl */
13590 n = ilog2(num_vls);
13591 /* determine bits for qpn */
13592 m = ilog2(qpns_per_vl);
13595 if (num_vls * qpns_per_vl > dd->chip_rcv_contexts)
13597 rsmmap = kmalloc_array(NUM_MAP_REGS, sizeof(u64), GFP_KERNEL);
13600 memset(rsmmap, rxcontext, NUM_MAP_REGS * sizeof(u64));
13601 /* init the local copy of the table */
13602 for (i = 0, ctxt = first_ctxt; i < num_vls; i++) {
13605 for (qpn = 0, tctxt = ctxt;
13606 krcvqs[i] && qpn < qpns_per_vl; qpn++) {
13607 unsigned idx, regoff, regidx;
13609 /* generate index <= 128 */
13610 idx = (qpn << n) ^ i;
13611 regoff = (idx % 8) * 8;
13613 reg = rsmmap[regidx];
13614 /* replace 0xff with context number */
13615 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
13617 reg |= (u64)(tctxt++) << regoff;
13618 rsmmap[regidx] = reg;
13619 if (tctxt == ctxt + krcvqs[i])
13624 /* flush cached copies to chip */
13625 for (i = 0; i < NUM_MAP_REGS; i++)
13626 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rsmmap[i]);
13628 write_csr(dd, RCV_RSM_CFG /* + (8 * 0) */,
13629 RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_MASK <<
13630 RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_SHIFT |
13631 2ull << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
13632 write_csr(dd, RCV_RSM_SELECT /* + (8 * 0) */,
13633 LRH_BTH_MATCH_OFFSET << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
13634 LRH_SC_MATCH_OFFSET << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
13635 LRH_SC_SELECT_OFFSET << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
13636 ((u64)n) << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
13637 QPN_SELECT_OFFSET << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
13638 ((u64)m + (u64)n) << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
13639 write_csr(dd, RCV_RSM_MATCH /* + (8 * 0) */,
13640 LRH_BTH_MASK << RCV_RSM_MATCH_MASK1_SHIFT |
13641 LRH_BTH_VALUE << RCV_RSM_MATCH_VALUE1_SHIFT |
13642 LRH_SC_MASK << RCV_RSM_MATCH_MASK2_SHIFT |
13643 LRH_SC_VALUE << RCV_RSM_MATCH_VALUE2_SHIFT);
13645 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
13647 /* map everything else to first context */
13648 init_qpmap_table(dd, FIRST_KERNEL_KCTXT, MIN_KERNEL_KCTXTS - 1);
13649 dd->qos_shift = n + 1;
13653 init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
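/*
 * Worked example (illustrative): with num_vls = 8 (n = 3) and a per-VL
 * maximum of 4 kernel receive queues (qpns_per_vl = 4, m = 2), QPN
 * slot qpn = 2 on VL i = 5 selects map index (2 << 3) ^ 5 = 21 in the
 * fill loop above, i.e. byte 5 (regoff 40) of rsmmap[2], and
 * qos_shift becomes n + 1 = 4.
 */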
13656 static void init_rxe(struct hfi1_devdata *dd)
13658 /* enable all receive errors */
13659 write_csr(dd, RCV_ERR_MASK, ~0ull);
13660 /* setup QPN map table - start where VL15 context leaves off */
13661 init_qos(dd, dd->n_krcv_queues > MIN_KERNEL_KCTXTS ?
13662 MIN_KERNEL_KCTXTS : 0);
13664 * make sure RcvCtrl.RcvWcb <= PCIe Device Control
13665 * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
13666 * space, PciCfgCap2.MaxPayloadSize in HFI). There is only one
13667 * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
13668 * Max_PayLoad_Size set to its minimum of 128.
13670 * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
13671 * (64 bytes). Max_Payload_Size is possibly modified upward in
13672 * tune_pcie_caps() which is called after this routine.
13676 static void init_other(struct hfi1_devdata *dd)
13678 /* enable all CCE errors */
13679 write_csr(dd, CCE_ERR_MASK, ~0ull);
13680 /* enable *some* Misc errors */
13681 write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
13682 /* enable all DC errors, except LCB */
13683 write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
13684 write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
 * Fill out the given AU table using the given CU. A CU is defined in terms
 * of AUs. The table is an encoding: given the index, how many AUs does that
 * index represent?
13692 * NOTE: Assumes that the register layout is the same for the
13693 * local and remote tables.
13695 static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
13696 u32 csr0to3, u32 csr4to7)
13698 write_csr(dd, csr0to3,
13699 0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT |
13700 1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT |
13702 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT |
13704 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
13705 write_csr(dd, csr4to7,
13707 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT |
13709 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT |
13711 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT |
13713 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
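/*
 * Worked example (illustrative, assuming table entries 2-7 scale as
 * 2 * cu through 64 * cu): with cu == 1 the eight entries encode 0, 1,
 * 2, 4, 8, 16, 32 and 64 AUs for indices 0 through 7.
 */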
13716 static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
13718 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
13719 SEND_CM_LOCAL_AU_TABLE4_TO7);
13722 void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
13724 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
13725 SEND_CM_REMOTE_AU_TABLE4_TO7);
13728 static void init_txe(struct hfi1_devdata *dd)
13732 /* enable all PIO, SDMA, general, and Egress errors */
13733 write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
13734 write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
13735 write_csr(dd, SEND_ERR_MASK, ~0ull);
13736 write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
13738 /* enable all per-context and per-SDMA engine errors */
13739 for (i = 0; i < dd->chip_send_contexts; i++)
13740 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
13741 for (i = 0; i < dd->chip_sdma_engines; i++)
13742 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
13744 /* set the local CU to AU mapping */
13745 assign_local_cm_au_table(dd, dd->vcu);
	 * Set a reasonable default for the Credit Return Timer.
	 * Don't set it on the simulator - it causes it to choke.
13751 if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
13752 write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
13755 int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt, u16 jkey)
13757 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
13762 if (!rcd || !rcd->sc) {
13766 sctxt = rcd->sc->hw_context;
13767 reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
13768 ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
13769 SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
13770 /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
13771 if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
13772 reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
13773 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
13775 * Enable send-side J_KEY integrity check, unless this is A0 h/w
13778 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13779 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
13780 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13783 /* Enable J_KEY check on receive context. */
13784 reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
13785 ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
13786 RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
13787 write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, reg);
13792 int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt)
13794 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
13799 if (!rcd || !rcd->sc) {
13803 sctxt = rcd->sc->hw_context;
13804 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
13806 * Disable send-side J_KEY integrity check, unless this is A0 h/w.
	 * This check would not have been enabled for A0 h/w; see
	 * hfi1_set_ctxt_jkey() above.
13811 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13812 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
13813 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13815 /* Turn off the J_KEY on the receive side */
13816 write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, 0);
13821 int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt, u16 pkey)
13823 struct hfi1_ctxtdata *rcd;
13828 if (ctxt < dd->num_rcv_contexts) {
13829 rcd = dd->rcd[ctxt];
13834 if (!rcd || !rcd->sc) {
13838 sctxt = rcd->sc->hw_context;
13839 reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
13840 SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
13841 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
13842 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13843 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
13844 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13849 int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt)
13851 struct hfi1_ctxtdata *rcd;
13856 if (ctxt < dd->num_rcv_contexts) {
13857 rcd = dd->rcd[ctxt];
13862 if (!rcd || !rcd->sc) {
13866 sctxt = rcd->sc->hw_context;
13867 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13868 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
13869 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13870 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
 * Start doing the clean up of the chip. Our clean up happens in multiple
13877 * stages and this is just the first.
13879 void hfi1_start_cleanup(struct hfi1_devdata *dd)
13884 clean_up_interrupts(dd);
#define HFI_BASE_GUID(dev) \
	((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
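/*
 * For clarity: the two HFIs on one ASIC have GUIDs that differ only in
 * the bit at GUID_HFI_INDEX_SHIFT, so masking that bit off yields a
 * per-ASIC identifier; asic_should_init() below compares these base
 * values to find the peer device sharing the ASIC.
 */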
/*
 * Certain chip functions need to be initialized only once per asic
 * instead of per-device. This function finds the peer device and
 * checks whether that chip initialization needs to be done by this
 * device.
 */
static void asic_should_init(struct hfi1_devdata *dd)
{
	unsigned long flags;
	struct hfi1_devdata *tmp, *peer = NULL;

	spin_lock_irqsave(&hfi1_devs_lock, flags);
	/* Find our peer device */
	list_for_each_entry(tmp, &hfi1_dev_list, list) {
		if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
		    dd->unit != tmp->unit) {
			peer = tmp;
			break;
		}
	}

	/*
	 * "Claim" the ASIC for initialization if it hasn't been
	 * "claimed" yet.
	 */
	if (!peer || !(peer->flags & HFI1_DO_INIT_ASIC))
		dd->flags |= HFI1_DO_INIT_ASIC;
	spin_unlock_irqrestore(&hfi1_devs_lock, flags);
}
/*
 * Set dd->boardname. Use a generic name if a name is not returned from
 * EFI variable space.
 *
 * Return 0 on success, -ENOMEM if space could not be allocated.
 */
static int obtain_boardname(struct hfi1_devdata *dd)
{
	/* generic board description */
	const char generic[] =
		"Intel Omni-Path Host Fabric Interface Adapter 100 Series";
	unsigned long size;
	int ret;

	ret = read_hfi1_efi_var(dd, "description", &size,
				(void **)&dd->boardname);
	if (ret) {
		dd_dev_info(dd, "Board description not found\n");
		/* use generic description */
		dd->boardname = kstrdup(generic, GFP_KERNEL);
		if (!dd->boardname)
			return -ENOMEM;
	}
	return 0;
}
/*
 * Check the interrupt registers to make sure that they are mapped correctly.
 * It is intended to help user identify any mismapping by VMM when the driver
 * is running in a VM. This function should only be called before interrupt
 * is set up properly.
 *
 * Return 0 on success, -EINVAL on failure.
 */
static int check_int_registers(struct hfi1_devdata *dd)
{
	u64 reg;
	u64 all_bits = ~(u64)0;
	u64 mask;

	/* Clear CceIntMask[0] to avoid raising any interrupts */
	mask = read_csr(dd, CCE_INT_MASK);
	write_csr(dd, CCE_INT_MASK, 0ull);
	reg = read_csr(dd, CCE_INT_MASK);
	if (reg)
		goto err_exit;

	/* Clear all interrupt status bits */
	write_csr(dd, CCE_INT_CLEAR, all_bits);
	reg = read_csr(dd, CCE_INT_STATUS);
	if (reg)
		goto err_exit;

	/* Set all interrupt status bits */
	write_csr(dd, CCE_INT_FORCE, all_bits);
	reg = read_csr(dd, CCE_INT_STATUS);
	if (reg != all_bits)
		goto err_exit;

	/* Restore the interrupt mask */
	write_csr(dd, CCE_INT_CLEAR, all_bits);
	write_csr(dd, CCE_INT_MASK, mask);

	return 0;
err_exit:
	write_csr(dd, CCE_INT_MASK, mask);
	dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n");
	return -EINVAL;
}
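/*
 * Background (not from the original comments): reads of PCIe space that
 * a VMM failed to map typically return all 1s, and a page mapped to the
 * wrong CSRs will fail one of the three probes above (mask write/read,
 * status clear, status force), so a mismapped BAR is detected before
 * interrupt setup depends on these registers.
 */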
/**
 * Allocate and initialize the device structure for the hfi.
 * @pdev: the pci_dev for hfi1_ib device
 * @ent: pci_device_id struct for this dev
 *
 * Also allocates, initializes, and returns the devdata struct for this
 * device.
 *
 * This is global, and is called directly at init to set up the
 * chip-specific function pointers for later use.
 */
struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct hfi1_devdata *dd;
	struct hfi1_pportdata *ppd;
	u64 reg;
	int i, vl, ret;
	static const char * const inames[] = { /* implementation names */
		"RTL silicon",
		"RTL VCS simulation",
		"RTL FPGA emulation",
		"Functional simulator"
	};
	struct pci_dev *parent = pdev->bus->self;

	dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
				sizeof(struct hfi1_pportdata));
	if (IS_ERR(dd))
		goto bail;
	ppd = dd->pport;
	for (i = 0; i < dd->num_pports; i++, ppd++) {
		/* init common fields */
		hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
		/* DC supports 4 link widths */
		ppd->link_width_supported =
			OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
			OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
		ppd->link_width_downgrade_supported =
			ppd->link_width_supported;
		/* start out enabling only 4X */
		ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
		ppd->link_width_downgrade_enabled =
			ppd->link_width_downgrade_supported;
		/* link width active is 0 when link is down */
		/* link width downgrade active is 0 when link is down */

		if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
		    num_vls > HFI1_MAX_VLS_SUPPORTED) {
			hfi1_early_err(&pdev->dev,
				       "Invalid num_vls %u, using %u VLs\n",
				       num_vls, HFI1_MAX_VLS_SUPPORTED);
			num_vls = HFI1_MAX_VLS_SUPPORTED;
		}
		ppd->vls_supported = num_vls;
		ppd->vls_operational = ppd->vls_supported;
		ppd->actual_vls_operational = ppd->vls_supported;
		/* Set the default MTU. */
		for (vl = 0; vl < num_vls; vl++)
			dd->vld[vl].mtu = hfi1_max_mtu;
		dd->vld[15].mtu = MAX_MAD_PACKET;
		/*
		 * Set the initial values to reasonable defaults; they will
		 * be set for real when the link comes up.
		 */
		ppd->lstate = IB_PORT_DOWN;
		ppd->overrun_threshold = 0x4;
		ppd->phy_error_threshold = 0xf;
		ppd->port_crc_mode_enabled = link_crc_mask;
		/* initialize supported LTP CRC mode */
		ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
		/* initialize enabled LTP CRC mode */
		ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
		/* start in offline */
		ppd->host_link_state = HLS_DN_OFFLINE;
		init_vl_arb_caches(ppd);
		ppd->last_pstate = 0xff; /* invalid value */
	}

	dd->link_default = HLS_DN_POLL;
	/*
	 * Do remaining PCIe setup and save PCIe values in dd.
	 * Any error printing is already done by the init code.
	 * On return, we have the chip mapped.
	 */
	ret = hfi1_pcie_ddinit(dd, pdev, ent);
	if (ret < 0)
		goto bail_free;

	/* verify that reads actually work, save revision for reset check */
	dd->revision = read_csr(dd, CCE_REVISION);
	if (dd->revision == ~(u64)0) {
		dd_dev_err(dd, "cannot read chip CSRs\n");
		ret = -EINVAL;
		goto bail_cleanup;
	}
	dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
			& CCE_REVISION_CHIP_REV_MAJOR_MASK;
	dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
			& CCE_REVISION_CHIP_REV_MINOR_MASK;
	/*
	 * Check interrupt registers mapping if the driver has no access to
	 * the upstream component. In this case, it is likely that the driver
	 * is running in a VM.
	 */
	if (!parent) {
		ret = check_int_registers(dd);
		if (ret)
			goto bail_cleanup;
	}
	/*
	 * obtain the hardware ID - NOT related to unit, which is a
	 * software enumeration
	 */
	reg = read_csr(dd, CCE_REVISION2);
	dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
					& CCE_REVISION2_HFI_ID_MASK;
	/* the variable size will remove unwanted bits */
	dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
	dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
	dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
		    dd->icode < ARRAY_SIZE(inames) ?
		    inames[dd->icode] : "unknown", (int)dd->irev);

	/* speeds the hardware can support */
	dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
	/* speeds allowed to run at */
	dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
	/* give a reasonable active value, will be set on link up */
	dd->pport->link_speed_active = OPA_LINK_SPEED_25G;

	dd->chip_rcv_contexts = read_csr(dd, RCV_CONTEXTS);
	dd->chip_send_contexts = read_csr(dd, SEND_CONTEXTS);
	dd->chip_sdma_engines = read_csr(dd, SEND_DMA_ENGINES);
	dd->chip_pio_mem_size = read_csr(dd, SEND_PIO_MEM_SIZE);
	dd->chip_sdma_mem_size = read_csr(dd, SEND_DMA_MEM_SIZE);

	/* fix up link widths for emulation _p */
	ppd = dd->pport;
	if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
		ppd->link_width_supported =
			ppd->link_width_enabled =
			ppd->link_width_downgrade_supported =
			ppd->link_width_downgrade_enabled =
				OPA_LINK_WIDTH_1X;
	}
	/* ensure num_vls isn't larger than number of sdma engines */
	if (HFI1_CAP_IS_KSET(SDMA) && num_vls > dd->chip_sdma_engines) {
		dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
			   num_vls, dd->chip_sdma_engines);
		num_vls = dd->chip_sdma_engines;
		ppd->vls_supported = dd->chip_sdma_engines;
		ppd->vls_operational = ppd->vls_supported;
	}
	/*
	 * Convert the ns parameter to the 64 * cclocks used in the CSR.
	 * Limit the max if larger than the field holds. If timeout is
	 * non-zero, then the calculated field will be at least 1.
	 *
	 * Must be after icode is set up - the cclock rate depends
	 * on knowing the hardware being used.
	 */
	dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
	if (dd->rcv_intr_timeout_csr >
			RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
		dd->rcv_intr_timeout_csr =
			RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
	else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
		dd->rcv_intr_timeout_csr = 1;
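	/*
	 * Worked example (clock rate assumed for illustration): with the
	 * default rcv_intr_timeout of 840 ns and a nominal 800 MHz cclock
	 * (1.25 ns/cycle), ns_to_cclock() yields 672 cycles, and
	 * 672 / 64 = 10, so the reload field would be programmed to 10.
	 */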
	/* needs to be done before we look for the peer device */
	read_guid(dd);

	/* should this device init the ASIC block? */
	asic_should_init(dd);

	/* obtain chip sizes, reset chip CSRs */
	init_chip(dd);

	/* read in the PCIe link speed information */
	ret = pcie_speeds(dd);
	if (ret)
		goto bail_cleanup;

	/* Needs to be called before hfi1_firmware_init */
	get_platform_config(dd);

	/* read in firmware */
	ret = hfi1_firmware_init(dd);
	if (ret)
		goto bail_cleanup;
	/*
	 * In general, the PCIe Gen3 transition must occur after the
	 * chip has been idled (so it won't initiate any PCIe transactions
	 * e.g. an interrupt) and before the driver changes any registers
	 * (the transition will reset the registers).
	 *
	 * In particular, place this call after:
	 * - init_chip()          - the chip will not initiate any PCIe
	 *                          transactions
	 * - pcie_speeds()        - reads the current link speed
	 * - hfi1_firmware_init() - the needed firmware is ready to be
	 *                          downloaded
	 */
	ret = do_pcie_gen3_transition(dd);
	if (ret)
		goto bail_cleanup;

	/* start setting dd values and adjusting CSRs */
	init_early_variables(dd);

	parse_platform_config(dd);

	ret = obtain_boardname(dd);
	if (ret)
		goto bail_cleanup;

	snprintf(dd->boardversion, BOARD_VERS_MAX,
		 "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
		 HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
		 (u32)dd->majrev,
		 (u32)dd->minrev,
		 (dd->revision >> CCE_REVISION_SW_SHIFT)
			& CCE_REVISION_SW_MASK);

	ret = set_up_context_variables(dd);
	if (ret)
		goto bail_cleanup;
	/* set initial RXE CSRs */
	init_rxe(dd);
	/* set initial TXE CSRs */
	init_txe(dd);
	/* set initial non-RXE, non-TXE CSRs */
	init_other(dd);
	/* set up KDETH QP prefix in both RX and TX CSRs */
	init_kdeth_qp(dd);

	ret = hfi1_dev_affinity_init(dd);
	if (ret)
		goto bail_cleanup;

	/* send contexts must be set up before receive contexts */
	ret = init_send_contexts(dd);
	if (ret)
		goto bail_cleanup;

	ret = hfi1_create_ctxts(dd);
	if (ret)
		goto bail_cleanup;

	dd->rcvhdrsize = DEFAULT_RCVHDRSIZE;
	/*
	 * rcd[0] is guaranteed to be valid by this point. Also, all
	 * contexts are using the same value, as per the module parameter.
	 */
	dd->rhf_offset = dd->rcd[0]->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
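	/*
	 * For clarity: sizeof(u64) / sizeof(u32) evaluates to 2, so
	 * rhf_offset is the entry size minus the two dwords occupied by
	 * the receive header flags (RHF) at the end of each header queue
	 * entry.
	 */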
	ret = init_pervl_scs(dd);
	if (ret)
		goto bail_cleanup;

	/* sdma init */
	for (i = 0; i < dd->num_pports; ++i) {
		ret = sdma_init(dd, i);
		if (ret)
			goto bail_cleanup;
	}

	/* use contexts created by hfi1_create_ctxts */
	ret = set_up_interrupts(dd);
	if (ret)
		goto bail_cleanup;

	/* set up LCB access - must be after set_up_interrupts() */
	init_lcb_access(dd);

	snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
		 dd->base_guid & 0xFFFFFF);

	dd->oui1 = dd->base_guid >> 56 & 0xFF;
	dd->oui2 = dd->base_guid >> 48 & 0xFF;
	dd->oui3 = dd->base_guid >> 40 & 0xFF;
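	/*
	 * Example (GUID value assumed for illustration): a base_guid of
	 * 0x0011750123456789 yields oui1 = 0x00, oui2 = 0x11 and
	 * oui3 = 0x75, i.e. the IEEE OUI held in the top three bytes of
	 * the GUID.
	 */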
	ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
	if (ret)
		goto bail_clear_intr;
	check_fabric_firmware_versions(dd);

	thermal_init(dd);

	ret = init_cntrs(dd);
	if (ret)
		goto bail_clear_intr;

	ret = init_rcverr(dd);
	if (ret)
		goto bail_free_cntrs;

	ret = eprom_init(dd);
	if (ret)
		goto bail_free_rcverr;

	goto bail;

bail_free_rcverr:
	free_rcverr(dd);
bail_free_cntrs:
	free_cntrs(dd);
bail_clear_intr:
	clean_up_interrupts(dd);
bail_cleanup:
	hfi1_pcie_ddcleanup(dd);
bail_free:
	hfi1_free_devdata(dd);
	dd = ERR_PTR(ret);
bail:
	return dd;
}
static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
			u32 dw_len)
{
	u32 delta_cycles;
	u32 current_egress_rate = ppd->current_egress_rate;
	/* rates here are in units of 10^6 bits/sec */

	if (desired_egress_rate == -1)
		return 0; /* shouldn't happen */

	if (desired_egress_rate >= current_egress_rate)
		return 0; /* we can't help go faster, only slower */

	delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
			egress_cycles(dw_len * 4, current_egress_rate);

	return (u16)delta_cycles;
}
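/*
 * Illustration (rates assumed): for a packet of dw_len dwords with a
 * current egress rate of 25000 Mbit/s and a desired static rate of
 * 12500 Mbit/s, the result is the number of additional fabric clock
 * cycles the packet must be held back so it leaves no faster than the
 * slower rate; egress_cycles() converts a byte count and a rate into
 * clock cycles.
 */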
/**
 * create_pbc - build a pbc for transmission
 * @flags: special case flags or-ed in built pbc
 * @srate: static rate
 * @vl: vl
 * @dwlen: dword length (header words + data words + pbc words)
 *
 * Create a PBC with the given flags, rate, VL, and length.
 *
 * NOTE: The PBC created will not insert any HCRC - all callers but one are
 * for verbs, which does not use this PSM feature. The lone other caller
 * is for the diagnostic interface which calls this if the user does not
 * supply their own PBC.
 */
u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
	       u32 dw_len)
{
	u64 pbc, delay = 0;

	if (unlikely(srate_mbs))
		delay = delay_cycles(ppd, srate_mbs, dw_len);

	pbc = flags
		| (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
		| ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
		| (vl & PBC_VL_MASK) << PBC_VL_SHIFT
		| (dw_len & PBC_LENGTH_DWS_MASK)
			<< PBC_LENGTH_DWS_SHIFT;

	return pbc;
}
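/*
 * Illustrative call only (caller context assumed, not from this file):
 * a send path with no static rate limiting would build the PBC for a
 * packet of plen dwords on virtual lane vl as:
 *
 *	pbc = create_pbc(ppd, 0, 0, vl, plen);
 *
 * With srate_mbs == 0 the delay field stays 0, leaving only the flags,
 * VL, and dword length, with HCRC insertion disabled (PBC_IHCRC_NONE).
 */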
#define SBUS_THERMAL	0x4f
#define SBUS_THERM_MONITOR_MODE 0x1

#define THERM_FAILURE(dev, ret, reason) \
	dd_dev_err((dd),						\
		   "Thermal sensor initialization failed: %s (%d)\n",	\
		   (reason), (ret))

/*
 * Initialize the Avago Thermal sensor.
 *
 * After initialization, enable polling of thermal sensor through
 * SBus interface. In order for this to work, the SBus Master
 * firmware has to be loaded due to the fact that the HW polling
 * logic uses SBus interrupts, which are not supported with
 * default firmware. Otherwise, no data will be returned through
 * the ASIC_STS_THERM CSR.
 */
static int thermal_init(struct hfi1_devdata *dd)
{
	int ret = 0;

	if (dd->icode != ICODE_RTL_SILICON ||
	    !(dd->flags & HFI1_DO_INIT_ASIC))
		return ret;

	acquire_hw_mutex(dd);
	dd_dev_info(dd, "Initializing thermal sensor\n");
	/* Disable polling of thermal readings */
	write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);

	/* Thermal Sensor Initialization */
	/* Step 1: Reset the Thermal SBus Receiver */
	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
				RESET_SBUS_RECEIVER, 0);
	if (ret) {
		THERM_FAILURE(dd, ret, "Bus Reset");
		goto done;
	}
	/* Step 2: Set Reset bit in Thermal block */
	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
				WRITE_SBUS_RECEIVER, 0x1);
	if (ret) {
		THERM_FAILURE(dd, ret, "Therm Block Reset");
		goto done;
	}
	/* Step 3: Write clock divider value (100MHz -> 2MHz):
	 * 0x32 = 50, and 100 MHz / 50 = 2 MHz */
	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
				WRITE_SBUS_RECEIVER, 0x32);
	if (ret) {
		THERM_FAILURE(dd, ret, "Write Clock Div");
		goto done;
	}
	/* Step 4: Select temperature mode */
	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
				WRITE_SBUS_RECEIVER,
				SBUS_THERM_MONITOR_MODE);
	if (ret) {
		THERM_FAILURE(dd, ret, "Write Mode Sel");
		goto done;
	}
	/* Step 5: De-assert block reset and start conversion */
	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
				WRITE_SBUS_RECEIVER, 0x2);
	if (ret) {
		THERM_FAILURE(dd, ret, "Write Reset Deassert");
		goto done;
	}
	/* Step 5.1: Wait for first conversion (21.5ms per spec) */
	msleep(22);

	/* Enable polling of thermal readings */
	write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
done:
	release_hw_mutex(dd);
	return ret;
}
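/*
 * For clarity: the HFI1_DO_INIT_ASIC guard above means only the device
 * that claimed the ASIC in asic_should_init() runs this once-per-ASIC
 * sequence; the peer device sharing the ASIC skips it.
 */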
static void handle_temp_err(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd = &dd->pport[0];
	/*
	 * Thermal Critical Interrupt
	 * Put the device into forced freeze mode, take link down to
	 * offline, and put DC into reset.
	 */
	dd_dev_emerg(dd,
		     "Critical temperature reached! Forcing device into freeze mode!\n");
	dd->flags |= HFI1_FORCED_FREEZE;
	start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);
	/*
	 * Shut DC down as much and as quickly as possible.
	 *
	 * Step 1: Take the link down to OFFLINE. This will cause the
	 *	   8051 to put the Serdes in reset. However, we don't want to
	 *	   go through the entire link state machine since we want to
	 *	   shutdown ASAP. Furthermore, this is not a graceful shutdown
	 *	   but rather an attempt to save the chip.
	 *	   Code below is almost the same as quiet_serdes() but avoids
	 *	   all the extra work and the sleeps.
	 */
	ppd->driver_link_ready = 0;
	ppd->link_enabled = 0;
	set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) |
				PLS_OFFLINE);
	/*
	 * Step 2: Shutdown LCB and 8051
	 *	   After shutdown, do not restore DC_CFG_RESET value.
	 */
	dc_shutdown(dd);
}